mirror of https://gitee.com/milvus-io/milvus.git
synced 2024-11-29 18:38:44 +08:00
test: Add expression template tests and use error msg only as assertion (#37618)
Some checks are pending
Code Checker / Code Checker AMD64 Ubuntu 22.04 (push) Waiting to run
Code Checker / Code Checker Amazonlinux 2023 (push) Waiting to run
Code Checker / Code Checker rockylinux8 (push) Waiting to run
Mac Code Checker / Code Checker MacOS 12 (push) Waiting to run
Build and test / Build and test AMD64 Ubuntu 22.04 (push) Waiting to run
Build and test / UT for Cpp (push) Blocked by required conditions
Build and test / UT for Go (push) Blocked by required conditions
Build and test / Integration Test (push) Blocked by required conditions
Build and test / Upload Code Coverage (push) Blocked by required conditions
related issue: https://github.com/milvus-io/milvus/issues/37451

Signed-off-by: yanliang567 <yanliang.qiao@zilliz.com>
This commit is contained in:
parent a654487995
commit af433ffd68
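The tests below exercise Milvus expression templates: a filter can be written either as a literal string or as a placeholder expression plus a parameter map, and the two forms are expected to match the same rows. A minimal illustration, using a pair copied from the diff below (the `expr`/`expr_params` keys follow the test helpers; the query call itself is omitted):

# One filter, two equivalent spellings (pair copied from the diff below):
literal = "int64 in [1, 2, 3] and float != 2"
template = {"expr": "int64 in {value_0} and float != {value_1}",
            "expr_params": {"value_0": [1, 2, 3], "value_1": 2}}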
@@ -121,9 +121,13 @@ class ResponseChecker:
         assert len(error_dict) > 0
         if isinstance(res, Error):
             error_code = error_dict[ct.err_code]
-            assert res.code == error_code or error_dict[ct.err_msg] in res.message, (
+            # assert res.code == error_code or error_dict[ct.err_msg] in res.message, (
+            #     f"Response of API {self.func_name} "
+            #     f"expect get error code {error_dict[ct.err_code]} or error message {error_dict[ct.err_code]}, "
+            #     f"but got {res.code} {res.message}")
+            assert error_dict[ct.err_msg] in res.message, (
                 f"Response of API {self.func_name} "
-                f"expect get error code {error_dict[ct.err_code]} or error message {error_dict[ct.err_code]}, "
+                f"expect get error message {error_dict[ct.err_code]}, "
                 f"but got {res.code} {res.message}")

         else:
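The effect of this change: a test still declares both an expected code and an expected message, but only the message substring is asserted, so renumbered server error codes no longer break the suite. A minimal standalone sketch of the relaxed contract (the stand-in class and literal dict keys are illustrative, not suite code):

# Stand-in objects, for illustration only -- not part of the test suite.
class FakeError:
    code = 1100  # server codes may change between releases
    message = "the length of a collection name must be less than 255 characters: invalid parameter"

error_dict = {"err_code": 999, "err_msg": "must be less than 255 characters"}
res = FakeError()
assert error_dict["err_msg"] in res.message  # passes even though the codes differ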
@@ -34,9 +34,10 @@ class IndexErrorMessage(ExceptionsMessage):
     WrongFieldName = "cannot create index on non-vector field: %s"
     DropLoadedIndex = "index cannot be dropped, collection is loaded, please release it first"
     CheckVectorIndex = "data type {0} can't build with this index {1}"
-    SparseFloatVectorMetricType = "only IP is the supported metric type for sparse index"
+    SparseFloatVectorMetricType = "only IP&BM25 is the supported metric type for sparse index"
     VectorMetricTypeExist = "metric type not set for vector index"
-    CheckBitmapIndex = "bitmap index are only supported on bool, int, string and array field"
+    # please update the msg below as #37543 fixed
+    CheckBitmapIndex = "bitmap index are only supported on bool, int, string"
     CheckBitmapOnPK = "create bitmap index on primary key not supported"
     CheckBitmapCardinality = "failed to check bitmap cardinality limit, should be larger than 0 and smaller than 1000"
     NotConfigable = "{0} is not configable index param"
@@ -2227,7 +2227,7 @@ def gen_invalid_search_params_type():
         if index_type == "FLAT":
             continue
         # search_params.append({"index_type": index_type, "search_params": {"invalid_key": invalid_search_key}})
-        if index_type in ["IVF_FLAT", "IVF_SQ8", "IVF_PQ"]:
+        if index_type in ["IVF_FLAT", "IVF_SQ8", "IVF_PQ", "BIN_FLAT", "BIN_IVF_FLAT"]:
             for nprobe in ct.get_invalid_ints:
                 ivf_search_params = {"index_type": index_type, "search_params": {"nprobe": nprobe}}
                 search_params.append(ivf_search_params)
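These generated dicts drive negative search tests. As a hypothetical standalone sketch (collection name, dimension, and data are made up here), an out-of-range `nprobe` would surface through the classic pymilvus ORM like this:

from pymilvus import Collection, MilvusException

collection = Collection("demo_ivf_collection")  # assumed to exist, be indexed, and be loaded
bad_param = {"metric_type": "L2", "params": {"nprobe": -1}}  # invalid: nprobe must be positive
try:
    collection.search(data=[[0.1] * 128], anns_field="vector", param=bad_param, limit=10)
except MilvusException as e:
    print(e.code, e.message)  # the checker above asserts on a message substring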
@@ -2307,35 +2307,6 @@ def gen_autoindex_search_params():
     return search_params


-def gen_invalid_search_param(index_type, metric_type="L2"):
-    search_params = []
-    if index_type in ["FLAT", "IVF_FLAT", "IVF_SQ8", "IVF_PQ"] \
-            or index_type in ["BIN_FLAT", "BIN_IVF_FLAT"]:
-        for nprobe in [-1]:
-            ivf_search_params = {"metric_type": metric_type, "params": {"nprobe": nprobe}}
-            search_params.append(ivf_search_params)
-    elif index_type in ["HNSW"]:
-        for ef in [-1]:
-            hnsw_search_param = {"metric_type": metric_type, "params": {"ef": ef}}
-            search_params.append(hnsw_search_param)
-    elif index_type == "ANNOY":
-        for search_k in ["-2"]:
-            annoy_search_param = {"metric_type": metric_type, "params": {"search_k": search_k}}
-            search_params.append(annoy_search_param)
-    elif index_type == "DISKANN":
-        for search_list in ["-1"]:
-            diskann_search_param = {"metric_type": metric_type, "params": {"search_list": search_list}}
-            search_params.append(diskann_search_param)
-    elif index_type == "SCANN":
-        for reorder_k in [-1]:
-            scann_search_param = {"metric_type": metric_type, "params": {"reorder_k": reorder_k, "nprobe": 10}}
-            search_params.append(scann_search_param)
-    else:
-        log.error("Invalid index_type.")
-        raise Exception("Invalid index_type.")
-    return search_params


 def gen_all_type_fields():
     fields = []
     for k, v in DataType.__members__.items():
@@ -2345,49 +2316,98 @@ def gen_all_type_fields():
     return fields


-def gen_normal_expressions():
+def gen_normal_expressions_and_templates():
+    """
+    Gen a list of filter in expression-format(as a string) and template-format(as a dict)
+    The two formats equals to each other.
+    """
     expressions = [
-        "",
-        "int64 > 0",
-        "(int64 > 0 && int64 < 400) or (int64 > 500 && int64 < 1000)",
-        "int64 not in [1, 2, 3]",
-        "int64 in [1, 2, 3] and float != 2",
-        "int64 == 0 || float == 10**2 || (int64 + 1) == 3",
-        "0 <= int64 < 400 and int64 % 100 == 0",
-        "200+300 < int64 <= 500+500",
-        "int64 > 400 && int64 < 200",
-        "int64 in [300/2, 900%40, -10*30+800, (100+200)*2] or float in [+3**6, 2**10/2]",
-        "float <= -4**5/2 && float > 500-1 && float != 500/2+260"
+        ["", {"expr": "", "expr_params": {}}],
+        ["int64 > 0", {"expr": "int64 > {value_0}", "expr_params": {"value_0": 0}}],
+        ["(int64 > 0 && int64 < 400) or (int64 > 500 && int64 < 1000)",
+         {"expr": "(int64 > {value_0} && int64 < {value_1}) or (int64 > {value_2} && int64 < {value_3})",
+          "expr_params": {"value_0": 0, "value_1": 400, "value_2": 500, "value_3": 1000}}],
+        ["int64 not in [1, 2, 3]", {"expr": "int64 not in {value_0}", "expr_params": {"value_0": [1, 2, 3]}}],
+        ["int64 in [1, 2, 3] and float != 2", {"expr": "int64 in {value_0} and float != {value_1}",
+                                               "expr_params": {"value_0": [1, 2, 3], "value_1": 2}}],
+        ["int64 == 0 || float == 10**2 || (int64 + 1) == 3",
+         {"expr": "int64 == {value_0} || float == {value_1} || (int64 + {value_2}) == {value_3}",
+          "expr_params": {"value_0": 0, "value_1": 10**2, "value_2": 1, "value_3": 3}}],
+        ["0 <= int64 < 400 and int64 % 100 == 0",
+         {"expr": "{value_0} <= int64 < {value_1} and int64 % {value_2} == {value_0}",
+          "expr_params": {"value_0": 0, "value_1": 400, "value_2": 100}}],
+        ["200+300 < int64 <= 500+500", {"expr": "{value_0} < int64 <= {value_1}",
+                                        "expr_params": {"value_1": 500+500, "value_0": 200+300}}],
+        ["int64 > 400 && int64 < 200", {"expr": "int64 > {value_0} && int64 < {value_1}",
+                                        "expr_params": {"value_0": 400, "value_1": 200}}],
+        ["int64 in [300/2, 900%40, -10*30+800, (100+200)*2] or float in [+3**6, 2**10/2]",
+         {"expr": "int64 in {value_0} or float in {value_1}",
+          "expr_params": {"value_0": [int(300/2), 900%40, -10*30+800, (100+200)*2], "value_1": [+3**6*1.0, 2**10/2*1.0]}}],
+        ["float <= -4**5/2 && float > 500-1 && float != 500/2+260",
+         {"expr": "float <= {value_0} && float > {value_1} && float != {value_2}",
+          "expr_params": {"value_0": -4**5/2, "value_1": 500-1, "value_2": 500/2+260}}],
     ]
     return expressions


-def gen_json_field_expressions():
+def gen_json_field_expressions_and_templates():
+    """
+    Gen a list of filter in expression-format(as a string) and template-format(as a dict)
+    The two formats equals to each other.
+    """
     expressions = [
-        "json_field['number'] > 0",
-        "0 <= json_field['number'] < 400 or 1000 > json_field['number'] >= 500",
-        "json_field['number'] not in [1, 2, 3]",
-        "json_field['number'] in [1, 2, 3] and json_field['float'] != 2",
-        "json_field['number'] == 0 || json_field['float'] == 10**2 || json_field['number'] + 1 == 3",
-        "json_field['number'] < 400 and json_field['number'] >= 100 and json_field['number'] % 100 == 0",
-        "json_field['float'] > 400 && json_field['float'] < 200",
-        "json_field['number'] in [300/2, -10*30+800, (100+200)*2] or json_field['float'] in [+3**6, 2**10/2]",
-        "json_field['float'] <= -4**5/2 && json_field['float'] > 500-1 && json_field['float'] != 500/2+260"
+        ["json_field['number'] > 0", {"expr": "json_field['number'] > {value_0}", "expr_params": {"value_0": 0}}],
+        ["0 <= json_field['number'] < 400 or 1000 > json_field['number'] >= 500",
+         {"expr": "{value_0} <= json_field['number'] < {value_1} or {value_2} > json_field['number'] >= {value_3}",
+          "expr_params": {"value_0": 0, "value_1": 400, "value_2": 1000, "value_3": 500}}],
+        ["json_field['number'] not in [1, 2, 3]", {"expr": "json_field['number'] not in {value_0}",
+                                                   "expr_params": {"value_0": [1, 2, 3]}}],
+        ["json_field['number'] in [1, 2, 3] and json_field['float'] != 2",
+         {"expr": "json_field['number'] in {value_0} and json_field['float'] != {value_1}",
+          "expr_params": {"value_0": [1, 2, 3], "value_1": 2}}],
+        ["json_field['number'] == 0 || json_field['float'] == 10**2 || json_field['number'] + 1 == 3",
+         {"expr": "json_field['number'] == {value_0} || json_field['float'] == {value_1} || json_field['number'] + {value_2} == {value_3}",
+          "expr_params": {"value_0": 0, "value_1": 10**2, "value_2": 1, "value_3": 3}}],
+        ["json_field['number'] < 400 and json_field['number'] >= 100 and json_field['number'] % 100 == 0",
+         {"expr": "json_field['number'] < {value_0} and json_field['number'] >= {value_1} and json_field['number'] % {value_1} == 0",
+          "expr_params": {"value_0": 400, "value_1": 100}}],
+        ["json_field['float'] > 400 && json_field['float'] < 200", {"expr": "json_field['float'] > {value_0} && json_field['float'] < {value_1}",
+                                                                    "expr_params": {"value_0": 400, "value_1": 200}}],
+        ["json_field['number'] in [300/2, -10*30+800, (100+200)*2] or json_field['float'] in [+3**6, 2**10/2]",
+         {"expr": "json_field['number'] in {value_0} or json_field['float'] in {value_1}",
+          "expr_params": {"value_0": [int(300/2), -10*30+800, (100+200)*2], "value_1": [+3**6*1.0, 2**10/2*1.0]}}],
+        ["json_field['float'] <= -4**5/2 && json_field['float'] > 500-1 && json_field['float'] != 500/2+260",
+         {"expr": "json_field['float'] <= {value_0} && json_field['float'] > {value_1} && json_field['float'] != {value_2}",
+          "expr_params": {"value_0": -4**5/2, "value_1": 500-1, "value_2": 500/2+260}}],
    ]
    return expressions


-def gen_array_field_expressions():
+def gen_array_field_expressions_and_templates():
+    """
+    Gen a list of filter in expression-format(as a string) and template-format(as a dict) for a field.
+    The two formats equals to each other.
+    """
     expressions = [
-        "int32_array[0] > 0",
-        "0 <= int32_array[0] < 400 or 1000 > float_array[1] >= 500",
-        "int32_array[1] not in [1, 2, 3]",
-        "int32_array[1] in [1, 2, 3] and string_array[1] != '2'",
-        "int32_array == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]",
-        "int32_array[1] + 1 == 3 && int32_array[0] - 1 != 1",
-        "int32_array[1] % 100 == 0 && string_array[1] in ['1', '2']",
-        "int32_array[1] in [300/2, -10*30+800, (200-100)*2] "
-        "or (float_array[1] <= -4**5/2 || 100 <= int32_array[1] < 200)"
+        ["int32_array[0] > 0", {"expr": "int32_array[0] > {value_0}", "expr_params": {"value_0": 0}}],
+        ["0 <= int32_array[0] < 400 or 1000 > float_array[1] >= 500",
+         {"expr": "{value_0} <= int32_array[0] < {value_1} or {value_2} > float_array[1] >= {value_3}",
+          "expr_params": {"value_0": 0, "value_1": 400, "value_2": 1000, "value_3": 500}}],
+        ["int32_array[1] not in [1, 2, 3]", {"expr": "int32_array[1] not in {value_0}", "expr_params": {"value_0": [1, 2, 3]}}],
+        ["int32_array[1] in [1, 2, 3] and string_array[1] != '2'",
+         {"expr": "int32_array[1] in {value_0} and string_array[1] != {value_2}",
+          "expr_params": {"value_0": [1, 2, 3], "value_2": "2"}}],
+        ["int32_array == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]", {"expr": "int32_array == {value_0}",
+                                                            "expr_params": {"value_0": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}}],
+        ["int32_array[1] + 1 == 3 && int32_array[0] - 1 != 1",
+         {"expr": "int32_array[1] + {value_0} == {value_2} && int32_array[0] - {value_0} != {value_0}",
+          "expr_params": {"value_0": 1, "value_2": 3}}],
+        ["int32_array[1] % 100 == 0 && string_array[1] in ['1', '2']",
+         {"expr": "int32_array[1] % {value_0} == {value_1} && string_array[1] in {value_2}",
+          "expr_params": {"value_0": 100, "value_1": 0, "value_2": ["1", "2"]}}],
+        ["int32_array[1] in [300/2, -10*30+800, (200-100)*2] or (float_array[1] <= -4**5/2 || 100 <= int32_array[1] < 200)",
+         {"expr": "int32_array[1] in {value_0} or (float_array[1] <= {value_1} || {value_2} <= int32_array[1] < {value_3})",
+          "expr_params": {"value_0": [int(300/2), -10*30+800, (200-100)*2], "value_1": -4**5/2, "value_2": 100, "value_3": 200}}]
     ]
     return expressions

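Each entry above pairs a literal filter with a template that should select the same rows. For scalar parameters the equivalence can even be checked locally with `str.format`, as in this small sketch (list-valued parameters need a proper literal dump, so they are skipped here):

# Sanity-check sketch: rendering a template with its params reproduces the literal filter.
expr, template = ["int64 > 0", {"expr": "int64 > {value_0}", "expr_params": {"value_0": 0}}]
rendered = template["expr"].format(**template["expr_params"])
assert rendered == expr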
@@ -2437,37 +2457,42 @@ def gen_invalid_string_expressions():
     return expressions


-def gen_invalid_bool_expressions():
-    expressions = [
-        "bool",
-        "!bool",
-        "true",
-        "false",
-        "int64 > 0 and bool",
-        "int64 > 0 or false"
+def gen_normal_expressions_and_templates_field(field):
+    """
+    Gen a list of filter in expression-format(as a string) and template-format(as a dict) for a field.
+    The two formats equals to each other.
+    """
+    expressions_and_templates = [
+        ["", {"expr": "", "expr_params": {}}],
+        [f"{field} > 0", {"expr": f"{field} > {{value_0}}", "expr_params": {"value_0": 0}}],
+        [f"({field} > 0 && {field} < 400) or ({field} > 500 && {field} < 1000)",
+         {"expr": f"({field} > {{value_0}} && {field} < {{value_1}}) or ({field} > {{value_2}} && {field} < {{value_3}})",
+          "expr_params": {"value_0": 0, "value_1": 400, "value_2": 500, "value_3": 1000}}],
+        [f"{field} not in [1, 2, 3]", {"expr": f"{field} not in {{value_0}}", "expr_params": {"value_0": [1, 2, 3]}}],
+        [f"{field} in [1, 2, 3] and {field} != 2", {"expr": f"{field} in {{value_0}} and {field} != {{value_1}}", "expr_params": {"value_0": [1, 2, 3], "value_1": 2}}],
+        [f"{field} == 0 || {field} == 1 || {field} == 2", {"expr": f"{field} == {{value_0}} || {field} == {{value_1}} || {field} == {{value_2}}",
+                                                           "expr_params": {"value_0": 0, "value_1": 1, "value_2": 2}}],
+        [f"0 < {field} < 400", {"expr": f"{{value_0}} < {field} < {{value_1}}", "expr_params": {"value_0": 0, "value_1": 400}}],
+        [f"500 <= {field} <= 1000", {"expr": f"{{value_0}} <= {field} <= {{value_1}}", "expr_params": {"value_0": 500, "value_1": 1000}}],
+        [f"200+300 <= {field} <= 500+500", {"expr": f"{{value_0}} <= {field} <= {{value_1}}", "expr_params": {"value_0": 200+300, "value_1": 500+500}}],
+        [f"{field} in [300/2, 900%40, -10*30+800, 2048/2%200, (100+200)*2]", {"expr": f"{field} in {{value_0}}", "expr_params": {"value_0": [300*1.0/2, 900*1.0%40, -10*30*1.0+800, 2048*1.0/2%200, (100+200)*1.0*2]}}],
+        [f"{field} in [+3**6, 2**10/2]", {"expr": f"{field} in {{value_0}}", "expr_params": {"value_0": [+3**6*1.0, 2**10*1.0/2]}}],
+        [f"{field} <= 4**5/2 && {field} > 500-1 && {field} != 500/2+260", {"expr": f"{field} <= {{value_0}} && {field} > {{value_1}} && {field} != {{value_2}}",
+                                                                           "expr_params": {"value_0": 4**5/2, "value_1": 500-1, "value_2": 500/2+260}}],
+        [f"{field} > 400 && {field} < 200", {"expr": f"{field} > {{value_0}} && {field} < {{value_1}}", "expr_params": {"value_0": 400, "value_1": 200}}],
+        [f"{field} < -2**8", {"expr": f"{field} < {{value_0}}", "expr_params": {"value_0": -2**8}}],
+        [f"({field} + 1) == 3 || {field} * 2 == 64 || {field} == 10**2", {"expr": f"({field} + {{value_0}}) == {{value_1}} || {field} * {{value_2}} == {{value_3}} || {field} == {{value_4}}",
+                                                                          "expr_params": {"value_0": 1, "value_1": 3, "value_2": 2, "value_3": 64, "value_4": 10**2}}]
     ]
-    return expressions
+    return expressions_and_templates


-def gen_normal_expressions_field(field):
-    expressions = [
-        "",
-        f"{field} > 0",
-        f"({field} > 0 && {field} < 400) or ({field} > 500 && {field} < 1000)",
-        f"{field} not in [1, 2, 3]",
-        f"{field} in [1, 2, 3] and {field} != 2",
-        f"{field} == 0 || {field} == 1 || {field} == 2",
-        f"0 < {field} < 400",
-        f"500 <= {field} <= 1000",
-        f"200+300 <= {field} <= 500+500",
-        f"{field} in [300/2, 900%40, -10*30+800, 2048/2%200, (100+200)*2]",
-        f"{field} in [+3**6, 2**10/2]",
-        f"{field} <= 4**5/2 && {field} > 500-1 && {field} != 500/2+260",
-        f"{field} > 400 && {field} < 200",
-        f"{field} < -2**8",
-        f"({field} + 1) == 3 || {field} * 2 == 64 || {field} == 10**2"
-    ]
-    return expressions
+def get_expr_from_template(template={}):
+    return template.get("expr", None)


+def get_expr_params_from_template(template={}):
+    return template.get("expr_params", None)


 def gen_integer_overflow_expressions():
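A hypothetical sketch of how a test consumes these pairs together with the new accessors (the import path mirrors the suite's usual `from common import common_func as cf`; running it requires the test package on the path):

from common import common_func as cf  # assumed test-suite import path

expr, template = cf.gen_normal_expressions_and_templates_field("int64")[1]
expr_str = cf.get_expr_from_template(template)            # "int64 > {value_0}"
expr_params = cf.get_expr_params_from_template(template)  # {"value_0": 0}
# A test can then run the same filter twice -- once as the literal `expr`,
# once as the template plus its params -- and assert both return the same rows.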
@@ -207,15 +207,6 @@ get_dict_without_host_port = [
     {"": ""}
 ]

-get_dict_invalid_host_port = [
-    {"port": "port"},
-    # ["host", "port"],
-    # ("host", "port"),
-    {"host": -1},
-    {"port": ["192.168.1.1"]},
-    {"port": "-1", "host": "hostlocal"},
-]
-
 get_wrong_format_dict = [
     {"host": "string_host", "port": {}},
     {"host": 0, "port": 19520}
@@ -92,9 +92,7 @@ class TestMilvusClientAliasInvalid(TestcaseBase):
         alias = cf.gen_unique_str("collection_alias")
         collection_name = "a".join("a" for i in range(256))
         # 2. create alias
-        error = {ct.err_code: 1100, ct.err_msg: f"invalid dimension: {collection_name}. "
-                                                f"the length of a collection name must be less than 255 characters: "
-                                                f"invalid parameter"}
+        error = {ct.err_code: 1100, ct.err_msg: f"the length of a collection name must be less than 255 characters"}
         client_w.create_alias(client, collection_name, alias,
                               check_task=CheckTasks.err_res, check_items=error)

@@ -109,8 +107,7 @@ class TestMilvusClientAliasInvalid(TestcaseBase):
         client = self._connect(enable_milvus_client_api=True)
         alias = cf.gen_unique_str("collection_alias")
         collection_name = "not_exist_collection_alias"
-        error = {ct.err_code: 100, ct.err_msg: f"can't find collection collection not "
-                                               f"found[database=default][collection={collection_name}]"}
+        error = {ct.err_code: 100, ct.err_msg: f"collection not found[database=default][collection={collection_name}]"}
         client_w.create_alias(client, collection_name, alias,
                               check_task=CheckTasks.err_res, check_items=error)

@@ -128,8 +125,9 @@ class TestMilvusClientAliasInvalid(TestcaseBase):
         # 1. create collection
         client_w.create_collection(client, collection_name, default_dim, consistency_level="Strong")
         # 2. create alias
-        error = {ct.err_code: 1100, ct.err_msg: f"Invalid collection name: {collection_name}. the first character of a "
-                                                f"collection name must be an underscore or letter: invalid parameter"}
+        error = {ct.err_code: 1100,
+                 ct.err_msg: f"Invalid collection alias: {alias}. "
+                             f"the first character of a collection alias must be an underscore or letter"}
         client_w.create_alias(client, collection_name, alias,
                               check_task=CheckTasks.err_res, check_items=error)
         client_w.drop_collection(client, collection_name)
@@ -148,9 +146,7 @@ class TestMilvusClientAliasInvalid(TestcaseBase):
         # 1. create collection
         client_w.create_collection(client, collection_name, default_dim, consistency_level="Strong")
         # 2. create alias
-        error = {ct.err_code: 1100, ct.err_msg: f"invalid dimension: {collection_name}. "
-                                                f"the length of a collection name must be less than 255 characters: "
-                                                f"invalid parameter"}
+        error = {ct.err_code: 1100, ct.err_msg: f"the length of a collection alias must be less than 255 characters"}
         client_w.create_alias(client, collection_name, alias,
                               check_task=CheckTasks.err_res, check_items=error)
         client_w.drop_collection(client, collection_name)
@@ -220,8 +216,8 @@ class TestMilvusClientAliasInvalid(TestcaseBase):
         expected: create alias successfully
         """
         client = self._connect(enable_milvus_client_api=True)
-        error = {ct.err_code: 1100, ct.err_msg: f"Invalid collection name: {alias_name}. the first character of a "
-                                                f"collection name must be an underscore or letter: invalid parameter"}
+        error = {ct.err_code: 1100, ct.err_msg: f"Invalid collection alias: {alias_name}. the first character of a "
+                                                f"collection alias must be an underscore or letter"}
         client_w.drop_alias(client, alias_name,
                             check_task=CheckTasks.err_res, check_items=error)

@@ -235,9 +231,7 @@ class TestMilvusClientAliasInvalid(TestcaseBase):
         """
         client = self._connect(enable_milvus_client_api=True)
         alias = "a".join("a" for i in range(256))
-        error = {ct.err_code: 1100, ct.err_msg: f"invalid dimension: {alias}. "
-                                                f"the length of a collection name must be less than 255 characters: "
-                                                f"invalid parameter"}
+        error = {ct.err_code: 1100, ct.err_msg: f"the length of a collection alias must be less than 255 characters"}
         client_w.drop_alias(client, alias,
                             check_task=CheckTasks.err_res, check_items=error)

@@ -269,9 +263,7 @@ class TestMilvusClientAliasInvalid(TestcaseBase):
         alias = cf.gen_unique_str("collection_alias")
         collection_name = "a".join("a" for i in range(256))
         # 2. create alias
-        error = {ct.err_code: 1100, ct.err_msg: f"invalid dimension: {collection_name}. "
-                                                f"the length of a collection name must be less than 255 characters: "
-                                                f"invalid parameter"}
+        error = {ct.err_code: 1100, ct.err_msg: f"the length of a collection name must be less than 255 characters"}
         client_w.alter_alias(client, collection_name, alias,
                              check_task=CheckTasks.err_res, check_items=error)

@@ -287,8 +279,7 @@ class TestMilvusClientAliasInvalid(TestcaseBase):
         alias = cf.gen_unique_str("collection_alias")
         collection_name = cf.gen_unique_str("not_exist_collection_alias")
         # 2. create alias
-        error = {ct.err_code: 100, ct.err_msg: f"can't find collection collection not "
-                                               f"found[database=default][collection={collection_name}]"}
+        error = {ct.err_code: 100, ct.err_msg: f"collection not found[collection={collection_name}]"}
         client_w.alter_alias(client, collection_name, alias,
                              check_task=CheckTasks.err_res, check_items=error)
         client_w.drop_collection(client, collection_name)
@@ -307,10 +298,10 @@ class TestMilvusClientAliasInvalid(TestcaseBase):
         # 1. create collection
         client_w.create_collection(client, collection_name, default_dim, consistency_level="Strong")
         # 2. create alias
-        error = {ct.err_code: 1100, ct.err_msg: f"Invalid collection name: {collection_name}. the first character of a "
-                                                f"collection name must be an underscore or letter: invalid parameter"}
+        error = {ct.err_code: 1100, ct.err_msg: f"Invalid collection alias: {alias}. the first character of a "
+                                                f"collection alias must be an underscore or letter"}
         client_w.alter_alias(client, collection_name, alias,
-                             check_task=CheckTasks.err_res, check_items=error)
+                             check_task=CheckTasks.err_res, check_items=error)
         client_w.drop_collection(client, collection_name)

     @pytest.mark.tags(CaseLabel.L1)
@@ -327,11 +318,9 @@ class TestMilvusClientAliasInvalid(TestcaseBase):
         # 1. create collection
         client_w.create_collection(client, collection_name, default_dim, consistency_level="Strong")
         # 2. create alias
-        error = {ct.err_code: 1100, ct.err_msg: f"invalid dimension: {collection_name}. "
-                                                f"the length of a collection name must be less than 255 characters: "
-                                                f"invalid parameter"}
+        error = {ct.err_code: 1100, ct.err_msg: f"the length of a collection alias must be less than 255 characters"}
         client_w.alter_alias(client, collection_name, alias,
-                             check_task=CheckTasks.err_res, check_items=error)
+                             check_task=CheckTasks.err_res, check_items=error)
         client_w.drop_collection(client, collection_name)

     @pytest.mark.tags(CaseLabel.L1)
@@ -362,15 +351,15 @@ class TestMilvusClientAliasInvalid(TestcaseBase):
         expected: alter alias successfully
         """
         client = self._connect(enable_milvus_client_api=True)
-        collection_name = cf.gen_unique_str(prefix)
-        alias = cf.gen_unique_str("collection_alias")
-        another_alias = cf.gen_unique_str("collection_alias_another")
+        collection_name = cf.gen_unique_str("coll")
+        alias = cf.gen_unique_str("alias")
+        another_alias = cf.gen_unique_str("another_alias")
         # 1. create collection
         client_w.create_collection(client, collection_name, default_dim, consistency_level="Strong")
         # 2. create alias
         client_w.create_alias(client, collection_name, alias)
         # 3. alter alias
-        error = {ct.err_code: 1600, ct.err_msg: f"alias not found[database=default][alias={collection_name}]"}
+        error = {ct.err_code: 1600, ct.err_msg: f"alias not found[database=default][alias={another_alias}]"}
         client_w.alter_alias(client, collection_name, another_alias,
                              check_task=CheckTasks.err_res, check_items=error)
         client_w.drop_collection(client, collection_name)
@@ -89,9 +89,7 @@ class TestMilvusClientCollectionInvalid(TestcaseBase):
         client = self._connect(enable_milvus_client_api=True)
         # 1. create collection
         collection_name = "a".join("a" for i in range(256))
-        error = {ct.err_code: 1100, ct.err_msg: f"invalid dimension: {collection_name}. "
-                                                f"the length of a collection name must be less than 255 characters: "
-                                                f"invalid parameter"}
+        error = {ct.err_code: 1100, ct.err_msg: f"the length of a collection name must be less than 255 characters"}
         client_w.create_collection(client, collection_name, default_dim,
                                    check_task=CheckTasks.err_res, check_items=error)

@@ -120,7 +118,11 @@ class TestMilvusClientCollectionInvalid(TestcaseBase):
         client = self._connect(enable_milvus_client_api=True)
         collection_name = cf.gen_unique_str(prefix)
         # 1. create collection
-        error = {ct.err_code: 65535, ct.err_msg: f"invalid dimension: {dim}. should be in range 2 ~ 32768"}
+        error = {ct.err_code: 65535, ct.err_msg: f"invalid dimension: {dim}. "
+                                                 f"float vector dimension should be in range 2 ~ 32768"}
+        if dim < ct.min_dim:
+            error = {ct.err_code: 65535, ct.err_msg: f"invalid dimension: {dim}. "
+                                                     f"should be in range 2 ~ 32768"}
         client_w.create_collection(client, collection_name, dim,
                                    check_task=CheckTasks.err_res, check_items=error)
         client_w.drop_collection(client, collection_name)
@@ -625,13 +627,11 @@ class TestMilvusClientCollectionValid(TestcaseBase):


 class TestMilvusClientDropCollectionInvalid(TestcaseBase):
     """ Test case of drop collection interface """
-
     """
     ******************************************************************
     # The following are invalid base cases
     ******************************************************************
     """
-
     @pytest.mark.tags(CaseLabel.L1)
     @pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#"])
     def test_milvus_client_drop_collection_invalid_collection_name(self, name):
@@ -641,8 +641,8 @@ class TestMilvusClientDropCollectionInvalid(TestcaseBase):
         expected: create collection with default schema, index, and load successfully
         """
         client = self._connect(enable_milvus_client_api=True)
-        error = {ct.err_code: 1100, ct.err_msg: f"Invalid collection name: {name}. collection name can only "
-                                                f"contain numbers, letters and underscores: invalid parameter"}
+        error = {ct.err_code: 1100, ct.err_msg: f"Invalid collection name: {name}. "
+                                                f"the first character of a collection name must be an underscore or letter"}
         client_w.drop_collection(client, name,
                                  check_task=CheckTasks.err_res, check_items=error)

@@ -660,13 +660,11 @@ class TestMilvusClientDropCollectionInvalid(TestcaseBase):


 class TestMilvusClientReleaseCollectionInvalid(TestcaseBase):
     """ Test case of release collection interface """
-
     """
     ******************************************************************
     # The following are invalid base cases
     ******************************************************************
     """
-
     @pytest.mark.tags(CaseLabel.L1)
     @pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#"])
     def test_milvus_client_release_collection_invalid_collection_name(self, name):
@@ -676,8 +674,9 @@ class TestMilvusClientReleaseCollectionInvalid(TestcaseBase):
         expected: create collection with default schema, index, and load successfully
         """
         client = self._connect(enable_milvus_client_api=True)
-        error = {ct.err_code: 1100, ct.err_msg: f"Invalid collection name: {name}. collection name can only "
-                                                f"contain numbers, letters and underscores: invalid parameter"}
+        error = {ct.err_code: 1100,
+                 ct.err_msg: f"Invalid collection name: {name}. "
+                             f"the first character of a collection name must be an underscore or letter"}
         client_w.release_collection(client, name,
                                     check_task=CheckTasks.err_res, check_items=error)

@@ -705,9 +704,7 @@ class TestMilvusClientReleaseCollectionInvalid(TestcaseBase):
         client = self._connect(enable_milvus_client_api=True)
         # 1. create collection
         collection_name = "a".join("a" for i in range(256))
-        error = {ct.err_code: 1100, ct.err_msg: f"invalid dimension: {collection_name}. "
-                                                f"the length of a collection name must be less than 255 characters: "
-                                                f"invalid parameter"}
+        error = {ct.err_code: 1100, ct.err_msg: f"the length of a collection name must be less than 255 characters"}
         client_w.release_collection(client, collection_name, default_dim,
                                     check_task=CheckTasks.err_res, check_items=error)

@@ -773,13 +770,11 @@ class TestMilvusClientReleaseCollectionValid(TestcaseBase):


 class TestMilvusClientLoadCollectionInvalid(TestcaseBase):
     """ Test case of search interface """
-
     """
     ******************************************************************
     # The following are invalid base cases
     ******************************************************************
     """
-
     @pytest.mark.tags(CaseLabel.L1)
     @pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#"])
     def test_milvus_client_load_collection_invalid_collection_name(self, name):
@@ -789,8 +784,9 @@ class TestMilvusClientLoadCollectionInvalid(TestcaseBase):
         expected: create collection with default schema, index, and load successfully
         """
         client = self._connect(enable_milvus_client_api=True)
-        error = {ct.err_code: 1100, ct.err_msg: f"Invalid collection name: {name}. collection name can only "
-                                                f"contain numbers, letters and underscores: invalid parameter"}
+        error = {ct.err_code: 1100,
+                 ct.err_msg: f"Invalid collection name: {name}. "
+                             f"the first character of a collection name must be an underscore or letter"}
         client_w.load_collection(client, name,
                                  check_task=CheckTasks.err_res, check_items=error)

@@ -904,13 +900,11 @@ class TestMilvusClientLoadCollectionValid(TestcaseBase):


 class TestMilvusClientDescribeCollectionInvalid(TestcaseBase):
     """ Test case of search interface """
-
     """
     ******************************************************************
     # The following are invalid base cases
     ******************************************************************
     """
-
     @pytest.mark.tags(CaseLabel.L1)
     @pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#"])
     def test_milvus_client_describe_collection_invalid_collection_name(self, name):
@@ -920,8 +914,9 @@ class TestMilvusClientDescribeCollectionInvalid(TestcaseBase):
         expected: create collection with default schema, index, and load successfully
         """
         client = self._connect(enable_milvus_client_api=True)
-        error = {ct.err_code: 1100, ct.err_msg: f"Invalid collection name: {name}. collection name can only "
-                                                f"contain numbers, letters and underscores: invalid parameter"}
+        error = {ct.err_code: 1100,
+                 ct.err_msg: f"Invalid collection name: {name}. "
+                             f"the first character of a collection name must be an underscore or letter"}
         client_w.describe_collection(client, name,
                                      check_task=CheckTasks.err_res, check_items=error)

@@ -959,13 +954,11 @@ class TestMilvusClientDescribeCollectionInvalid(TestcaseBase):


 class TestMilvusClientHasCollectionInvalid(TestcaseBase):
     """ Test case of search interface """
-
     """
     ******************************************************************
     # The following are invalid base cases
     ******************************************************************
     """
-
     @pytest.mark.tags(CaseLabel.L1)
     @pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#"])
     def test_milvus_client_has_collection_invalid_collection_name(self, name):
@@ -975,8 +968,9 @@ class TestMilvusClientHasCollectionInvalid(TestcaseBase):
         expected: create collection with default schema, index, and load successfully
         """
         client = self._connect(enable_milvus_client_api=True)
-        error = {ct.err_code: 1100, ct.err_msg: f"Invalid collection name: {name}. collection name can only "
-                                                f"contain numbers, letters and underscores: invalid parameter"}
+        error = {ct.err_code: 1100,
+                 ct.err_msg: f"Invalid collection name: {name}. "
+                             f"the first character of a collection name must be an underscore or letter"}
         client_w.has_collection(client, name,
                                 check_task=CheckTasks.err_res, check_items=error)

@@ -128,10 +128,9 @@ class TestMilvusClientIndexInvalid(TestcaseBase):
         client_w.drop_index(client, collection_name, "vector")
         # 2. prepare index params
         index_params = client_w.prepare_index_params(client)[0]
-        index_params.add_index(field_name = "vector")
+        index_params.add_index(field_name="vector")
         # 3. create index
-        error = {ct.err_code: 100, ct.err_msg: f"can't find collection collection not "
-                                               f"found[database=default][collection=not_existed]"}
+        error = {ct.err_code: 100, ct.err_msg: f"can't find collection[database=default][collection={not_existed_collection_name}]"}
         client_w.create_index(client, not_existed_collection_name, index_params,
                               check_task=CheckTasks.err_res, check_items=error)
         client_w.drop_collection(client, collection_name)
@@ -128,9 +128,7 @@ class TestMilvusClientInsertInvalid(TestcaseBase):
         rng = np.random.default_rng(seed=19530)
         rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                  default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 1100, ct.err_msg: f"invalid dimension: {collection_name}. "
-                                                f"the length of a collection name must be less than 255 characters: "
-                                                f"invalid parameter"}
+        error = {ct.err_code: 1100, ct.err_msg: f"the length of a collection name must be less than 255 characters"}
         client_w.insert(client, collection_name, rows,
                         check_task=CheckTasks.err_res, check_items=error)

@@ -146,8 +144,7 @@ class TestMilvusClientInsertInvalid(TestcaseBase):
         rng = np.random.default_rng(seed=19530)
         rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                  default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 100, ct.err_msg: f"can't find collection collection not found"
-                                               f"[database=default][collection={collection_name}]"}
+        error = {ct.err_code: 100, ct.err_msg: f"can't find collection[database=default][collection={collection_name}]"}
         client_w.insert(client, collection_name, rows,
                         check_task=CheckTasks.err_res, check_items=error)

@@ -184,8 +181,9 @@ class TestMilvusClientInsertInvalid(TestcaseBase):
         rng = np.random.default_rng(seed=19530)
         rows = [{default_primary_key_field_name: i,
                  default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 1, ct.err_msg: f"float vector field 'vector' is illegal, array type mismatch: "
-                                             f"invalid parameter[expected=need float vector][actual=got nil]"}
+        error = {ct.err_code: 1,
+                 ct.err_msg: f"Insert missed an field `vector` to collection "
+                             f"without set nullable==true or set default_value"}
         client_w.insert(client, collection_name, data=rows,
                         check_task=CheckTasks.err_res, check_items=error)

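For context, a hypothetical standalone reproduction of this "missed field" error with the lightweight MilvusClient API (the uri and collection are made up; the collection is assumed to have an int64 primary key `id` and a float vector field `vector`):

from pymilvus import MilvusClient, MilvusException

client = MilvusClient(uri="http://localhost:19530")  # assumed running server
try:
    client.insert("demo", data=[{"id": 0, "float": 0.0, "string": "0"}])  # no 'vector'
except MilvusException as e:
    print(e.message)  # e.g. "Insert missed an field `vector` to collection ..."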
@@ -204,7 +202,8 @@ class TestMilvusClientInsertInvalid(TestcaseBase):
         rng = np.random.default_rng(seed=19530)
         rows = [{default_vector_field_name: list(rng.random((1, default_dim))[0]),
                  default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 1, ct.err_msg: f"currently not support vector field as PrimaryField: invalid parameter"}
+        error = {ct.err_code: 1,
+                 ct.err_msg: f"Insert missed an field `id` to collection without set nullable==true or set default_value"}
         client_w.insert(client, collection_name, data=rows,
                         check_task=CheckTasks.err_res, check_items=error)

@@ -223,9 +222,9 @@ class TestMilvusClientInsertInvalid(TestcaseBase):
         rng = np.random.default_rng(seed=19530)
         rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                  default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 1, ct.err_msg: f"Attempt to insert an unexpected field "
-                                             f"to collection without enabling dynamic field"}
-        client_w.insert(client, collection_name, data= rows,
+        error = {ct.err_code: 1,
+                 ct.err_msg: f"Attempt to insert an unexpected field `float` to collection without enabling dynamic field"}
+        client_w.insert(client, collection_name, data=rows,
                         check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L1)
@@ -262,9 +261,10 @@ class TestMilvusClientInsertInvalid(TestcaseBase):
         rng = np.random.default_rng(seed=19530)
         rows = [{default_primary_key_field_name: str(i), default_vector_field_name: list(rng.random((1, default_dim))[0]),
                  default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 1, ct.err_msg: f"The Input data type is inconsistent with defined schema, "
-                                             f"please check it."}
-        client_w.insert(client, collection_name, data= rows,
+        error = {ct.err_code: 1,
+                 ct.err_msg: f"The Input data type is inconsistent with defined schema, "
+                             f"{{id}} field should be a int64"}
+        client_w.insert(client, collection_name, data=rows,
                         check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L1)
@@ -283,9 +283,10 @@ class TestMilvusClientInsertInvalid(TestcaseBase):
         rng = np.random.default_rng(seed=19530)
         rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                  default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}. The first character of "
-                                                 f"a partition name must be an underscore or letter."}
-        client_w.insert(client, collection_name, data= rows, partition_name=partition_name,
+        error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}."}
+        if partition_name == " ":
+            error = {ct.err_code: 1, ct.err_msg: f"Invalid partition name: . Partition name should not be empty."}
+        client_w.insert(client, collection_name, data=rows, partition_name=partition_name,
                         check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L1)
@@ -584,9 +585,7 @@ class TestMilvusClientUpsertInvalid(TestcaseBase):
         rng = np.random.default_rng(seed=19530)
         rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                  default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 1100, ct.err_msg: f"invalid dimension: {collection_name}. "
-                                                f"the length of a collection name must be less than 255 characters: "
-                                                f"invalid parameter"}
+        error = {ct.err_code: 1100, ct.err_msg: f"the length of a collection name must be less than 255 characters"}
         client_w.upsert(client, collection_name, rows,
                         check_task=CheckTasks.err_res, check_items=error)

@@ -602,13 +601,11 @@ class TestMilvusClientUpsertInvalid(TestcaseBase):
         rng = np.random.default_rng(seed=19530)
         rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                  default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 100, ct.err_msg: f"can't find collection collection not found"
-                                               f"[database=default][collection={collection_name}]"}
+        error = {ct.err_code: 100, ct.err_msg: f"can't find collection[database=default][collection={collection_name}]"}
         client_w.upsert(client, collection_name, rows,
                         check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="pymilvus issue 1894")
     @pytest.mark.parametrize("data", ["12-s", "12 s", "(mn)", "中文", "%$#", " "])
     def test_milvus_client_upsert_data_invalid_type(self, data):
         """
@@ -621,12 +618,11 @@ class TestMilvusClientUpsertInvalid(TestcaseBase):
         # 1. create collection
         client_w.create_collection(client, collection_name, default_dim, consistency_level="Strong")
         # 2. insert
-        error = {ct.err_code: 1, ct.err_msg: f"None rows, please provide valid row data."}
+        error = {ct.err_code: 1, ct.err_msg: f"wrong type of argument 'data',expected 'Dict' or list of 'Dict'"}
         client_w.upsert(client, collection_name, data,
                         check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="pymilvus issue 1895")
     def test_milvus_client_upsert_data_empty(self):
         """
         target: test high level api: client.create_collection
@@ -638,8 +634,9 @@ class TestMilvusClientUpsertInvalid(TestcaseBase):
         # 1. create collection
         client_w.create_collection(client, collection_name, default_dim, consistency_level="Strong")
         # 2. insert
-        error = {ct.err_code: 1, ct.err_msg: f"None rows, please provide valid row data."}
-        client_w.upsert(client, collection_name, data= "")
+        error = {ct.err_code: 1, ct.err_msg: f"wrong type of argument 'data',expected 'Dict' or list of 'Dict'"}
+        client_w.upsert(client, collection_name, data="",
+                        check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L1)
     def test_milvus_client_upsert_data_vector_field_missing(self):
@@ -655,9 +652,9 @@ class TestMilvusClientUpsertInvalid(TestcaseBase):
         # 2. insert
         rng = np.random.default_rng(seed=19530)
         rows = [{default_primary_key_field_name: i,
-                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 1, ct.err_msg: f"float vector field 'vector' is illegal, array type mismatch: "
-                                             f"invalid parameter[expected=need float vector][actual=got nil]"}
+                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(10)]
+        error = {ct.err_code: 1,
+                 ct.err_msg: "Insert missed an field `vector` to collection without set nullable==true or set default_value"}
         client_w.upsert(client, collection_name, data=rows,
                         check_task=CheckTasks.err_res, check_items=error)

@@ -675,9 +672,10 @@ class TestMilvusClientUpsertInvalid(TestcaseBase):
         # 2. insert
         rng = np.random.default_rng(seed=19530)
         rows = [{default_vector_field_name: list(rng.random((1, default_dim))[0]),
-                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 1, ct.err_msg: f"currently not support vector field as PrimaryField: invalid parameter"}
-        client_w.upsert(client, collection_name, data= rows,
+                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(20)]
+        error = {ct.err_code: 1,
+                 ct.err_msg: f"Insert missed an field `id` to collection without set nullable==true or set default_value"}
+        client_w.upsert(client, collection_name, data=rows,
                         check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L1)
@@ -690,14 +688,15 @@ class TestMilvusClientUpsertInvalid(TestcaseBase):
         client = self._connect(enable_milvus_client_api=True)
         collection_name = cf.gen_unique_str(prefix)
         # 1. create collection
-        client_w.create_collection(client, collection_name, default_dim, enable_dynamic_field=False)
+        dim= 32
+        client_w.create_collection(client, collection_name, dim, enable_dynamic_field=False)
         # 2. insert
         rng = np.random.default_rng(seed=19530)
-        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
-                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 1, ct.err_msg: f"Attempt to insert an unexpected field "
-                                             f"to collection without enabling dynamic field"}
-        client_w.upsert(client, collection_name, data= rows,
+        rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, dim))[0]),
+                 default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(10)]
+        error = {ct.err_code: 1,
+                 ct.err_msg: f"Attempt to insert an unexpected field `float` to collection without enabling dynamic field"}
+        client_w.upsert(client, collection_name, data=rows,
                         check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L1)
@@ -734,8 +733,8 @@ class TestMilvusClientUpsertInvalid(TestcaseBase):
         rng = np.random.default_rng(seed=19530)
         rows = [{default_primary_key_field_name: str(i), default_vector_field_name: list(rng.random((1, default_dim))[0]),
                  default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 1, ct.err_msg: f"The Input data type is inconsistent with defined schema, "
-                                             f"please check it."}
+        error = {ct.err_code: 1,
+                 ct.err_msg: "The Input data type is inconsistent with defined schema, {id} field should be a int64"}
         client_w.upsert(client, collection_name, data= rows,
                         check_task=CheckTasks.err_res, check_items=error)

@@ -755,8 +754,9 @@ class TestMilvusClientUpsertInvalid(TestcaseBase):
         rng = np.random.default_rng(seed=19530)
         rows = [{default_primary_key_field_name: i, default_vector_field_name: list(rng.random((1, default_dim))[0]),
                  default_float_field_name: i * 1.0, default_string_field_name: str(i)} for i in range(default_nb)]
-        error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}. The first character of "
-                                                 f"a partition name must be an underscore or letter."}
+        error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}"}
+        if partition_name == " ":
+            error = {ct.err_code: 1, ct.err_msg: f"Invalid partition name: . Partition name should not be empty."}
         client_w.upsert(client, collection_name, data= rows, partition_name=partition_name,
                         check_task=CheckTasks.err_res, check_items=error)

@@ -125,8 +125,7 @@ class TestMilvusClientPartitionInvalid(TestcaseBase):
         collection_name = cf.gen_unique_str(prefix)
         # 2. create partition
         client_w.create_collection(client, collection_name, default_dim)
-        error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}. The first character of a "
-                                                 f"partition name must be an underscore or letter.]"}
+        error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}"}
         client_w.create_partition(client, collection_name, partition_name,
                                   check_task=CheckTasks.err_res, check_items=error)

@@ -396,8 +395,7 @@ class TestMilvusClientDropPartitionInvalid(TestcaseBase):
         collection_name = cf.gen_unique_str(prefix)
         # 2. create partition
         client_w.create_collection(client, collection_name, default_dim)
-        error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}. The first character of a "
-                                                 f"partition name must be an underscore or letter.]"}
+        error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}."}
         client_w.drop_partition(client, collection_name, partition_name,
                                 check_task=CheckTasks.err_res, check_items=error)

@@ -822,8 +820,7 @@ class TestMilvusClientHasPartitionInvalid(TestcaseBase):
         collection_name = cf.gen_unique_str(prefix)
         # 2. create partition
         client_w.create_collection(client, collection_name, default_dim)
-        error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}. The first character of a "
-                                                 f"partition name must be an underscore or letter.]"}
+        error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}"}
         client_w.has_partition(client, collection_name, partition_name,
                                check_task=CheckTasks.err_res, check_items=error)

@@ -839,8 +836,8 @@ class TestMilvusClientHasPartitionInvalid(TestcaseBase):
         partition_name = "a".join("a" for i in range(256))
         # 2. create partition
         client_w.create_collection(client, collection_name, default_dim)
-        error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}. the length of a collection name "
-                                                 f"must be less than 255 characters: invalid parameter"}
+        error = {ct.err_code: 65535, ct.err_msg: f"Invalid partition name: {partition_name}. "
+                                                 f"The length of a partition name must be less than 255 characters"}
         client_w.has_partition(client, collection_name, partition_name,
                                check_task=CheckTasks.err_res, check_items=error)

@@ -297,15 +297,14 @@ class TestAliasOperationInvalid(TestcaseBase):
                                                  check_items={exp_name: c_1_name, exp_schema: default_schema})
         alias_a_name = cf.gen_unique_str(prefix)
         self.utility_wrap.create_alias(collection_1.name, alias_a_name)
         # collection_1.create_alias(alias_a_name)

         c_2_name = cf.gen_unique_str("collection")
         collection_2 = self.init_collection_wrap(name=c_2_name, schema=default_schema,
                                                  check_task=CheckTasks.check_collection_property,
                                                  check_items={exp_name: c_2_name, exp_schema: default_schema})
         error = {ct.err_code: 1602,
-                 ct.err_msg: f"alias exists and already aliased to another collection, alias: {alias_a_name}, "
-                             f"collection: {c_1_name}, other collection: {c_2_name}"}
+                 ct.err_msg: f"{alias_a_name} is alias to another collection: {collection_1.name}: "
+                             f"alias already exist[database=default][alias={alias_a_name}]"}
         self.utility_wrap.create_alias(collection_2.name, alias_a_name,
                                        check_task=CheckTasks.err_res,
                                        check_items=error)
@@ -330,7 +329,7 @@ class TestAliasOperationInvalid(TestcaseBase):

         alias_not_exist_name = cf.gen_unique_str(prefix)
         error = {ct.err_code: 1600,
-                 ct.err_msg: "Alter alias failed: alias does not exist"}
+                 ct.err_msg: f"alias not found[database=default][alias={alias_not_exist_name}]"}
         self.utility_wrap.alter_alias(collection_w.name, alias_not_exist_name,
                                       check_task=CheckTasks.err_res,
                                       check_items=error)
@@ -270,7 +270,8 @@ class TestCollectionParams(TestcaseBase):
         """
         self._connect()
         c_name = cf.gen_unique_str(prefix)
-        error = {ct.err_code: 1, ct.err_msg: "Collection '%s' not exist, or you can pass in schema to create one."}
+        error = {ct.err_code: 999,
+                 ct.err_msg: f"Collection '{c_name}' not exist, or you can pass in schema to create one."}
         self.collection_wrap.init_collection(c_name, schema=None, check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L2)
@@ -762,7 +763,7 @@ class TestCollectionParams(TestcaseBase):
         self._connect()
         c_name = cf.gen_unique_str(prefix)
         schema = cf.gen_default_collection_schema(description=None)
-        error = {ct.err_code: 1, ct.err_msg: "None has type NoneType, but expected one of: bytes, unicode"}
+        error = {ct.err_code: 1, ct.err_msg: "description [None] has type NoneType, but expected one of: bytes, str"}
         self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)

     @pytest.mark.tags(CaseLabel.L2)
@@ -847,7 +848,7 @@ class TestCollectionParams(TestcaseBase):
         """
         self._connect()
         c_name = cf.gen_unique_str(prefix)
-        error = {ct.err_code: 1, ct.err_msg: f"expected one of: int, long"}
+        error = {ct.err_code: 999, ct.err_msg: f"invalid num_shards type"}
         self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=error_type_shards_num,
                                              check_task=CheckTasks.err_res,
                                              check_items=error)
@@ -1086,7 +1087,7 @@ class TestCollectionOperation(TestcaseBase):
         self.connection_wrap.remove_connection(ct.default_alias)
         res_list, _ = self.connection_wrap.list_connections()
         assert ct.default_alias not in res_list
-        error = {ct.err_code: 1, ct.err_msg: 'should create connect first'}
+        error = {ct.err_code: 1, ct.err_msg: 'should create connection first'}
         self.collection_wrap.init_collection(c_name, schema=default_schema,
                                              check_task=CheckTasks.err_res, check_items=error)
         assert self.collection_wrap.collection is None
@ -1261,7 +1262,7 @@ class TestCollectionDataframe(TestcaseBase):
|
||||
"""
|
||||
self._connect()
|
||||
c_name = cf.gen_unique_str(prefix)
|
||||
error = {ct.err_code: 1, ct.err_msg: "Dataframe can not be None."}
|
||||
error = {ct.err_code: 999, ct.err_msg: "Data type must be pandas.DataFrame"}
|
||||
self.collection_wrap.construct_from_dataframe(c_name, None, check_task=CheckTasks.err_res, check_items=error)
|
||||
|
||||
@pytest.mark.tags(CaseLabel.L2)
|
||||
@ -1291,7 +1292,8 @@ class TestCollectionDataframe(TestcaseBase):
|
||||
mix_data = [(1, 2., [0.1, 0.2]), (2, 3., 4)]
|
||||
df = pd.DataFrame(data=mix_data, columns=list("ABC"))
|
||||
error = {ct.err_code: 1,
|
||||
ct.err_msg: "The Input data type is inconsistent with defined schema, please check it."}
|
||||
ct.err_msg: "The Input data type is inconsistent with defined schema, "
|
||||
"{C} field should be a float_vector, but got a {<class 'list'>} instead."}
|
||||
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field='A', check_task=CheckTasks.err_res,
|
||||
check_items=error)
|
||||
|
||||
@ -1965,7 +1967,7 @@ class TestDropCollection(TestcaseBase):
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 1, ct.err_msg: 'should create connect first'}
error = {ct.err_code: 1, ct.err_msg: 'should create connection first'}
collection_wr.drop(check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L1)
@ -2066,7 +2068,7 @@ class TestHasCollection(TestcaseBase):
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 1, ct.err_msg: 'should create connect first'}
error = {ct.err_code: 1, ct.err_msg: 'should create connection first'}
self.utility_wrap.has_collection(c_name, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -2181,7 +2183,7 @@ class TestListCollections(TestcaseBase):
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 1, ct.err_msg: 'should create connect first'}
error = {ct.err_code: 999, ct.err_msg: 'should create connection first'}
self.utility_wrap.list_collections(check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -2274,7 +2276,7 @@ class TestLoadCollection(TestcaseBase):
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 1, ct.err_msg: 'should create connect first'}
error = {ct.err_code: 1, ct.err_msg: 'should create connection first'}
collection_wr.load(check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -2290,7 +2292,7 @@ class TestLoadCollection(TestcaseBase):
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 1, ct.err_msg: 'should create connect first'}
error = {ct.err_code: 999, ct.err_msg: 'should create connection first'}
collection_wr.release(check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -2304,8 +2306,8 @@ class TestLoadCollection(TestcaseBase):
c_name = cf.gen_unique_str()
collection_wr = self.init_collection_wrap(name=c_name)
collection_wr.drop()
error = {ct.err_code: 100,
ct.err_msg: "collection= : collection not found"}
error = {ct.err_code: 999,
ct.err_msg: "collection not found"}
collection_wr.load(check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -2319,8 +2321,8 @@ class TestLoadCollection(TestcaseBase):
c_name = cf.gen_unique_str()
collection_wr = self.init_collection_wrap(name=c_name)
collection_wr.drop()
error = {ct.err_code: 100,
ct.err_msg: "collection= : collection not found"}
error = {ct.err_code: 999,
ct.err_msg: "collection not found"}
collection_wr.release(check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -2801,8 +2803,8 @@ class TestLoadCollection(TestcaseBase):
assert collection_w.num_entities == ct.default_nb
collection_w.create_index(ct.default_float_vec_field_name, index_params=ct.default_flat_index)

error = {ct.err_code: 65535,
ct.err_msg: "failed to load collection: failed to spawn replica for collection: nodes not enough"}
error = {ct.err_code: 999,
ct.err_msg: "failed to spawn replica for collection: resource group node not enough"}
collection_w.load(replica_number=3, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.ClusterOnly)
@ -3315,7 +3317,7 @@ class TestLoadPartition(TestcaseBase):
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 1, ct.err_msg: 'should create connect first.'}
error = {ct.err_code: 999, ct.err_msg: 'should create connection first.'}
partition_w.load(check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -3340,7 +3342,7 @@ class TestLoadPartition(TestcaseBase):
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 1, ct.err_msg: 'should create connect first.'}
error = {ct.err_code: 1, ct.err_msg: 'should create connection first.'}
partition_w.release(check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -3956,7 +3958,7 @@ class TestCollectionJSON(TestcaseBase):
"""
self._connect()
cf.gen_unique_str(prefix)
error = {ct.err_code: 1, ct.err_msg: "Partition key field type must be DataType.INT64 or DataType.VARCHAR."}
error = {ct.err_code: 1, ct.err_msg: "Primary key type must be DataType.INT64 or DataType.VARCHAR"}
cf.gen_json_default_collection_schema(primary_field=primary_field, is_partition_key=True,
check_task=CheckTasks.err_res, check_items=error)

@ -4015,7 +4017,6 @@ class TestCollectionARRAY(TestcaseBase):
check_items={ct.err_code: 65535, ct.err_msg: "element data type None is not valid"})

@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.skip("issue #27522")
@pytest.mark.parametrize("element_type", [1001, 'a', [], (), {1}, DataType.BINARY_VECTOR,
DataType.FLOAT_VECTOR, DataType.JSON, DataType.ARRAY])
def test_collection_array_field_element_type_invalid(self, element_type):
@ -4030,9 +4031,20 @@ class TestCollectionARRAY(TestcaseBase):
vec_field = cf.gen_float_vec_field()
array_field = cf.gen_array_field(element_type=element_type)
array_schema = cf.gen_collection_schema([int_field, vec_field, array_field])
error = {ct.err_code: 65535, ct.err_msg: "element data type None is not valid"}
error = {ct.err_code: 999, ct.err_msg: f"element type {element_type} is not supported"}
if element_type in ['a', {1}]:
error = {ct.err_code: 1, ct.err_msg: "Unexpected error"}
error = {ct.err_code: 999, ct.err_msg: "Unexpected error"}
if element_type in [[], ()]:
error = {ct.err_code: 65535, ct.err_msg: "element data type None is not valid"}
if element_type in [DataType.BINARY_VECTOR, DataType.FLOAT_VECTOR, DataType.JSON, DataType.ARRAY]:
data_type = element_type.name
if element_type == DataType.BINARY_VECTOR:
data_type = "BinaryVector"
if element_type == DataType.FLOAT_VECTOR:
data_type = "FloatVector"
if element_type == DataType.ARRAY:
data_type = "Array"
error = {ct.err_code: 999, ct.err_msg: f"element type {data_type} is not supported"}
self.init_collection_wrap(schema=array_schema, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -4391,6 +4403,7 @@ class TestCollectionMultipleVectorInvalid(TestcaseBase):
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip("issue #37543")
def test_create_collection_multiple_vectors_invalid_dim(self, get_invalid_dim):
"""
target: test create collection with multiple vector fields
@ -4645,11 +4658,11 @@ class TestCollectionDefaultValueInvalid(TestcaseBase):
int_fields.append(cf.gen_int64_field(is_primary=True))
int_fields.append(cf.gen_float_vec_field(vector_data_type=vector_type, default_value=10))
schema = cf.gen_collection_schema(fields=int_fields)
error = {ct.err_code: 1100, ct.err_msg: "default value type mismatches field schema type"}
error = {ct.err_code: 1100, ct.err_msg: f"type not support default_value"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("scalar_type", ["JSON", "ARRAY"])
@pytest.mark.parametrize("scalar_type", ["JSON", "Array"])
def test_create_collection_default_value_on_not_support_scalar_field(self, scalar_type):
"""
target: test create collection with set default value on not supported scalar field
@ -4662,12 +4675,13 @@ class TestCollectionDefaultValueInvalid(TestcaseBase):
# add other vector fields to maximum fields num
if scalar_type == "JSON":
int_fields.append(cf.gen_json_field(default_value=10))
if scalar_type == "ARRAY":
if scalar_type == "Array":
int_fields.append(cf.gen_array_field(default_value=10))
int_fields.append(cf.gen_int64_field(is_primary=True, default_value=10))
int_fields.append(cf.gen_float_vec_field())
schema = cf.gen_collection_schema(fields=int_fields)
error = {ct.err_code: 1100, ct.err_msg: "default value type mismatches field schema type"}
error = {ct.err_code: 1100,
ct.err_msg: f"type not support default_value, type:{scalar_type}"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L1)
@ -4685,7 +4699,8 @@ class TestCollectionDefaultValueInvalid(TestcaseBase):
int_fields.append(cf.gen_int8_field(default_value=10.0))
int_fields.append(cf.gen_float_vec_field())
schema = cf.gen_collection_schema(fields=int_fields)
error = {ct.err_code: 1100, ct.err_msg: "default value type mismatches field schema type"}
error = {ct.err_code: 1100,
ct.err_msg: "type (Int8) of field (int8) is not equal to the type(DataType_Double) of default_value"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L1)

@ -31,7 +31,7 @@ class TestCompactionParams(TestcaseBase):
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 1, ct.err_msg: "should create connect first"}
error = {ct.err_code: 999, ct.err_msg: "should create connection first"}
collection_w.compact(check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L1)

@ -39,17 +39,26 @@ class TestConnectionParams(TestcaseBase):
('_kwargs', None)]})

@pytest.mark.tags(ct.CaseLabel.L2)
@pytest.mark.parametrize("data", ct.get_dict_invalid_host_port)
def test_connection_add_connection_kwargs_invalid_host_port(self, data):
def test_connection_add_connection_kwargs_invalid_host_port(self):
"""
target: test **kwargs of add_connection
method: passing invalid value for host and port
expected: report error
"""

# check param of **kwargs
for data in [{"port": "port"}, {"port": ["192.168.1.1"]}]:
self.connection_wrap.add_connection(_kwargs=data, check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 999,
ct.err_msg: "Type of 'port' must be str or int"})
for data in [{"host": -1}]:
self.connection_wrap.add_connection(_kwargs=data, check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 999,
ct.err_msg: "Type of 'host' must be str"})

data = {"port": "-1", "host": "hostlocal"}
self.connection_wrap.add_connection(_kwargs=data, check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: cem.NoHostPort})
check_items={ct.err_code: 999,
ct.err_msg: "port number -1 out of range, valid range [0, 65535)"})

# get addr of default alias
self.connection_wrap.get_connection_addr(alias=DefaultConfig.DEFAULT_USING, check_task=ct.CheckTasks.ccr,
@ -74,7 +83,7 @@ class TestConnectionParams(TestcaseBase):
# No check for **kwargs
self.connection_wrap.connect(alias=DefaultConfig.DEFAULT_USING, host=1,
check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: cem.NoHostPort})
check_items={ct.err_code: 999, ct.err_msg: "Type of 'host' must be str"})

@pytest.mark.tags(ct.CaseLabel.L2)
@pytest.mark.parametrize("alias", ct.get_not_string)
@ -444,7 +453,8 @@ class TestConnectionOperation(TestcaseBase):
# using default alias to create connection, the connection does not exist
err_msg = cem.FailConnect % ("host", str(port))
self.connection_wrap.connect(alias=DefaultConfig.DEFAULT_USING, check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 2, ct.err_msg: err_msg})
check_items={ct.err_code: 999,
ct.err_msg: "illegal connection params or server unavailable"})

# list all connections and check the response
self.connection_wrap.list_connections(check_task=ct.CheckTasks.ccr,
@ -791,7 +801,7 @@ class TestConnectionOperation(TestcaseBase):

# drop collection failed
self.collection_wrap.drop(check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "should create connect first"})
check_items={ct.err_code: 1, ct.err_msg: "should create connection first"})

# successfully created default connection
self.connection_wrap.connect(alias=DefaultConfig.DEFAULT_USING, host=host, port=port,
@ -892,7 +902,7 @@ class TestConnectIPInvalid(TestcaseBase):
err_msg = "Type of 'host' must be str."
self.connection_wrap.connect(alias=DefaultConfig.DEFAULT_USING, host=host, port=port,
check_task=ct.CheckTasks.check_value_equal,
check_items={ct.err_code: 1, ct.err_msg: err_msg})
check_items={ct.err_code: 999, ct.err_msg: err_msg})


class TestConnectPortInvalid(TestcaseBase):
@ -911,7 +921,7 @@ class TestConnectPortInvalid(TestcaseBase):
err_msg = "Type of 'host' must be str."
self.connection_wrap.connect(alias=DefaultConfig.DEFAULT_USING, host=host, port=port,
check_task=ct.CheckTasks.check_value_equal,
check_items={ct.err_code: 1, ct.err_msg: err_msg})
check_items={ct.err_code: 999, ct.err_msg: err_msg})


class TestConnectUriInvalid(TestcaseBase):
@ -930,7 +940,8 @@ class TestConnectUriInvalid(TestcaseBase):

uri = "{}://{}:{}".format(protocol, host, port)
self.connection_wrap.connect(alias=connect_name, uri=uri, check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 1})
check_items={ct.err_code: 999,
ct.err_msg: "needs start with [unix, http, https, tcp] or a local file endswith [.db]"})

@pytest.mark.tags(ct.CaseLabel.L2)
@pytest.mark.parametrize("host", ["256.256.256.256", "10.1.0"])
@ -945,7 +956,8 @@ class TestConnectUriInvalid(TestcaseBase):

uri = "{}://{}:{}".format(protocol, host, port)
self.connection_wrap.connect(alias=connect_name, uri=uri, check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 2})
check_items={ct.err_code: 999,
ct.err_msg: "illegal connection params or server unavailable"})

@pytest.mark.tags(ct.CaseLabel.L2)
@pytest.mark.parametrize("port", ["8080", "443", "0", "65534"])
@ -960,7 +972,8 @@ class TestConnectUriInvalid(TestcaseBase):

uri = "{}://{}:{}".format(protocol, host, port)
self.connection_wrap.connect(alias=connect_name, uri=uri, check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 2})
check_items={ct.err_code: 999,
ct.err_msg: "illegal connection params or server unavailable"})

@pytest.mark.tags(ct.CaseLabel.L2)
@pytest.mark.parametrize("host", ["www.google.com"])
@ -976,7 +989,8 @@ class TestConnectUriInvalid(TestcaseBase):

uri = "{}://{}:{}".format(protocol, host, port)
self.connection_wrap.connect(alias=connect_name, uri=uri, check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 2})
check_items={ct.err_code: 999,
ct.err_msg: "illegal connection params or server unavailable"})


class TestConnectAddressInvalid(TestcaseBase):
@ -994,7 +1008,8 @@ class TestConnectAddressInvalid(TestcaseBase):
"""
address = "{}:{}".format(host, port)
self.connection_wrap.connect(alias=connect_name, address=address, check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 2})
check_items={ct.err_code: 999,
ct.err_msg: "illegal connection params or server unavailable"})

@pytest.mark.tags(ct.CaseLabel.L2)
@pytest.mark.parametrize("port", ["100", "65536"])
@ -1007,7 +1022,8 @@ class TestConnectAddressInvalid(TestcaseBase):
"""
address = "{}:{}".format(host, port)
self.connection_wrap.connect(alias=connect_name, address=address, check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 2})
check_items={ct.err_code: 999,
ct.err_msg: "illegal connection params or server unavailable"})


class TestConnectUserPasswordInvalid(TestcaseBase):

@ -95,7 +95,7 @@ class TestDeleteParams(TestcaseBase):
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 1, ct.err_msg: "should create connect first"}
error = {ct.err_code: 1, ct.err_msg: "should create connection first"}
collection_w.delete(expr=tmp_expr, check_task=CheckTasks.err_res, check_items=error)

# Not Milvus Exception
@ -108,7 +108,7 @@ class TestDeleteParams(TestcaseBase):
"""
# init collection with tmp_nb default data
collection_w = self.init_collection_general(prefix, nb=tmp_nb, insert_data=True)[0]
error = {ct.err_code: 1, ct.err_msg: "expr cannot be None"}
error = {ct.err_code: 999, ct.err_msg: "cannot be None"}
collection_w.delete(expr=None, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -121,7 +121,7 @@ class TestDeleteParams(TestcaseBase):
"""
# init collection with tmp_nb default data
collection_w = self.init_collection_general(prefix, nb=tmp_nb, insert_data=True)[0]
error = {ct.err_code: 1, ct.err_msg: f"expr value {expr} is illegal"}
error = {ct.err_code: 999, ct.err_msg: f"value {expr} is illegal"}
collection_w.delete(expr, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -195,8 +195,7 @@ class TestDeleteParams(TestcaseBase):
is_all_data_type=True, is_index=True)[0]
expr = f"{ct.default_float_vec_field_name} in [[0.1]]"
error = {ct.err_code: 1100,
ct.err_msg: f"failed to create delete plan: cannot parse expression: {expr}, "
f"error: value '[0.1]' in list cannot be casted to FloatVector: invalid parameter"}
ct.err_msg: f"failed to create delete plan: cannot parse expression: {expr}"}

collection_w.delete(expr, check_task=CheckTasks.err_res, check_items=error)

@ -629,8 +628,8 @@ class TestDeleteOperation(TestcaseBase):
collection_w = self.init_collection_general(prefix, nb=tmp_nb, insert_data=True)[0]

# raise exception
error = {ct.err_code: 200,
ct.err_msg: f"Failed to get partition id: partition={ct.default_tag}: partition not found"}
error = {ct.err_code: 999,
ct.err_msg: f"Failed to get partition id: partition not found[partition={ct.default_tag}]"}
collection_w.delete(tmp_expr, partition_name=ct.default_tag,
check_task=CheckTasks.err_res, check_items=error)

@ -1934,9 +1933,9 @@ class TestDeleteComplexExpr(TestcaseBase):
"""

@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("expression", cf.gen_normal_expressions()[1:])
@pytest.mark.parametrize("expressions", cf.gen_normal_expressions_and_templates()[1:])
@pytest.mark.parametrize("enable_dynamic_field", [True, False])
def test_delete_normal_expressions(self, expression, enable_dynamic_field):
def test_delete_normal_expressions(self, expressions, enable_dynamic_field):
"""
target: test delete entities using normal expression
method: delete using normal expression
@ -1948,7 +1947,7 @@ class TestDeleteComplexExpr(TestcaseBase):

# filter result with expression in collection
_vectors = _vectors[0]
expression = expression.replace("&&", "and").replace("||", "or")
expression = expressions[0].replace("&&", "and").replace("||", "or")
filter_ids = []
for i, _id in enumerate(insert_ids):
if enable_dynamic_field:
@ -1967,10 +1966,46 @@ class TestDeleteComplexExpr(TestcaseBase):
# query to check
collection_w.query(f"int64 in {filter_ids}", check_task=CheckTasks.check_query_empty)

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("expression", cf.gen_array_field_expressions())
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("expressions", cf.gen_normal_expressions_and_templates()[1:])
@pytest.mark.parametrize("enable_dynamic_field", [True, False])
def test_delete_array_expressions(self, expression, enable_dynamic_field):
def test_delete_normal_expressions_templates(self, expressions, enable_dynamic_field):
"""
target: test delete entities using normal expression
method: delete using normal expression
expected: delete successfully
"""
# init collection with nb default data
collection_w, _vectors, _, insert_ids = \
self.init_collection_general(prefix, True, enable_dynamic_field=enable_dynamic_field)[0:4]

# filter result with expression in collection
_vectors = _vectors[0]
expr = expressions[0].replace("&&", "and").replace("||", "or")
filter_ids = []
for i, _id in enumerate(insert_ids):
if enable_dynamic_field:
int64 = _vectors[i][ct.default_int64_field_name]
float = _vectors[i][ct.default_float_field_name]
else:
int64 = _vectors.int64[i]
float = _vectors.float[i]
if not expr or eval(expr):
filter_ids.append(_id)

# delete with expressions templates
expr = cf.get_expr_from_template(expressions[1]).replace("&&", "and").replace("||", "or")
expr_params = cf.get_expr_params_from_template(expressions[1])
res = collection_w.delete(expr=expr, expr_params=expr_params)[0]
assert res.delete_count == len(filter_ids)

# query to check
collection_w.query(f"int64 in {filter_ids}", check_task=CheckTasks.check_query_empty)

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("expressions", cf.gen_array_field_expressions_and_templates())
@pytest.mark.parametrize("enable_dynamic_field", [True, False])
def test_delete_array_expressions(self, expressions, enable_dynamic_field):
"""
target: test delete entities using normal expression
method: delete using normal expression
@ -1993,25 +2028,73 @@ class TestDeleteComplexExpr(TestcaseBase):
data.append(arr)
collection_w.insert(data)
collection_w.flush()
collection_w.create_index(ct.default_float_vec_field_name, ct.default_flat_index)
collection_w.load()

# 3. filter result with expression in collection
expression = expression.replace("&&", "and").replace("||", "or")
expr = expressions[0].replace("&&", "and").replace("||", "or")
filter_ids = []
for i in range(nb):
int32_array = data[i][ct.default_int32_array_field_name]
float_array = data[i][ct.default_float_array_field_name]
string_array = data[i][ct.default_string_array_field_name]
if not expression or eval(expression):
if not expr or eval(expr):
filter_ids.append(i)

# 4. delete by array expression
collection_w.create_index(ct.default_float_vec_field_name, ct.default_flat_index)
collection_w.load()
res = collection_w.delete(expression)[0]
res = collection_w.delete(expr)[0]
assert res.delete_count == len(filter_ids)

# 5. query to check
collection_w.query(expression, check_task=CheckTasks.check_query_empty)
collection_w.query(expr, check_task=CheckTasks.check_query_empty)

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("expressions", cf.gen_array_field_expressions_and_templates())
@pytest.mark.parametrize("enable_dynamic_field", [True, False])
def test_delete_array_expressions_templates(self, expressions, enable_dynamic_field):
"""
target: test delete entities using normal expression
method: delete using normal expression
expected: delete successfully
"""
# 1. create a collection
nb = ct.default_nb
schema = cf.gen_array_collection_schema()
collection_w = self.init_collection_wrap(schema=schema, enable_dynamic_field=enable_dynamic_field)

# 2. insert data
array_length = 100
data = []
for i in range(nb):
arr = {ct.default_int64_field_name: i,
ct.default_float_vec_field_name: cf.gen_vectors(1, ct.default_dim)[0],
ct.default_int32_array_field_name: [np.int32(i) for i in range(array_length)],
ct.default_float_array_field_name: [np.float32(i) for i in range(array_length)],
ct.default_string_array_field_name: [str(i) for i in range(array_length)]}
data.append(arr)
collection_w.insert(data)
collection_w.flush()
collection_w.create_index(ct.default_float_vec_field_name, ct.default_flat_index)
collection_w.load()

# 3. filter result with expression in collection
expr = expressions[0].replace("&&", "and").replace("||", "or")
filter_ids = []
for i in range(nb):
int32_array = data[i][ct.default_int32_array_field_name]
float_array = data[i][ct.default_float_array_field_name]
string_array = data[i][ct.default_string_array_field_name]
if not expr or eval(expr):
filter_ids.append(i)

# 4. delete by array expression
expr = cf.get_expr_from_template(expressions[1]).replace("&&", "and").replace("||", "or")
expr_params = cf.get_expr_params_from_template(expressions[1])
res = collection_w.delete(expr=expr, expr_params=expr_params)[0]
assert res.delete_count == len(filter_ids)

# 5. query to check
collection_w.query(expr=expr, expr_params=expr_params, check_task=CheckTasks.check_query_empty)

@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("field_name", ["varchar", "json_field['string']", "NewStr"])
@ -2069,7 +2152,7 @@ class TestDeleteComplexExpr(TestcaseBase):
collection_w = self.init_collection_general(prefix, True)[0]

# delete
error = {ct.err_code: 1, ct.err_msg: "expr cannot be empty"}
error = {ct.err_code: 1, ct.err_msg: "cannot be empty"}
collection_w.delete(expr="", check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -2206,9 +2289,9 @@ class TestDeleteComplexExpr(TestcaseBase):
collection_w.delete(expressions, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("expression", cf.gen_json_field_expressions())
@pytest.mark.parametrize("expressions", cf.gen_json_field_expressions_and_templates())
@pytest.mark.parametrize("enable_dynamic_field", [True, False])
def test_delete_expr_json_field(self, expression, enable_dynamic_field):
def test_delete_expr_json_field(self, expressions, enable_dynamic_field):
"""
target: test delete entities using normal expression
method: delete using normal expression
@ -2220,7 +2303,7 @@ class TestDeleteComplexExpr(TestcaseBase):

# filter result with expression in collection
_vectors = _vectors[0]
expression = expression.replace("&&", "and").replace("||", "or")
expr = expressions[0].replace("&&", "and").replace("||", "or")
filter_ids = []
json_field = {}
for i, _id in enumerate(insert_ids):
@ -2230,21 +2313,20 @@ class TestDeleteComplexExpr(TestcaseBase):
else:
json_field['number'] = _vectors[ct.default_json_field_name][i]['number']
json_field['float'] = _vectors[ct.default_json_field_name][i]['float']
if not expression or eval(expression):
if not expr or eval(expr):
filter_ids.append(_id)

# delete with expressions
res = collection_w.delete(expression)[0]
res = collection_w.delete(expr)[0]
assert res.delete_count == len(filter_ids)

# query to check
collection_w.query(f"int64 in {filter_ids}", check_task=CheckTasks.check_query_empty)

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("normal_expression, json_expression", zip(cf.gen_normal_expressions()[1:4],
cf.gen_json_field_expressions()[6:9]))
@pytest.mark.parametrize("expressions", cf.gen_json_field_expressions_and_templates())
@pytest.mark.parametrize("enable_dynamic_field", [True, False])
def test_delete_expr_complex_mixed(self, normal_expression, json_expression, enable_dynamic_field):
def test_delete_expr_templtes_json_field(self, expressions, enable_dynamic_field):
"""
target: test delete entities using normal expression
method: delete using normal expression
@ -2255,9 +2337,47 @@ class TestDeleteComplexExpr(TestcaseBase):
self.init_collection_general(prefix, True, enable_dynamic_field=enable_dynamic_field)[0:4]

# filter result with expression in collection
expression = normal_expression + ' and ' + json_expression
_vectors = _vectors[0]
expression = expression.replace("&&", "and").replace("||", "or")
expr = expressions[0].replace("&&", "and").replace("||", "or")
filter_ids = []
json_field = {}
for i, _id in enumerate(insert_ids):
if enable_dynamic_field:
json_field['number'] = _vectors[i][ct.default_json_field_name]['number']
json_field['float'] = _vectors[i][ct.default_json_field_name]['float']
else:
json_field['number'] = _vectors[ct.default_json_field_name][i]['number']
json_field['float'] = _vectors[ct.default_json_field_name][i]['float']
if not expr or eval(expr):
filter_ids.append(_id)

# delete with expressions template
expr = cf.get_expr_from_template(expressions[1]).replace("&&", "and").replace("||", "or")
expr_params = cf.get_expr_params_from_template(expressions[1])
res = collection_w.delete(expr=expr, expr_params=expr_params)[0]
assert res.delete_count == len(filter_ids)

# query to check
collection_w.query(f"int64 in {filter_ids}", check_task=CheckTasks.check_query_empty)

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("normal_expressions, json_expressions", zip(cf.gen_normal_expressions_and_templates()[1:4],
cf.gen_json_field_expressions_and_templates()[6:9]))
@pytest.mark.parametrize("enable_dynamic_field", [True, False])
def test_delete_expr_complex_mixed(self, normal_expressions, json_expressions, enable_dynamic_field):
"""
target: test delete entities using normal expression
method: delete using normal expression
expected: delete successfully
"""
# init collection with nb default data
collection_w, _vectors, _, insert_ids = \
self.init_collection_general(prefix, True, enable_dynamic_field=enable_dynamic_field)[0:4]

# filter result with expression in collection
expr = normal_expressions[0] + ' and ' + json_expressions[0]
_vectors = _vectors[0]
expr = expr.replace("&&", "and").replace("||", "or")
filter_ids = []
json_field = {}
for i, _id in enumerate(insert_ids):
@ -2271,11 +2391,14 @@ class TestDeleteComplexExpr(TestcaseBase):
json_field['float'] = _vectors[ct.default_json_field_name][i]['float']
int64 = _vectors.int64[i]
float = _vectors.float[i]
if not expression or eval(expression):
if not expr or eval(expr):
filter_ids.append(_id)

# delete with expressions
res = collection_w.delete(expression)[0]
# delete with expressions and template mixed
json_expr = cf.get_expr_from_template(json_expressions[1]).replace("&&", "and").replace("||", "or")
expr = normal_expressions[0] + ' and ' + json_expr
json_expr_params = cf.get_expr_params_from_template(json_expressions[1])
res = collection_w.delete(expr=expr, expr_params=json_expr_params)[0]
assert res.delete_count == len(filter_ids)

# query to check

@ -70,7 +70,6 @@ class TestFieldPartialLoad(TestcaseBase):
and not_load_int64_field.name in res[0][0].fields.keys()

@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="issue #36353")
def test_skip_load_dynamic_field(self):
"""
target: test skip load dynamic field

@ -98,13 +98,9 @@ class TestIndexParams(TestcaseBase):
collection_w = self.init_collection_wrap(name=c_name)
index_params = copy.deepcopy(default_index_params)
index_params["index_type"] = index_type
if not isinstance(index_params["index_type"], str):
msg = "must be str"
else:
msg = "invalid index type"
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1100, ct.err_msg: msg})
check_items={ct.err_code: 1100, ct.err_msg: "invalid parameter["})

@pytest.mark.tags(CaseLabel.L1)
def test_index_type_not_supported(self):
@ -238,8 +234,8 @@ class TestIndexOperation(TestcaseBase):
collection_w.create_index(ct.default_int64_field_name, {})
collection_w.load(check_task=CheckTasks.err_res,
check_items={ct.err_code: 65535,
ct.err_msg: f"there is no vector index on field: [float_vector], "
f"please create index firstly: collection={collection_w.name}: index not found"})
ct.err_msg: "there is no vector index on field: [float_vector], "
"please create index firstly"})

@pytest.mark.tags(CaseLabel.L2)
def test_index_create_on_array_field(self):
@ -1092,9 +1088,8 @@ class TestNewIndexBinary(TestcaseBase):
binary_index_params = {'index_type': 'BIN_IVF_FLAT', 'metric_type': 'L2', 'params': {'nlist': 64}}
collection_w.create_index(default_binary_vec_field_name, binary_index_params,
index_name=binary_field_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1100,
ct.err_msg: "metric type L2 not found or not supported, supported: "
"[HAMMING JACCARD SUBSTRUCTURE SUPERSTRUCTURE]"})
check_items={ct.err_code: 999,
ct.err_msg: "binary vector index does not support metric type: L2"})

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("metric_type", ["L2", "IP", "COSINE", "JACCARD", "HAMMING"])
@ -1107,12 +1102,12 @@ class TestNewIndexBinary(TestcaseBase):
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
binary_index_params = {'index_type': 'HNSW', "M": '18', "efConstruction": '240', 'metric_type': metric_type}
error = {ct.err_code: 999, ct.err_msg: f"binary vector index does not support metric type: {metric_type}"}
if metric_type in ["JACCARD", "HAMMING"]:
error = {ct.err_code: 999, ct.err_msg: f"data type BinaryVector can't build with this index HNSW"}
collection_w.create_index(default_binary_vec_field_name, binary_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1100,
ct.err_msg: "HNSW only support float vector data type: invalid "
"parameter[expected=valid index params][actual=invalid "
"index params]"})
check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("metric", ct.binary_metrics)
@ -1257,7 +1252,7 @@ class TestIndexInvalid(TestcaseBase):
collection_w.create_index(ct.default_json_field_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1100,
ct.err_msg: "create index on JSON field is not supported"})
ct.err_msg: "create auto index on type:JSON is not supported"})

@pytest.mark.tags(CaseLabel.L1)
def test_create_scalar_index_on_vector_field(self, scalar_index, vector_data_type):
@ -1286,7 +1281,7 @@ class TestIndexInvalid(TestcaseBase):
collection_w.create_index(ct.default_binary_vec_field_name, index_params=scalar_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1100,
ct.err_msg: f"invalid index type: {scalar_index}"})
ct.err_msg: "metric type not set for vector index"})

@pytest.mark.tags(CaseLabel.L1)
def test_create_inverted_index_on_json_field(self, vector_data_type):
@ -1300,7 +1295,7 @@ class TestIndexInvalid(TestcaseBase):
collection_w.create_index(ct.default_json_field_name, index_params=scalar_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1100,
ct.err_msg: "create index on JSON field is not supported"})
ct.err_msg: "INVERTED are not supported on JSON field"})

@pytest.mark.tags(CaseLabel.L1)
def test_create_inverted_index_on_array_field(self):
@ -1433,7 +1428,7 @@ class TestIndexInvalid(TestcaseBase):
collection_w.alter_index(ct.default_index_name, {"error_param_key": 123},
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1100,
ct.err_msg: f"error_param is not configable index param"})
ct.err_msg: f"error_param_key is not configable index param"})
collection_w.alter_index(ct.default_index_name, ["error_param_type"],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
@ -1483,8 +1478,8 @@ class TestIndexInvalid(TestcaseBase):
data = cf.gen_default_list_sparse_data()
collection_w.insert(data=data)
params = {"index_type": index, "metric_type": "IP", "params": {"drop_ratio_build": ratio}}
error = {ct.err_code: 1100,
ct.err_msg: f"invalid drop_ratio_build: {ratio}, must be in range [0, 1): invalid parameter[expected=valid index params"}
error = {ct.err_code: 999,
ct.err_msg: f"Out of range in json: param 'drop_ratio_build' ({ratio*1.0}) should be in range [0.000000, 1.000000)"}
index, _ = self.index_wrap.init_index(collection_w.collection, ct.default_sparse_vec_field_name, params,
check_task=CheckTasks.err_res,
check_items=error)
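# --- illustrative sketch (editor's addition, not part of the commit) ---
# For contrast with the out-of-range ratio above, a drop_ratio_build inside
# [0, 1) is accepted; SPARSE_INVERTED_INDEX is one index type this test is
# assumed to parametrize over:
valid_params = {"index_type": "SPARSE_INVERTED_INDEX", "metric_type": "IP",
                "params": {"drop_ratio_build": 0.2}}
# --- end sketch ---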
@ -2016,7 +2011,7 @@ class TestIndexDiskann(TestcaseBase):
collection_w.create_index(default_binary_vec_field_name, ct.default_diskann_index, index_name=binary_field_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1100,
ct.err_msg: "float or float16 vector are only supported"})
ct.err_msg: "binary vector index does not support metric type: COSINE"})

@pytest.mark.tags(CaseLabel.L2)
def test_create_diskann_index_multithread(self):
@ -2181,7 +2176,7 @@ class TestScaNNIndex(TestcaseBase):
"""
collection_w = self.init_collection_general(prefix, is_index=False)[0]
index_params = {"index_type": "SCANN", "metric_type": "L2", "params": {"nlist": nlist}}
error = {ct.err_code: 1100, ct.err_msg: "nlist out of range: [1, 65536]"}
error = {ct.err_code: 999, ct.err_msg: f"Out of range in json: param 'nlist' ({nlist}) should be in range [1, 65536]"}
collection_w.create_index(default_field_name, index_params,
check_task=CheckTasks.err_res, check_items=error)

@ -2196,7 +2191,7 @@ class TestScaNNIndex(TestcaseBase):
collection_w = self.init_collection_general(prefix, is_index=False, dim=dim)[0]
index_params = {"index_type": "SCANN", "metric_type": "L2", "params": {"nlist": 1024}}
error = {ct.err_code: 1100,
ct.err_msg: f"dimension must be able to be divided by 2, dimension: {dim}"}
ct.err_msg: f"The dimension of a vector (dim) should be a multiple of 2. Dimension:{dim}"}
collection_w.create_index(default_field_name, index_params,
check_task=CheckTasks.err_res, check_items=error)

@ -2386,7 +2381,7 @@ class TestBitmapIndex(TestcaseBase):
for msg, index_params in {
iem.VectorMetricTypeExist: IndexPrams(index_type=IndexName.BITMAP),
iem.SparseFloatVectorMetricType: IndexPrams(index_type=IndexName.BITMAP, metric_type=MetricType.L2),
iem.CheckVectorIndex.format(DataType.SPARSE_FLOAT_VECTOR, IndexName.BITMAP): IndexPrams(
iem.CheckVectorIndex.format("SparseFloatVector", IndexName.BITMAP): IndexPrams(
index_type=IndexName.BITMAP, metric_type=MetricType.IP)
}.items():
self.collection_wrap.create_index(

@ -79,7 +79,7 @@ class TestInsertParams(TestcaseBase):

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("data", [pd.DataFrame()])
def test_insert_empty_data(self, data):
def test_insert_empty_dataframe(self, data):
"""
target: test insert empty dataFrame()
method: insert empty
@ -101,7 +101,7 @@ class TestInsertParams(TestcaseBase):
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
error = {ct.err_code: 999, ct.err_msg: "The data don't match with schema fields"}
error = {ct.err_code: 999, ct.err_msg: "The data doesn't match with schema fields"}
collection_w.insert(
data=data, check_task=CheckTasks.err_res, check_items=error)

@ -134,7 +134,7 @@ class TestInsertParams(TestcaseBase):
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_int64_field_name: ' '}, inplace=True)
error = {ct.err_code: 999,
ct.err_msg: "The name of field don't match, expected: int64"}
ct.err_msg: "The name of field doesn't match, expected: int64"}
collection_w.insert(
data=df, check_task=CheckTasks.err_res, check_items=error)

@ -152,7 +152,7 @@ class TestInsertParams(TestcaseBase):
df.rename(
columns={ct.default_int64_field_name: invalid_field_name}, inplace=True)
error = {ct.err_code: 999,
ct.err_msg: f"The name of field don't match, expected: int64, got {invalid_field_name}"}
ct.err_msg: f"The name of field doesn't match, expected: int64, got {invalid_field_name}"}
collection_w.insert(
data=df, check_task=CheckTasks.err_res, check_items=error)

@ -218,6 +218,7 @@ class TestInsertParams(TestcaseBase):
assert collection_w.num_entities == 1

@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="issue #37543")
def test_insert_dim_not_match(self):
"""
target: test insert with not match dim
@ -227,8 +228,8 @@ class TestInsertParams(TestcaseBase):
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
dim = 129
df = cf.gen_default_dataframe_data(ct.default_nb, dim=dim)
error = {ct.err_code: 65535,
df = cf.gen_default_dataframe_data(nb=20, dim=dim)
error = {ct.err_code: 999,
ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)

@ -246,7 +247,7 @@ class TestInsertParams(TestcaseBase):
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb, dim=dim)
error = {ct.err_code: 1100,
ct.err_msg: f'the dim ({dim}) of field data(binary_vector) is not equal to schema dim '
f'({ct.default_dim}): invalid parameter[expected={dim}][actual={ct.default_dim}]'}
f'({ct.default_dim}): invalid parameter[expected={ct.default_dim}][actual={dim}]'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -260,7 +261,7 @@ class TestInsertParams(TestcaseBase):
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_float_field_name: "int"}, inplace=True)
error = {ct.err_code: 999, ct.err_msg: "The name of field don't match, expected: float, got int"}
error = {ct.err_code: 999, ct.err_msg: "The name of field doesn't match, expected: float, got int"}
collection_w.insert(
data=df, check_task=CheckTasks.err_res, check_items=error)

@ -337,7 +338,7 @@ class TestInsertParams(TestcaseBase):
field_data = cf.gen_data_by_collection_field(fields, nb=nb)
data.append(field_data)
data.append([1 for _ in range(nb)])
error = {ct.err_code: 999, ct.err_msg: "The data don't match with schema fields"}
error = {ct.err_code: 999, ct.err_msg: "The data doesn't match with schema fields"}
collection_w.insert(
data=data, check_task=CheckTasks.err_res, check_items=error)

@ -533,7 +534,7 @@ class TestInsertOperation(TestcaseBase):
field_data = cf.gen_data_by_collection_field(field, nb=nb)
if field.dtype != DataType.FLOAT_VECTOR:
data.append(field_data)
error = {ct.err_code: 999, ct.err_msg: f"The data don't match with schema fields, "
error = {ct.err_code: 999, ct.err_msg: f"The data doesn't match with schema fields, "
f"expect {len(fields)} list, got {len(data)}"}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)

@ -1320,8 +1321,7 @@ class TestInsertInvalid(TestcaseBase):
collection_w = self.init_collection_general(prefix, is_all_data_type=True)[0]
data = cf.gen_dataframe_all_data_type(nb=1)
data[ct.default_int8_field_name] = [invalid_int8]
error = {ct.err_code: 1100, 'err_msg': "The data type of field int8 doesn't match, "
"expected: INT8, got INT64"}
error = {ct.err_code: 1100, ct.err_msg: f"the 0th element ({invalid_int8}) out of range: [-128, 127]"}
collection_w.insert(data, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -1335,8 +1335,7 @@ class TestInsertInvalid(TestcaseBase):
collection_w = self.init_collection_general(prefix, is_all_data_type=True)[0]
data = cf.gen_dataframe_all_data_type(nb=1)
data[ct.default_int16_field_name] = [invalid_int16]
error = {ct.err_code: 1100, 'err_msg': "The data type of field int16 doesn't match, "
"expected: INT16, got INT64"}
error = {ct.err_code: 1100, ct.err_msg: f"the 0th element ({invalid_int16}) out of range: [-32768, 32767]"}
collection_w.insert(data, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -2239,7 +2238,7 @@ class TestUpsertInvalid(TestcaseBase):
collection_w = self.init_collection_wrap(name=c_name, with_json=False)
data = cf.gen_default_binary_dataframe_data()[0]
error = {ct.err_code: 999,
ct.err_msg: "The name of field don't match, expected: float_vector, got binary_vector"}
ct.err_msg: "The name of field doesn't match, expected: float_vector, got binary_vector"}
collection_w.upsert(data=data, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -2254,7 +2253,7 @@ class TestUpsertInvalid(TestcaseBase):
collection_w = self.init_collection_general(pre_upsert, True, is_binary=True)[0]
data = cf.gen_default_binary_dataframe_data(dim=dim)[0]
error = {ct.err_code: 1100,
ct.err_msg: f"Collection field dim is 128, but entities field dim is {dim}"}
ct.err_msg: f"the dim ({dim}) of field data(binary_vector) is not equal to schema dim ({ct.default_dim})"}
collection_w.upsert(data=data, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)
@ -2501,10 +2500,9 @@ class TestInsertArray(TestcaseBase):
collection_w = self.init_collection_wrap(schema=schema)
# Insert actual array length > max_capacity
arr_len = ct.default_max_capacity + 1
data = cf.gen_row_data_by_schema(schema=schema,nb=11)
data = cf.gen_row_data_by_schema(schema=schema, nb=11)
data[1][ct.default_float_array_field_name] = [np.float32(i) for i in range(arr_len)]
err_msg = (f"the length (101) of 1th array exceeds max capacity ({ct.default_max_capacity}): "
f"expected=valid length array, actual=array length exceeds max capacity: invalid parameter")
err_msg = (f"the length ({arr_len}) of 1th array exceeds max capacity ({ct.default_max_capacity})")
collection_w.insert(data=data, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1100, ct.err_msg: err_msg})

@ -873,7 +873,6 @@ class TestBitmapIndexDQLExpr(TestCaseClassBase):
return [(r[self.primary_field], r[expr_field], real_data[r[self.primary_field]]) for r in res if
r[expr_field] != real_data[r[self.primary_field]]]

# https://github.com/milvus-io/milvus/issues/36221
@pytest.mark.tags(CaseLabel.L1)
def test_bitmap_index_query_with_invalid_array_params(self):
"""

@ -232,7 +232,7 @@ class TestPartitionParams(TestcaseBase):
self.partition_wrap.init_partition(collection=None, name=partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "must be pymilvus.Collection"})
ct.err_msg: "Collection must be of type pymilvus.Collection or String"})

@pytest.mark.tags(CaseLabel.L1)
def test_partition_drop(self):
@ -1003,8 +1003,10 @@ class TestPartitionOperations(TestcaseBase):

data = cf.gen_default_list_data(nb=10, dim=dim)
# insert data to partition
# TODO: update the assert error msg as #37543 fixed
partition_w.insert(data, check_task=CheckTasks.err_res,
check_items={ct.err_code: 65535, ct.err_msg: "but entities field dim"})
check_items={ct.err_code: 65535,
ct.err_msg: f"float data should divide the dim({ct.default_dim})"})

@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("sync", [True, False])
@ -1109,7 +1111,8 @@ class TestPartitionOperations(TestcaseBase):

# upsert mismatched data
upsert_data = cf.gen_default_data_for_upsert(dim=ct.default_dim-1)[0]
error = {ct.err_code: 65535, ct.err_msg: "Collection field dim is 128, but entities field dim is 127"}
# TODO: update the assert error msg as #37543 fixed
error = {ct.err_code: 65535, ct.err_msg: f"float data should divide the dim({ct.default_dim})"}
partition_w.upsert(upsert_data, check_task=CheckTasks.err_res, check_items=error)

@pytest.mark.tags(CaseLabel.L2)

@ -79,9 +79,23 @@ class TestQueryParams(TestcaseBase):
"""
collection_w, entities = self.init_collection_general(prefix, insert_data=True, nb=10)[0:2]
term_expr = f'{default_int_field_name} in {entities[:default_pos]}'
error = {ct.err_code: 1100, ct.err_msg: "cannot parse expression: int64 in .."}
error = {ct.err_code: 999, ct.err_msg: "cannot parse expression: int64 in"}
collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)

# check missing the template variable
expr = "int64 in {value_0}"
expr_params = {"value_1": [0, 1]}
error = {ct.err_code: 999, ct.err_msg: "the value of expression template variable name {value_0} is not found"}
collection_w.query(expr=expr, expr_params=expr_params,
check_task=CheckTasks.err_res, check_items=error)

# check the template variable type mismatch
expr = "int64 in {value_0}"
expr_params = {"value_0": 1}
error = {ct.err_code: 999, ct.err_msg: "the value of term expression template variable {value_0} is not array"}
collection_w.query(expr=expr, expr_params=expr_params,
check_task=CheckTasks.err_res, check_items=error)
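# --- illustrative sketch (editor's addition, not part of the commit) ---
# A well-formed counterpart to the two failure cases above: the placeholder
# name in expr_params matches, and the value bound to an 'in' term is a list:
expr = "int64 in {value_0}"
expr_params = {"value_0": [0, 1]}
res = collection_w.query(expr=expr, expr_params=expr_params)[0]
# --- end sketch ---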

@pytest.mark.tags(CaseLabel.L0)
def test_query(self, enable_dynamic_field):
"""
@ -409,43 +423,45 @@ class TestQueryParams(TestcaseBase):
self.collection_wrap.query(term_expr, output_fields=["float", "int64", "int8", "varchar"],
check_task=CheckTasks.check_query_results, check_items={exp_res: res})

@pytest.fixture(scope="function", params=cf.gen_normal_expressions())
def get_normal_expr(self, request):
if request.param == "":
pytest.skip("query with "" expr is invalid")
yield request.param

@pytest.mark.tags(CaseLabel.L1)
def test_query_with_expression(self, get_normal_expr, enable_dynamic_field):
def test_query_with_expression(self, enable_dynamic_field):
"""
target: test query with different expr
method: query with different boolean expr
expected: verify query result
"""
# 1. initialize with data
nb = 1000
nb = 2000
collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True, nb,
enable_dynamic_field=enable_dynamic_field)[0:4]

# filter result with expression in collection
_vectors = _vectors[0]
expr = get_normal_expr
expression = expr.replace("&&", "and").replace("||", "or")
filter_ids = []
for i, _id in enumerate(insert_ids):
if enable_dynamic_field:
int64 = _vectors[i][ct.default_int64_field_name]
float = _vectors[i][ct.default_float_field_name]
else:
int64 = _vectors.int64[i]
float = _vectors.float[i]
if not expression or eval(expression):
filter_ids.append(_id)
for expressions in cf.gen_normal_expressions_and_templates():
log.debug(f"test_query_with_expression: {expressions}")
expr = expressions[0].replace("&&", "and").replace("||", "or")
filter_ids = []
for i, _id in enumerate(insert_ids):
if enable_dynamic_field:
int64 = _vectors[i][ct.default_int64_field_name]
float = _vectors[i][ct.default_float_field_name]
else:
int64 = _vectors.int64[i]
float = _vectors.float[i]
if not expr or eval(expr):
filter_ids.append(_id)

# query and verify result
res = collection_w.query(expr=expression)[0]
query_ids = set(map(lambda x: x[ct.default_int64_field_name], res))
assert query_ids == set(filter_ids)
# query and verify result
res = collection_w.query(expr=expr, limit=nb)[0]
query_ids = set(map(lambda x: x[ct.default_int64_field_name], res))
assert query_ids == set(filter_ids)

# query again with expression template
expr = cf.get_expr_from_template(expressions[1]).replace("&&", "and").replace("||", "or")
expr_params = cf.get_expr_params_from_template(expressions[1])
res = collection_w.query(expr=expr, expr_params=expr_params, limit=nb)[0]
query_ids = set(map(lambda x: x[ct.default_int64_field_name], res))
assert query_ids == set(filter_ids)
|
||||
|
||||
@pytest.mark.tags(CaseLabel.L2)
|
||||
def test_query_expr_wrong_term_keyword(self):
|
||||
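Note: the rewritten loop above assumes `cf.gen_normal_expressions_and_templates()` yields pairs of [plain boolean expression, template form], so every iteration can run the same filter once as a raw string and once through `expr_params`. A hedged sketch of the shape the helpers are used with here (the concrete pair and the template layout are invented for illustration, not the helper's real output):

    # expressions[0]: plain expr; expressions[1]: whatever structure
    # cf.get_expr_from_template() / cf.get_expr_params_from_template() unpack
    expressions = ["int64 > 500 && float < 1000.0",
                   {"expr": "int64 > {v0} && float < {v1}",      # assumed layout
                    "expr_params": {"v0": 500, "v1": 1000.0}}]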
@@ -560,13 +576,16 @@ class TestQueryParams(TestcaseBase):
         expected: raise exception
         """
         exprs = [f'{ct.default_int64_field_name} in 1',
-                 f'{ct.default_int64_field_name} in "in"',
-                 f'{ct.default_int64_field_name} in (mn)']
+                 f'{ct.default_int64_field_name} in "in"']
         collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
-        error = {ct.err_code: 1100, ct.err_msg: "cannot parse expression: int64 in 1, "
-                                                "error: line 1:9 no viable alternative at input 'in1'"}
         for expr in exprs:
+            error = {ct.err_code: 1100, ct.err_msg: f"cannot parse expression: {expr}, "
+                                                    "error: the right-hand side of 'in' must be a list"}
             collection_w.query(expr, check_task=CheckTasks.err_res, check_items=error)
+        expr = f'{ct.default_int64_field_name} in (mn)'
+        error = {ct.err_code: 1100, ct.err_msg: f"cannot parse expression: {expr}, "
+                                                "error: field mn not exist"}
+        collection_w.query(expr, check_task=CheckTasks.err_res, check_items=error)
 
     @pytest.mark.tags(CaseLabel.L2)
     def test_query_expr_empty_term_array(self):
@@ -589,12 +608,19 @@ class TestQueryParams(TestcaseBase):
         expected: raise exception
         """
         collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
-        int_values = [[1., 2.], [1, 2.]]
+        values = [1., 2.]
+        term_expr = f'{ct.default_int64_field_name} in {values}'
         error = {ct.err_code: 1100,
-                 ct.err_msg: "failed to create query plan: cannot parse expression: int64 in [1, 2.0]"}
-        for values in int_values:
-            term_expr = f'{ct.default_int64_field_name} in {values}'
-            collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)
+                 ct.err_msg: f"cannot parse expression: int64 in {values}, "
+                             "error: value 'float_val:1' in list cannot be casted to Int64"}
+        collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)
+
+        values = [1, 2.]
+        term_expr = f'{ct.default_int64_field_name} in {values}'
+        error = {ct.err_code: 1100,
+                 ct.err_msg: f"cannot parse expression: int64 in {values}, "
+                             "error: value 'float_val:2' in list cannot be casted to Int64"}
+        collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)
 
     @pytest.mark.tags(CaseLabel.L2)
     def test_query_expr_non_constant_array_term(self):
@@ -605,10 +631,9 @@ class TestQueryParams(TestcaseBase):
         """
         collection_w, vectors = self.init_collection_general(prefix, insert_data=True)[0:2]
         constants = [[1], (), {}]
-        error = {ct.err_code: 1100,
-                 ct.err_msg: "cannot parse expression: int64 in [[1]], error: value '[1]' in "
-                             "list cannot be casted to Int64"}
         for constant in constants:
+            error = {ct.err_code: 1100,
+                     ct.err_msg: f"cannot parse expression: int64 in [{constant}]"}
             term_expr = f'{ct.default_int64_field_name} in [{constant}]'
             collection_w.query(term_expr, check_task=CheckTasks.err_res, check_items=error)
 
@@ -1797,7 +1822,7 @@ class TestQueryParams(TestcaseBase):
         assert key_res == int_values[offset: pos + offset]
 
     @pytest.mark.tags(CaseLabel.L2)
-    def test_query_pagination_with_expression(self, offset, get_normal_expr):
+    def test_query_pagination_with_expression(self, offset):
         """
         target: test query pagination with different expression
         method: query with different expression and verify the result
@@ -1809,20 +1834,27 @@ class TestQueryParams(TestcaseBase):
 
         # filter result with expression in collection
         _vectors = _vectors[0]
-        expr = get_normal_expr
-        expression = expr.replace("&&", "and").replace("||", "or")
-        filter_ids = []
-        for i, _id in enumerate(insert_ids):
-            int64 = _vectors.int64[i]
-            float = _vectors.float[i]
-            if not expression or eval(expression):
-                filter_ids.append(_id)
+        for expressions in cf.gen_normal_expressions_and_templates()[1:]:
+            expr = expressions[0].replace("&&", "and").replace("||", "or")
+            filter_ids = []
+            for i, _id in enumerate(insert_ids):
+                int64 = _vectors.int64[i]
+                float = _vectors.float[i]
+                if not expr or eval(expr):
+                    filter_ids.append(_id)
 
-        # query and verify result
-        query_params = {"offset": offset, "limit": 10}
-        res = collection_w.query(expr=expression, params=query_params)[0]
-        key_res = [item[key] for item in res for key in item]
-        assert key_res == filter_ids
+            # query and verify result
+            query_params = {"offset": offset, "limit": 10}
+            res = collection_w.query(expr=expr, params=query_params)[0]
+            key_res = [item[key] for item in res for key in item]
+            assert key_res == filter_ids
+
+            # query again with expression template
+            expr = cf.get_expr_from_template(expressions[1]).replace("&&", "and").replace("||", "or")
+            expr_params = cf.get_expr_params_from_template(expressions[1])
+            res = collection_w.query(expr=expr, expr_params=expr_params, params=query_params)[0]
+            key_res = [item[key] for item in res for key in item]
+            assert key_res == filter_ids
 
     @pytest.mark.tags(CaseLabel.L2)
     def test_query_pagination_with_partition(self, offset):
@@ -1930,11 +1962,14 @@ class TestQueryParams(TestcaseBase):
         int_values = vectors[0][ct.default_int64_field_name].values.tolist()
         pos = 10
         term_expr = f'{ct.default_int64_field_name} in {int_values[10: pos + 10]}'
+        error = {ct.err_code: 65535,
+                 ct.err_msg: f"invalid max query result window, (offset+limit) should be in range [1, 16384], but got 67900"}
+        if limit == -1:
+            error = {ct.err_code: 65535,
+                     ct.err_msg: f"invalid max query result window, limit [{limit}] is invalid, should be greater than 0"}
         collection_w.query(term_expr, offset=10, limit=limit,
-                           check_task=CheckTasks.err_res,
-                           check_items={ct.err_code: 65535,
-                                        ct.err_msg: f"invalid max query result window, (offset+limit) "
-                                                    f"should be in range [1, 16384], but got {limit}"})
+                           check_task=CheckTasks.err_res, check_items=error)
 
+
     @pytest.mark.tags(CaseLabel.L2)
     @pytest.mark.parametrize("offset", ["12 s", " ", [0, 1], {2}])
@@ -1967,11 +2002,13 @@ class TestQueryParams(TestcaseBase):
         int_values = vectors[0][ct.default_int64_field_name].values.tolist()
         pos = 10
         term_expr = f'{ct.default_int64_field_name} in {int_values[10: pos + 10]}'
+        error = {ct.err_code: 65535,
+                 ct.err_msg: f"invalid max query result window, (offset+limit) should be in range [1, 16384], but got 67900"}
+        if offset == -1:
+            error = {ct.err_code: 65535,
+                     ct.err_msg: f"invalid max query result window, offset [{offset}] is invalid, should be gte than 0"}
         collection_w.query(term_expr, offset=offset, limit=10,
-                           check_task=CheckTasks.err_res,
-                           check_items={ct.err_code: 65535,
-                                        ct.err_msg: f"invalid max query result window, (offset+limit) "
-                                                    f"should be in range [1, 16384], but got {offset}"})
+                           check_task=CheckTasks.err_res, check_items=error)
 
     @pytest.mark.tags(CaseLabel.L2)
     @pytest.mark.skip("not stable")
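Note: the two pagination hunks above branch on the same server-side rule: offset and limit are validated individually first, then their sum is checked against the maximum query result window. A hedged sketch of that rule exactly as the expected error messages describe it (the constant name is hypothetical; the 16384 bound is taken from the messages):

    MAX_QUERY_RESULT_WINDOW = 16384  # bound quoted in the error messages above

    def validate_pagination(offset: int, limit: int) -> None:
        # per-parameter checks come first, mirroring the if limit/offset == -1 branches
        if limit <= 0:
            raise ValueError(f"limit [{limit}] is invalid, should be greater than 0")
        if offset < 0:
            raise ValueError(f"offset [{offset}] is invalid, should be gte than 0")
        # then the combined window check
        if offset + limit > MAX_QUERY_RESULT_WINDOW:
            raise ValueError("(offset+limit) should be in range [1, 16384], "
                             f"but got {offset + limit}")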
@@ -2029,7 +2066,7 @@ class TestQueryParams(TestcaseBase):
                            check_task=CheckTasks.check_query_results, check_items={exp_res: res})
 
     @pytest.mark.tags(CaseLabel.L1)
-    def test_enable_mmap_query_with_expression(self, get_normal_expr, enable_dynamic_field):
+    def test_enable_mmap_query_with_expression(self, enable_dynamic_field):
         """
         target: turn on mmap use different expr queries
         method: turn on mmap and query with different expr
@@ -2039,7 +2076,6 @@ class TestQueryParams(TestcaseBase):
         nb = 1000
         collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True, nb, is_index=False,
                                                                              enable_dynamic_field=enable_dynamic_field)[0:4]
-
         # enable mmap
         collection_w.set_properties({'mmap.enabled': True})
         collection_w.create_index(ct.default_float_vec_field_name, default_index_params, index_name="query_expr_index")
@@ -2047,23 +2083,31 @@ class TestQueryParams(TestcaseBase):
         collection_w.load()
         # filter result with expression in collection
         _vectors = _vectors[0]
-        expr = get_normal_expr
-        expression = expr.replace("&&", "and").replace("||", "or")
-        filter_ids = []
-        for i, _id in enumerate(insert_ids):
-            if enable_dynamic_field:
-                int64 = _vectors[i][ct.default_int64_field_name]
-                float = _vectors[i][ct.default_float_field_name]
-            else:
-                int64 = _vectors.int64[i]
-                float = _vectors.float[i]
-            if not expression or eval(expression):
-                filter_ids.append(_id)
+        for expressions in cf.gen_normal_expressions_and_templates()[1:]:
+            log.debug(f"expr: {expressions}")
+            expr = expressions[0].replace("&&", "and").replace("||", "or")
+            filter_ids = []
+            for i, _id in enumerate(insert_ids):
+                if enable_dynamic_field:
+                    int64 = _vectors[i][ct.default_int64_field_name]
+                    float = _vectors[i][ct.default_float_field_name]
+                else:
+                    int64 = _vectors.int64[i]
+                    float = _vectors.float[i]
+                if not expr or eval(expr):
+                    filter_ids.append(_id)
 
-        # query and verify result
-        res = collection_w.query(expr=expression)[0]
-        query_ids = set(map(lambda x: x[ct.default_int64_field_name], res))
-        assert query_ids == set(filter_ids)
+            # query and verify result
+            res = collection_w.query(expr=expr)[0]
+            query_ids = set(map(lambda x: x[ct.default_int64_field_name], res))
+            assert query_ids == set(filter_ids)
+
+            # query again with expression template
+            expr = cf.get_expr_from_template(expressions[1]).replace("&&", "and").replace("||", "or")
+            expr_params = cf.get_expr_params_from_template(expressions[1])
+            res = collection_w.query(expr=expr, expr_params=expr_params)[0]
+            query_ids = set(map(lambda x: x[ct.default_int64_field_name], res))
+            assert query_ids == set(filter_ids)
 
     @pytest.mark.tags(CaseLabel.L2)
     def test_mmap_query_string_field_not_primary_is_empty(self):
@@ -2686,8 +2730,7 @@ class TestQueryString(TestcaseBase):
         collection_w = self.init_collection_general(prefix, insert_data=True)[0]
         collection_w.query(expression, check_task=CheckTasks.err_res,
                            check_items={ct.err_code: 1100,
-                                        ct.err_msg: f"failed to create query plan: cannot parse expression: {expression}, "
-                                                    f"error: value '1' in list cannot be casted to VarChar: invalid parameter"})
+                                        ct.err_msg: f"failed to create query plan: cannot parse expression: {expression}"})
 
     @pytest.mark.tags(CaseLabel.L1)
     def test_query_string_expr_with_binary(self):
@@ -3823,8 +3866,7 @@ class TestQueryCount(TestcaseBase):
                            check_items={exp_res: [{count: 0}]})
 
     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.parametrize("expression", cf.gen_normal_expressions())
-    def test_count_expressions(self, expression):
+    def test_count_expressions(self):
         """
         target: test count with expr
         method: count with expr
@@ -3835,19 +3877,28 @@ class TestQueryCount(TestcaseBase):
 
         # filter result with expression in collection
        _vectors = _vectors[0]
-        expression = expression.replace("&&", "and").replace("||", "or")
-        filter_ids = []
-        for i, _id in enumerate(insert_ids):
-            int64 = _vectors.int64[i]
-            float = _vectors.float[i]
-            if not expression or eval(expression):
-                filter_ids.append(_id)
-        res = len(filter_ids)
+        for expressions in cf.gen_normal_expressions_and_templates():
+            log.debug(f"query with expression: {expressions}")
+            expr = expressions[0].replace("&&", "and").replace("||", "or")
+            filter_ids = []
+            for i, _id in enumerate(insert_ids):
+                int64 = _vectors.int64[i]
+                float = _vectors.float[i]
+                if not expr or eval(expr):
+                    filter_ids.append(_id)
+            res = len(filter_ids)
 
-        # count with expr
-        collection_w.query(expr=expression, output_fields=[count],
-                           check_task=CheckTasks.check_query_results,
-                           check_items={exp_res: [{count: res}]})
+            # count with expr
+            collection_w.query(expr=expr, output_fields=[count],
+                               check_task=CheckTasks.check_query_results,
+                               check_items={exp_res: [{count: res}]})
+
+            # count again with expr template
+            expr = cf.get_expr_from_template(expressions[1]).replace("&&", "and").replace("||", "or")
+            expr_params = cf.get_expr_params_from_template(expressions[1])
+            collection_w.query(expr=expr, expr_params=expr_params, output_fields=[count],
+                               check_task=CheckTasks.check_query_results,
+                               check_items={exp_res: [{count: res}]})
 
     @pytest.mark.tags(CaseLabel.L2)
     @pytest.mark.parametrize("bool_type", [True, False, "true", "false"])
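Note: in these count tests, `count` is the suite-level constant for the "count(*)" output field; asking for it returns a single aggregate row instead of entities, which is why each expected result is compared against `len(filter_ids)`. A minimal sketch of the pattern the loop repeats (hedged; the literal "count(*)" value is inferred from the assertions above):

    count = "count(*)"  # assumed value of the suite-level constant
    res = collection_w.query(expr="int64 >= 0", output_fields=[count])[0]
    # expected shape: [{"count(*)": <number of matched entities>}]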
@@ -3885,8 +3936,7 @@ class TestQueryCount(TestcaseBase):
                            check_items={exp_res: [{count: res}]})
 
     @pytest.mark.tags(CaseLabel.L2)
-    @pytest.mark.parametrize("expression", cf.gen_normal_expressions_field(default_float_field_name))
-    def test_count_expression_auto_field(self, expression):
+    def test_count_expression_auto_field(self):
         """
         target: test count with expr
         method: count with expr
@@ -3897,21 +3947,26 @@ class TestQueryCount(TestcaseBase):
 
         # filter result with expression in collection
         _vectors = _vectors[0]
-        expression = expression.replace("&&", "and").replace("||", "or")
-        filter_ids = []
-        for i, _id in enumerate(insert_ids):
-            float = _vectors.float[i]
-            if not expression or eval(expression):
-                filter_ids.append(_id)
-        res = len(filter_ids)
+        for expressions in cf.gen_normal_expressions_and_templates_field(default_float_field_name):
+            log.debug(f"query with expression: {expressions}")
+            expr = expressions[0].replace("&&", "and").replace("||", "or")
+            filter_ids = []
+            for i, _id in enumerate(insert_ids):
+                float = _vectors.float[i]
+                if not expr or eval(expr):
+                    filter_ids.append(_id)
+            res = len(filter_ids)
-
-        # count with expr
-        collection_w.query(expr=expression, output_fields=[count],
-                           check_task=CheckTasks.check_query_results,
-                           check_items={exp_res: [{count: res}]})
+            # count with expr
+            collection_w.query(expr=expr, output_fields=[count],
+                               check_task=CheckTasks.check_query_results, check_items={exp_res: [{count: res}]})
+            # count with expr and expr_params
+            expr = cf.get_expr_from_template(expressions[1]).replace("&&", "and").replace("||", "or")
+            expr_params = cf.get_expr_params_from_template(expressions[1])
+            collection_w.query(expr=expr, expr_params=expr_params, output_fields=[count],
+                               check_task=CheckTasks.check_query_results, check_items={exp_res: [{count: res}]})
 
     @pytest.mark.tags(CaseLabel.L2)
     @pytest.mark.skip(reason="issue #25841")
     def test_count_expression_all_datatype(self):
         """
         target: test count with expr
@@ -3922,9 +3977,8 @@ class TestQueryCount(TestcaseBase):
         collection_w = self.init_collection_general(insert_data=True, is_all_data_type=True)[0]
 
         # count with expr
-        expression = "int64 >= 0 && int32 >= 1999 && int16 >= 0 && int8 >= 0 && float <= 1999.0 && double >= 0"
-        # expression = "int64 == 1999"
-        collection_w.query(expr=expression, output_fields=[count],
+        expr = "int64 >= 0 && int32 >= 1999 && int16 >= 0 && int8 <= 0 && float <= 1999.0 && double >= 0"
+        collection_w.query(expr=expr, output_fields=[count],
                            check_task=CheckTasks.check_query_results,
                            check_items={exp_res: [{count: 1}]})
 
@@ -5826,12 +5880,11 @@ class TestQueryFunction(TestcaseBase):
         expected: raise exception
         """
         collection_w, entities = self.init_collection_general(
-            prefix, insert_data=True, nb=10
-        )[0:2]
+            prefix, insert_data=True, nb=10)[0:2]
         test_cases = [
             (
-                "A_FUNCTION_THAT_DOES_NOT_EXIST()",
-                "function A_FUNCTION_THAT_DOES_NOT_EXIST() not found",
+                "A_FUNCTION_THAT_DOES_NOT_EXIST()".lower(),
+                "function A_FUNCTION_THAT_DOES_NOT_EXIST() not found".lower(),
             ),
             # empty
             ("empty()", "function empty() not found"),
File diff suppressed because it is too large
@@ -338,7 +338,7 @@ class TestUtilityParams(TestcaseBase):
         self.utility_wrap.wait_for_loading_complete(
             collection_w.name, partition_names=[ct.default_tag],
             check_task=CheckTasks.err_res,
-            check_items={ct.err_code: 200, ct.err_msg: f'partition={ct.default_tag}: partition not found'})
+            check_items={ct.err_code: 200, ct.err_msg: f'partition not found[partition={ct.default_tag}]'})
 
     @pytest.mark.tags(CaseLabel.L2)
     def test_drop_collection_not_existed(self):
@@ -491,10 +491,11 @@ class TestUtilityParams(TestcaseBase):
         collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix)
         old_collection_name = collection_w.name
         new_collection_name = get_invalid_value_collection_name
-        error = {"err_code": 1100, "err_msg": "Invalid collection name: %s. the first character of a collection name mu"
-                                              "st be an underscore or letter: invalid parameter" % new_collection_name}
+        error = {"err_code": 1100, "err_msg": "Invalid collection name"}
+        if new_collection_name in [None, ""]:
+            error = {"err_code": 999, "err_msg": f"`collection_name` value {new_collection_name} is illegal"}
+        if new_collection_name == " ":
+            error = {"err_code": 999, "err_msg": "collection name should not be empty"}
         self.utility_wrap.rename_collection(old_collection_name, new_collection_name,
                                             check_task=CheckTasks.err_res, check_items=error)
 
@@ -547,8 +548,7 @@ class TestUtilityParams(TestcaseBase):
         self.utility_wrap.rename_collection(old_collection_name, alias,
                                             check_task=CheckTasks.err_res,
                                             check_items={"err_code": 65535,
-                                                         "err_msg": "duplicated new collection name default:{} with "
-                                                                    "other collection name or alias".format(alias)})
+                                                         "err_msg": f"cannot rename collection to an existing alias: {alias}"})
 
     @pytest.mark.tags(CaseLabel.L1)
     def test_rename_collection_using_alias(self):
@@ -747,7 +747,7 @@ class TestUtilityBase(TestcaseBase):
         cw = self.init_collection_wrap(name=c_name)
         data = cf.gen_default_list_data(nb)
         cw.insert(data=data)
-        error = {ct.err_code: 700, ct.err_msg: f"{c_name}: index not found"}
+        error = {ct.err_code: 999, ct.err_msg: f"index not found[collection={c_name}]"}
         self.utility_wrap.index_building_progress(c_name, check_task=CheckTasks.err_res, check_items=error)
 
     @pytest.mark.tags(CaseLabel.L1)