[skip ci] Fix single quoted docstring (#9150)

Signed-off-by: ThreadDao <yufen.zong@zilliz.com>
This commit is contained in:
ThreadDao 2021-10-03 20:27:57 +08:00 committed by GitHub
parent 7154edc502
commit bdc7ffd790
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -39,11 +39,11 @@ class TestRestartBase:
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_flush(self, connect, collection, args):
'''
"""
target: return the same row count after server restart
method: call function: create collection, then insert/flush, restart server and assert row count
        expected: row count keeps the same
'''
"""
ids = connect.bulk_insert(collection, default_entities)
connect.flush([collection])
ids = connect.bulk_insert(collection, default_entities)
@ -62,11 +62,11 @@ class TestRestartBase:
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_during_flushing(self, connect, collection, args):
'''
"""
target: flushing will recover
method: call function: create collection, then insert/flushing, restart server and assert row count
expected: row count equals 0
'''
"""
# disable_autoflush()
ids = connect.bulk_insert(collection, big_entities)
connect.flush([collection], _async=True)
@ -90,11 +90,11 @@ class TestRestartBase:
@pytest.mark.tags(CaseLabel.L2)
def _test_delete_during_flushing(self, connect, collection, args):
'''
"""
target: flushing will recover
method: call function: create collection, then delete/flushing, restart server and assert row count
expected: row count equals (nb - delete_length)
'''
"""
# disable_autoflush()
ids = connect.bulk_insert(collection, big_entities)
connect.flush([collection])
@ -123,11 +123,11 @@ class TestRestartBase:
@pytest.mark.tags(CaseLabel.L2)
def _test_during_indexed(self, connect, collection, args):
'''
"""
target: flushing will recover
method: call function: create collection, then indexed, restart server and assert row count
expected: row count equals nb
'''
"""
# disable_autoflush()
ids = connect.bulk_insert(collection, big_entities)
connect.flush([collection])
@ -153,11 +153,11 @@ class TestRestartBase:
@pytest.mark.tags(CaseLabel.L2)
def _test_during_indexing(self, connect, collection, args):
'''
"""
target: flushing will recover
method: call function: create collection, then indexing, restart server and assert row count
        expected: row count equals nb, server continues to build index after restart
'''
"""
# disable_autoflush()
loop = 5
for i in range(loop):
@ -200,12 +200,12 @@ class TestRestartBase:
@pytest.mark.tags(CaseLabel.L2)
def _test_delete_flush_during_compacting(self, connect, collection, args):
'''
"""
target: verify server work after restart during compaction
method: call function: create collection, then delete/flush/compacting, restart server and assert row count
call `compact` again, compact pass
expected: row count equals (nb - delete_length)
'''
"""
# disable_autoflush()
ids = connect.bulk_insert(collection, big_entities)
connect.flush([collection])
@ -240,11 +240,11 @@ class TestRestartBase:
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_during_flushing_multi_collections(self, connect, args):
'''
"""
target: flushing will recover
method: call function: create collections, then insert/flushing, restart server and assert row count
expected: row count equals 0
'''
"""
# disable_autoflush()
collection_num = 2
collection_list = []
@ -283,11 +283,11 @@ class TestRestartBase:
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_during_flushing_multi_partitions(self, connect, collection, args):
'''
"""
target: flushing will recover
method: call function: create collection/partition, then insert/flushing, restart server and assert row count
expected: row count equals 0
'''
"""
# disable_autoflush()
partitions_num = 2
partitions = []