diff --git a/tests20/python_client/base/collection_wrapper.py b/tests20/python_client/base/collection_wrapper.py
index 06965f119f16a581b87d115a96f60c916d16bdb6..607f90f591292fc02446695918a66eb1107c68b9 100644
--- a/tests20/python_client/base/collection_wrapper.py
+++ b/tests20/python_client/base/collection_wrapper.py
@@ -33,7 +33,7 @@ def func_req(_list, **kwargs):
         if len(_list) > 1:
             for a in _list[1:]:
                 arg.append(a)
-        log.debug("(func_req)[%s] Parameters ars arg: %s, kwargs: %s" % (str(func), str(arg), str(kwargs)))
+        # log.debug("(func_req)[%s] Parameters ars arg: %s, kwargs: %s" % (str(func), str(arg), str(kwargs)))
         return func(*arg, **kwargs)
     return False, False

@@ -119,7 +119,7 @@ class ApiCollectionWrapper:
     def insert(self, data, partition_name=None, check_res=None, check_params=None, **kwargs):
         func_name = sys._getframe().f_code.co_name
         res, check = func_req([self.collection.insert, data, partition_name], **kwargs)
-        check_result = CheckFunc(res, func_name, check_res, check_params, check, dat=data, partition_name=partition_name, **kwargs).run()
+        check_result = CheckFunc(res, func_name, check_res, check_params, check, data=data, partition_name=partition_name, **kwargs).run()
         return res, check_result

     def search(self, data, anns_field, param, limit, expression, partition_names=None, output_fields=None, timeout=None,
diff --git a/tests20/python_client/requirements.txt b/tests20/python_client/requirements.txt
index 63a4611f05869bc5129452d383b1ef4a1c87e42a..3a99b22b2f33128537bd7741c49d7c7c4b3b805e 100644
--- a/tests20/python_client/requirements.txt
+++ b/tests20/python_client/requirements.txt
@@ -6,4 +6,4 @@ pandas
 numpy
 pytest-html==3.1.1
 git+https://github.com/Projectplace/pytest-tags
-pymilvus-orm==2.0a1.dev8
+pymilvus-orm==2.0a1.dev17
diff --git a/tests20/python_client/testcases/test_collection.py b/tests20/python_client/testcases/test_collection.py
index 16758b8185391413e38204e44a222ceb2e4a9222..06fb444ff5e910717acaf0db3b8278bc921ff6e0 100644
--- a/tests20/python_client/testcases/test_collection.py
+++ b/tests20/python_client/testcases/test_collection.py
@@ -1,6 +1,6 @@
 import pandas as pd
 import pytest
-from milvus import DataType
+from pymilvus import DataType
 from pymilvus_orm import FieldSchema

 from base.client_request import ApiReq
@@ -46,9 +46,8 @@ class TestCollectionParams(ApiReq):

     @pytest.fixture(scope="function", params=ct.get_invalid_strs)
     def get_invalid_type_fields(self, request):
-        skip_param = []
-        if request.param == skip_param:
-            pytest.skip("skip []")
+        if isinstance(request.param, list):
+            pytest.skip("list is valid fields")
         yield request.param

     @pytest.fixture(scope="function", params=cf.gen_all_type_fields())
@@ -101,7 +100,6 @@ class TestCollectionParams(ApiReq):
         assert "invalid" or "illegal" in str(ex)

     @pytest.mark.tags(CaseLabel.L0)
-    @pytest.mark.xfail(reason="issue #5241 #5367")
     def test_collection_dup_name(self):
         """
         target: test collection with dup name
@@ -116,10 +114,9 @@ class TestCollectionParams(ApiReq):
         assert collection.name == dup_collection.name
         assert collection.schema == dup_collection.schema
         assert collection.num_entities == dup_collection.num_entities
-        assert collection.name in self.utility_wrap.list_collections()
+        assert collection.name in self.utility_wrap.list_collections()[0]

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5367")
     def test_collection_dup_name_with_desc(self):
         """
         target: test collection with dup name
@@ -204,7 +201,6 @@ class TestCollectionParams(ApiReq):
         assert_default_collection(collection, c_name)

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5241 #5367")
     def test_collection_dup_name_same_schema(self):
         """
         target: test collection with dup name and same schema
@@ -219,7 +215,6 @@ class TestCollectionParams(ApiReq):
         assert_default_collection(dup_collection, c_name)

     @pytest.mark.tags(CaseLabel.L1)
-    # @pytest.mark.xfail(reason="issue #5302")
     def test_collection_dup_name_none_schema_dataframe(self):
         """
         target: test collection with dup name and insert dataframe
@@ -238,20 +233,20 @@ class TestCollectionParams(ApiReq):
         assert collection.num_entities == nb

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5302")
     def test_collection_dup_name_none_schema_data_list(self):
         """
         target: test collection with dup name and insert data (list-like)
         method: create collection with dup name, none schema, data (list-like)
         expected: two collection object is correct
         """
-        self._connect()
+        conn = self._connect()
         nb = ct.default_nb
         collection = self._collection()
         c_name = collection.name
         assert_default_collection(collection)
         data = cf.gen_default_dataframe_data(nb)
         dup_collection, _ = self.collection_wrap.collection_init(c_name, schema=None, data=data)
+        conn.flush([c_name])
         assert_default_collection(dup_collection, c_name, exp_num=nb)
         assert collection.num_entities == nb

@@ -280,7 +275,6 @@ class TestCollectionParams(ApiReq):
         assert "schema type must be schema.CollectionSchema" in str(ex)

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5331")
     def test_collection_invalid_type_fields(self, get_invalid_type_fields):
         """
         target: test collection with invalid fields type, non-list
@@ -288,11 +282,9 @@ class TestCollectionParams(ApiReq):
         expected: exception
         """
         self._connect()
-        c_name = cf.gen_unique_str(prefix)
         fields = get_invalid_type_fields
-        schema = cf.gen_collection_schema(fields=fields)
-        ex, _ = self.collection_wrap.collection_init(c_name, schema=schema)
-        log.error(str(ex))
+        with pytest.raises(Exception, match="The fields of schema must be type list"):
+            cf.gen_collection_schema(fields=fields)

     @pytest.mark.tags(CaseLabel.L1)
     def test_collection_with_unknown_type(self):
@@ -302,7 +294,6 @@ class TestCollectionParams(ApiReq):
         expected: raise exception
         """
         self._connect()
-        c_name = cf.gen_unique_str(prefix)
         with pytest.raises(Exception, match="Field type not support <DataType.UNKNOWN: 999"):
             FieldSchema("unknown", DataType.UNKNOWN)

@@ -332,14 +323,13 @@ class TestCollectionParams(ApiReq):
         expected: raise exception
         """
         self._connect()
-        with pytest.raises(Exception, match="Field type not support"):
+        with pytest.raises(Exception, match="Field type must be of DataType"):
             FieldSchema(name="test", dtype=dtype)

-    @pytest.mark.xfail(reason="issue #5317")
-    def test_collection_float_dtype(self):
+    def test_collection_field_float_type(self):
         """
-        target: test collection with float dtype
-        method: create field with float dtype
+        target: test collection with float type
+        method: create field with float type
         expected:
         """
         self._connect()
@@ -392,11 +382,9 @@ class TestCollectionParams(ApiReq):
         c_name = cf.gen_unique_str(prefix)
         schema = cf.gen_collection_schema(fields=[field])
         collection, _ = self.collection_wrap.collection_init(c_name, schema=schema)
-        log.debug(collection)
         assert_default_collection(collection, c_name, exp_schema=schema)

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5345")
     def test_collection_multi_float_vectors(self):
         """
         target: test collection with multi float vectors
@@ -411,7 +399,6 @@ class TestCollectionParams(ApiReq):
         assert_default_collection(collection, c_name, exp_schema=schema)

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5345")
     def test_collection_mix_vectors(self):
         """
         target: test collection with mix vectors
@@ -452,17 +439,17 @@ class TestCollectionParams(ApiReq):
         assert collection.primary_field.name == ct.default_int64_field_name

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5345")
     def test_collection_unsupported_primary_field(self, get_unsupported_primary_field):
         """
-        target: test collection with unsupported parimary field type
+        target: test collection with unsupported primary field type
         method: specify non-int64 as primary field
         expected: raise exception
         """
         self._connect()
         c_name = cf.gen_unique_str(prefix)
         field = get_unsupported_primary_field
-        schema = cf.gen_collection_schema(fields=[field], primary_field=field.name)
+        vec_field = cf.gen_float_vec_field()
+        schema = cf.gen_collection_schema(fields=[field, vec_field], primary_field=field.name)
         ex, _ = self.collection_wrap.collection_init(c_name, schema=schema)
         assert "the data type of primary key should be int64" in str(ex)

@@ -513,7 +500,6 @@ class TestCollectionParams(ApiReq):
         assert collection.schema.auto_id

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5350")
     def test_collection_field_invalid_primary(self, get_invalid_string):
         """
         target: test collection with invalid primary
@@ -521,12 +507,9 @@ class TestCollectionParams(ApiReq):
         expected: raise exception
         """
         self._connect()
-        c_name = cf.gen_unique_str(prefix)
-        int_field = cf.gen_int64_field(name="int", is_primary=get_invalid_string)
-        float_vec_field = cf.gen_float_vec_field()
-        schema = cf.gen_collection_schema(fields=[int_field, float_vec_field])
-        ex, _ = self.collection_wrap.collection_init(c_name, schema=schema)
-        log.info(str(ex))
+        cf.gen_unique_str(prefix)
+        with pytest.raises(Exception, match="Param is_primary must be bool type"):
+            cf.gen_int64_field(name="int", is_primary=get_invalid_string)

     @pytest.mark.tags(CaseLabel.L0)
     @pytest.mark.parametrize("dtype", [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR])
@@ -616,33 +599,33 @@ class TestCollectionParams(ApiReq):
         assert_default_collection(collection, c_name, exp_schema=schema)

     @pytest.mark.tags(CaseLabel.L0)
-    @pytest.mark.xfail(reason="issue #5302")
     def test_collection_with_dataframe(self):
         """
         target: test collection with dataframe data
         method: create collection and insert with dataframe
         expected: collection num entities equal to nb
         """
-        self._connect()
+        conn = self._connect()
         nb = ct.default_nb
         c_name = cf.gen_unique_str(prefix)
         data = cf.gen_default_dataframe_data(nb)
         collection, _ = self.collection_wrap.collection_init(c_name, schema=default_schema, data=data)
+        conn.flush([c_name])
         assert_default_collection(collection, c_name, exp_num=nb)

     @pytest.mark.tags(CaseLabel.L0)
-    @pytest.mark.xfail(reason="issue #5302")
     def test_collection_with_data_list(self):
         """
         target: test collection with data (list-like)
         method: create collection with data (list-like)
         expected: collection num entities equal to nb
         """
-        self._connect()
+        conn = self._connect()
         nb = ct.default_nb
         c_name = cf.gen_unique_str(prefix)
         data = cf.gen_default_list_data(nb)
         collection, _ = self.collection_wrap.collection_init(c_name, schema=default_schema, data=data)
+        conn.flush([c_name])
         assert_default_collection(collection, c_name, exp_num=nb)

     @pytest.mark.tags(CaseLabel.L0)
@@ -666,11 +649,10 @@ class TestCollectionParams(ApiReq):
         method: create binary collection with dataframe
         expected: collection num entities equal to nb
         """
-        conn = self._connect()
+        self._connect()
         nb = ct.default_nb
         c_name = cf.gen_unique_str(prefix)
         data = cf.gen_default_binary_dataframe_data(3)
-        log.debug(data)
         collection, _ = self.collection_wrap.collection_init(c_name, schema=default_binary_schema, data=data)
         assert_default_collection(collection, c_name, exp_schema=default_binary_schema, exp_num=nb)

@@ -747,7 +729,6 @@ class TestCollectionOperation(ApiReq):
         assert c_name not in self.utility_wrap.list_collections()

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5367")
     def test_collection_dup_name_drop(self):
         """
         target: test collection with dup name, and drop
@@ -758,7 +739,6 @@ class TestCollectionOperation(ApiReq):
         self._connect()
         collection = self._collection()
         assert_default_collection(collection)
-        log.info(collection.schema)
         dup_collection, _ = self.collection_wrap.collection_init(collection.name)
         assert_default_collection(dup_collection, collection.name)
         dup_collection.drop()
@@ -768,23 +748,22 @@ class TestCollectionOperation(ApiReq):
             collection.num_entities

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5302")
     def test_collection_created_by_dataframe(self):
         """
         target: test collection with dataframe
         method: create collection with dataframe
         expected: create successfully
         """
-        self._connect()
+        conn = self._connect()
         nb = ct.default_nb
         c_name = cf.gen_unique_str(prefix)
         df = cf.gen_default_dataframe_data(nb)
         schema = cf.gen_default_collection_schema()
         collection, _ = self.collection_wrap.collection_init(name=c_name, data=df)
+        conn.flush([c_name])
         assert_default_collection(collection, exp_name=c_name, exp_num=nb, exp_schema=schema)

     @pytest.mark.tags(CaseLabel.L0)
-    @pytest.mark.xfail(reason="issue #5404")
     def test_collection_created_by_empty_dataframe(self):
         """
         target: test create collection by empty dataframe
@@ -795,7 +774,7 @@ class TestCollectionOperation(ApiReq):
         c_name = cf.gen_unique_str(prefix)
         data = pd.DataFrame()
         ex, _ = self.collection_wrap.collection_init(name=c_name, schema=None, data=data)
-        # TODO assert
+        assert "The field of the schema cannot be empty" in str(ex)

     @pytest.mark.tags(CaseLabel.L1)
     def test_collection_created_by_invalid_dataframe(self, get_invalid_df):
@@ -813,7 +792,6 @@ class TestCollectionOperation(ApiReq):
         assert message_one or message_two or message_three in str(ex)

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5405")
     def test_collection_created_by_inconsistent_dataframe(self):
         """
         target: test collection with data inconsistent
@@ -826,8 +804,7 @@ class TestCollectionOperation(ApiReq):
         mix_data = [(1, 2., [0.1, 0.2]), (2, 3., 4)]
         df = pd.DataFrame(data=mix_data, columns=list("ABC"))
         ex, _ = self.collection_wrap.collection_init(name=c_name, schema=None, data=df)
-        log.info(str(ex))
-        # TODO assert
+        assert "The data in the same column must be of the same type" in str(ex)

     @pytest.mark.tags(CaseLabel.L0)
     def test_collection_created_by_non_dataframe(self, get_non_df):
@@ -898,4 +875,3 @@ class TestCollectionOperation(ApiReq):
         data = cf.gen_default_binary_list_data(nb=100)
         ex, _ = self.collection_wrap.collection_init(name=c_name, schema=None, data=data)
         assert "Data of not pandas.DataFrame type should bepassed into the schema" in str(ex)
-
diff --git a/tests20/python_client/testcases/test_insert.py b/tests20/python_client/testcases/test_insert.py
index 3df511d529df3bb129b27cfb66fd93bdd688fd15..112b7183d6f5152e567d20d43e45173a4eb2a29c 100644
--- a/tests20/python_client/testcases/test_insert.py
+++ b/tests20/python_client/testcases/test_insert.py
@@ -18,6 +18,13 @@ default_binary_schema = cf.gen_default_binary_collection_schema()
 class TestInsertParams(ApiReq):
     """ Test case of Insert interface """

+    def teardown_method(self):
+        if self.collection_wrap is not None and self.collection_wrap.collection is not None:
+            self.collection_wrap.drop()
+
+    def setup_method(self):
+        pass
+
     @pytest.fixture(scope="function", params=ct.get_invalid_strs)
     def get_non_data_type(self, request):
         if isinstance(request.param, list):
@@ -46,19 +53,18 @@ class TestInsertParams(ApiReq):
         assert collection.num_entities == nb

     @pytest.mark.tags(CaseLabel.L0)
-    @pytest.mark.xfail(reason="issue #5470")
     def test_insert_list_data(self):
         """
         target: test insert list-like data
         method: 1.create 2.insert list data
         expected: assert num entities
         """
-        self._connect()
+        conn = self._connect()
         nb = ct.default_nb
         collection = self._collection()
         data = cf.gen_default_list_data(nb)
         self.collection_wrap.insert(data=data)
-        self.connection_wrap.connection.get_connection().flush([collection.name])
+        conn.flush([collection.name])
         assert collection.num_entities == nb

     @pytest.mark.tags(CaseLabel.L1)
@@ -143,7 +149,6 @@ class TestInsertParams(ApiReq):
         pass

     @pytest.mark.tags(CaseLabel.L0)
-    @pytest.mark.xfail(reason="issue #5445")
     def test_insert_none(self):
         """
         target: test insert None
@@ -151,11 +156,10 @@ class TestInsertParams(ApiReq):
         expected: raise exception
         """
         self._collection()
-        ex, _ = self.collection_wrap.insert(data=None)
-        log.info(str(ex))
+        ids, _ = self.collection_wrap.insert(data=None)
+        assert len(ids) == 0

     @pytest.mark.tags(CaseLabel.L0)
-    @pytest.mark.xfail(reason="issue #5421")
     def test_insert_numpy_data(self):
         """
         target: test insert numpy.ndarray data
@@ -167,7 +171,7 @@ class TestInsertParams(ApiReq):
         self._collection()
         data = cf.gen_numpy_data(nb)
         ex, _ = self.collection_wrap.insert(data=data)
-        log.error(str(ex))
+        assert "Data type not support numpy.ndarray" in str(ex)

     @pytest.mark.tags(CaseLabel.L1)
     @pytest.mark.xfail(reason="issue #5302")
@@ -200,7 +204,6 @@ class TestInsertParams(ApiReq):
         assert collection.num_entities == nb

     @pytest.mark.tags(CaseLabel.L0)
-    @pytest.mark.xfail(reason="issue #5470")
     def test_insert_single(self):
         """
         target: test insert single
@@ -275,7 +278,6 @@ class TestInsertParams(ApiReq):
         assert "The types of schema and data do not match" in str(ex)

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5505")
     def test_insert_value_less(self):
         """
         target: test insert value less than other
@@ -284,15 +286,14 @@ class TestInsertParams(ApiReq):
         """
         self._collection()
         nb = 10
-        int_values = [i for i in range(nb-1)]
+        int_values = [i for i in range(nb - 1)]
         float_values = [np.float32(i) for i in range(nb)]
         float_vec_values = cf.gen_vectors(nb, ct.default_dim)
         data = [int_values, float_values, float_vec_values]
-        ids, _ = self.collection_wrap.insert(data=data)
-        log.info(ids)
+        ex, _ = self.collection_wrap.insert(data=data)
+        assert "message=arrays must all be same length" in str(ex)

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5508")
     def test_insert_vector_value_less(self):
         """
         target: test insert vector value less than other
@@ -303,10 +304,10 @@ class TestInsertParams(ApiReq):
         nb = 10
         int_values = [i for i in range(nb)]
         float_values = [np.float32(i) for i in range(nb)]
-        float_vec_values = cf.gen_vectors(nb-1, ct.default_dim)
+        float_vec_values = cf.gen_vectors(nb - 1, ct.default_dim)
         data = [int_values, float_values, float_vec_values]
         ex, _ = self.collection_wrap.insert(data=data)
-        log.info(str(ex))
+        assert "arrays must all be same length" in str(ex)

     @pytest.mark.tags(CaseLabel.L1)
     def test_insert_fields_more(self):
@@ -505,7 +506,6 @@ class TestInsertOperation(ApiReq):
         pass

     @pytest.mark.tags(CaseLabel.L1)
-    @pytest.mark.xfail(reason="issue #5470")
     def test_insert_multi_times(self):
         """
         target: test insert multi times
@@ -517,8 +517,7 @@ class TestInsertOperation(ApiReq):
         for _ in range(ct.default_nb):
             df = cf.gen_default_dataframe_data(1)
             self.collection_wrap.insert(data=df)
-        self.connection_wrap.connection.get_connection().flush([collection.name])
-        # conn.flush([collection.name])
+        conn.flush([collection.name])
         assert collection.num_entities == ct.default_nb


diff --git a/tests20/python_client/utils/util_log.py b/tests20/python_client/utils/util_log.py
index 702cb059e4ac3726bdc761fa7afc1a5dabedc91b..fe6107da7f59af18a2bdb2a4ee9566745c833daf 100644
--- a/tests20/python_client/utils/util_log.py
+++ b/tests20/python_client/utils/util_log.py
@@ -31,6 +31,11 @@ class TestLog:
             eh.setFormatter(formatter)
             self.log.addHandler(eh)

+            ch = logging.StreamHandler(sys.stdout)
+            ch.setLevel(logging.DEBUG)
+            ch.setFormatter(formatter)
+            self.log.addHandler(ch)
+
         except Exception as e:
             print("Can not use %s or %s to log." % (log_file, log_err))