From 84110d2684f794a9ec76c2567f696c382b1723a3 Mon Sep 17 00:00:00 2001
From: Cai Yudong <yudong.cai@zilliz.com>
Date: Thu, 25 Feb 2021 17:35:36 +0800
Subject: [PATCH] Add tests/benchmark and tests/python_test using new python
 SDK

Signed-off-by: Cai Yudong <yudong.cai@zilliz.com>
---
 internal/datanode/data_sync_service.go        |   12 +-
 internal/datanode/flow_graph_dd_node.go       |    6 +-
 internal/datanode/flow_graph_dd_node_test.go  |    2 +-
 .../datanode/flow_graph_filter_dm_node.go     |    9 +-
 internal/datanode/flow_graph_gc_node.go       |    7 +-
 .../datanode/flow_graph_insert_buffer_node.go |    6 +-
 .../flow_graph_insert_buffer_node_test.go     |    2 +-
 internal/dataservice/server.go                |    6 +-
 internal/distributed/indexservice/service.go  |   30 +-
 internal/distributed/masterservice/server.go  |    2 +-
 internal/indexnode/indexnode.go               |   26 +-
 internal/msgstream/msgstream.go               |    2 +-
 internal/msgstream/pulsarms/msg_test.go       |    2 +-
 .../msgstream/pulsarms/pulsar_msgstream.go    |   67 +-
 .../pulsarms/pulsar_msgstream_test.go         |    6 +-
 internal/msgstream/rmqms/rmq_msgstream.go     |    8 +-
 .../msgstream/rmqms/rmq_msgstream_test.go     |    2 +-
 internal/proxyservice/timetick.go             |    7 +-
 internal/querynode/data_sync_service.go       |   14 +-
 internal/querynode/flow_graph_dd_node.go      |    9 +-
 .../querynode/flow_graph_filter_dm_node.go    |    9 +-
 internal/querynode/flow_graph_gc_node.go      |    7 +-
 internal/querynode/flow_graph_insert_node.go  |    6 +-
 .../querynode/flow_graph_service_time_node.go |    6 +-
 internal/querynode/query_node.go              |   35 +-
 internal/querynode/search_service.go          |    2 +-
 internal/timesync/timesync.go                 |    4 +-
 internal/util/flowgraph/flow_graph.go         |   14 +-
 internal/util/flowgraph/flow_graph_test.go    |   46 +-
 internal/util/flowgraph/input_node.go         |   16 +-
 internal/util/flowgraph/node.go               |   84 +-
 internal/util/trace/util.go                   |   35 +-
 tests/benchmark/README.md                     |   39 +
 tests/benchmark/__init__.py                   |    0
 tests/benchmark/assets/Parameters.png         |  Bin 0 -> 50835 bytes
 .../gpu_search_performance_random50m-yaml.png |  Bin 0 -> 66119 bytes
 ...milvus-nightly-performance-new-jenkins.png |  Bin 0 -> 45034 bytes
 .../ci/function/file_transfer.groovy          |   10 +
 tests/benchmark/ci/jenkinsfile/cleanup.groovy |   13 +
 .../ci/jenkinsfile/cleanupShards.groovy       |   13 +
 .../ci/jenkinsfile/deploy_shards_test.groovy  |   21 +
 .../ci/jenkinsfile/deploy_test.groovy         |   19 +
 tests/benchmark/ci/jenkinsfile/notify.groovy  |   15 +
 .../ci/jenkinsfile/publishDailyImages.groovy  |   46 +
 tests/benchmark/ci/main_jenkinsfile           |  148 ++
 .../pod_containers/milvus-testframework.yaml  |   13 +
 tests/benchmark/ci/publish_jenkinsfile        |  104 +
 tests/benchmark/ci/scripts/yaml_processor.py  |  536 +++++
 tests/benchmark/client.py                     |  460 +++++
 tests/benchmark/docker_runner.py              |  366 ++++
 tests/benchmark/docker_utils.py               |  126 ++
 tests/benchmark/executors/__init__.py         |    3 +
 tests/benchmark/executors/shell.py            |    4 +
 tests/benchmark/handlers/__init__.py          |    0
 tests/benchmark/helm_utils.py                 |  370 ++++
 tests/benchmark/k8s_runner.py                 |  927 +++++++++
 tests/benchmark/local_runner.py               |  732 +++++++
 tests/benchmark/locust_file.py                |   30 +
 tests/benchmark/locust_flush_task.py          |   33 +
 tests/benchmark/locust_get_entity_task.py     |   36 +
 tests/benchmark/locust_insert_task.py         |   33 +
 tests/benchmark/locust_search_task.py         |   46 +
 tests/benchmark/locust_task.py                |   37 +
 tests/benchmark/locust_tasks.py               |   45 +
 tests/benchmark/locust_test.py                |   18 +
 tests/benchmark/locust_user.py                |   70 +
 tests/benchmark/main.py                       |  199 ++
 tests/benchmark/mix_task.py                   |   42 +
 tests/benchmark/operation.py                  |   10 +
 tests/benchmark/parser.py                     |   85 +
 tests/benchmark/requirements.txt              |   12 +
 tests/benchmark/results/__init__.py           |   11 +
 tests/benchmark/results/reporter.py           |    0
 tests/benchmark/runner.py                     |  369 ++++
 tests/benchmark/runners/__init__.py           |   11 +
 tests/benchmark/runners/locust_runner.py      |   75 +
 tests/benchmark/scheduler/010_data.json       |   65 +
 tests/benchmark/scheduler/011_data.json       |   62 +
 .../scheduler/011_data_acc_debug.json         |   11 +
 .../scheduler/011_data_gpu_build.json         |   11 +
 .../benchmark/scheduler/011_data_insert.json  |   11 +
 .../scheduler/011_data_search_debug.json      |   11 +
 tests/benchmark/scheduler/011_delete.json     |   15 +
 tests/benchmark/scheduler/080_data.json       |   65 +
 tests/benchmark/scheduler/acc.json            |   15 +
 tests/benchmark/scheduler/build.json          |   11 +
 tests/benchmark/scheduler/clean.json          |   11 +
 tests/benchmark/scheduler/debug.json          |   11 +
 tests/benchmark/scheduler/default_config.json |   53 +
 tests/benchmark/scheduler/file_size.json      |   11 +
 tests/benchmark/scheduler/filter.json         |   11 +
 tests/benchmark/scheduler/idc.json            |   11 +
 tests/benchmark/scheduler/insert.json         |   11 +
 tests/benchmark/scheduler/jaccard.json        |   11 +
 tests/benchmark/scheduler/locust.json         |   11 +
 .../benchmark/scheduler/locust_mix_debug.json |   10 +
 tests/benchmark/scheduler/loop.json           |   10 +
 tests/benchmark/scheduler/search.json         |   11 +
 tests/benchmark/scheduler/shards.json         |   18 +
 tests/benchmark/scheduler/shards_ann.json     |   10 +
 tests/benchmark/scheduler/shards_debug.json   |   15 +
 .../benchmark/scheduler/shards_stability.json |   10 +
 tests/benchmark/scheduler/stability.json      |   11 +
 tests/benchmark/search_task.py                |   50 +
 .../suites/011_add_flush_performance.yaml     |   20 +
 .../suites/011_cluster_cpu_accuracy_ann.yaml  |  336 ++++
 tests/benchmark/suites/011_cpu_accuracy.yaml  |   55 +
 .../suites/011_cpu_accuracy_ann.yaml          |  260 +++
 tests/benchmark/suites/011_cpu_build.yaml     |   40 +
 .../suites/011_cpu_build_binary.yaml          |   11 +
 tests/benchmark/suites/011_cpu_search.yaml    |  255 +++
 .../suites/011_cpu_search_binary.yaml         |   49 +
 .../suites/011_cpu_search_debug.yaml          |   26 +
 .../suites/011_cpu_search_sift10m_filter.yaml |   97 +
 .../suites/011_cpu_search_sift50m.yaml        |   98 +
 tests/benchmark/suites/011_gpu_accuracy.yaml  |   61 +
 .../suites/011_gpu_accuracy_ann.yaml          |  165 ++
 tests/benchmark/suites/011_gpu_build.yaml     |   21 +
 tests/benchmark/suites/011_gpu_search.yaml    |  251 +++
 .../suites/011_gpu_search_sift10m_filter.yaml |  122 ++
 .../suites/011_gpu_search_sift50m.yaml        |  121 ++
 tests/benchmark/suites/011_gpu_stability.yaml |   39 +
 tests/benchmark/suites/011_insert_data.yaml   |   57 +
 .../suites/011_insert_performance.yaml        |  113 ++
 tests/benchmark/suites/011_locust_insert.yaml |   33 +
 tests/benchmark/suites/011_locust_search.yaml |   43 +
 .../suites/011_search_stability.yaml          |   20 +
 .../benchmark/suites/cluster_locust_mix.yaml  |   47 +
 tests/benchmark/suites/cpu_accuracy.yaml      |   61 +
 tests/benchmark/suites/cpu_accuracy_ann.yaml  |  212 ++
 .../suites/cpu_build_performance.yaml         |   19 +
 tests/benchmark/suites/cpu_search_binary.yaml |   67 +
 .../cpu_search_performance_jaccard.yaml       |   20 +
 .../cpu_search_performance_sift50m.yaml       |   20 +
 tests/benchmark/suites/gpu_accuracy.yaml      |   41 +
 tests/benchmark/suites/gpu_accuracy_ann.yaml  |  172 ++
 .../suites/gpu_accuracy_ann_debug.yaml        |   24 +
 .../benchmark/suites/gpu_accuracy_sift1b.yaml |   59 +
 .../gpu_build_performance_jaccard50m.yaml     |   20 +
 .../suites/gpu_search_performance.yaml        |  247 +++
 .../gpu_search_performance_jaccard50m.yaml    |   22 +
 .../gpu_search_performance_sift50m.yaml       |  146 ++
 .../suites/gpu_search_stability.yaml          |   23 +
 .../suites/gpu_stability_sift50m.yaml         |   27 +
 tests/benchmark/suites/insert_binary.yaml     |   39 +
 .../suites/insert_performance_deep1b.yaml     |   87 +
 .../suites/locust_cluster_search.yaml         |   45 +
 tests/benchmark/suites/locust_insert.yaml     |   23 +
 tests/benchmark/suites/locust_mix.yaml        |   47 +
 tests/benchmark/suites/locust_search.yaml     |   49 +
 tests/benchmark/suites/loop_stability.yaml    |   17 +
 tests/benchmark/suites/shards_ann_debug.yaml  |   25 +
 .../suites/shards_insert_performance.yaml     |   17 +
 .../shards_insert_performance_sift1m.yaml     |   19 +
 .../suites/shards_loop_stability.yaml         |   16 +
 .../shards_search_performance_sift1m.yaml     |   12 +
 tests/benchmark/task/task.py                  |    0
 tests/benchmark/test_loop.py                  |   52 +
 tests/benchmark/utils.py                      |  259 +++
 tests/python_test/.dockerignore               |   14 +
 tests/python_test/.gitignore                  |   13 +
 tests/python_test/Dockerfile                  |   15 +
 tests/python_test/README.md                   |   62 +
 .../collection/test_collection_count.py       |  575 ++++++
 .../collection/test_collection_logic.py       |  138 ++
 .../collection/test_collection_stats.py       |  356 ++++
 .../collection/test_create_collection.py      |  290 +++
 .../collection/test_describe_collection.py    |  187 ++
 .../collection/test_drop_collection.py        |  103 +
 .../collection/test_has_collection.py         |  101 +
 .../collection/test_list_collections.py       |   94 +
 .../collection/test_load_collection.py        |  573 ++++++
 tests/python_test/conftest.py                 |  194 ++
 tests/python_test/constants.py                |   22 +
 tests/python_test/docker-entrypoint.sh        |    9 +
 tests/python_test/entity/test_delete.py       |  473 +++++
 .../entity/test_get_entity_by_id.py           |  666 ++++++
 tests/python_test/entity/test_insert.py       | 1090 ++++++++++
 .../entity/test_list_id_in_segment.py         |  318 +++
 tests/python_test/entity/test_search.py       | 1782 +++++++++++++++++
 tests/python_test/pytest.ini                  |   14 +
 tests/python_test/requirements.txt            |   12 +
 tests/python_test/requirements_cluster.txt    |   25 +
 tests/python_test/run.sh                      |    4 +
 tests/python_test/stability/test_mysql.py     |   43 +
 tests/python_test/stability/test_restart.py   |  315 +++
 tests/python_test/test_compact.py             |  722 +++++++
 tests/python_test/test_config.py              | 1402 +++++++++++++
 tests/python_test/test_connect.py             |  232 +++
 tests/python_test/test_flush.py               |  353 ++++
 tests/python_test/test_index.py               |  832 ++++++++
 tests/python_test/test_mix.py                 |  161 ++
 tests/python_test/test_partition.py           |  425 ++++
 tests/python_test/test_ping.py                |  129 ++
 tests/python_test/utils.py                    | 1001 +++++++++
 195 files changed, 22965 insertions(+), 186 deletions(-)
 create mode 100644 tests/benchmark/README.md
 create mode 100644 tests/benchmark/__init__.py
 create mode 100644 tests/benchmark/assets/Parameters.png
 create mode 100644 tests/benchmark/assets/gpu_search_performance_random50m-yaml.png
 create mode 100644 tests/benchmark/assets/milvus-nightly-performance-new-jenkins.png
 create mode 100644 tests/benchmark/ci/function/file_transfer.groovy
 create mode 100644 tests/benchmark/ci/jenkinsfile/cleanup.groovy
 create mode 100644 tests/benchmark/ci/jenkinsfile/cleanupShards.groovy
 create mode 100644 tests/benchmark/ci/jenkinsfile/deploy_shards_test.groovy
 create mode 100644 tests/benchmark/ci/jenkinsfile/deploy_test.groovy
 create mode 100644 tests/benchmark/ci/jenkinsfile/notify.groovy
 create mode 100644 tests/benchmark/ci/jenkinsfile/publishDailyImages.groovy
 create mode 100644 tests/benchmark/ci/main_jenkinsfile
 create mode 100644 tests/benchmark/ci/pod_containers/milvus-testframework.yaml
 create mode 100644 tests/benchmark/ci/publish_jenkinsfile
 create mode 100755 tests/benchmark/ci/scripts/yaml_processor.py
 create mode 100644 tests/benchmark/client.py
 create mode 100644 tests/benchmark/docker_runner.py
 create mode 100644 tests/benchmark/docker_utils.py
 create mode 100644 tests/benchmark/executors/__init__.py
 create mode 100644 tests/benchmark/executors/shell.py
 create mode 100644 tests/benchmark/handlers/__init__.py
 create mode 100644 tests/benchmark/helm_utils.py
 create mode 100644 tests/benchmark/k8s_runner.py
 create mode 100644 tests/benchmark/local_runner.py
 create mode 100644 tests/benchmark/locust_file.py
 create mode 100644 tests/benchmark/locust_flush_task.py
 create mode 100644 tests/benchmark/locust_get_entity_task.py
 create mode 100644 tests/benchmark/locust_insert_task.py
 create mode 100644 tests/benchmark/locust_search_task.py
 create mode 100644 tests/benchmark/locust_task.py
 create mode 100644 tests/benchmark/locust_tasks.py
 create mode 100644 tests/benchmark/locust_test.py
 create mode 100644 tests/benchmark/locust_user.py
 create mode 100644 tests/benchmark/main.py
 create mode 100644 tests/benchmark/mix_task.py
 create mode 100644 tests/benchmark/operation.py
 create mode 100644 tests/benchmark/parser.py
 create mode 100644 tests/benchmark/requirements.txt
 create mode 100644 tests/benchmark/results/__init__.py
 create mode 100644 tests/benchmark/results/reporter.py
 create mode 100644 tests/benchmark/runner.py
 create mode 100644 tests/benchmark/runners/__init__.py
 create mode 100644 tests/benchmark/runners/locust_runner.py
 create mode 100644 tests/benchmark/scheduler/010_data.json
 create mode 100644 tests/benchmark/scheduler/011_data.json
 create mode 100644 tests/benchmark/scheduler/011_data_acc_debug.json
 create mode 100644 tests/benchmark/scheduler/011_data_gpu_build.json
 create mode 100644 tests/benchmark/scheduler/011_data_insert.json
 create mode 100644 tests/benchmark/scheduler/011_data_search_debug.json
 create mode 100644 tests/benchmark/scheduler/011_delete.json
 create mode 100644 tests/benchmark/scheduler/080_data.json
 create mode 100644 tests/benchmark/scheduler/acc.json
 create mode 100644 tests/benchmark/scheduler/build.json
 create mode 100644 tests/benchmark/scheduler/clean.json
 create mode 100644 tests/benchmark/scheduler/debug.json
 create mode 100644 tests/benchmark/scheduler/default_config.json
 create mode 100644 tests/benchmark/scheduler/file_size.json
 create mode 100644 tests/benchmark/scheduler/filter.json
 create mode 100644 tests/benchmark/scheduler/idc.json
 create mode 100644 tests/benchmark/scheduler/insert.json
 create mode 100644 tests/benchmark/scheduler/jaccard.json
 create mode 100644 tests/benchmark/scheduler/locust.json
 create mode 100644 tests/benchmark/scheduler/locust_mix_debug.json
 create mode 100644 tests/benchmark/scheduler/loop.json
 create mode 100644 tests/benchmark/scheduler/search.json
 create mode 100644 tests/benchmark/scheduler/shards.json
 create mode 100644 tests/benchmark/scheduler/shards_ann.json
 create mode 100644 tests/benchmark/scheduler/shards_debug.json
 create mode 100644 tests/benchmark/scheduler/shards_stability.json
 create mode 100644 tests/benchmark/scheduler/stability.json
 create mode 100644 tests/benchmark/search_task.py
 create mode 100644 tests/benchmark/suites/011_add_flush_performance.yaml
 create mode 100644 tests/benchmark/suites/011_cluster_cpu_accuracy_ann.yaml
 create mode 100644 tests/benchmark/suites/011_cpu_accuracy.yaml
 create mode 100644 tests/benchmark/suites/011_cpu_accuracy_ann.yaml
 create mode 100644 tests/benchmark/suites/011_cpu_build.yaml
 create mode 100644 tests/benchmark/suites/011_cpu_build_binary.yaml
 create mode 100644 tests/benchmark/suites/011_cpu_search.yaml
 create mode 100644 tests/benchmark/suites/011_cpu_search_binary.yaml
 create mode 100644 tests/benchmark/suites/011_cpu_search_debug.yaml
 create mode 100644 tests/benchmark/suites/011_cpu_search_sift10m_filter.yaml
 create mode 100644 tests/benchmark/suites/011_cpu_search_sift50m.yaml
 create mode 100644 tests/benchmark/suites/011_gpu_accuracy.yaml
 create mode 100644 tests/benchmark/suites/011_gpu_accuracy_ann.yaml
 create mode 100644 tests/benchmark/suites/011_gpu_build.yaml
 create mode 100644 tests/benchmark/suites/011_gpu_search.yaml
 create mode 100644 tests/benchmark/suites/011_gpu_search_sift10m_filter.yaml
 create mode 100644 tests/benchmark/suites/011_gpu_search_sift50m.yaml
 create mode 100644 tests/benchmark/suites/011_gpu_stability.yaml
 create mode 100644 tests/benchmark/suites/011_insert_data.yaml
 create mode 100644 tests/benchmark/suites/011_insert_performance.yaml
 create mode 100644 tests/benchmark/suites/011_locust_insert.yaml
 create mode 100644 tests/benchmark/suites/011_locust_search.yaml
 create mode 100644 tests/benchmark/suites/011_search_stability.yaml
 create mode 100644 tests/benchmark/suites/cluster_locust_mix.yaml
 create mode 100644 tests/benchmark/suites/cpu_accuracy.yaml
 create mode 100644 tests/benchmark/suites/cpu_accuracy_ann.yaml
 create mode 100644 tests/benchmark/suites/cpu_build_performance.yaml
 create mode 100644 tests/benchmark/suites/cpu_search_binary.yaml
 create mode 100644 tests/benchmark/suites/cpu_search_performance_jaccard.yaml
 create mode 100644 tests/benchmark/suites/cpu_search_performance_sift50m.yaml
 create mode 100644 tests/benchmark/suites/gpu_accuracy.yaml
 create mode 100644 tests/benchmark/suites/gpu_accuracy_ann.yaml
 create mode 100644 tests/benchmark/suites/gpu_accuracy_ann_debug.yaml
 create mode 100644 tests/benchmark/suites/gpu_accuracy_sift1b.yaml
 create mode 100644 tests/benchmark/suites/gpu_build_performance_jaccard50m.yaml
 create mode 100644 tests/benchmark/suites/gpu_search_performance.yaml
 create mode 100644 tests/benchmark/suites/gpu_search_performance_jaccard50m.yaml
 create mode 100644 tests/benchmark/suites/gpu_search_performance_sift50m.yaml
 create mode 100644 tests/benchmark/suites/gpu_search_stability.yaml
 create mode 100644 tests/benchmark/suites/gpu_stability_sift50m.yaml
 create mode 100644 tests/benchmark/suites/insert_binary.yaml
 create mode 100644 tests/benchmark/suites/insert_performance_deep1b.yaml
 create mode 100644 tests/benchmark/suites/locust_cluster_search.yaml
 create mode 100644 tests/benchmark/suites/locust_insert.yaml
 create mode 100644 tests/benchmark/suites/locust_mix.yaml
 create mode 100644 tests/benchmark/suites/locust_search.yaml
 create mode 100644 tests/benchmark/suites/loop_stability.yaml
 create mode 100644 tests/benchmark/suites/shards_ann_debug.yaml
 create mode 100644 tests/benchmark/suites/shards_insert_performance.yaml
 create mode 100644 tests/benchmark/suites/shards_insert_performance_sift1m.yaml
 create mode 100644 tests/benchmark/suites/shards_loop_stability.yaml
 create mode 100644 tests/benchmark/suites/shards_search_performance_sift1m.yaml
 create mode 100644 tests/benchmark/task/task.py
 create mode 100644 tests/benchmark/test_loop.py
 create mode 100644 tests/benchmark/utils.py
 create mode 100644 tests/python_test/.dockerignore
 create mode 100644 tests/python_test/.gitignore
 create mode 100644 tests/python_test/Dockerfile
 create mode 100644 tests/python_test/README.md
 create mode 100644 tests/python_test/collection/test_collection_count.py
 create mode 100644 tests/python_test/collection/test_collection_logic.py
 create mode 100644 tests/python_test/collection/test_collection_stats.py
 create mode 100644 tests/python_test/collection/test_create_collection.py
 create mode 100644 tests/python_test/collection/test_describe_collection.py
 create mode 100644 tests/python_test/collection/test_drop_collection.py
 create mode 100644 tests/python_test/collection/test_has_collection.py
 create mode 100644 tests/python_test/collection/test_list_collections.py
 create mode 100644 tests/python_test/collection/test_load_collection.py
 create mode 100644 tests/python_test/conftest.py
 create mode 100644 tests/python_test/constants.py
 create mode 100755 tests/python_test/docker-entrypoint.sh
 create mode 100644 tests/python_test/entity/test_delete.py
 create mode 100644 tests/python_test/entity/test_get_entity_by_id.py
 create mode 100644 tests/python_test/entity/test_insert.py
 create mode 100644 tests/python_test/entity/test_list_id_in_segment.py
 create mode 100644 tests/python_test/entity/test_search.py
 create mode 100644 tests/python_test/pytest.ini
 create mode 100644 tests/python_test/requirements.txt
 create mode 100644 tests/python_test/requirements_cluster.txt
 create mode 100644 tests/python_test/run.sh
 create mode 100644 tests/python_test/stability/test_mysql.py
 create mode 100644 tests/python_test/stability/test_restart.py
 create mode 100644 tests/python_test/test_compact.py
 create mode 100644 tests/python_test/test_config.py
 create mode 100644 tests/python_test/test_connect.py
 create mode 100644 tests/python_test/test_flush.py
 create mode 100644 tests/python_test/test_index.py
 create mode 100644 tests/python_test/test_mix.py
 create mode 100644 tests/python_test/test_partition.py
 create mode 100644 tests/python_test/test_ping.py
 create mode 100644 tests/python_test/utils.py

diff --git a/internal/datanode/data_sync_service.go b/internal/datanode/data_sync_service.go
index d883b3003..3ca482064 100644
--- a/internal/datanode/data_sync_service.go
+++ b/internal/datanode/data_sync_service.go
@@ -85,14 +85,14 @@ func (dsService *dataSyncService) initNodes() {
 	var insertBufferNode Node = newInsertBufferNode(dsService.ctx, mt, dsService.replica, dsService.idAllocator, dsService.msFactory)
 	var gcNode Node = newGCNode(dsService.replica)
 
-	dsService.fg.AddNode(&dmStreamNode)
-	dsService.fg.AddNode(&ddStreamNode)
+	dsService.fg.AddNode(dmStreamNode)
+	dsService.fg.AddNode(ddStreamNode)
 
-	dsService.fg.AddNode(&filterDmNode)
-	dsService.fg.AddNode(&ddNode)
+	dsService.fg.AddNode(filterDmNode)
+	dsService.fg.AddNode(ddNode)
 
-	dsService.fg.AddNode(&insertBufferNode)
-	dsService.fg.AddNode(&gcNode)
+	dsService.fg.AddNode(insertBufferNode)
+	dsService.fg.AddNode(gcNode)
 
 	// dmStreamNode
 	err = dsService.fg.SetEdges(dmStreamNode.Name(),
diff --git a/internal/datanode/flow_graph_dd_node.go b/internal/datanode/flow_graph_dd_node.go
index 00afd3df9..0135ee4ba 100644
--- a/internal/datanode/flow_graph_dd_node.go
+++ b/internal/datanode/flow_graph_dd_node.go
@@ -66,7 +66,7 @@ func (ddNode *ddNode) Name() string {
 	return "ddNode"
 }
 
-func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
+func (ddNode *ddNode) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
 	//fmt.Println("Do filterDdNode operation")
 
 	if len(in) != 1 {
@@ -74,7 +74,7 @@ func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
 		// TODO: add error handling
 	}
 
-	msMsg, ok := (*in[0]).(*MsgStreamMsg)
+	msMsg, ok := in[0].(*MsgStreamMsg)
 	if !ok {
 		log.Println("type assertion failed for MsgStreamMsg")
 		// TODO: add error handling
@@ -141,7 +141,7 @@ func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
 	}
 
 	var res Msg = ddNode.ddMsg
-	return []*Msg{&res}
+	return []Msg{res}, ctx
 }
 
 func (ddNode *ddNode) flush() {
diff --git a/internal/datanode/flow_graph_dd_node_test.go b/internal/datanode/flow_graph_dd_node_test.go
index 46b2468b1..8fdac3ad6 100644
--- a/internal/datanode/flow_graph_dd_node_test.go
+++ b/internal/datanode/flow_graph_dd_node_test.go
@@ -154,5 +154,5 @@ func TestFlowGraphDDNode_Operate(t *testing.T) {
 	tsMessages = append(tsMessages, msgstream.TsMsg(&dropPartitionMsg))
 	msgStream := flowgraph.GenerateMsgStreamMsg(tsMessages, Timestamp(0), Timestamp(3), make([]*internalpb2.MsgPosition, 0))
 	var inMsg Msg = msgStream
-	ddNode.Operate([]*Msg{&inMsg})
+	ddNode.Operate(ctx, []Msg{inMsg})
 }
diff --git a/internal/datanode/flow_graph_filter_dm_node.go b/internal/datanode/flow_graph_filter_dm_node.go
index c5171a6be..752c91672 100644
--- a/internal/datanode/flow_graph_filter_dm_node.go
+++ b/internal/datanode/flow_graph_filter_dm_node.go
@@ -1,6 +1,7 @@
 package datanode
 
 import (
+	"context"
 	"log"
 	"math"
 
@@ -18,7 +19,7 @@ func (fdmNode *filterDmNode) Name() string {
 	return "fdmNode"
 }
 
-func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
+func (fdmNode *filterDmNode) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
 	//fmt.Println("Do filterDmNode operation")
 
 	if len(in) != 2 {
@@ -26,13 +27,13 @@ func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
 		// TODO: add error handling
 	}
 
-	msgStreamMsg, ok := (*in[0]).(*MsgStreamMsg)
+	msgStreamMsg, ok := in[0].(*MsgStreamMsg)
 	if !ok {
 		log.Println("type assertion failed for MsgStreamMsg")
 		// TODO: add error handling
 	}
 
-	ddMsg, ok := (*in[1]).(*ddMsg)
+	ddMsg, ok := in[1].(*ddMsg)
 	if !ok {
 		log.Println("type assertion failed for ddMsg")
 		// TODO: add error handling
@@ -69,7 +70,7 @@ func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
 	iMsg.startPositions = append(iMsg.startPositions, msgStreamMsg.StartPositions()...)
 	iMsg.gcRecord = ddMsg.gcRecord
 	var res Msg = &iMsg
-	return []*Msg{&res}
+	return []Msg{res}, ctx
 }
 
 func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg) *msgstream.InsertMsg {
diff --git a/internal/datanode/flow_graph_gc_node.go b/internal/datanode/flow_graph_gc_node.go
index b5b327787..1732dbb6e 100644
--- a/internal/datanode/flow_graph_gc_node.go
+++ b/internal/datanode/flow_graph_gc_node.go
@@ -1,6 +1,7 @@
 package datanode
 
 import (
+	"context"
 	"log"
 )
 
@@ -13,7 +14,7 @@ func (gcNode *gcNode) Name() string {
 	return "gcNode"
 }
 
-func (gcNode *gcNode) Operate(in []*Msg) []*Msg {
+func (gcNode *gcNode) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
 	//fmt.Println("Do gcNode operation")
 
 	if len(in) != 1 {
@@ -21,7 +22,7 @@ func (gcNode *gcNode) Operate(in []*Msg) []*Msg {
 		// TODO: add error handling
 	}
 
-	gcMsg, ok := (*in[0]).(*gcMsg)
+	gcMsg, ok := in[0].(*gcMsg)
 	if !ok {
 		log.Println("type assertion failed for gcMsg")
 		// TODO: add error handling
@@ -35,7 +36,7 @@ func (gcNode *gcNode) Operate(in []*Msg) []*Msg {
 		}
 	}
 
-	return nil
+	return nil, ctx
 }
 
 func newGCNode(replica Replica) *gcNode {
diff --git a/internal/datanode/flow_graph_insert_buffer_node.go b/internal/datanode/flow_graph_insert_buffer_node.go
index 354eb4580..c526946a6 100644
--- a/internal/datanode/flow_graph_insert_buffer_node.go
+++ b/internal/datanode/flow_graph_insert_buffer_node.go
@@ -85,7 +85,7 @@ func (ibNode *insertBufferNode) Name() string {
 	return "ibNode"
 }
 
-func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
+func (ibNode *insertBufferNode) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
 	// log.Println("=========== insert buffer Node Operating")
 
 	if len(in) != 1 {
@@ -93,7 +93,7 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
 		// TODO: add error handling
 	}
 
-	iMsg, ok := (*in[0]).(*insertMsg)
+	iMsg, ok := in[0].(*insertMsg)
 	if !ok {
 		log.Println("Error: type assertion failed for insertMsg")
 		// TODO: add error handling
@@ -472,7 +472,7 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
 		timeRange: iMsg.timeRange,
 	}
 
-	return []*Msg{&res}
+	return []Msg{res}, ctx
 }
 
 func (ibNode *insertBufferNode) flushSegment(segID UniqueID, partitionID UniqueID, collID UniqueID) error {
diff --git a/internal/datanode/flow_graph_insert_buffer_node_test.go b/internal/datanode/flow_graph_insert_buffer_node_test.go
index 527f7d577..450e961c2 100644
--- a/internal/datanode/flow_graph_insert_buffer_node_test.go
+++ b/internal/datanode/flow_graph_insert_buffer_node_test.go
@@ -53,7 +53,7 @@ func TestFlowGraphInsertBufferNode_Operate(t *testing.T) {
 	iBNode := newInsertBufferNode(ctx, newMetaTable(), replica, idFactory, msFactory)
 	inMsg := genInsertMsg()
 	var iMsg flowgraph.Msg = &inMsg
-	iBNode.Operate([]*flowgraph.Msg{&iMsg})
+	iBNode.Operate(ctx, []flowgraph.Msg{iMsg})
 }
 
 func genInsertMsg() insertMsg {
diff --git a/internal/dataservice/server.go b/internal/dataservice/server.go
index 6c700e098..06375a279 100644
--- a/internal/dataservice/server.go
+++ b/internal/dataservice/server.go
@@ -329,7 +329,7 @@ func (s *Server) startStatsChannel(ctx context.Context) {
 			return
 		default:
 		}
-		msgPack := statsStream.Consume()
+		msgPack, _ := statsStream.Consume()
 		for _, msg := range msgPack.Msgs {
 			statistics, ok := msg.(*msgstream.SegmentStatisticsMsg)
 			if !ok {
@@ -358,7 +358,7 @@ func (s *Server) startSegmentFlushChannel(ctx context.Context) {
 			return
 		default:
 		}
-		msgPack := flushStream.Consume()
+		msgPack, _ := flushStream.Consume()
 		for _, msg := range msgPack.Msgs {
 			if msg.Type() != commonpb.MsgType_kSegmentFlushDone {
 				continue
@@ -393,7 +393,7 @@ func (s *Server) startDDChannel(ctx context.Context) {
 			return
 		default:
 		}
-		msgPack := ddStream.Consume()
+		msgPack, _ := ddStream.Consume()
 		for _, msg := range msgPack.Msgs {
 			if err := s.ddHandler.HandleDDMsg(msg); err != nil {
 				log.Error("handle dd msg error", zap.Error(err))
diff --git a/internal/distributed/indexservice/service.go b/internal/distributed/indexservice/service.go
index a95c0f2a8..b5ea9b099 100644
--- a/internal/distributed/indexservice/service.go
+++ b/internal/distributed/indexservice/service.go
@@ -2,18 +2,21 @@ package grpcindexservice
 
 import (
 	"context"
+	"fmt"
+	"io"
 	"log"
 	"net"
 	"strconv"
 	"sync"
 
-	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
-	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
-	"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
-
+	"github.com/opentracing/opentracing-go"
+	"github.com/uber/jaeger-client-go/config"
 	"github.com/zilliztech/milvus-distributed/internal/indexservice"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
+	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
+	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
+	"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
 	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
 	"google.golang.org/grpc"
 )
@@ -30,6 +33,8 @@ type Server struct {
 	loopCtx    context.Context
 	loopCancel func()
 	loopWg     sync.WaitGroup
+
+	closer io.Closer
 }
 
 func (s *Server) Run() error {
@@ -71,6 +76,9 @@ func (s *Server) start() error {
 }
 
 func (s *Server) Stop() error {
+	if err := s.closer.Close(); err != nil {
+		return err
+	}
 	if s.impl != nil {
 		s.impl.Stop()
 	}
@@ -191,5 +199,19 @@ func NewServer(ctx context.Context) (*Server, error) {
 		grpcErrChan: make(chan error),
 	}
 
+	cfg := &config.Configuration{
+		ServiceName: "index_service",
+		Sampler: &config.SamplerConfig{
+			Type:  "const",
+			Param: 1,
+		},
+	}
+	tracer, closer, err := cfg.NewTracer()
+	if err != nil {
+		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
+	}
+	opentracing.SetGlobalTracer(tracer)
+	s.closer = closer
+
 	return s, nil
 }
diff --git a/internal/distributed/masterservice/server.go b/internal/distributed/masterservice/server.go
index da86d6506..fc1e3f387 100644
--- a/internal/distributed/masterservice/server.go
+++ b/internal/distributed/masterservice/server.go
@@ -68,7 +68,7 @@ func NewServer(ctx context.Context, factory msgstream.Factory) (*Server, error)
 
 	//TODO
 	cfg := &config.Configuration{
-		ServiceName: "proxy_service",
+		ServiceName: "master_service",
 		Sampler: &config.SamplerConfig{
 			Type:  "const",
 			Param: 1,
diff --git a/internal/indexnode/indexnode.go b/internal/indexnode/indexnode.go
index 994c44963..5aa01ff08 100644
--- a/internal/indexnode/indexnode.go
+++ b/internal/indexnode/indexnode.go
@@ -2,11 +2,15 @@ package indexnode
 
 import (
 	"context"
+	"fmt"
+	"io"
 	"log"
 	"time"
 
 	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 
+	"github.com/opentracing/opentracing-go"
+	"github.com/uber/jaeger-client-go/config"
 	"github.com/zilliztech/milvus-distributed/internal/errors"
 	"github.com/zilliztech/milvus-distributed/internal/kv"
 	miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
@@ -40,6 +44,8 @@ type NodeImpl struct {
 	// Add callback functions at different stages
 	startCallbacks []func()
 	closeCallbacks []func()
+
+	closer io.Closer
 }
 
 func NewNodeImpl(ctx context.Context) (*NodeImpl, error) {
@@ -57,9 +63,7 @@ func NewNodeImpl(ctx context.Context) (*NodeImpl, error) {
 }
 
 func (i *NodeImpl) Init() error {
-	log.Println("AAAAAAAAAAAAAAAAA", i.serviceClient)
 	err := funcutil.WaitForComponentHealthy(i.serviceClient, "IndexService", 10, time.Second)
-	log.Println("BBBBBBBBB", i.serviceClient)
 
 	if err != nil {
 		return err
@@ -87,6 +91,21 @@ func (i *NodeImpl) Init() error {
 		return err
 	}
 
+	// TODO
+	cfg := &config.Configuration{
+		ServiceName: fmt.Sprintf("index_node_%d", Params.NodeID),
+		Sampler: &config.SamplerConfig{
+			Type:  "const",
+			Param: 1,
+		},
+	}
+	tracer, closer, err := cfg.NewTracer()
+	if err != nil {
+		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
+	}
+	opentracing.SetGlobalTracer(tracer)
+	i.closer = closer
+
 	connectMinIOFn := func() error {
 		option := &miniokv.Option{
 			Address:           Params.MinIOAddress,
@@ -126,6 +145,9 @@ func (i *NodeImpl) Start() error {
 
 // Close closes the server.
 func (i *NodeImpl) Stop() error {
+	if err := i.closer.Close(); err != nil {
+		return err
+	}
 	i.loopCancel()
 	if i.sched != nil {
 		i.sched.Close()
diff --git a/internal/msgstream/msgstream.go b/internal/msgstream/msgstream.go
index 9826c5818..8a92d989d 100644
--- a/internal/msgstream/msgstream.go
+++ b/internal/msgstream/msgstream.go
@@ -32,7 +32,7 @@ type MsgStream interface {
 
 	Produce(context.Context, *MsgPack) error
 	Broadcast(context.Context, *MsgPack) error
-	Consume() *MsgPack
+	Consume() (*MsgPack, context.Context)
 	Seek(offset *MsgPosition) error
 }
 
diff --git a/internal/msgstream/pulsarms/msg_test.go b/internal/msgstream/pulsarms/msg_test.go
index 5d986fa13..48496c99c 100644
--- a/internal/msgstream/pulsarms/msg_test.go
+++ b/internal/msgstream/pulsarms/msg_test.go
@@ -160,7 +160,7 @@ func TestStream_task_Insert(t *testing.T) {
 	}
 	receiveCount := 0
 	for {
-		result := outputStream.Consume()
+		result, _ := outputStream.Consume()
 		if len(result.Msgs) > 0 {
 			msgs := result.Msgs
 			for _, v := range msgs {
diff --git a/internal/msgstream/pulsarms/pulsar_msgstream.go b/internal/msgstream/pulsarms/pulsar_msgstream.go
index c93cbf479..157f0a53d 100644
--- a/internal/msgstream/pulsarms/pulsar_msgstream.go
+++ b/internal/msgstream/pulsarms/pulsar_msgstream.go
@@ -5,14 +5,12 @@ import (
 	"path/filepath"
 	"reflect"
 	"strconv"
-	"strings"
 	"sync"
 	"time"
 
 	"github.com/apache/pulsar-client-go/pulsar"
 	"github.com/golang/protobuf/proto"
-	"go.uber.org/zap"
-
+	"github.com/opentracing/opentracing-go"
 	"github.com/zilliztech/milvus-distributed/internal/errors"
 	"github.com/zilliztech/milvus-distributed/internal/log"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
@@ -21,6 +19,7 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
 	"github.com/zilliztech/milvus-distributed/internal/util/trace"
 	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
+	"go.uber.org/zap"
 )
 
 type TsMsg = msgstream.TsMsg
@@ -52,6 +51,8 @@ type PulsarMsgStream struct {
 	pulsarBufSize    int64
 	consumerLock     *sync.Mutex
 	consumerReflects []reflect.SelectCase
+
+	scMap *sync.Map
 }
 
 func newPulsarMsgStream(ctx context.Context,
@@ -92,6 +93,7 @@ func newPulsarMsgStream(ctx context.Context,
 		consumerReflects: consumerReflects,
 		consumerLock:     &sync.Mutex{},
 		wait:             &sync.WaitGroup{},
+		scMap:            &sync.Map{},
 	}
 
 	return stream, nil
@@ -182,29 +184,6 @@ func (ms *PulsarMsgStream) Close() {
 	}
 }
 
-type propertiesReaderWriter struct {
-	ppMap map[string]string
-}
-
-func (ppRW *propertiesReaderWriter) Set(key, val string) {
-	// The GRPC HPACK implementation rejects any uppercase keys here.
-	//
-	// As such, since the HTTP_HEADERS format is case-insensitive anyway, we
-	// blindly lowercase the key (which is guaranteed to work in the
-	// Inject/Extract sense per the OpenTracing spec).
-	key = strings.ToLower(key)
-	ppRW.ppMap[key] = val
-}
-
-func (ppRW *propertiesReaderWriter) ForeachKey(handler func(key, val string) error) error {
-	for k, val := range ppRW.ppMap {
-		if err := handler(k, val); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 func (ms *PulsarMsgStream) Produce(ctx context.Context, msgPack *MsgPack) error {
 	tsMsgs := msgPack.Msgs
 	if len(tsMsgs) <= 0 {
@@ -316,18 +295,31 @@ func (ms *PulsarMsgStream) Broadcast(ctx context.Context, msgPack *MsgPack) erro
 	return nil
 }
 
-func (ms *PulsarMsgStream) Consume() *MsgPack {
+func (ms *PulsarMsgStream) Consume() (*MsgPack, context.Context) {
 	for {
 		select {
 		case cm, ok := <-ms.receiveBuf:
 			if !ok {
 				log.Debug("buf chan closed")
-				return nil
+				return nil, nil
 			}
-			return cm
+			var ctx context.Context
+			var opts []opentracing.StartSpanOption
+			for _, msg := range cm.Msgs {
+				sc, loaded := ms.scMap.LoadAndDelete(msg.ID())
+				if loaded {
+					opts = append(opts, opentracing.ChildOf(sc.(opentracing.SpanContext)))
+				}
+			}
+			if len(opts) != 0 {
+				ctx = context.Background()
+			}
+			sp, ctx := trace.StartSpanFromContext(ctx, opts...)
+			sp.Finish()
+			return cm, ctx
 		case <-ms.ctx.Done():
 			log.Debug("context closed")
-			return nil
+			return nil, nil
 		}
 	}
 }
@@ -360,8 +352,15 @@ func (ms *PulsarMsgStream) receiveMsg(consumer Consumer) {
 				MsgID:       typeutil.PulsarMsgIDToString(pulsarMsg.ID()),
 			})
 
+			sp, ok := trace.ExtractFromPulsarMsgProperties(tsMsg, pulsarMsg.Properties())
+			if ok {
+				ms.scMap.Store(tsMsg.ID(), sp.Context())
+			}
+
 			msgPack := MsgPack{Msgs: []TsMsg{tsMsg}}
 			ms.receiveBuf <- &msgPack
+
+			sp.Finish()
 		}
 	}
 }
@@ -687,12 +686,18 @@ func (ms *PulsarTtMsgStream) findTimeTick(consumer Consumer,
 				log.Error("Failed to unmarshal tsMsg", zap.Error(err))
 				continue
 			}
+
 			// set pulsar info to tsMsg
 			tsMsg.SetPosition(&msgstream.MsgPosition{
 				ChannelName: filepath.Base(pulsarMsg.Topic()),
 				MsgID:       typeutil.PulsarMsgIDToString(pulsarMsg.ID()),
 			})
 
+			sp, ok := trace.ExtractFromPulsarMsgProperties(tsMsg, pulsarMsg.Properties())
+			if ok {
+				ms.scMap.Store(tsMsg.ID(), sp.Context())
+			}
+
 			ms.unsolvedMutex.Lock()
 			ms.unsolvedBuf[consumer] = append(ms.unsolvedBuf[consumer], tsMsg)
 			ms.unsolvedMutex.Unlock()
@@ -701,8 +706,10 @@ func (ms *PulsarTtMsgStream) findTimeTick(consumer Consumer,
 				findMapMutex.Lock()
 				eofMsgMap[consumer] = tsMsg.(*TimeTickMsg).Base.Timestamp
 				findMapMutex.Unlock()
+				sp.Finish()
 				return
 			}
+			sp.Finish()
 		}
 	}
 }
diff --git a/internal/msgstream/pulsarms/pulsar_msgstream_test.go b/internal/msgstream/pulsarms/pulsar_msgstream_test.go
index d9aa513be..ca9480061 100644
--- a/internal/msgstream/pulsarms/pulsar_msgstream_test.go
+++ b/internal/msgstream/pulsarms/pulsar_msgstream_test.go
@@ -223,7 +223,7 @@ func initPulsarTtStream(pulsarAddress string,
 func receiveMsg(outputStream msgstream.MsgStream, msgCount int) {
 	receiveCount := 0
 	for {
-		result := outputStream.Consume()
+		result, _ := outputStream.Consume()
 		if len(result.Msgs) > 0 {
 			msgs := result.Msgs
 			for _, v := range msgs {
@@ -607,13 +607,13 @@ func TestStream_PulsarTtMsgStream_Seek(t *testing.T) {
 	assert.Nil(t, err)
 
 	outputStream.Consume()
-	receivedMsg := outputStream.Consume()
+	receivedMsg, _ := outputStream.Consume()
 	for _, position := range receivedMsg.StartPositions {
 		outputStream.Seek(position)
 	}
 	err = inputStream.Broadcast(ctx, &msgPack5)
 	assert.Nil(t, err)
-	seekMsg := outputStream.Consume()
+	seekMsg, _ := outputStream.Consume()
 	for _, msg := range seekMsg.Msgs {
 		assert.Equal(t, msg.BeginTs(), uint64(14))
 	}
diff --git a/internal/msgstream/rmqms/rmq_msgstream.go b/internal/msgstream/rmqms/rmq_msgstream.go
index 8561035a6..3c463f588 100644
--- a/internal/msgstream/rmqms/rmq_msgstream.go
+++ b/internal/msgstream/rmqms/rmq_msgstream.go
@@ -219,18 +219,18 @@ func (ms *RmqMsgStream) Broadcast(ctx context.Context, msgPack *MsgPack) error {
 	return nil
 }
 
-func (ms *RmqMsgStream) Consume() *msgstream.MsgPack {
+func (ms *RmqMsgStream) Consume() (*msgstream.MsgPack, context.Context) {
 	for {
 		select {
 		case cm, ok := <-ms.receiveBuf:
 			if !ok {
 				log.Println("buf chan closed")
-				return nil
+				return nil, nil
 			}
-			return cm
+			return cm, nil
 		case <-ms.ctx.Done():
 			log.Printf("context closed")
-			return nil
+			return nil, nil
 		}
 	}
 }
diff --git a/internal/msgstream/rmqms/rmq_msgstream_test.go b/internal/msgstream/rmqms/rmq_msgstream_test.go
index 3e19f3d7b..a35b1e478 100644
--- a/internal/msgstream/rmqms/rmq_msgstream_test.go
+++ b/internal/msgstream/rmqms/rmq_msgstream_test.go
@@ -239,7 +239,7 @@ func initRmqTtStream(producerChannels []string,
 func receiveMsg(outputStream msgstream.MsgStream, msgCount int) {
 	receiveCount := 0
 	for {
-		result := outputStream.Consume()
+		result, _ := outputStream.Consume()
 		if len(result.Msgs) > 0 {
 			msgs := result.Msgs
 			for _, v := range msgs {
diff --git a/internal/proxyservice/timetick.go b/internal/proxyservice/timetick.go
index da22a7181..92cf0c46f 100644
--- a/internal/proxyservice/timetick.go
+++ b/internal/proxyservice/timetick.go
@@ -5,11 +5,9 @@ import (
 	"log"
 	"sync"
 
+	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
-
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
-
-	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 )
 
 type (
@@ -58,6 +56,9 @@ func (tt *TimeTickImpl) Start() error {
 					},
 				}
 				msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
+				for _, msg := range msgPack.Msgs {
+					log.Println("msg type xxxxxxxxxxxxxxxxxxxxxxxx", msg.Type())
+				}
 				for _, channel := range tt.channels {
 					err = channel.Broadcast(tt.ctx, &msgPack)
 					if err != nil {
diff --git a/internal/querynode/data_sync_service.go b/internal/querynode/data_sync_service.go
index e0ebdc83f..6ec7a7745 100644
--- a/internal/querynode/data_sync_service.go
+++ b/internal/querynode/data_sync_service.go
@@ -56,15 +56,15 @@ func (dsService *dataSyncService) initNodes() {
 	var serviceTimeNode node = newServiceTimeNode(dsService.ctx, dsService.replica, dsService.msFactory)
 	var gcNode node = newGCNode(dsService.replica)
 
-	dsService.fg.AddNode(&dmStreamNode)
-	dsService.fg.AddNode(&ddStreamNode)
+	dsService.fg.AddNode(dmStreamNode)
+	dsService.fg.AddNode(ddStreamNode)
 
-	dsService.fg.AddNode(&filterDmNode)
-	dsService.fg.AddNode(&ddNode)
+	dsService.fg.AddNode(filterDmNode)
+	dsService.fg.AddNode(ddNode)
 
-	dsService.fg.AddNode(&insertNode)
-	dsService.fg.AddNode(&serviceTimeNode)
-	dsService.fg.AddNode(&gcNode)
+	dsService.fg.AddNode(insertNode)
+	dsService.fg.AddNode(serviceTimeNode)
+	dsService.fg.AddNode(gcNode)
 
 	// dmStreamNode
 	var err = dsService.fg.SetEdges(dmStreamNode.Name(),
diff --git a/internal/querynode/flow_graph_dd_node.go b/internal/querynode/flow_graph_dd_node.go
index 99cd70cf6..69a3eeefe 100644
--- a/internal/querynode/flow_graph_dd_node.go
+++ b/internal/querynode/flow_graph_dd_node.go
@@ -1,6 +1,7 @@
 package querynode
 
 import (
+	"context"
 	"log"
 
 	"github.com/golang/protobuf/proto"
@@ -19,15 +20,15 @@ func (ddNode *ddNode) Name() string {
 	return "ddNode"
 }
 
-func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
-	//fmt.Println("Do ddNode operation")
+func (ddNode *ddNode) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
+	//fmt.Println("Do ddNode operation")
 
 	if len(in) != 1 {
 		log.Println("Invalid operate message input in ddNode, input length = ", len(in))
 		// TODO: add error handling
 	}
 
-	msMsg, ok := (*in[0]).(*MsgStreamMsg)
+	msMsg, ok := in[0].(*MsgStreamMsg)
 	if !ok {
 		log.Println("type assertion failed for MsgStreamMsg")
 		// TODO: add error handling
@@ -72,7 +73,7 @@ func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
 	//}
 
 	var res Msg = ddNode.ddMsg
-	return []*Msg{&res}
+	return []Msg{res}, ctx
 }
 
 func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
diff --git a/internal/querynode/flow_graph_filter_dm_node.go b/internal/querynode/flow_graph_filter_dm_node.go
index f50a4dab3..7e75a561d 100644
--- a/internal/querynode/flow_graph_filter_dm_node.go
+++ b/internal/querynode/flow_graph_filter_dm_node.go
@@ -1,6 +1,7 @@
 package querynode
 
 import (
+	"context"
 	"log"
 	"math"
 
@@ -18,7 +19,7 @@ func (fdmNode *filterDmNode) Name() string {
 	return "fdmNode"
 }
 
-func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
+func (fdmNode *filterDmNode) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
 	//fmt.Println("Do filterDmNode operation")
 
 	if len(in) != 2 {
@@ -26,13 +27,13 @@ func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
 		// TODO: add error handling
 	}
 
-	msgStreamMsg, ok := (*in[0]).(*MsgStreamMsg)
+	msgStreamMsg, ok := in[0].(*MsgStreamMsg)
 	if !ok {
 		log.Println("type assertion failed for MsgStreamMsg")
 		// TODO: add error handling
 	}
 
-	ddMsg, ok := (*in[1]).(*ddMsg)
+	ddMsg, ok := in[1].(*ddMsg)
 	if !ok {
 		log.Println("type assertion failed for ddMsg")
 		// TODO: add error handling
@@ -63,7 +64,7 @@ func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
 	iMsg.gcRecord = ddMsg.gcRecord
 	var res Msg = &iMsg
 
-	return []*Msg{&res}
+	return []Msg{res}, ctx
 }
 
 func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg) *msgstream.InsertMsg {
diff --git a/internal/querynode/flow_graph_gc_node.go b/internal/querynode/flow_graph_gc_node.go
index 2be079e44..63264b5ca 100644
--- a/internal/querynode/flow_graph_gc_node.go
+++ b/internal/querynode/flow_graph_gc_node.go
@@ -1,6 +1,7 @@
 package querynode
 
 import (
+	"context"
 	"log"
 )
 
@@ -13,7 +14,7 @@ func (gcNode *gcNode) Name() string {
 	return "gcNode"
 }
 
-func (gcNode *gcNode) Operate(in []*Msg) []*Msg {
+func (gcNode *gcNode) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
 	//fmt.Println("Do gcNode operation")
 
 	if len(in) != 1 {
@@ -21,7 +22,7 @@ func (gcNode *gcNode) Operate(in []*Msg) []*Msg {
 		// TODO: add error handling
 	}
 
-	_, ok := (*in[0]).(*gcMsg)
+	_, ok := in[0].(*gcMsg)
 	if !ok {
 		log.Println("type assertion failed for gcMsg")
 		// TODO: add error handling
@@ -47,7 +48,7 @@ func (gcNode *gcNode) Operate(in []*Msg) []*Msg {
 	//	}
 	//}
 
-	return nil
+	return nil, ctx
 }
 
 func newGCNode(replica collectionReplica) *gcNode {
diff --git a/internal/querynode/flow_graph_insert_node.go b/internal/querynode/flow_graph_insert_node.go
index a47facef0..23629399d 100644
--- a/internal/querynode/flow_graph_insert_node.go
+++ b/internal/querynode/flow_graph_insert_node.go
@@ -26,7 +26,7 @@ func (iNode *insertNode) Name() string {
 	return "iNode"
 }
 
-func (iNode *insertNode) Operate(in []*Msg) []*Msg {
+func (iNode *insertNode) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
 	// fmt.Println("Do insertNode operation")
 
 	if len(in) != 1 {
@@ -34,7 +34,7 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
 		// TODO: add error handling
 	}
 
-	iMsg, ok := (*in[0]).(*insertMsg)
+	iMsg, ok := in[0].(*insertMsg)
 	if !ok {
 		log.Println("type assertion failed for insertMsg")
 		// TODO: add error handling
@@ -90,7 +90,7 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
 		gcRecord:  iMsg.gcRecord,
 		timeRange: iMsg.timeRange,
 	}
-	return []*Msg{&res}
+	return []Msg{res}, ctx
 }
 
 func (iNode *insertNode) insert(insertData *InsertData, segmentID int64, wg *sync.WaitGroup) {
diff --git a/internal/querynode/flow_graph_service_time_node.go b/internal/querynode/flow_graph_service_time_node.go
index a164eb2ba..7a2005028 100644
--- a/internal/querynode/flow_graph_service_time_node.go
+++ b/internal/querynode/flow_graph_service_time_node.go
@@ -19,7 +19,7 @@ func (stNode *serviceTimeNode) Name() string {
 	return "stNode"
 }
 
-func (stNode *serviceTimeNode) Operate(in []*Msg) []*Msg {
+func (stNode *serviceTimeNode) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
 	//fmt.Println("Do serviceTimeNode operation")
 
 	if len(in) != 1 {
@@ -27,7 +27,7 @@ func (stNode *serviceTimeNode) Operate(in []*Msg) []*Msg {
 		// TODO: add error handling
 	}
 
-	serviceTimeMsg, ok := (*in[0]).(*serviceTimeMsg)
+	serviceTimeMsg, ok := in[0].(*serviceTimeMsg)
 	if !ok {
 		log.Println("type assertion failed for serviceTimeMsg")
 		// TODO: add error handling
@@ -45,7 +45,7 @@ func (stNode *serviceTimeNode) Operate(in []*Msg) []*Msg {
 		gcRecord:  serviceTimeMsg.gcRecord,
 		timeRange: serviceTimeMsg.timeRange,
 	}
-	return []*Msg{&res}
+	return []Msg{res}, ctx
 }
 
 func (stNode *serviceTimeNode) sendTimeTick(ts Timestamp) error {
diff --git a/internal/querynode/query_node.go b/internal/querynode/query_node.go
index da546bf5e..3cf30a567 100644
--- a/internal/querynode/query_node.go
+++ b/internal/querynode/query_node.go
@@ -15,12 +15,12 @@ import "C"
 import (
 	"context"
 	"fmt"
+	"github.com/opentracing/opentracing-go"
+	"github.com/uber/jaeger-client-go/config"
 	"io"
 	"log"
 	"sync/atomic"
 
-	"github.com/opentracing/opentracing-go"
-	"github.com/uber/jaeger-client-go/config"
 	"github.com/zilliztech/milvus-distributed/internal/errors"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
@@ -91,20 +91,6 @@ func NewQueryNode(ctx context.Context, queryNodeID UniqueID, factory msgstream.F
 		msFactory: factory,
 	}
 
-	cfg := &config.Configuration{
-		ServiceName: fmt.Sprintf("query_node_%d", node.QueryNodeID),
-		Sampler: &config.SamplerConfig{
-			Type:  "const",
-			Param: 1,
-		},
-	}
-	tracer, closer, err := cfg.NewTracer()
-	if err != nil {
-		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
-	}
-	opentracing.SetGlobalTracer(tracer)
-	node.closer = closer
-
 	node.replica = newCollectionReplicaImpl()
 	node.UpdateStateCode(internalpb2.StateCode_ABNORMAL)
 	return node
@@ -167,6 +153,20 @@ func (node *QueryNode) Init() error {
 
 	fmt.Println("QueryNodeID is", Params.QueryNodeID)
 
+	cfg := &config.Configuration{
+		ServiceName: fmt.Sprintf("query_node_%d", node.QueryNodeID),
+		Sampler: &config.SamplerConfig{
+			Type:  "const",
+			Param: 1,
+		},
+	}
+	tracer, closer, err := cfg.NewTracer()
+	if err != nil {
+		panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
+	}
+	opentracing.SetGlobalTracer(tracer)
+	node.closer = closer
+
 	if node.masterClient == nil {
 		log.Println("WARN: null master service detected")
 	}
@@ -212,9 +212,6 @@ func (node *QueryNode) Start() error {
 }
 
 func (node *QueryNode) Stop() error {
-	if err := node.closer.Close(); err != nil {
-		return err
-	}
 	node.UpdateStateCode(internalpb2.StateCode_ABNORMAL)
 	node.queryNodeLoopCancel()
 
diff --git a/internal/querynode/search_service.go b/internal/querynode/search_service.go
index 14160200e..cc8e88992 100644
--- a/internal/querynode/search_service.go
+++ b/internal/querynode/search_service.go
@@ -121,7 +121,7 @@ func (ss *searchService) receiveSearchMsg() {
 		case <-ss.ctx.Done():
 			return
 		default:
-			msgPack := ss.searchMsgStream.Consume()
+			msgPack, _ := ss.searchMsgStream.Consume()
 			if msgPack == nil || len(msgPack.Msgs) <= 0 {
 				continue
 			}
diff --git a/internal/timesync/timesync.go b/internal/timesync/timesync.go
index b61b5ea3d..c1ceccfb8 100644
--- a/internal/timesync/timesync.go
+++ b/internal/timesync/timesync.go
@@ -93,7 +93,7 @@ func (ttBarrier *softTimeTickBarrier) Start() {
 			return
 		default:
 		}
-		ttmsgs := ttBarrier.ttStream.Consume()
+		ttmsgs, _ := ttBarrier.ttStream.Consume()
 		if len(ttmsgs.Msgs) > 0 {
 			for _, timetickmsg := range ttmsgs.Msgs {
 				ttmsg := timetickmsg.(*ms.TimeTickMsg)
@@ -156,7 +156,7 @@ func (ttBarrier *hardTimeTickBarrier) Start() {
 				return
 			default:
 			}
-			ttmsgs := ttBarrier.ttStream.Consume()
+			ttmsgs, _ := ttBarrier.ttStream.Consume()
 			if len(ttmsgs.Msgs) > 0 {
 				log.Printf("receive tt msg")
 				for _, timetickmsg := range ttmsgs.Msgs {
diff --git a/internal/util/flowgraph/flow_graph.go b/internal/util/flowgraph/flow_graph.go
index 83448cd81..6d91f201b 100644
--- a/internal/util/flowgraph/flow_graph.go
+++ b/internal/util/flowgraph/flow_graph.go
@@ -13,11 +13,11 @@ type TimeTickedFlowGraph struct {
 	nodeCtx map[NodeName]*nodeCtx
 }
 
-func (fg *TimeTickedFlowGraph) AddNode(node *Node) {
-	nodeName := (*node).Name()
+func (fg *TimeTickedFlowGraph) AddNode(node Node) {
+	nodeName := node.Name()
 	nodeCtx := nodeCtx{
 		node:                   node,
-		inputChannels:          make([]chan *Msg, 0),
+		inputChannels:          make([]chan *MsgWithCtx, 0),
 		downstreamInputChanIdx: make(map[string]int),
 	}
 	fg.nodeCtx[nodeName] = &nodeCtx
@@ -50,8 +50,8 @@ func (fg *TimeTickedFlowGraph) SetEdges(nodeName string, in []string, out []stri
 			errMsg := "Cannot find out node:" + n
 			return errors.New(errMsg)
 		}
-		maxQueueLength := (*outNode.node).MaxQueueLength()
-		outNode.inputChannels = append(outNode.inputChannels, make(chan *Msg, maxQueueLength))
+		maxQueueLength := outNode.node.MaxQueueLength()
+		outNode.inputChannels = append(outNode.inputChannels, make(chan *MsgWithCtx, maxQueueLength))
 		currentNode.downstream[i] = outNode
 	}
 
@@ -70,8 +70,8 @@ func (fg *TimeTickedFlowGraph) Start() {
 func (fg *TimeTickedFlowGraph) Close() {
 	for _, v := range fg.nodeCtx {
 		// close message stream
-		if (*v.node).IsInputNode() {
-			inStream, ok := (*v.node).(*InputNode)
+		if v.node.IsInputNode() {
+			inStream, ok := v.node.(*InputNode)
 			if !ok {
 				log.Fatal("Invalid inputNode")
 			}
diff --git a/internal/util/flowgraph/flow_graph_test.go b/internal/util/flowgraph/flow_graph_test.go
index 88c84af8a..8feed307f 100644
--- a/internal/util/flowgraph/flow_graph_test.go
+++ b/internal/util/flowgraph/flow_graph_test.go
@@ -47,19 +47,19 @@ func (m *intMsg) DownStreamNodeIdx() int {
 	return 1
 }
 
-func intMsg2Msg(in []*intMsg) []*Msg {
-	out := make([]*Msg, 0)
+func intMsg2Msg(in []*intMsg) []Msg {
+	out := make([]Msg, 0)
 	for _, msg := range in {
 		var m Msg = msg
-		out = append(out, &m)
+		out = append(out, m)
 	}
 	return out
 }
 
-func msg2IntMsg(in []*Msg) []*intMsg {
+func msg2IntMsg(in []Msg) []*intMsg {
 	out := make([]*intMsg, 0)
 	for _, msg := range in {
-		out = append(out, (*msg).(*intMsg))
+		out = append(out, msg.(*intMsg))
 	}
 	return out
 }
@@ -68,43 +68,43 @@ func (a *nodeA) Name() string {
 	return "NodeA"
 }
 
-func (a *nodeA) Operate(in []*Msg) []*Msg {
-	return append(in, in...)
+func (a *nodeA) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
+	return append(in, in...), nil
 }
 
 func (b *nodeB) Name() string {
 	return "NodeB"
 }
 
-func (b *nodeB) Operate(in []*Msg) []*Msg {
+func (b *nodeB) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
 	messages := make([]*intMsg, 0)
 	for _, msg := range msg2IntMsg(in) {
 		messages = append(messages, &intMsg{
 			num: math.Pow(msg.num, 2),
 		})
 	}
-	return intMsg2Msg(messages)
+	return intMsg2Msg(messages), nil
 }
 
 func (c *nodeC) Name() string {
 	return "NodeC"
 }
 
-func (c *nodeC) Operate(in []*Msg) []*Msg {
+func (c *nodeC) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
 	messages := make([]*intMsg, 0)
 	for _, msg := range msg2IntMsg(in) {
 		messages = append(messages, &intMsg{
 			num: math.Sqrt(msg.num),
 		})
 	}
-	return intMsg2Msg(messages)
+	return intMsg2Msg(messages), nil
 }
 
 func (d *nodeD) Name() string {
 	return "NodeD"
 }
 
-func (d *nodeD) Operate(in []*Msg) []*Msg {
+func (d *nodeD) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {
 	messages := make([]*intMsg, 0)
 	outLength := len(in) / 2
 	inMessages := msg2IntMsg(in)
@@ -117,7 +117,7 @@ func (d *nodeD) Operate(in []*Msg) []*Msg {
 	d.d = messages[0].num
 	d.resChan <- d.d
 	fmt.Println("flow graph result:", d.d)
-	return intMsg2Msg(messages)
+	return intMsg2Msg(messages), nil
 }
 
 func sendMsgFromCmd(ctx context.Context, fg *TimeTickedFlowGraph) {
@@ -129,8 +129,12 @@ func sendMsgFromCmd(ctx context.Context, fg *TimeTickedFlowGraph) {
 			time.Sleep(time.Millisecond * time.Duration(500))
 			var num = float64(rand.Int() % 100)
 			var msg Msg = &intMsg{num: num}
+			var msgWithContext = &MsgWithCtx{
+				ctx: ctx,
+				msg: msg,
+			}
 			a := nodeA{}
-			fg.nodeCtx[a.Name()].inputChannels[0] <- &msg
+			fg.nodeCtx[a.Name()].inputChannels[0] <- msgWithContext
 			fmt.Println("send number", num, "to node", a.Name())
 			res, ok := receiveResult(ctx, fg)
 			if !ok {
@@ -156,7 +160,7 @@ func sendMsgFromCmd(ctx context.Context, fg *TimeTickedFlowGraph) {
 func receiveResultFromNodeD(res *float64, fg *TimeTickedFlowGraph, wg *sync.WaitGroup) {
 	d := nodeD{}
 	node := fg.nodeCtx[d.Name()]
-	nd, ok := (*node.node).(*nodeD)
+	nd, ok := node.node.(*nodeD)
 	if !ok {
 		log.Fatal("not nodeD type")
 	}
@@ -167,7 +171,7 @@ func receiveResultFromNodeD(res *float64, fg *TimeTickedFlowGraph, wg *sync.Wait
 func receiveResult(ctx context.Context, fg *TimeTickedFlowGraph) (float64, bool) {
 	d := nodeD{}
 	node := fg.nodeCtx[d.Name()]
-	nd, ok := (*node.node).(*nodeD)
+	nd, ok := node.node.(*nodeD)
 	if !ok {
 		log.Fatal("not nodeD type")
 	}
@@ -211,10 +215,10 @@ func TestTimeTickedFlowGraph_Start(t *testing.T) {
 		resChan: make(chan float64),
 	}
 
-	fg.AddNode(&a)
-	fg.AddNode(&b)
-	fg.AddNode(&c)
-	fg.AddNode(&d)
+	fg.AddNode(a)
+	fg.AddNode(b)
+	fg.AddNode(c)
+	fg.AddNode(d)
 
 	var err = fg.SetEdges(a.Name(),
 		[]string{},
@@ -250,7 +254,7 @@ func TestTimeTickedFlowGraph_Start(t *testing.T) {
 
 	// init node A
 	nodeCtxA := fg.nodeCtx[a.Name()]
-	nodeCtxA.inputChannels = []chan *Msg{make(chan *Msg, 10)}
+	nodeCtxA.inputChannels = []chan *MsgWithCtx{make(chan *MsgWithCtx, 10)}
 
 	go fg.Start()
 
diff --git a/internal/util/flowgraph/input_node.go b/internal/util/flowgraph/input_node.go
index 0c0730f7f..868229102 100644
--- a/internal/util/flowgraph/input_node.go
+++ b/internal/util/flowgraph/input_node.go
@@ -1,9 +1,13 @@
 package flowgraph
 
 import (
+	"context"
 	"log"
 
+	"github.com/opentracing/opentracing-go"
+	"github.com/zilliztech/milvus-distributed/internal/errors"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
+	"github.com/zilliztech/milvus-distributed/internal/util/trace"
 )
 
 type InputNode struct {
@@ -25,15 +29,19 @@ func (inNode *InputNode) InStream() *msgstream.MsgStream {
 }
 
 // empty input and return one *Msg
-func (inNode *InputNode) Operate([]*Msg) []*Msg {
+func (inNode *InputNode) Operate(ctx context.Context, msgs []Msg) ([]Msg, context.Context) {
 	//fmt.Println("Do InputNode operation")
 
-	msgPack := (*inNode.inStream).Consume()
+	msgPack, ctx := (*inNode.inStream).Consume()
+
+	sp, ctx := trace.StartSpanFromContext(ctx, opentracing.Tag{Key: "NodeName", Value: inNode.Name()})
+	defer sp.Finish()
 
 	// TODO: add status
 	if msgPack == nil {
 		log.Println("null msg pack")
-		return nil
+		trace.LogError(sp, errors.New("null msg pack"))
+		return nil, ctx
 	}
 
 	var msgStreamMsg Msg = &MsgStreamMsg{
@@ -43,7 +51,7 @@ func (inNode *InputNode) Operate([]*Msg) []*Msg {
 		startPositions: msgPack.StartPositions,
 	}
 
-	return []*Msg{&msgStreamMsg}
+	return []Msg{msgStreamMsg}, ctx
 }
 
 func NewInputNode(inStream *msgstream.MsgStream, nodeName string, maxQueueLength int32, maxParallelism int32) *InputNode {
diff --git a/internal/util/flowgraph/node.go b/internal/util/flowgraph/node.go
index c12da812f..65a997952 100644
--- a/internal/util/flowgraph/node.go
+++ b/internal/util/flowgraph/node.go
@@ -6,13 +6,16 @@ import (
 	"log"
 	"sync"
 	"time"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/zilliztech/milvus-distributed/internal/util/trace"
 )
 
 type Node interface {
 	Name() string
 	MaxQueueLength() int32
 	MaxParallelism() int32
-	Operate(in []*Msg) []*Msg
+	Operate(ctx context.Context, in []Msg) ([]Msg, context.Context)
 	IsInputNode() bool
 }
 
@@ -22,9 +25,9 @@ type BaseNode struct {
 }
 
 type nodeCtx struct {
-	node                   *Node
-	inputChannels          []chan *Msg
-	inputMessages          []*Msg
+	node                   Node
+	inputChannels          []chan *MsgWithCtx
+	inputMessages          []Msg
 	downstream             []*nodeCtx
 	downstreamInputChanIdx map[string]int
 
@@ -32,10 +35,15 @@ type nodeCtx struct {
 	NumCompletedTasks int64
 }
 
+type MsgWithCtx struct {
+	ctx context.Context
+	msg Msg
+}
+
 func (nodeCtx *nodeCtx) Start(ctx context.Context, wg *sync.WaitGroup) {
-	if (*nodeCtx.node).IsInputNode() {
+	if nodeCtx.node.IsInputNode() {
 		// fmt.Println("start InputNode.inStream")
-		inStream, ok := (*nodeCtx.node).(*InputNode)
+		inStream, ok := nodeCtx.node.(*InputNode)
 		if !ok {
 			log.Fatal("Invalid inputNode")
 		}
@@ -46,19 +54,23 @@ func (nodeCtx *nodeCtx) Start(ctx context.Context, wg *sync.WaitGroup) {
 		select {
 		case <-ctx.Done():
 			wg.Done()
-			fmt.Println((*nodeCtx.node).Name(), "closed")
+			fmt.Println(nodeCtx.node.Name(), "closed")
 			return
 		default:
 			// inputs from inputsMessages for Operate
-			inputs := make([]*Msg, 0)
+			inputs := make([]Msg, 0)
 
-			if !(*nodeCtx.node).IsInputNode() {
-				nodeCtx.collectInputMessages()
+			var msgCtx context.Context
+			var res []Msg
+			var sp opentracing.Span
+			if !nodeCtx.node.IsInputNode() {
+				msgCtx = nodeCtx.collectInputMessages()
 				inputs = nodeCtx.inputMessages
 			}
-
-			n := *nodeCtx.node
-			res := n.Operate(inputs)
+			n := nodeCtx.node
+			res, msgCtx = n.Operate(msgCtx, inputs)
+			sp, msgCtx = trace.StartSpanFromContext(msgCtx)
+			sp.SetTag("node name", n.Name())
 
 			downstreamLength := len(nodeCtx.downstreamInputChanIdx)
 			if len(nodeCtx.downstream) < downstreamLength {
@@ -72,9 +84,10 @@ func (nodeCtx *nodeCtx) Start(ctx context.Context, wg *sync.WaitGroup) {
 			w := sync.WaitGroup{}
 			for i := 0; i < downstreamLength; i++ {
 				w.Add(1)
-				go nodeCtx.downstream[i].ReceiveMsg(&w, res[i], nodeCtx.downstreamInputChanIdx[(*nodeCtx.downstream[i].node).Name()])
+				go nodeCtx.downstream[i].ReceiveMsg(msgCtx, &w, res[i], nodeCtx.downstreamInputChanIdx[nodeCtx.downstream[i].node.Name()])
 			}
 			w.Wait()
+			sp.Finish()
 		}
 	}
 }
@@ -86,38 +99,54 @@ func (nodeCtx *nodeCtx) Close() {
 	}
 }
 
-func (nodeCtx *nodeCtx) ReceiveMsg(wg *sync.WaitGroup, msg *Msg, inputChanIdx int) {
-	nodeCtx.inputChannels[inputChanIdx] <- msg
+func (nodeCtx *nodeCtx) ReceiveMsg(ctx context.Context, wg *sync.WaitGroup, msg Msg, inputChanIdx int) {
+	sp, ctx := trace.StartSpanFromContext(ctx)
+	defer sp.Finish()
+	nodeCtx.inputChannels[inputChanIdx] <- &MsgWithCtx{ctx: ctx, msg: msg}
 	//fmt.Println((*nodeCtx.node).Name(), "receive to input channel ", inputChanIdx)
 
 	wg.Done()
 }
 
-func (nodeCtx *nodeCtx) collectInputMessages() {
+func (nodeCtx *nodeCtx) collectInputMessages() context.Context {
+	var opts []opentracing.StartSpanOption
+
 	inputsNum := len(nodeCtx.inputChannels)
-	nodeCtx.inputMessages = make([]*Msg, inputsNum)
+	nodeCtx.inputMessages = make([]Msg, inputsNum)
 
 	// init inputMessages,
 	// receive messages from inputChannels,
 	// and move them to inputMessages.
 	for i := 0; i < inputsNum; i++ {
 		channel := nodeCtx.inputChannels[i]
-		msg, ok := <-channel
+		msgWithCtx, ok := <-channel
 		if !ok {
 			// TODO: add status
 			log.Println("input channel closed")
-			return
+			return nil
+		}
+		nodeCtx.inputMessages[i] = msgWithCtx.msg
+		if msgWithCtx.ctx != nil {
+			sp, _ := trace.StartSpanFromContext(msgWithCtx.ctx)
+			opts = append(opts, opentracing.ChildOf(sp.Context()))
+			sp.Finish()
 		}
-		nodeCtx.inputMessages[i] = msg
+	}
+
+	var ctx context.Context
+	var sp opentracing.Span
+	if len(opts) != 0 {
+		sp, ctx = trace.StartSpanFromContext(context.Background(), opts...)
+		defer sp.Finish()
 	}
 
 	// timeTick alignment check
 	if len(nodeCtx.inputMessages) > 1 {
-		t := (*nodeCtx.inputMessages[0]).TimeTick()
+		t := nodeCtx.inputMessages[0].TimeTick()
 		latestTime := t
 		for i := 1; i < len(nodeCtx.inputMessages); i++ {
-			if t < (*nodeCtx.inputMessages[i]).TimeTick() {
-				latestTime = (*nodeCtx.inputMessages[i]).TimeTick()
+			if t < nodeCtx.inputMessages[i].TimeTick() {
+				latestTime = nodeCtx.inputMessages[i].TimeTick()
 				//err := errors.New("Fatal, misaligned time tick," +
 				//	"t1=" + strconv.FormatUint(time, 10) +
 				//	", t2=" + strconv.FormatUint((*nodeCtx.inputMessages[i]).TimeTick(), 10) +
@@ -127,7 +156,7 @@ func (nodeCtx *nodeCtx) collectInputMessages() {
 		}
 		// wait for time tick
 		for i := 0; i < len(nodeCtx.inputMessages); i++ {
-			for (*nodeCtx.inputMessages[i]).TimeTick() != latestTime {
+			for nodeCtx.inputMessages[i].TimeTick() != latestTime {
 				channel := nodeCtx.inputChannels[i]
 				select {
 				case <-time.After(10 * time.Second):
@@ -135,13 +164,14 @@ func (nodeCtx *nodeCtx) collectInputMessages() {
 				case msg, ok := <-channel:
 					if !ok {
 						log.Println("input channel closed")
-						return
+						return nil
 					}
-					nodeCtx.inputMessages[i] = msg
+					nodeCtx.inputMessages[i] = msg.msg
 				}
 			}
 		}
 	}
+	return ctx
 }
 
 func (node *BaseNode) MaxQueueLength() int32 {
diff --git a/internal/util/trace/util.go b/internal/util/trace/util.go
index 2234f5c04..6b7b8e5a4 100644
--- a/internal/util/trace/util.go
+++ b/internal/util/trace/util.go
@@ -16,7 +16,7 @@ import (
 
 func StartSpanFromContext(ctx context.Context, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) {
 	if ctx == nil {
-		panic("StartSpanFromContext called with nil context")
+		return noopSpan(), ctx
 	}
 
 	var pcs [1]uintptr
@@ -45,7 +45,7 @@ func StartSpanFromContext(ctx context.Context, opts ...opentracing.StartSpanOpti
 
 func StartSpanFromContextWithOperationName(ctx context.Context, operationName string, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) {
 	if ctx == nil {
-		panic("StartSpanFromContextWithOperationName called with nil context")
+		return noopSpan(), ctx
 	}
 
 	var pcs [1]uintptr
@@ -109,9 +109,9 @@ func InjectContextToPulsarMsgProperties(sc opentracing.SpanContext, properties m
 	tracer.Inject(sc, opentracing.TextMap, propertiesReaderWriter{properties})
 }
 
-func ExtractFromPulsarMsgProperties(msg msgstream.TsMsg, properties map[string]string) opentracing.Span {
+func ExtractFromPulsarMsgProperties(msg msgstream.TsMsg, properties map[string]string) (opentracing.Span, bool) {
 	if !allowTrace(msg) {
-		return noopSpan()
+		return noopSpan(), false
 	}
 	tracer := opentracing.GlobalTracer()
 	sc, _ := tracer.Extract(opentracing.TextMap, propertiesReaderWriter{properties})
@@ -124,21 +124,42 @@ func ExtractFromPulsarMsgProperties(msg msgstream.TsMsg, properties map[string]s
 			"HashKeys": msg.HashKeys(),
 			"Position": msg.Position(),
 		}}
-	return opentracing.StartSpan(name, opts...)
+	return opentracing.StartSpan(name, opts...), true
 }
 
 func MsgSpanFromCtx(ctx context.Context, msg msgstream.TsMsg, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) {
+	if ctx == nil {
+		return noopSpan(), ctx
+	}
 	if !allowTrace(msg) {
 		return noopSpan(), ctx
 	}
-	name := "send pulsar msg"
+	operationName := "send pulsar msg"
 	opts = append(opts, opentracing.Tags{
 		"ID":       msg.ID(),
 		"Type":     msg.Type(),
 		"HashKeys": msg.HashKeys(),
 		"Position": msg.Position(),
 	})
-	return StartSpanFromContextWithOperationName(ctx, name, opts...)
+
+	var pcs [1]uintptr
+	n := runtime.Callers(2, pcs[:])
+	if n < 1 {
+		span, ctx := opentracing.StartSpanFromContext(ctx, operationName, opts...)
+		span.LogFields(log.Error(errors.New("runtime.Callers failed")))
+		return span, ctx
+	}
+	file, line := runtime.FuncForPC(pcs[0]).FileLine(pcs[0])
+
+	if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil {
+		opts = append(opts, opentracing.ChildOf(parentSpan.Context()))
+	}
+	span := opentracing.StartSpan(operationName, opts...)
+	ctx = opentracing.ContextWithSpan(ctx, span)
+
+	span.LogFields(log.String("filename", file), log.Int("line", line))
+
+	return span, ctx
 }
 
 type propertiesReaderWriter struct {
diff --git a/tests/benchmark/README.md b/tests/benchmark/README.md
new file mode 100644
index 000000000..ec6cad906
--- /dev/null
+++ b/tests/benchmark/README.md
@@ -0,0 +1,39 @@
+# Quick start
+
+### Description:
+
+This project is used to test performance/reliability/stability for milvus server
+- Test cases can be organized with `yaml`
+- Test can run with local mode or helm mode
+
+### Usage:
+`pip install -r requirements.txt`
+
+If using local mode, the following libraries are optional:
+
+`pymongo==3.10.0` 
+
+`kubernetes==10.0.1`
+
+### Demos:
+
+1. Local test:
+
+   `python3 main.py --local --host=*.* --port=19530 --suite=suites/gpu_search_performance_random50m.yaml`
+
+### Definitions of test suites:
+
+Testers need to write a test suite config when adding a customized test to the current test framework
+
+1. search_performance: the test type; we also have `build_performance`, `insert_performance`, `accuracy`, `stability`, `search_stability`
+2. tables: list of test cases
+3. The following fields are in the `table` field:
+   - server: run host
+   - milvus: config in milvus
+   - collection_name: currently support one collection
+   - run_count: search count
+   - search_params: params of query
+
+## Test result:
+
+Test results will be uploaded if tests run in helm mode, and will be used to determine whether the test run passed or failed
diff --git a/tests/benchmark/__init__.py b/tests/benchmark/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/benchmark/assets/Parameters.png b/tests/benchmark/assets/Parameters.png
new file mode 100644
index 0000000000000000000000000000000000000000..2ea108dc97e8079310c4358ea82b92e6e3075729
GIT binary patch
literal 50835
zcmd?R2UL?;)HVu(f}<#7p^1PaR;q{;fdpqnP*Jhaq$N@W={-OQppJz;fPi!b5$U0Y
z8VFJ%H39;mMrtUbgpvRu$$x?~|M#uC*1hZAe|_Iw|6O;jS(6aRdC$Ag-p_uXz4vR#
zy}P;x_aEQS#l>~-<_+x!TwHtMTwFZ&_wj;HoVf*;xwuYp-PHcWz&C|5x`=rD8bxN4
z*q49B@aLSrYeRpp_D9R<lAF^*kBXhOds3%t3T&d{-q}BNy5+Ftbkn|i{z7VbMEOfY
zhx}Kgx7|)9w;Vl}s&?Hw%r;iA<*;4{9NrO8SI>9+rc`Ia-@H#h4lJb5pGGAnJIend
z8Q?#c#8v>i;Nr4>!*<OQ*nJBxxP<$K^X~E+ZXwR=`#<>6oL8=cR>6BYulB$3$Z}q<
zz7T<PbKV?p;yS>2{dSVqg!3BAtN8zT37hMD#OySG=_4UcF*TEs8WT%3%aQu1`G>_w
z`qJhSqF$^BOEbNF=T5;C-YjN(d^|V(N^+;lR8{-*>U6td)MGgO{Wu<-#;j-4*%N$P
zn|}nXC6mGlfwWS00)*Zqju<&C$spn<V$*kQG`D9nsU1p$ro5uM<EkzrCUcFPJ2NTm
zZ{jk`VJz2cy?XsR+2f5euF#^&Tpr4zMh)yRG}P6rX9ohwsfdYW($13+KXR%!TVreL
zoR%m7%5XBT@hRX==a>wRGVA4`+=gxUBuspHc|d0CzHld3O>Loh)e#bN38A-5jS4cS
zQ59-e{%o}t5LYjg#jj4q>i2KZeH3{poyn?`gLdf=HA~eC@$6L1^>5dx{bsIm?RI-O
zYY$b6z>kJ#JCW)jV?k_Y+)ibfRj`F+{m!&Py9A!J(@A=(Nt@|RE_R=6iNP-ol{>cL
zsWZv0ExORvXj^f3_NPx)%VXH~)#WK`NruVJ_CUSmD*2zKMLA1ztmDqy-iX`z>aB#6
zFCwQQ3n}g?ntmly#gmhhwvvq0<p8^z_>0QQf>F?I`_UlSf;e12LbEcTeK(Lz57G(*
zrk9;7<+#yBXN!aA5baZG$fc@(7jzAn6tj(*MAS1ZTCHLJb6F)(5FZLLCUTruY#p{;
zV@lv+EVEh5#g7|b*|HHmy}i9}>32EtV&ZJH!Byk8Hskx_8DB1KPCv;1J{{}i>>MEg
z-Graf^0DZd^Xeloi`d&mg6WRIa(#n?k39IzDqQ3k#QM#!`t!&Ry(+EwEWLz`f&%A3
zKB7X<R=Xk0y7ARPYSU5urf4bMiH!kRlrapYoQ9$e60t}`CtU&IeGy!pE37n^kEq2c
z)yiA&{`Be7#Lm`Q^sTH3aBn$RWaTz)we)pyX^>t^2Aa^MyE!Y{O0w}mJNPqEqm}N~
zBbDxx@loR%q^+)uqCX3iaWfuOmB7`dp=<&6ioj$lVk~EKCulo6C{)f2V-Ubnt)oVm
zv1&P2zHN4t$Eld(JygD+6+S~4T?V#wiwD{{|2Q~x8OEB1wMjHKyx`N5&`?%3^m!Yw
zIsV#pi>aRuweaC$b=W~mKXTKWLXHZiGl27H1~P_8{hF9`)_S%eZ1QOOW|Lgd6V7!$
z6;yGnWNd$ad*iefK2p4W=cbV`4<$f1^27xsze4?vN2@gnMIW_g)~JEgukl%w_QTR>
zv`~6d0U{VR{!&;PN@tMRfunfh=%@wNdv$9^4(0N5<yLM?|7L}4dwec|{XH9MNUdU1
zwOIJ-sL83;Si>l()Vsz3OAd@9h3yFn_>~p6fH^%_=;Ck%z9m8|v%Fjx<4Tf%u~ri%
zUu*3=HplvVk4+l4b1@cOgK8*>Z)^U%oO(9_Md{Aeb)CZoZ8l2Aqp(NM-8Bd(^5-M6
zStK^HnJhoUD&XPa5rmOP5hQFD;UL%<u1xs~PDeXnXHv3gvzkr{3hE;thm+0g10k&K
zBDT5;z=sXxqaky!M?o8X#!^<^2hkQ57H5{(97_T>xFx|DT@I|EP}9`C9lDp1TdUJ0
z6^%E}#vZ=!M*(JGHV6dXqgH7#1_|@fE*`!^1_7yh3G#B=o;=|47wzsGLJKQc=5JTm
zAeIZa19&K9rFMiCGL&AG)VT2Fkhr?nFdxy>dd33*pr>E+lU<tMrzt<gsJC$n1c@oM
zE>y<V(S{ra(>2u8Oz2gNFT!>AXv!+CW9B{|IYIM2(9t6pMQs+&t6Tc^?Oq>Q3@$Z>
zT*g)|*D+lD-sgJ<^_D|%bxhi@J6;@NAg+_3jtd0-Lk-an$e?%*4-ZdHG>0h@hU<6Q
z>)%4Uv`-yoZ)vf~)m;$I!NviXp{>vLQjzs^q#$e!0CyUqsK=w9!NoB}C7Z=axO{KS
zd%$ABCpa}JNg8Q5$E1@a2MR6n81Ebpq7NVD6OhQ?+))UmhDEne0}tv*RHC-(2bN5%
z1+B)W%Pzg<T<2=YCrNmbMU7DXR;TNPofhK}Ww7*|iWBkPhH}t)ret40=LQtlzg;D(
z>*yEE?2ZaFpj`n;V<;oCDvg^q-<L=IDiviA5CMX#iNsnYwP>-|wD8Sg!qy%hmn@Ns
z<;41+pk^7^KM?4m#kne)pMy9f%qZZi^Xi2gnz$#WgVkNr80X=Mx$N{vyL6NaYgK`L
z$!BTgX>i2cA6^Un1!tnz09NZ}tqbjfBi{J3ne@pIuO&<|1fOLry#<)Q;$u1=XZ7Cx
zX093l)SIt^HeXjqIYI=`ocK5O^bG*9)MhE3csCokzV~%rBAXG!cCB6ca$wDWsfyfI
zhp@A=1L)~H{&I6YTg!q*C1|k&TCD+Wvg;`BC)C{DPi=t%sTYoojkP5Vc<=)|4-~<o
z>bLXibNvk+A*hW!Si~BcKw2emvUu=T{|*-t1b6dXZ?>?v`GR+F>brNJ{h(2jT9zA1
z?A3(&aE;~KHH-jydS|$vX*Nh_xw04Q<1~F79C9hbrlTMmdDoL%0r;1sGpopHGK|gX
z6L6_hf#5>dkw}Id+sto!E?ambkaO$bF*AI`gJLR8KYqXgGG;DKnila;=yh~#E@5F|
zA#cGmp=Q6=8D65~&JR9Y5`(>y&VIN`q>mwsDAtleJJf2@vL0;vLF<}iz)C}C5Ix>8
z7#qm+2X}rFy4@WmzeQ?C)UBm{C9iq^#I{_sHIS9l**oM(5d8%UmHi7`!vLi={Ei+y
z>Wx|gDdZ#w_R0w!N~}Kg5e-X7gP5%nCE1&Jkn<>|g#Hant*uu5HYnS;e%r8_EQ{wt
zf;_o7AzTkqpV3l1n^hgu&$&5+3Kw1?h!c=_B1kM+g)%tWLulafVIZsLO?X1tTZ92-
zmDH&^T6s~yCKp3DXkGWrat#2-5_(aASD#Dynx<4bA%UsUOCrHG{PcDc_rx%X<<mDn
zL_fY3u;>U2t7p{IhYeUF<v8B!OB=My%*)H0*@vV~G$*BtgM9?g|EGNE-~&##k2!r!
z!-w+uifjR%+sDZ%f8LWdp}@*mB8VRbxy;Bb80$W%GnnUAzw%PyEh?aDn9qrb{!j?R
zv-5!K#E{1T{8L$sWn<LX13scPKGG1y%7$*__%0Mwv_TOL4$`k)y~-WK{LtEY%|?GV
zLvZYI&%qFQHq9=`i0%kdp%iD4O%}agvY?;<<gx`A+rMwXG>V4;2$|`|mDd82g$z$!
z?VVX&Ez@pp8i~y$wbv~x5i}ptsLsyLXQEIm4SbGuguzm~%&e@esj9}tM&<OHx7@%N
zr6v4lKZFvPobL`K5SY_2mTjN4@78pJV;u($0bnI^V~AU8<n}`jN~kF*$q>YtU-rb>
zz$6!=M^C{J)ukV}8e6S}&~D0km<=q#<V0u5GJ``c<^3pAQ#OP6rNPqd83rISr1O4b
zz@5Yuu<H-P!@~`n*(|0q;XZZp+_`ghBs#}a`pD3&saWsSY>tOP0jz=qaLbsVgZ}r@
zImyR>_26S--9~|{VZf6A2{<RHa|u_x=M6^FHBNCTWQB9ExTt7|u%+)~O9%vFOQPeR
z@c>p);R+kXbK*<RkMf1PVx-`}aYL$Laoo})jo!^o-%;{%qFq|fwkesA4l9%t1Q*0r
zmp<VmimAH1W%jKkLP1V4+P$XZsQ!ANF|2@N_VB1E5{4U-nK_AxcrO51|5i)4%;BbS
zNN+RVnTY<q5LRzV-(rBg=GGoGz_ICPm(D}z!}m8N62a2pIXO8c1T%1)`a#>hxdhSU
z$8*8D-|K!E<f<v-rs=b$^rfMNnVHu2$;;{Nt@OOQWdV?}ZMUv)kSJf*(Lb08ILpJ0
z4bLj`L9(G39I-fLNM{UepUUmKjO(wqbIejG%AMLGr>o*r+=7`SWT-LKeQ|LyOg(7O
z2{X@tf_QCT_2e*6lc;{mZk+CNtvVW@S$=xPI2TeKcFB5kj9xcznTNvRY#Ra0*7Tj5
z9GhE#(B~!c9zvIo(Lv(yPOs0@jWR+N2*U#02Lp{bcf8P<Bo23sxW(a6GEl@Y##u0Z
zpr_}7d!@zf0odlBy&S*=JV7vhEjlXT^e*Gts4B9=9tS?ZySBb2wMmZ6G_ePC+z_^q
z&YDY)U$o;05%gGoD>rSRp5^T{QdvCj7zF5A;pUFRHJek>Z_;<B&M8Ozo_crS#CM}%
zF2;JVK8VE%;66zlZ{bj1$3PlDUSWTb#P!CCK!8w$9|9WjnL@R_l=w=V6mA-%8_GBE
z@{lCde4`4s`9*ksJLni-6DmM+6y_EgF0UK{+qjyetcbJ3`YX5K@srtF%%>dC$y;#v
z!og5MSW&-W#Bhac6yP$30W41Tu4F9LB)bGND}opmNq9i0*p(9u9qpZY<NDkyv&tSn
zv4y@oKQGPLEDB;S*hUo%5fD|fw!r`p5RPe+fGh{)Z?k|V`@Dr<dZsf*Jc69SR)W24
z0fI!#H}FY@C#$*|X%@cV6X*|!0IK2~&}x%_f4EP##rbl?8D5L!zLUUSh0Q}F`&0n6
z<47pObmV{qqM5x7W9L!&;?CVodBZ`KqJ>gbhhPY5+iQ@X=heIhhzV?!=unYua#v3j
z<Sy216zlW+nyM4=H*iluVv>QIk6S$nDOZkyFjNk@8?>ex)cZM#Bc0Lm`R&{^;rh+%
zeUsZ;grGpPNNeRoXnd4p(E?Et2o~R&M7s%)7sCJrY~s+n>nNJeL9{~Mrqv)_!(-}3
zt96li)dN~_6oh7SEz<cz)**D_?N5cCD9+9$S>uu|NA)!i!?xOta|2t=-RUEq`sQ^3
zTbLA%`VKC47(IyA*J65gP9`NKh4M-G!dsU&fbV^r<0XQK^qq><J<js9|IWoA2eHVu
zBv*j`Q^mW&5lZv1*r@u>t#WE255{z84AjxuY!Gm1d;}KeG8yjg<6;1H^nIgQ`|x2c
zkX+-}EkP97bt+_<M({W_tz+R@VB2zYYpx(S0kT2}Vy&H7_6BsasoZe@5M+=&;~|*X
zbNVV%L?P2iZrTe0=$o#tE?aiRuJmQPa{NBH?H@km%i1F6jnTcUqLzUqbYGon{lsAs
zoHSRt)}`UPwN?+9Pz5R&>$mn~aNL*Nm1-+V8W=F95jmpRRtw`1Zw;u|CJ+Z=>02Fk
z@rxuq7!1ZC8V%_Z;8Jju3DD^Aa%NTluN4&)-SzN@wV6&ew;#9+(%N(aEb@*0JjWg!
zqPf!`xzmtAU~t$;Wx&>|vm~4Z)PKn`fNWGAWwJdY9$-&<pn06^0GHTd_l@~J--(cO
z@$1VV_Rd6}xH8dZ%(*?=vJ4&y;5(WtI3B$vrSz@ayOoG$g+Rsa?d?->x0*SID<QE0
zWdfNCFx^Xu;TlVonG?R=AUg1Xl!yZUI7V6H=<6mRpGQL@qN0og2*t(4xdaGe$-J5T
zsdp}y4D#s)Y=K2dkhcisc)X*9aU&<?x=%EPym=?FCr`YQ#2h8{q4+npIr_8&zXA9>
zTh`ABnjRGuxSn|?irHpdS3<!Xwt_U$mrhMovJ+w^4LYf?*7ez5_Zt_0CYvO%cL*FR
z<&4Yb)DKKZAXhr87#Es{aO5)!Fi|KkAK&D+57($5uY$V-2ocEKz9ngS&7#&<(UQYm
zdlKr7aX=(OOjXCx(QyQv9yOiS8x)#g>3;!KOiD8Fn*iYw#6<vHz!EfR5vpV78N}rv
zHbcg3s@>Jf>J+f=5iAx<9V7&*0BW4Z!Fj9RPj|Y<#v}j$Q$cA(M?EzXz*NEdI42|I
zKjJm%57%N%YRQWlfY822yEXga!-tZe`ASgD1N;qFzx01YmFvHtQ1-vs5;E#I%Nid!
zd05>Ej<*5%42wB^lk;D{fy^b`-8*-hL9t~52*^?%3eZ}UfX)2#2W3yH7kMb(zZ*<}
zI*7;gE$||!#r@AOzkhc5M?d;+IMf0SDZ?5+v%Bu;oaoVhZrb+7g8N)gR;AZmPiT<a
z&)?j1@Tvf^rE1$dORMbXE>4d|x-4*MtYw+WFhBgWz?_el86OdBm4t=;^HD)ExLi$-
zq{%;*cQNKQ`FG8G|DSlfU>6KQoQg?SZE1;69;#LLp^ON&LSDG<e6JE@bN_oUWBh2t
z;p451*%Kqun|~Cfc4?n2*P<<rYK*cPqahVN_<yXXe3r+l6Vp6qZ2oIaKx*Xou8Fl8
zrN}hPIqQE6bi7wx0zPatFxDV)B4y#n7$kaR&`ABulKSy%;|2joXPe---KBcApNb2h
zH70)Kctk;jizuU*A9d)O$<4$#_Tz#Ge)P}fOtnPe?GRc=x3A6Q4*rn5FQuyY+^KRq
zgl37gS(%_E!gIKvi19Sm^jR^nY3Ij~BbU=uDK5k55uU<b+A}L@#PR|;6_<e1k!g>%
z87Wt`zt++R{WiT)G`uwrokg_t4-OCy*U&yr#3ho`1J68&S`HO=en~EO4oj_}HH~*9
zN|QIdYGa1PT6Hnk&Bn1RuA@R4h~<#)Aus7&5aDw^u7&=2ubG2cwXA~l$pdyJ?p|!`
zVn;HjYOU1Uj1q8K6#1a2a{JNGyQxUw#(c4s9M%jvp{b^EC@sv%flfJTQk7avZ`z@j
z*qj()+)5$Vm(JD;M!l$DK@ac|oujlQG-Mz_3)b%IO>j!CZ_o(lh6KdIKk}|*7#?Au
zg~8ZRg`MO)f-Tk;NNrc@e#?%6NN6~uPGT(FSK=J5{p0QiP#&i$e7re#jnX0Ez&kAS
zC$3I;S3<kOlaUi~8PD9t)rc5JW2aPlN|n3Yh2>JWU<(iDsOFG~4{IYb#RxMLVYF68
zFnd2i!`nBAGPq`<S@OfOHcAt7jz+m6g4b7A+z>`aDx}<!+q`1@)bmng)xnoPT}B9q
zs;Oe~snQ_bR$Ss0#P9OvYDe+0Yef?yG+*hvHYM)!250Y{fQYmfG%pt`(`r3jXyw9@
zFV<c)VG4@9hdaJjl+iD!;#|hIeEB7yLior-MwKOAW~b?rkw9hYI4l?LH6Ft>PhKxd
zhp4f4oKUiYiArRkjy&Dxn|?gE9zIbsWATR>LjtYm$J{1RUw&O88LdL`O4*YgRZ8ya
zp0bCQlGsW_MPG+zcEhdRqspl=#q&6sQO2}Hatw06!B33Tt4bhk99!Kgya_VBbTLRm
z2)20=HaxjjsEKtO|1;%yik8HZx+N-jl(yL&1IvWXGv4x}hiO8*Q-5YZIN)*<eI*7T
z8q{3p#g&F)dL|Md1~?rktV<1KHf+`R*DD!%N%kG*p&WtLFL?%x-##&{927j34pC(*
zW(yEB^pj)HwT&T+e)>`FcMxL>%)YhJHQq0tc3n-0gQ0viO78P<jnvKOu+$=TocswL
z1z7^)?Ff%g5N67}qXxe?Gqq#JZ(jM^;EUn5`as%`S~ICK2<ukj{YzpDvN&l;4a!(x
zDDx7})CDrho8~7-*cCghR1ET=yR7HXH|brZ_-Nb8Wmev!P??0p_EOu54Jf2F6UtbX
zoQkn;gaeP$mb?2L$9YDh)eSx5XAYum3ojL1#u+gL*=>t&8U}|U29ofE?d`BD`0S{~
zLSLw=%a~BqX?n|#ZsQ4qm(k|`++Z9x=63S1Pty`4N6o!@#4$DGC?7&$(<-|SXugWY
zP6JW6D--2n8*JelXP*VijB==kfqr6g;+4929?An)kUM$P>jbH$bakKE`gRRXLZLe1
z`A>f|fGzk27p$K#?^72}#17x$tuD|&;H{B|bmoaN(Ki?KgMvrX3oag#Uql_hf|Cze
z?2OVBLeeNX-3@9g8YM=%``<{Ptm;Y)JQFDqpy{({pQqkrB<cAxWqj=6M~C@EDB-OT
z)bj%vjYA5X6Q6q96>;)Z&Vp$g(J6>#`&Oo{qtT?$9=!mjnJAnLVSAm8%xVh~9d7At
zRa^4e^V1Hd+PuW;wJlX0V}2ETh&#lPy^~{!YVjkrQ`|l|%e`N@X<u>-DCeP=QT)X1
zRy#G}!Aaf8mGQ=2amf93aF}&}IffL$*k75qPC!yZ4IHX7i{6ec&_{AIqn(n2@^eZm
zvi|;Ql{ULzizdgEc1mK9e!nxHl-XtTG-zG+R9ZoW3XWq6kj~WvUucAqkY<!C(4bmP
zPb+;V8E9Q<j+!!q43J7ZI*IOSGPE!N!KEEfQC)W#!Bk9zeU(qSau|sbf^pXj3;ql0
z2E4@P+LpP-V$boSELYTNpL4Orj&_N#N2E$cAMs={fdvV^giUCKI9#DAakAAuna*-9
z_M}f1vwyINvF4juM#EOiGR+nGYT~+*i9r;zScdgQ@5`1~O4kR9*g|^-)hewU{G5gl
z*GM8YX%s)rm>QoJ%wE`#5zE2+XU5C%0G7qxohxD~I4xj01^IB>_=YU3ekLFBfN)n+
z2qX)3#AvABcZWC^Lr55fEmaL$1^LVs)xSgH(vY^xJ9+TDazDfg%XO0Q&lL;k;M@aK
zZ=VQs(oKwZ(bbG^YntmA+p3qScempg8ne3(`>p&h`cqI$-G#s+eyEW<&IxIw|FLbv
zcmvQM_wf!Yv2<m=*N=x%P|vce|7+p)j6G!(Yu#X2-s~JH-pa|^UkD06FWBe8<VUxU
zBvL}ue0(n+k~k6my%+@1i=~79HDly>4TSn7e3XvE3Y?;)Ch|H;BFeZ#@YR4{Y9eZ^
zp~2t;#B#iJbag?Y&@;P~+C~NWWd(jh6D;)U5Waiz+$qnYm49O1jKC%H^O83Yw*CM4
z?L4S)12P+Pdeo}Q+@(3{kC-H7wyHsl3uO2I*kS0;_IAGT|9Zb0(D=()pjOrdN|;i~
zojPY@<0~>O|D6Sh-i=n%I8pesPoAKz@%2e5rw82p@__IJ(0!@9zwXKH0E@(dMYL6t
zBS2wXLEf@f1oW%rEvkP;dzrKn?w32ZKqHO$FBEM4cp)$Wn&ljM3aa8ip(pPWkL)*t
zl0hePpvVeAb&6A@UKl9C13@+c+Q|R>l|1LKE+yXP^cCF~hsrB~?&dV(*MH|!><nyz
z_a@ys8=Ht-?c3x`I^_HCn!L}kxs*6K%qb-O^K%D3S`~Dq=B{zR^SKplUT{}qeY?$%
zzWOk+iZ6e6;=rDl^X1*yg5W1$<{&Bb=f`LNt3NK0=Ogae>z_Una^tjVrfM>5^L2OZ
zjbkj$!y$#y@ABy{&%Msfo2`<ABz_$Tcp$G+9X3`}u6n6F-C$dKGBbK!Uk9OUR@PB#
zq-R!!@VTyEqQ$G*;t&uSV8bvaM`kXmTe(?L@ipJHb0>wyke|9l(!$@Jm`h|(QgcL#
z=-AiQPhffFh|3-?*A)9nMhvXkaAs!SIe$JKE7^+et_4$IRbZ&35YRW}N}dgkRu?9=
zju-k#nOg8rroPs8t3=y}Oj!vLM@;xPFaxUd<r_`#Pn26CGLU2PcrG!;!;ROieduoD
zT9xMNF>*>Ty!E>Y)m_|}%J`)l5pp|&qVR;|e@#nF;&D1vJukXybxEgsMyn`|+h^Q9
z`NrvtGgmwXpB$`m;`b8Ls!&#sWh$y&J|Q*d70#b!1&6tMl?!Qof^6%wWBsZ3h@vm%
zx<$kbvYl>^d=acUvkZ-vV{gwjR6VP9yx1cc`PgGd<m>~_DAzkWgHfhCiD5eZg&#uv
zf>cy-K228>*P<mvBfnpTc~uBTji3$`!Yv2as)a;3Lna(YC{V$5@h4a`mb+fN=XJ)>
zag>K*!|&a75b<JE?xoBH$AhmAA3myhd2Lhs)R6y!=mOurt3<?Zbx9a!WS#3d6D4<i
zzgbL$m)KvgzS?C}xV?1HbA?oReZ_D780jl>W)6lDTB8JX@Eun!RN4MP<r)^x>>E?l
zV%v2qFUQOo`dcQy`=7Rw=5f-6xt0mKT)Ve$ir5<cB`)6?+dW`O=F(6+mim(JASn{n
z+MS4ehtbxr-u^`+F|CI5)X}xNkn#QQu~DgV6g;3Iy|CwlTQ`rB_7vXgJ!&bG^ekrh
z7j5m^LZgN6N34YuLEZB9$0zuacpIvFVb9u7)9w_`RV787kr@18W^oZVZTPp2(ZKhp
zlj1MLHx8mb=@3A@sBgYT=ny)XH;_GXuGjvS3}Yq2kgf}1p%vb8iV`oB8g20KR>AZ<
zkMp&0puDv@$ok@&_C}k0PqsGI!KDmA3;nTb+$!%8+-WznbY@uj!Fs7#7P&k`?rCsm
zdPMB&?F=LXTl}>56R~V=%ok10@ufzXYqL6ix9jHE{q&X&0clBBs5i`MsO-WLYChtt
zF0`vI;QV&=a+qAQ${jX+akLvTJFR9mW0xG1skJm(c0t8?J~BaON29ZT%WI_K=|iWX
zb+=mIK&s@g!23-``O$s6#1B*vG5aEv*z?xt*)(j7hvoy&g`X_F=A0pAD6-E}aj5r_
z#<pSgK9%&vWN`tF*YOL<Nk((`e8Yf&<u9R>cpUWBkCB8<rbmGHK8~(0has}y9tBZ}
zru5I;m}f42!JVn$*%BiLB9_8Fuyw5OrL<fX94$ROA>AMSWlgpD(;}0)`kcFOchpG)
zo_7d+G+I*f4efZv?;xnt0<pAbA_V>Ks<^JYrJcjds_Ka3;H78}*LP9HD5XZY9e<za
z^0Vsvo)0VWd_>M6xjN2$Q%{?h)#~U9n!L`B-VzaeSF=pn8P>?>9Jh;eFhoqZn%vUE
z#}H@^J4bq>Y8iATQZqGtF<GVg(;c(@#^v#naMe+>&Gcl6@H!sdiD@vn600B)nq9q|
zD)v}15IQFBZ@i1J@ZbXAP~n6z{jweBeIANE7MitvO1K&8fSI<TmyIq|=Y7>WZRXy|
zVO|3ri+tOel*K!)H@ok>n+<PRw-1l(zd!BDGX2zO`UR9XW3SV(&I<$`7mw4`b_CbC
zf?q?&DrsEFgytrqJ^zMxSDo&uZqb~Ot)fzCg5%K|;AeD?l8c2dA!$;s2|JKBEX&~W
zUtrA4W$0KkX1yu8LKw;js2!=tuDp|rmNhj!$eA|_=Elr$iFsN-ts4GqD%3^mqQw0H
zu4Kt%hTvvQbor*rp6SBsR3x*?O!)2n9T}>vV;IYC!I^rm@{M}lNYdjgvhKmDtdfGn
zwLytd3ney^ypBT1)Vp_cnYk8pD`^)05HX(fbG8bTgcI;#O2+ljfYV*^sN3?EcU^L>
z;CzftZ>+f<bAh2RsCY|8t(CF|cMpCcl^YYut&%(P>ZEZ1<Jp_fcEO!dov1$y5Q$<~
zA=0b&r8dt}-_;<T$oHnB!7z}W)#oYmGJ)_u--7Fh(BfFdw{jKe0?zpgL2xtrIv(xo
zYTGN(Z3pa@Y)V=H=-_3b5D-8XmHU14V6~RM)w`f`5!bJTGparKhkVbPi`H-Vk-@2K
znYo+}6VR$7ILgL`b`AdSGG5fG-{NLx)i7(;WfLC9=%YoWUMg0#an&R(8CIVFkq|E6
z(-o2I%q(8PTh2s;@e(6NKD#Xh-}ccwE6wky^z*#T*13?hdzvO9Mhw#DRUTi)y>XR2
zg1p2<zL`;y_lKR<#NiW~CzLdb{@s}}PH6Pi`^~l{bAK9*hw%j8ah%5qDuycXw%q$j
z<mqdnk%yOKQ?``1m(F`~JA21od+Clq9Yi-9?;)^b@<Ux~-zbddveffdYgLMy523eI
z)1~=Sh9|F<@7>r5qq>!IV<r|4?2ih_Tu1F<An=SXZl5KUJ*lM?zkkg-P$6kQY`ku3
z(#J;(_@rtoytObZp0jrVcL>oX&ORlK4Zswwuy~2bU7dn0vV^oi5=o&q_qrNwai>5Y
zpn_2=TR@O^5UU}F<Q#%gZM)~}_>LPh#*LX(+0#q6O|4;G9j=>TgaG9AR7^@?9QoX%
zK$H%&SNAu4vZPS`qaAbhLARpPHy?$aBSpFCnkA)~8Qho_F)_GtPBZEGkF76q`!L$8
zHq&}7<X7MJ^H8v;`-_Ko$x#U5B|#0<kNseRhef0m9N4`-G))A)AOV+Z;Ul|TyQr8b
zuUS|yOXE3g$VVKpKW(SeS7-_=c^l?M=r9>R{9>{>NJUhtG^k$)mFYe98a|eAUli_G
zTg>efXS$Q%hCR%r-wpb80Hu$;PhI{tNS4f}0!>`_zO%W#RjxOxW_j+Np9z%<S^Gt+
zw?e>4LDYqlZp$0FYTj_GwD8%vUc2~C8Ub7#$bR|JqjE0?mX7D3Mjfb=!(;_CfWQ<+
zMqzxRNJqcO<8!*1_1MjR-v4QA2u=rTxm<(rpBjk?pPbg|+7Y9EM<qM(qm!-lR;SN1
z)>Q?L6_%uGtqPIif8d5~12@;k27aDEG&h)rbPC7+s6FgCzQ}h<S^(`?A$#L=b!H?}
z`4qrcvF8ACVJrBf*G5ZuoKJNhw_sk27KL4GnmKn$>AR$blGGDp{c+k}LokYA$Nx#!
z+N-SYl64sKEc&{M9uLJqz37e;6+{JV@meX*+^0qT=Jw`mb|972*|k%?nmPxMmVson
z9OO5Kg?qZ52wjr%l$u{_4)Udar7&FPt{hoYA?_qo16Ic}22?nT00$VG#dj_<FR|w(
z6_Rg0tKsfOYSSrT8uUJ?EIzDz(ClCt{%R|7+B)EK_PQR{)HJgI3mF<Vong{3Q*Pnp
zbJmrm{aiJbrZIXVI*yMX@=&C~CMia|#5V!8sL#l;zx_!sM!VNmXkVDds?Cl15JM5Y
z<%sOIgJ?5}@Wty=i?r90k$lUe2J1;?8H}w8;hl~yd>G_<R26;v25mzZR-%Cs)){`C
z4m~!yt6fY=fto!52aa}c3Ak7sFNa}%L2`HtOGeByvvn}fT(1ZtcVcz(24%q|PjjZ+
z8($Yq{o=W%h$-siGGjy>MCW}k;7oCC`L5l7b-yvaoFd^1=}Lzj5l9$WD7OzrZQiJ5
z&P4k<F}{#pL+@~sm9TJR(cY?o$*TmEZJMX+`U!t2kE+<{%ez)gDhIq=Sw>YNaovqS
zi`N?NcJb9KT`MoAo;Ua;j|Cp5KUhw+fjc)&sC%7H)AG<9Q-f^^uhO1<Q8Uy0OGp5%
zAhg6z3afx=N$4ID59N8C79|V|l<Ml=(T$H<b4?4M)J;ndBnj)cBrjd$D3OKd+(P!!
z6{}~=T_w{cpiwFL0U>i+lP}GVjSNecB2J#y*p++V2^_TrsyL)sg@D06l)hj@UbBFo
z#vzEwW=THYQlZ*mWY=?LBe>(Iqm#p1ThF}p%-l2YDN}LsYyH+b1q-Y2(h^swb64@$
zdh<0kN);BQn6@-_38^q}&6QBETC$6%Ci{70+fPa+j>Vg8XXFf4>yGBdSC0%q;A_;G
zHAr#RR2X_D=z>wnR9M+AT;O^{;E#l=sJFu-BgZ@v6-hU=QSsnrwo1=U-h=6DRx-y1
z6@A)K0=gyad$l`0gUF*lwbdCgrG3Zt!Y)C;Ek5~g6k7imA`Ym=5Fj_jc1ivxI7deU
zF$)G|aJ#m=yq6<9&z?PN6%g>#x<5YwV_JtnH?k{TOY-5vhcYNNaE@Gt|4A+@LZK3%
z6ZvSaaaU}9{a0=`HEG+F4t}+{^dIt<^VR>-S@!?UT6F(QMG6!0YV@LGgm#9qf9X~n
zR0P<lUj(?yLJG%8gIj>_pqHjwbfi)xB8o?D?Q(U;h+PNXePZBhY*P5`Tm}E?p-koI
zH((ice_Hcb6Kc`lQTVIKQ%9T`R<f#xGkIEP?eA!x>OSHuKH9DEcO=!X-aReqwyWOh
zBhKAgM%zGsM;G2WeWW`&=`%y5^cu6fw@(ojUa;6bI4Z_7M}5bh-x86ukJ8SNqeV<x
zdw89!9zGIne^cjEziSiu+$qSnhnwah_D>f=>pm2Jy~BUq?$lv9Y~mMWK~vS&X8V#e
zM?$o{4&8}ou6fbd-4Rj8*nZHnI((T$*-ArpQN`zI2kop`dvnVPW-d-@w9dM_r%Z>`
zGiri-8{!JLzS4}V_x^#^YJ7)~t#Xp~uNlh7T0xXzYqVZjSb~`hv^;Rgnb`C<AJXO?
zD@5PWcGSgFaPc?Fbc3EenN1QEkL@N|_y&OLK}9D-wfl;QCu70`=nBP=UUddk&bb-?
z?oA}B*?MB+Z=&jV$wei+kt3j+&G3N{M@)Hz?r_Hfou-2v<fhTm!ba>p{QsgHkBs?M
zB_VQ2`_vIRNOT%RbnELOFEB76zUYZ3#-9kOLgWVmkf>t!_w*l5O;xf+ojM}Uvj%nC
z0@+rpv5Fg`+Na8_b589Z9aYwdJZD;MT{&_Hj7j5?H~`V5oqO?{_d0%-@;)xCxMA-8
z#crs`<Y7e(b)mvPDK{fJEz93YN`~4z7IBn(SC!<x#mOgMipywC_qwyPVlx>TQqk`M
zqXvszpWH|h6@3_;_%+V_@fBFNr#iivGIJtUvAW3G<+8aNZfwajS}ejw@6?fyLXk%o
zp))<Bz4&)0QpGQ=4YKAAS1F5LyKWplgMTeGXjEzH<I|k)GuC2_Ms1g#)jd1YWh*bn
zBmQzA(E`;jfDU7b>U0;EX%E1=)siKn3?83f7Rjg2Kjru|0NZcotJdA5c;KW`ym(>O
zc*j?wXm33gs;Z8m+1}#q+#%M@$q1S;to(J&3b}n;b{1qje_d$0H}HJC*#3r-q=}nM
z@GbK1N3jJj38o%uN2BLIYvRVnJQV>$scP`tuFpn(BHUl5Txn~*L0?A?CR|{8GY8!<
zQihfLF>QYBQ9AnL^F!TxUu#`a{|$vjDPB}por*t%4r7W69Yl*Ou?2!N^vy()%^0Q1
zue>>%KF-;63t#r@FN23&>PCE3E+snMo|2l;!LLeLHEHwv+$Le*a80X09enHfY>Mg=
z`JJ`s*;}Vib@#(Il{c$?FUEQq%FAeBT>Zl-vTJ$F!-`<j7ja`Vo`*dvez%{yNw5%1
z($g3C*i1g3o>*s!DX-u0OBN^99H@}}>@l`#U0meW>}1Ej5pKxrV_)K-WCXqP1f56E
z7y?ZvkAnhm3-B1xoiphd!u?u%^3?P?!y_#eAJ7<4tI;<xoPck=4UKp7%2$rmfr>E;
zre8M|dZA$7-%84v-ybv%{MH^tGZ2BhE?2z^bOZWBu2n>?7Qx}4A=+gdgwg%K?}wd;
z|NO{q+%mV1Fl<wepruuYXzP`E)3iyrd*P7^0ZwyVHJj;=4Io0C42Rpc1^(McSs|Jc
z{#I5}_tD+5P?8+1n^Sl+FKPjbVk>#87IWv>7$sx7uJ8|@*R1GYNC`#o8IzfV@0wi|
zl?)R2(G_&_V-gmX2OBynfl?-{J1)xvE`~JBA@eX^IweI4+tPhnCY5C8#kF`zecjx8
zyQMJn{&Js#+E08Q$*JPxX?FIUlHPkwPEvdFDqov!zxCKDEwQ^;s%`nNsvC%Np|2|D
zGbg9+*`YG6BZg0RuyRpWGpvt5rLJW{42$-3Wz|TVb0dM)?#L5%x%O=+IV3<{93s~Y
zi&t68cnM>;4h!2Z$K11WC(EfuM|kBbRPVQ@jL02s8+i`z*1EQv?58=f^k8EwrAlP+
zLS2u?QdoAiZrjc&zi#12ojbP}>$Ak;s51&sy6ppHU3|c}@zT-J`!r{}KrFT+;bxx(
zu0`8n+o8L}dU~WUf4c6k1x#AlVD_=Dn~t!Lx}WfqG32hX$?_55usiiFbvF9z>)8h?
z_5!y#t1G2^@wXRJlUXmw9i$KOV_%Q)uEcQr{O5h0RXThvwU@q5kw|){_2kYWU4-8F
zz?XAUp}Kf&v!L;Te!Hnt8W)+@4MVy<4F2x*q*7Lt@7#k@`ndv#de_0Xi_F9=<N29$
zR-bDwT6P7xf0yG&D;~~?{Ng8)7NogF{V=ESCKot5h)%kM5^(h*wUQaBVUkg0bJG`@
zl(p#5fw~`<1BPA09fJ7<sF%^+EGj~F^GMnMVVj>~2}{Wm14rb#`_e<BySr})2xynA
z6wsYfN-rLD&%V2%odFT$xh4fFnxW5ZkmL0g*MqtR)0AL`;%<0plfK!9M?^Kxu3Yu1
zI17T(h&`(YRegXt$7gjOa(r&wVBH&YOy^r&#^H4i_c#Akv~uQQK`Ht1={`vdxsV%W
z<?@xrOZ(Hp5(i{JB?4rYjJz)iW_LavFitz0QC_3K`?^<AUH!-l?8|4B%A#`_$O<E<
z>-F@COvJYLrOarFAz{eN`g7thKn2ZZj!P_TNAx&{fSu=1+^i5U(z;>zf%#eSNb2BA
z$0vU+gnsx?_r>1K9JwATTN$cy*7{>b!IZaIHLp}`BCC9@dKsA{`VLE)tsO#I+hlB7
zxH^N`yLY$ggvh_vB9$Q!AIyfr3Api$`R(+-yXTPgM%4$ubPS3}CW-fw(<^NFBl?2I
zRwu7TJ1MNGhs4jOU?UpV7v74)trh>X$Ht|H7vENY_25PH@w>WD0>)Q3VXPl?|AEV?
zlFY{F<F}hv>{ZT62Zk9#&z8=K+&$Rx11_y|>A>sOX7Nd1pUbZvf^QxV=^!4l%!?WE
zwduX4sP-;&yqo&<y0-i%_Q(D{LpOOL`Ufj3r^`C8c&#rgpP1|L6^B(DvAkQ9=0qft
zpsWwk-Cys@uO1zVO#dAqv12^9kV&IZc%<XaK+RX%G0_w;>FNmtO~5+8zF@Zf>#6?r
zD)48)hM-fE|Jc#bD`o3aN&gM`O7E-k8W&M!&*N>vyDU|z;|ZKff4(KJ3H%`F0$2wI
zxY1Ze1*wqk;bAd|#o|jKE+W$W=#MOh$(#u`BN-l9^s549x1v^BWP^=eSq0{H&>u!-
zx?UUW_<K*r6U+;ko*d$v7yTcU4E*@t{1E5=9pZcSVKsQbfHP|hnoc&CM0Y!m9}N?M
z22<yZ4mzobxnehr%q!h~52V+Sel{cv+BhAeD)2=>-hIIkBxj%uJS#NOlco0`M1EH&
zOcYd%g>nY%IU{MtMV1i((4URepUto{$(=2nF*GpMz1znEPmF+}P$`$O+6%jvDx2TU
zTd}Q8U?g0G(+mUS#x38!f4?Pp1eovY^?w^%GO&mu@Bz-jj-y#Py{|=%eSiMk*1?4Y
z)1-0GW8kCUB+ll4J`&r>>HEbcB5}L%7W{-Gc>cTETmL7j>;Ijum)SPAP6l#Dxn@C&
z94lq<a?(B|t$U;-D4zO^ww*RSBqW~tlkoco25yCjhz|*4Ar{?(&^0B;2ux(x>|I(j
z&Q0ZPUhUn!`bOKS7F}z5&}>4KU3YBnAVu}tJ1&RXu;Jl0=knMVopz_Ltkr@VPygEQ
zYk<nBFjHvHsOl`a@A#i05<ELuz^&3PDCJ%FEVj{NO{c0F5YuSCOi(pC(%LMGt|}Ip
zFYr9=v=Gv8yYS0_f)y^YsvGn&+0_*Nu80A-)_0>YW2<1f!Yn0T=``{i^Cu^(%@(%&
z&RE;$?~q<r^AXKe9$>}2#SxD5x*Pfiz6E}un|35M=kt~|ctGJEbS#j1Rg<~?%$GIw
zkC=MD9}L)Ws~omy+{k|xtJtH=_S@h`AJ82JPYubnc7N6+3AWl=zc1aaD(Ft6L^XUE
z7rB{jZF63bhvA>;I7S?hKxIx_Vtvaf|5WH7sz^KggvE(1CSv_73Oy~o`Y1b|n1FO!
zmtf~8R`e=CFZ;Fi`ErFAaXC+N)fv$_k8D3p!PM(o3$!#82T{)^M>w4om%8?%d%*Cr
zM8$fOTVhrz;Ja3*{fB4Rlp25fb%&W{yqh%dwtq&ZuZi<qXoFKC4@?br_DHwI5^e^9
z=NeY4QX9j9j?U0AI&`C@B-IS~KYU9Ro_7$f)Mz{4wg?MRsE#QwU*h(;^y&%kcTAlK
z#pQyhvfY`Zir*oso8=oHPnl6qdMc{CO@S0@>*;^glQ6q%iaG5%r?O8kUMy8?YhI09
z<%M`#@8G)yZ6DzpeuOs!rK7uD5!qQ%9#q3R=Wes@YwwU*Ua+#x9n^g1TmDsLP#IRC
zl@lVe#8HikM$`#1@VHs&YVYeM>!tDFp=UWr9|5#kpY0Y@zR7|iNl+WF8~d`aU$9*M
zYYJBQshR6~`M?$2UH(qXgM-kE60xPm!JtYu*nTc-OaX!@x97C?ke}{`cR;d#w)dWM
ztE@)PrPg?qZ(ku?_n)EhoS}DG<bbAI3tt9`Mvh5kuMq(EZ|@MS$;zA$*!pU)#m}i>
z+c1VN^-`|?I}0$=GVshv+}MC@ZRNYPtgA<O&R7AuU=fAtRzyy_$JHaj(<Hmk>|ZUt
z__H}!YJAv;9k*FEEKnx!(Hfmu$L(`U+0(TvQshL`n~{;L39~niB|1Z26=<8m45~(F
zf0<NN^E3pV!Ei!Qx8X&c&QvL?)Og<Kkl___)jFlhVbQ`j&DbrtGs~{%FZr!4{jP&c
z;yFXAU8xM@km$9eajg6f(s*lz!sUhAoWVdP(YdJ(!Rkdt*Gz@uaE%9(Y%L^xImx_k
z7R8P-jXSwdDCW-xH;y}(mBb_`8#cGKNk6rU?GbA_b#mXIzJDDX+xO>wDmRt8nD|K%
zt(el84?#XzOmv)CJ5~sr@3B8}?ollLKwXOG%!2^|<H9R`1OzJKQ+aqj)1tS${y{LR
z?xDPdv8H8Drre;uKs8o&R<kPfU2Excc~^$Nk55lvpn=pTnS4bs&-o6GnLnsMuUhE!
zNcub?%|KJac!y#xy!WB;Vl;j5asJ>77x}QZ)bxU}1g|lK*5D=**Z<WEiu6o@JrycB
z=;%?D-2S|aFfj(5zGbMqtv?=w*HkqkOZ(+S%gs&SuXu9Tj#@d<{JJq-my?aTxFmj|
z)Nu_bInMJ|qAHq@4Kf8oUWGG%fOf`9Q>jVlx34|8n(J7Rx>ui7q^4hMa_hm);Myee
zCghoaJ1+)&i=FP|y8F4dyfj?|o*=GJ@~JRejJqH9>68&g;-ceVe7U(pRT-u*6E(0>
z61%~stf<#-kKp*n%9*eO=&+45hkTg2!Qe<Fw^h5?b&#$OKFA#xLn4(-UCY&ZtTzQM
zw@q90jna_M^&jys%Id!3!e;ERNZN$`rPFudOzgs8mBicg`EE6b;c|Ce4%}MWq9~PO
z#htNSj`P&2qBg%DkUhKq0t8p2PVlH0T=?0^a-`^U+kr=6mo0inEKdl*c|OR9+<zgE
zA^l27%dB_dt?*v@og<Jg;a|}u$_2cU3S~InKfN<D7SG(%QozZZy6oIu@UN~Kf0^{`
z^`}LPgbl%(qQ`pJm>QjhnI8Vv@A76kV_dem1C(&LN?sefrVbf+*P;r7TDinvdt1Zw
zMj>76JF6Ml_s!zMI@J;%mP2}a+r!fAP^0l(wOTS|FcmFx#1%TqPTHcpPqOneGU@56
zZ_#l3fn_XNwX}Q<9wD8Vm_@COd6~PkEp_LK(j6UvYGneJK7CwYY&Zb*>vOVCw?J95
z%qT?D;Zm~wx?6T$Mv!J}dtHCz*Mn&4bLnQo**lhR4RS2!XsnRw-C-CmwIkfLI4&{%
z2jlI(nam5lSjd$)0$;pJ9Nt&cV7qViap`Hfjn$=&#$3#CQu5x6tY|~5>NmI>j>w!U
zs#tDaA3A{iP%+OZ(I5BuufOUAG~-|VlJETVie#>*#rl)8c7=@tzUBCVvF)6t8{|6{
zsx`;&Ch*}4QU}nGK5sA*8o6A9d1Z?b^0~=F&%E|azRlBnJ_OGjZmbM$l|ZAxq!0&!
zi`RJbz=6@*Dztw8@6(B5vbqcSBb$_w(I@BQJ_<uB+j)--JtIDK^qSjQTN873@sOj;
z4EbA<J}|la=i9yF_0{T+zclBIKdUNidATt1m-HWdiIo%7kGBv?88W+puzWQbrHtb}
zWlp@b#|Xaltjg<N@aP+xg>+H#S0|nN3)owZO2@9zWa<xZ_v_y3D~Ah($Loeu(y~Oy
zoba-@>EC!iRqR}~MQ$&=mH_HlKwLD=j3Mc7d&f-L4ncv*W;x|#nA*K>PMa^+z-@oH
z{G@L6w@UHn-Geo(KY8*`=bh`dxOD2_K)_<mb`m${Ti_VW6Pb}KX3`mGkESG~oPP>+
z*+axgXT{vg%H2+mwHDVuXqWhG#Ny<~y4epu#)c#@cjj!r8)bHX8|^nmGk7R^H9wkW
za&>C%)<U}DN^5bC&ioelf*a#iB>tN7a10mMA?;wuNt^=#+^^df_s!RPOpx=WY772H
zv&xnHfB59#<af%gOAAJ+N<G|0Ndu$%-gR%EHZi+K|Kom+!`D?FNu(^Ek66|*!{S;f
znYrdYZ<x4MMn|~@>NYR@F}pP?4A(>2@zu+3x~VOXEbm9h#E;@OhEn)w(=xn7Pm7n3
zoVGMS#Y+g`Bla%@>WBEzMJ3-6cGZP@i8y}Z=F41qsgxDDj%xuwQI0<DaXYqckQTW2
z7bog7LD(*K`6~QE`JLN`d}&X-zhk?=jUqnc#_(ivW6sRl*3i2{$zv~4Fp}Ek26zkI
z?_qV1<C9*EJ|SU!tX9*w7&(qOb)h@UM$Fr&CAKH>bIH+P9`?;eR(gdh!+Udm5V98E
z7IJ%Q;}dC<PceQ@Ze^DxefnzUWztOrii>+_jbDBEh`4JVw_d1f6E>7z)f9Dt#|z~8
zE^L=#u#(9D&|EElMGrgH-oo!OS(v_AdLJgR_{v54%=B{svgrYIvYup<p#F@Fk!#<Q
z{(4yjYUu!4zk1ZPmsZaA*JzuWX2SurhL!T}F{$n6roHD9+vnc!D8`6j7KnqfS2@9I
z!xz!XcYp7b;N#a{-hMyoI_NR6BrJe7{XG40rdl{@@$ujzJ7vMuzhwr_LH=q_V+ai1
zdW-1jf^>Kg--_-lPF@gLu6xC!XRxPhczlQb8STT5<jMbdy*=4oTt;0>igW>Isx(S^
zw@|EnF{X6|j9>PDEVv~vx?Q(sbv*)4{@b(S9F8~sp_*@s#g^kYe{R~J6_53XIAV}1
zIP*8S06&^_X%DOVoe|+t;7M<BUy**`Cwxtf_<lK3YJNRixBoiQochbmz#()`$+U!>
zY2HmV=GC<J*DIv;o7_e*eVxdP<q8$`X{&GdvwQE`q19w{<W2AXP{s7`A4451BEX%+
zYoOZjSJa_vwU!6aF*DwDMrh7=i@<a)M@a7R6nFplhjXj9DiF>P&AaP1gBtz87IqNG
zJPRXT>Q*?VbFy>vL@{qZh>oLhh2yU0KR@GQ#8+@0f(B1_?jc^|ae|9J@2k#d-ER2q
zy<)V9;_mOkC1aBg`r(XLvLLQ<gaTlH7lLYNvH*1mId$B4RE@sDlOHecYGhlj0!xO}
z26m~UEQ2?ADj4@vg3@`HI(j;2F~5vIZ@-gIcruDO$Y8+FTMQ%8{=^W0>$tq&3%D8_
z2QlnSMI~SCGYhZ(Be-0-<_hA^xhnZ(yAKO&C<!Y8A5f)dv@#=ZkzM}6ep?(0T!uY=
zAMMG?uVw}p8XxAAO}Mx=@9iPJ;BorVaBnbDs6B11y3=f&zQ@Gz$8}jL_E2M<?8YsZ
z{Lb^0{JCWY1B}wQ(l2-he&088Rw9HeFRR+6?DoKA3EqjBrMaT*5iT=tn&OA~mh$PK
zep=&*kJD%v>op<R0!u*NOO7`54E*t{3C*Uu-TI^Sg5LQPN8o1$A0i?iU;DLm1~o=f
z#X%RkLQ-4X@u>nZq=UH=p9$lp0OFwXafH9`kh_oNkhEGU`~bayIV22sj*xC$EVro8
z%tJl8CvW`CR^#uNzyAEko&wJQLOINBv{qfg{|8nSvlCpVfHQ@#pHjDY94uEh<~Dwj
zL0Zx!zYDhKK?Bcxk>|_hl{g|8!6jxg({ST_dW4^%PaWL?_L$en$<c2g(!bzs&0U9_
zwRu^SJhFmuu_%PjXVQAy2<t3*;bCgC%(=%`zLg6*r=Ho<ui|SGY>{re7*+2Yydml9
z9jJUjO9wgAdBZ{g9d&84sCHmJZ>sjwrkj0mTs~gUPV~jcj@RhP==}SJ9I{1KGSqpd
ze{#2F%eC@h9~wYHr=fn)>#RkyaRdOn#)$BVKNR)*aj;h>PpiwDURU^bT<6a%2ffBE
zoj2ve$1%BW8tF&d6H=xR(Ds-}<@vk5Z}<Uds08CQT53;%)joM$^Z;JGy_>t=Oh!$%
zM4@jj{{6*c?Z$ng3O3Gtr8)lIy^Ir5_xoo~lLtU{b(}9~txi%l&-GTebgAu6a{MMM
zb5spKHxEaiB{zV`Qy%gEHUbXL)c<^yW4+NvrQd|5j>FrooelGWCm-uRTeu~NoS%KN
z_SlRTD<QqK%3Bp7GS5e}u|2kmu1^$xHAzh=++Ij`zO!U-i<fp`6J!>SPjW$zannA7
z19^4QYm56S82tBP@<|IKHCzoX?VkRSQ~u3o7+c_%SH`?oWb$W@m+72=(_(sdN8}8q
zrf{~73pw&8aacf%q1NE-X-s}>eaY=jlt#SuZoob9kaa>?B6FWWG1x_uTXSBwLWna^
zTf+m1qdWe=7OAhF&Td3MP){|IF2+hGIw%A@4Y&RUWL>*LXZo@j{LDh}M91j}nKaqi
z693HMWEsPb=Uu6fKEKQmdl1`ZG4>f^ipuW)JYU-qk^^|U-ltp5BSTtRMUfbZFMWtc
z*B$fb_S{@m0mp^*q0(2O4X^!Dg$<<R3_A8ccdLEhGPCucj$fD8BuMl*dYbQ}1-H*&
zE29EIKa{TMaPYQk{6Zn0{@_|+Vc2j53Mpmy-Fx659P?=)XWw3vR*7vgeC@G;#u(L)
z+s8S6y>Qt-$2~9K$%<6h^!7sE_u*nM++*&3zs1^Y(+fx6$zdT6wnCE{A$-1I1_YM9
zx%@xad(W_@zNTL|pn?U&21vI7N)ZJqQdJa{BE5GN5JHm{2wfBu1XKj1i%3mK5C}bh
zB1NhKN$8+R4GASA0Yc#20shZ(-_LW-b-iESbFTAVCm(>Fy;*DRHM3^T%x`9vfSs^q
znP2nzX6#e<dCKw~N=HQJ=j|RT_pG|C)|rqcdrQ9a=6X}^pG#CMjx+#_J7YSee2q3|
zgSjQWTGkV7`WxJH;veZUF5S50X70@%D1b`!#mxJn6s_6wIucs+>!OOMd_KD8=Q^VE
zEYO7yARmAGBf1iqRUN&xbXcu@Ep#woSxlOwFI2rtxAaS2fMFP2+)S63yohnX)?Wq2
z+ppHifiWS1b6be)meB~o%5V9(`M+`miNh|}cx!4ye@;HhOE#_N#n0(eA?%}aM4$02
z!UuB?yaT0xM#<ofjU3_#j&+pUMDOriqwRChkuwJMt~;U2Ake{^VF!*~+GEIL!u!pF
z*uaHTD*{b_BA@U_?s#WFpij?1F&uVavp9jxkFcNm{3Rm7M{n&lpnof@IjUq@ot-5@
zKK}Q$&E=boBFsDG=Rhw{FiV8pJG;Bm!!81Uygc>4?H13`s12Zaz@GFxbK+nL=$wPH
zAbut*RAg_2_W-VZ`M&vK_Il8Ry|;UNgE2j7A)7z_m9ZTiF<YCYo}MvA<rZL5L^Il$
zNwy971>6i$#R28FRt9T#HcU{-NhYov7wBut>M_l?CYQ~ce!ux@cjYu2OT_N4Nv)%M
zQvm{{AKdi;18qJ4sI${J2qgN?41>Z>?#&_Ky?k|Sm*qg9iQ7Q;yC2VUdjtf56psJj
zX8+qHocn>TdiTcuf|1~G7FuTD)gmKs8C0(XOx5iZ_ENlSnT+d++VhRfH*TBvg+tNd
zd03~#LzLfv4E-%^G4|i6a_l~~JO^)JGWYy!)0I<N4n}Z4zfS3L)aicXWg8(wF~eS8
zCkgPRFUi6p@@+4<AM2%Oc-5ipC0&Z1wJnfff4aRQ$v!a0<|#_3ndn6zja6D3Hf2DH
zhj~paJ=~~jt-W=Y##hlp9_N!xmTU4~NIGBFL#LaHaC%dI7zLm9vcejTf(@HpEe<_G
zZNKudnS$z)O6aBB^BB!`|MB}aiC81-r(C%GMH4|6x?5Cc)|Did>6sAGDC>yd<9+Q0
zlP1QwL5{v5C!HN)957ICw|eO+DQs1tmul_@`?f15G`V5(SI_`P49t-Pu!P&bL>lL^
zt3e&mH=eY$^<T4wnO3x*7Up}>aZV!_olk3~SW7Y^ebr5Kr#zZorp#~KdUMvT2)IqD
z!ty`ax4lg4FH`la^*B;;h_l1InN7XWRlhk%v1GI;`jzpUF0PpNymU9702J5Pcp<zZ
zVaxp1oVLl}Yd$(#v!;L~*3YR3I<{1q@IZWOb1}use6AQ2*ttIVRDex&QR(_7?+B+I
z_x$@4VA`KZtEMaOt8F;G5$oTlC1U&x%{gNNr2P24d{VR*;AOnd(i~&SIwGJ@&I4V<
zZyZpp8S1sn<S!}r(pHS|LG2GN2OJL#k>a#PRk_e!QyTK}ZL+dfW-vFieJNjz3l`j;
zUxt}l-MQvHai@%9nD~Ik3_sg+52m;T@fnms`@1{_^9Cv@n#$mBH{@jrC1=!;hBk&?
zKM4F~eErMD=FrGEw@2+Dm>rFilCA1qdwcbmUA&T-QqT~@t5f#$mP){kyuN|>l??yu
zRVFQ09-qEstloh8utL6(j+${%{H<gdDrvqWUR3u1(xrT0Ut)SnY((s>YW{wsOQuGo
zZ#lW{PdUrPnh2KV=rhUAiZVj`eo0ynS9OgwOsSmq7uG`einS!7=Zb`?M>9kpdW7k?
zjuAaLjH0jq_6d$INy@;$fA*961LBJV&;{BeuzBp(FmKW$>DSN2h<=3ZlMZG0vM0j#
zkI2fTF;NCcWgViyqIaz$?8g&?JjKr%rgIl>nkZ8!!hqdQl2G_^U0<N#1Yt)3&k6^&
z9gBrRM8;tBpo?i=aKZ(`Z<ol&pNfj7<THAT^4Ne7kZPULc=x%DAp8_ja`B5nPgeL7
zvdPuU`pcEeUx!DZdIs)ut*<@KL-->0YRQ3p;pWrxjYNAL<DS!CCTyk+@{fbu<4d6X
zF^-NZje@};l!b1mAkkJUV0e9{ISbQU2PA9T`66E%VJzDlN0TQc96XLpmY&!Zool5G
z&TtwO55&jtYG@a_-cPj9idfeu_7>cFT&Gyty^#{)(-D=ZMoprAC=4gHnjp(9z$o6A
z$wKH+J>>p@uAXIP*Y%emF!(C5$hg5I*UCUO;qXG>nMTD$U88UNKB`<SW*x}s=@HCY
z&EOAWj||d99smzMGCqlCf{xekiuDOFAc9_A2plskvse+j_4oDd`n+sQ6c-r-P5lDz
z)Vz~uonryPN|JLdpOVJB?r%vhUQJQAgjtS0^2&O!xV}V)%na`XGAd0IwI@bedfSuQ
zYY>%2;s~jmcD|;SoyY?{6Ehqc=0!GPDwNFyQo#6+n>f2CVZKR9ZLE$2L~w~xlN|E$
z?sjx89w6BYyx9!xcqc&gc|c`OZ6!1xA*)iLjTRgou^c-&mF)_vv@n%Lf4Mw!sDh7@
z*>_;zg@yU{Z^MW?OJ%OH;;&+PN?<(sSG`a3f{JZIf>{=R%{Vn6mogUWSeokd1FYwQ
z!6P*t!l$=*%XaDT@-iTBo|qr)hhvNuyDJ|h@C}Z%%@*T%D4rMZV{hOb@8`PoRC8{I
z%}$T4WEK?QtY5Si`(D@mGAb#`R+3c4;{1??dRLrXy?hLD$6L1c+BNr@uX0r^3twht
zI77)h3yq>O*uBMj$5;L6Ps>kdjy>7c365C<Blz{FTTt7mV)7$5^Jo<JAg`$@K1rlK
zB6b+<7K`lTtLzp!af@y_)~6IPcBz7|BKG8gfnbhT7JjBO+7V19z`B5;^kOo|3HCjf
zun&A}C3my3aGTBJKYrmRrm264$ZQ|qs(LBPt>QoPJ<{P(p}XeknVMa(WHDs!A!BHQ
z?(4Ht&_ntbzau@~&{nx3^K{VLk&jpNlsF8>QB#yG1M9i>Vg3LXuYBQoO=TTl<k|BL
zlQ%U*2Z%L>mv3G%9W~#UH6*yynYmmS0{<+!$Vb8{c41pBYB=h7P12(bgx@3OFam7-
zZYGKGJkkb-M1$hS*U)uDB>h2LHA_>2Xjj^p`qy4Zo#L?@=m&<03u5)Wh3!KdmH$Gb
zwl6c1Db8y9H_qp|wz=Ucb;2^7D;wp7*dY}XYos1{qLW$Hh^;ni(`eXwdo@87@VLku
zYr7HyZ#56yAY}?E(ano{OHIk2J}Et1uZir&8}fxOEs?y#8rF7%`f)p9^QP2F$Ayjh
z_ZVw<L(=XJ{Ob;&kLf2(H)Sdo*UXdiEJnjyI)c}K^gdM7t28!659)g;aD+0`4^c?o
z6*Q)W7m?_=RC0~QXm*~~HrGY~vJi?cB>GQ2fmgSnlYj;kBQReq(8d#^7Ri`(c*E9a
zP`_L}nnis;tE+XNZ>q1Xr8Rs0k#k~<-D9(hu}T*o^<auVNQ+B$sSAFSEzWy<`fAFs
z?&^-CXTk$mGa&!`7Z!t<69*h2!7TZ*h~gySmnpor9p`sTISWNG9+3x(b8M*2V=b}d
z4za1eZi_F65-IQwMs6mkahwNOflFr>0(5<4U_R$6pdeGRoS?!Y?3<nBP0(8X|H-P~
z+ttZd4iz;uu(BXnfnWMRbz(p@4*J2cN3H{kbWF?S7oe<j=qykQD|8j8MHchl_2;mA
z>&uf-j0b^oUi0}`A@3Z_H>eHUUH__SM=T4o0=wmB39ch_py1h0pbRG*D37ML0hAGT
z7?#`9$9I3VB=ZA2{#IHrp4_LaHixqf#dtK(^TtEBtr<1|P}TC^1`)A5#lmQsElQmZ
zbi4Dc+SZzSC{P{CX^|0*01)%RPK*3U(J9F9j3QHj7J>nm{q1&7h1H~8O9Heu&kz93
z>?J_i%vPtx9WDSh!Wdt^*i$_L8w{}9Mr7^&v0x7W_q%&saYplh<*j;17~qsCM;Kq8
zO4P|eB?D~aw+gjQ?vw%UeRwXK)WvIhLYi%|?X7<Bm#zCGi;WVhh(Y_~7%?0)HHwgo
zG5!_o_60T-o^L<cr8NAq@LEYdaTz^qPz*0C);5g6jh~UUY*(ZuJJ)X=1*_`eYv2z>
za&LDInn+|DDN2=Dm_q9MzeZ77@ZP^V>wX7%-!;ilmrB>Y6*k(FY2?clFD@uGyEQc+
z-{a#AfR2Ku*})5GJ1WVI4<3AW#r8VQ-<fWTITXpVYdK~eyBKjk$=L0Q+d@LF&86u&
z5hU^ES+_#>UvzCln^5|@R;mrK&0x*CR_uSv=+~Ked{JwvpV_$37~-)j96|L-i$EO-
z9jPLo$ZVfEi>?Q{P0MDJ_PV1c)`>x9R9iIXT~U>GA3KZ2_?}!*c>EeQIJS8s#RSE-
zRd0jLF(Fwvif|1`8KPq~oY^(IwePF1t7}@=VO=}<+qSEa0YQu2pFp*<Md^n%I?Gq4
zz_OMCzJV&fN;()ZVTXs|xbG`P@euwIC8TcP)uY?)P~WLh>R<dkVQtH=XJd=^U<qxo
zjBo1Q)8-yc=s!-FxqACB_U;opg(JfPmy*%Z21I!CiXOgN;@HJUd6A1RO+r3)0_df0
zVdh+GWpSGe`{SBysqp|`9%h)SkE*zcH{neE>@vFJ5Hnl_>pK*mo1%HQ&-&MZ%@e(j
zz;M7zb#`Vl2hmeP;4t<>#F_>8uNfS)Wn-c|-{aSyszq))Lb#^;b7|WclZr^C^M9QV
zXx=BK_S*RKc|5;n*;K0L?Hk%q-tV0(bnpFnub{<cz`2L5N?n-*yKO@ECl=pn&C2B$
zcY|YS_Bf}3jzcB>2X>X1Fb*o6dEqUGaVe{?lwbwcX6sdW?|eYzyH?HNvt5Fw?>)Da
zhlfbvCOt91uZqu5ed8AistS#l_i?Cn$jPcbRD<Rzb;uT5EX4}>1xKRSYFW%I%V6Hk
zlTS0Obsl$My&JM2L!lH9X!nc&&1I_}YhrSl)~Ae}SIKqM&t|DABSi*vo>()$o2G2~
zway^0T+PIyjCItCD^W>}GzQVX;_IsxAUkB45D0wd6Ef{eS^%mH8pm!K&IS?&W4o!v
zOyDk`Q18NT0bb;QWf|*J;x4OM4h_6^<DN$|7vIOUa>ggGKB*m+PxP(~^+Q<AX;RDL
zZAd%}Srdl*&Bro;5TZtpntJ?7b~W$FP13^S_qRK=#`qNC=lt%KwH8%zc(zygMQXri
zy1#1|)cJyJ*Ve`s9_?EwP4JBq&t7@U!B)1fEC?63+ysy@`Gj3hQT)8^RQl3s>B~j)
zQNFhW^J)#esI)&%r`!eApB3c(>dGWEPeyaQ%O0XjO8`i;&4r_UPL7sku~Y!rp%_|D
z4`x29sxkC64GYF!Ke&Hj%g`1RW4)->H}xw9yR(%pk%JpRuotQB&ja1My3wjqdY3KC
zJt%+4*Ib(_^2UmnYaKN#c&os%B4WHArXx~8K_29{OjrZtU&K^Rbx_mU>7v{w$`rtN
z$M%~w_oQ$0^b~pxn}lBLoeWktZ7#cU3{bO%Xp$->n`*jTNFVA_!H>tELue++*VWE(
z4fo&|`HkyRKK;05457ajDN7N_VI57K38`|0d+5K*9BwAKb&K$-j1&_*R!Wg}6(c-$
z#c#PyCzq=(ck(g7=GC>2wS0X6@oLz83&TC(Ko+ZUdoyRe?d%Ey`J*oeK9eH2c-i;2
zI~87YUSr3oJ7zt8rBRczW|7ipIL+Rqr|2<XGl=o5QNE&mfSG|Yb5m}wzqvYg`3>s<
z&EZ}zRil}jSxKok)njOl0u?kx@aHM0c;JM+^()N*MVKvZZA|2togt_1(d(HHgg2Fk
z46XA=qB4BPOR^Z~GS9PjEa6fUj{t(|oUj9IgXI2@A>|EjGK{+${7#y-B|p#FRn;wy
z2Yiy@ya%XdQsh}Q20PoEauphK@&s3w?@VNel_hwvcfLbgak!56MK?TSq>2`pH{Z6s
zWX(h={N3|iQm!IdwM3&*Y~f&uhZ^hQ{R759=oD9>F7Mh2dceJyopQftdg~nkv=BUG
za6Qc|#%k<k?t4jNBRpIAfO+Aw@RgTu39h3iMAmloyBbqiFh|gkY*X*`t?)TXHNC;i
zE#C0Sz!(6lZZhL2QJ1o7?W}C>wEHj?WHv-Y0;C5zWV1eP{0>01l+?@DCT?q3Cd`}r
zo3Yi)9=oW;L5#6kozVF4P;C_&!h^y*x#m;(g=-;(??O)zFJa>oGcC}BCtw_i3TKBC
zc%??>u5a#>twkR^RI+%T6~HZjbd)&oUQnq-o0rO`fp?Td(6OiEa*Z|cb-fIzv7;(M
zB_$;Ivr(Pkb9nQ5O1QLS**f%NI&QS5tDt%r^d*~itiVY2@p||Z{_$m1xyB?)_nDB<
zPbq`T^tG{&;K(zLVuDgGROy@wM<TrUar2=pT^FmWWzX`Nbd@>Ex7h5HGrr+GCCf*Q
z7oCP>?y8&5Cs#DN3~W6ZabWTaF=A&gv6*-ctMbUe<VRNWhXG|Vy^of2*=f-Q)G$&U
z-3ykx-olB+8K8u{FQ8W=QgIGbhWKo8lcAswm`m7uI<TS9r{Y0K*aKi|8XC`c^{!X@
zB$+x^ypXb?T!IgIsbvgN`76fU3Ms!4rh}7Jo0SBBMRwI$_%;8gkfBr?8h%x9)w7hb
z0RW6P$z3@FiK?YPQRG0270}Q95S){qWBIWZkRzTXr~`?dLvOyym*3jR103!@(mUsi
zhv=^|{kFJS)E)~eGq-Yb*%9tz%wqs>HBk4)lE3Y(t-<A)Gk|@EK~!n(I&C*&kK0kr
zj;2Wip)3u4S!<#ypW6Yr*O>wL+W1P|ipWusP{PCWD9V4u4!1(!05WS)kaB&c3~M&e
zISxtR;TyB&BLsoUaV2EB)%E4xLy0D~cOcWVh{3dOxw_$1?^u+ZSwe+a_2VZBDpDr>
z{y{d|mls|YIo)L$C=>UH9sCJWs<GneP~vzrEeQr<E2x<fH^N4FOm~&Hu!b)l+;-d%
zaX{5e#fWBmCYN6ZzcQRZEV8(hw?5yv^kaS+ZHco)Y6n_Xl#VU*VSq4$rqZ(ULNH+g
zdHj0)WDtO1WPKe{e8K`=B80H9tA58%B6{e<n-xrQm0-HzOk#M-r&!VZlpM{mVj697
zvf$lFEjt4us5DM_;Vl}uaUOEgY#%mrw*62Ew}<~Z<2AN{Xm6|QqXac3n@zG)V?Bv~
z&(l0UB7dZ$f`tfg>SU^P)q0mS|6HTC=g50X)t^o~9hh9`RC7n%%7c$uxM^@@cRm6l
zsdnbh^jh4?TPC@xtjJze@Qn1|M@n}>r;8v4sw*38H^YoK;eWbLv6EKxd_EPNl-hT5
z<>4ZlCI!8?!LbDOr9Y(INucp1j<7yzmh!>BXcd+9>b5At{P;C)ILlk2TiI=J(5Yxa
zi79*#5fT<WbA3;bsjv3nvWvKSrGfl+U0_j9Qb4Y0NoJn|=1}M%V_P&NxDdfd?v;&=
z3a_dT+<<-Z4r*N+0Eu@QLz2&4DRt@Q7{NK{=GBI{7L`_PHnv)4ojq8B+^?z?+?Vn-
zAS4IV!L0vueB}T(r}MAI=klyoPn=%P)BLD>>)&4p&s(bw+eor^UKp))B$N#$ADHqw
zV9kPc1lAuVr!o$|j&YrVc&OMw%e?7Pp6E&bd%N0VV<nXUC{1XS_CQbl8H{<P338#!
zYozV~o#gbg#Yokz$)qR4Ek#e2kMwXMXxrqRcTf~<79?)oR0L7k1!ifr{If$b6;~7G
z9<=QaI=XdIGd1{yWCRc<l%aPTx1Oj*TCKKT`a{6P4d&=VUJx3=YVEHZPN)=zOi)5X
zk(<X-->)C5e*%;BO4d>^bod(z1q5Ep)Z_cqY)-N{<UpwT^ILXb2IRB$z1p=#)+qJX
zVTnoeDOb1Wme-3ECttw3#@CAhWYy4F9wFS$3m-M*Klon5ggka5;;a251?z^J>$5km
zBLirOYESernVLjs&ey4f6c1s=R!B)aN6EWlXB9q$O6Nou>)2#I?)S2|kLD&KODdmX
z5pOIpYV#Xx!^X^TAnuJG%jzI&q+JtgV`eLgff_XjUo<H#7=DYQ4C=b|9m;;%9P^du
zlavehW>4K9bO6ghWJ9LPD@I@{eS|wBv@+P%Mqu%e3{B#e&4`%7S2<;1&;3rH0W@MM
z-v&yaC`zga+pT{EIB$dh<$t`beOTgST})>y_Ss*TJf|HzHP+1ev^l0F%~$;N#dyA}
z-YOvid>2s*{`yx;f~G3G@4w3p=TWwyhFb;d8a$6%)XP6u^6oKsNGw1`ABW5@H+V$=
zOBxagstAs1)h5+s5TEXJI!ez}=C0J?15@39!k0c6EfbybBWCf&YBnvCnwc|35}ZD7
z+3Mt--Mb`E8up7a#$waht@Qxka?VMDd6Q+lFQ?8Cu|!R=>1WyHA6qm^0N1#%Rzli{
z#bX`dzYnUOTIlRIzFI9{H~#eKuI?SSNDKt3r}hGvfcfC}`VXfK^Nb9oRx4I?+1{tZ
zGAYzA{edd=gX)G?Awox^<9{jEH>Ha;A_hpAF1fEmGu1W3Z1uhU^<IR`ka;R1_7C`H
z7w4N>CUY;Nj?HynD<1J^HN|xloFyqQ1V7E-MD~Xs-46UyEfh3>ZMnx~RL%F|@~boV
zDShao7k|6mB8w$!l^QkQ<?1(C9Vr}ZGbS&&2H<;ulNtcd+uNwpXe~@w2k>=)f??-E
zb7Y~3M^T@^6NOAL>9W_v8Hz#QP16h$L$ZYq*-<NFOphdQt!I^$HpXqYm@(iy=e+vS
zFDHArC4)#i`GV5-rU#n2>KlstNpMq#5Lja~kFIm<Ps-GYhgNyOem$8bZrzN}NwGOP
zGTpJDPbDbH^0K3ZHh8;i#<+hLJH-kP)qqEep5m}gv+FN=0`b=w3O8sX@Eik1jNh}<
zL80vb1qD6F3(rmJfUx1y#*o|cxBnAIy|aG+aJG9u&wP-Nj%ZW$IgI}ogZ|}yuxh{E
z6@Sv{2Lbnfj_;W_pxcKS4hVjsJycMYgK@bhxo^A&{<wX34_w@R^8edgfKQ`;v0?k)
z(Nh5ZRNLN^`DYl+d(N1y8Sh=1f?n=t%+Q`I$HvBH6~JUeD8!xF1T8Q;e*ZXxrb*Qy
zz&R2Bfxu=k9368&1$PSxF5dW7-lI|f0kG~kbZou$-uM5)rMtZOzXmgYSr8y8*>uH-
ze;^CJ8%=7*3efs?fAZlHz^actn{(|a_W<9{km}hOcTVkD>qv(G^f*9r#=Qyr4jKX;
z`sI5G=JV~1{psjjX)zO}R114##eYLvVFfAwZ?RSt**=bvq@<>rPS;zJ{~(^vGhgSn
z6c=f(zdivt)Zdy^n3)vIbFk+lvz)X>>wdf9gyt;PW_CPBOQVd16(<WvI9e*+yppDa
zd<fv23}jS16hKCSvV)HbUrWz@poZpmBDlhFHD}9vS1RqlYm>K;g#g6b1}RUpE6$7t
z`Y|!+*%%-I;l(mj?@mN!n~A|5UPWqm28Zfrpfaor3^n0|1U?hM8k~lzunGBPx^*eZ
zS|^vmNC{$v(BR_fD-uRK#^Vy5F{Dl_@~TTx{-E_*nMCmy7ak|BBHNn|@i0xE$k)#c
zP3!<Waj{MNI+7}uOSscze8J77s>Ectghl#MB-?o;QNywv+HCy!8mmp?W(k3)baE{4
zp_p8Lh%y6yEHjYNQ6pVeTF2l|3ZtgC@}*dD?WckN%bg#f&$MPZC7szJ=ox-sf$w0G
zgL!FMS(jO5joUdfEbas4{gQ}p4;`w4mFl_g%jh`Cxtw{P2Tnx>*8@A{@X`3ve(~a4
z4uffkjkm+2U4gn4JRfxM3tvB$M^|GCPu`|~v^bPvQu|~IcYZ~G#(dA+=EsQH;0i1P
zJFz%d3}Jl6Gl&#VS+J}=Mge|!$k5m@k}YReLSQ25r%!M_*$Z4^5qZXBe!UF9HpGlX
z#J1#A{cCHh7JpR8kzkKGOlk861|04VcG1}^{1WBacnarToWmb=lU$tkMDcS*f9Vd$
zy40KVBOzJsNk)aJYacv1<*dt1el26(`eHT7>QCyqS3u%`iTw{!D5wYkR05k(o^GN&
zT_CV7(sV|GuQamw0t5g1dNJsGfDM6dZwUbI3eelfJ%KJJTJ0CGIgaOS6R4Z{cY;wB
z&JyBf2Ko-T0<7%FhgZB@vUG=o52d3jOgcEe17NW=Yt|<D_b@xe3c&MJ+Nh^e{n0$g
z0N;I53bV>czrA3d8wUYl+g8T)h@8qfm2q!Y^7zD<Userv^`qDt^q_C@*?aG!N{cf!
zuc~X*AOe=|hz_`a07Ap2$;M~8T#?-Px~~Ck1C&%GB4Z{1C9_9`TMXvw@cQUjoCk%}
z3v7%m@rZ+G;g4!8z_PG64hF*DkKnS;2B?{lq|h5Xof!4w5lKRA*W^_<PYts_H3~yz
z(B-1%BjGnEB=sj)hW^`}b3sI$J^@FE;67E?kjI1HymyU-6=EbwQ(s$(sTD0aSlAqL
z9D45{8F+)m*YNxS_qIX>kwz%Z?V_~+Vle@&2dmmBdrdo5lG9zbN6ZCAbk9|9OmDZv
z=aka<j7*lMaA+|&(IArhIQn4K!gU!ncH!tm44%~*(~*L4x}Z(2<H?SCp$Fk%Zo2aL
zFGRV2e*f^O;>o)Q%<hx&sGcR|=7unNJ!GGeaBVbFqv#L(J}r{8z%uM(jGnWK5$4$m
zssI66^qFT10a|%{tT+vA^Tmp-SX}-sn=Cnuh2>{*<1rU@6L14>WVAh%l;u*(7k9Me
z9o)XlvdK3%t9|}5R&Az$<jP@uD9F8rtJG&lB7w$_0jut|c`8TXeEED4F3rW4m3J}2
z*V*|hpeYr>#uf_X(>)~EJ!|Nj=>(#^l&la(zcUu&>MNUmnex5&d^gDp?p}s~qH~F>
zeepZcMVJ(T+3zgr|6c|p?dT0aaw8T9-tts<{4cRt5ntEdMF1r70(1@T*w#loun};S
z#`di3_6GO=2rya!5G}@oq^#|J;Dnb89i^+l0T^lj1{Uo^e1+XJm*Cm}4#3II3f(dX
z4jBtw#UmTqE8PBx3)^Y^wbXTnV#5ee13;}_WW!d`cnD1gIKHelWOFSWfSCSm5G<1&
zJ31yK#0}^c#&{M0lA?Bl)r5CQ7`CG-IVkW$Wo%zD@?wGQ6~@?J-~aXmJT^OD>;Y5J
ze0JMwlkq)coIBr!F=D9yixyc0Zrlrfym5-L^#51tnE#W$f)+twscTQHVOH(pX%8+r
zo(6(e3{v3Hj(1SqW?F$bAjFas$=3AdEy6jZjgn;T5Q_=yB@-Y65xw)I*U#G@g*L@F
zwye*1@R<81jd5K=To_y8;lXgXrkhRp94t|2CuMjsqL+`zW~v4{)t|7`<uDbqPQN4=
zFCJp@>&Z;RVwhA*=G?S0!e1G2i#An8oxURF#`y;pJf_rLl)A8HUY;fB6KLKa&>Kv6
z5uF$*n6>^}r8?iz`uk*btGA1CD5V>4*I(SIV8ZfZjkNiX2^r>D^p$M40?>C53$sMr
ztuS_n*dj&CBWl7by`IN7kF%9f{<=&$5vxG7P0vD7#$w0Z{I3l>$Gj3FUN8)@0n~^c
zTfJ3ae_Ascfmv9+#vd;$Na=UO82@0s<pBTOZ4$PLXNIFsgRh)+jkmE5fC!O%$=_^b
z4tD=xXBmLcqf2m6p)JpBJD2YqvwM1q+4Z1hLbMWzY(V&xU?Z{h?(Xy3R~jwof99Po
zoA(<}K7c@!TIl!E?mku<RSb=9=2R1(o8Hn-_;HU;NsK1C4gd~?UkaSRc1)hdRrY;w
z2F7-~?R`6Xs@wE(AZKoRW6DJilxpP!63F)$R8M>a>xoH>g3<-TL3}c@f2nS_0Wo*?
zY-41JY>hVJlg`|7SSSQ!tG$2ULLZ?S7zPgeluo?EAuL~-3LQ|eqRn+%4o$!n|IDl#
zF=YNkM!jtqidC%oSMSK{B1<W(r&Gy=MrI&5DF#T<P~XoCzaMsI|DDT?8*1M7Np1Hw
zypW$xnt5kW^<e+bj~psbeklBGCm0i$+z=pIdF9-by!DN5c_F8|icl0UW{D-9F-|*z
zlZu>}SPh3@bk-%)nqK<%!<)B4^P($3SwOYR7{Vatc7>}7m9OYjPoj?#{|;2dNntw`
z_y<vDZWP)o)l6AvF~=d7epkl-0~A5x<O2{f&kA9*AL84~_e({nHPsTco1JI^c^50{
z+oBHrXP)HhSReo^VFX`fimJ$)yuB7s4L#Q1k>lPqGC#s8R6$hzFfu+{IuXkHgY>Ku
zR-s`(;~~>%aS{a+p2EiA8SoLDYq8cH8%G&fv|p^=3|-wGOJ6iomv&`0(<+OyG>+!p
z8cO`(&DklAQrD$`havS^i%RBh-ekM%XH>iFzvefSE4Qk@WC>vo0uG7?%`TP+!M1>E
z*F>hjlkKC&R#Ucc$i(x=o(ZM#4H+s|>t<&#wi-BarlH#CYq0L7Hg|J0FWqSn$udxB
zRhwUT*|z^O+y?SAa3u6lsbKV&A`+tXElZ`^YfHr|Pjkr`8rs}rF`%kuqlnuk1}907
zb!KY2M%{W2Z-wEB>hZ~pjcja=frM4MyTAu9!~+MdD&SKF9l5CE3WrJ>o&3GSP=daR
z!(>o`w=q%37bVqbB7|vZCxm#m-!Z0lS*oi^Hd^qGE)3%Qt(LzL;zZpnQ%AV!{7=bs
zcUCyse_bz>B)Rs;Wsu5c)jMKt<_TueiqctagkImIXvaSE>Y3s8kr`TBx)FvZj1vYM
zWKz<ol=?v$A}vuqgaeu0v5nOKY3@bm7fHA;+#`P2k8e<BG?9{lukw<QAGTk5vnYM+
zzTk%Z)=-jVEA?ak4+2rNHOmyW?yI#}eUwh0S0fvv2ZD>OBCW+es#V8H)(x%UYTjAC
zmu-jLiSrLI03y?cBOXF(1^(Hj2_rsHrS@53J4oVqDOzaq0#qUS!Ve&AL(o!5k5zZG
ztRjJYQHG~ft(Fpy(;M>Lm-@R^i(!;4uF{#|TrQ4uR!;{p0iE${C?G5$D6;avfD<Yv
z+_oOZKC1TM8V?y$e>KCl=-mU*A&!z|8OXHVgT&#A4WO{eB43SI=6XXpC-GGJ@Txf=
z>x}EnZT}r*+hS|8YExP7A$+3F{P>ed6cm(A(YXe#vQ)zUbqJljd9|m8#7s@mG<752
zmFdcKOjRDkQ&zqOF0OaE(ig?$S(=X&DKLo#=o{ohbBAf7Gbkyjr;}E5f|$biZ&&GF
z^_)N3pEByK)No@`e~DFkQ#**9O{UYlJhLv>IJZLFN<Dtsdc5i8%nC~QpoEG^cSWTd
zPvLky#$iJ3@hSPq_OrtrL83nZPWZPJ)?%~df6ZXs3?9y;uVvzYjv~Diy<p&}-fELw
z+%Ka+{2L-s-X?2t9Wg%n5&t)#J6Cq)kBhP(?nOXOZPeXZK_MT=qYX7=Hm)%cF?!M_
z*<{>YV?sNb5IFk&GB$!{{$RQ*qC#}P<$Ucnaj4SI)lOr{+~H3sw<z6wSoBIQ>gQ3k
zdvVB*^@V5HhUoYz;seh*hO)l&U=L*EX^Jn2w7-}k?>oBSqF^mpC1?75AN-$8*3%8D
zx~JD*b;6An!F}rv@`VO8XS`E0ul4#>JJYb(R!-d{04@xo#%vf<xxanxykWhb4&r+W
zo{>L$^XgIuVOna{WIPS!6C!)KnlRY=clw)>F6O7D^<?|#i_wRC2aVDi-UME4TaP0;
zT8_U}DT3-(pI#BzsGMjqt~!#&RI$;#F88@ab)2(KZTo}o!*R%H-D03W-Kw6;jt-Va
zkTmbDmN~hc+Wc50cZBeUz}k>d){d2s3-mj=7Et%IYZ~IHGQZLp`z|MgX?hKR#lP5U
zR^+eh-x<HJG|~iWWg1UX87BwwudC1#XI22D^0X9yRBC<Ez{_w)WG@6W7D`9H#soUB
zCEyMRA5f0D=8Vn!qIvYWB`S33D>Pp@&vY#ub$Vq{t|PR>Y&m2GMMB_-O%1H(r$zsX
z1yE<WxBRm_c<h9Kq*fSpi(t>e<sV*h(Bi}>?vwP}3Qg1JD7pLEbtWvAH>{y$nTc|T
zN@V-=4Z(9ed{8lv+4ZUES@sbdUMW@ohGQWQ!h(NA(2fLN-Q=bu>s!WZs6+U`U+b%G
z{)HE1WQcmgoe~?R3VPg(HH8y3YuyVuo+oyZx1L=s@DY->6kDQOjktGlk>iHxnGxCN
zmX53sZ6Vyovx*_4v($iw#hQR|>e5iP6=9qRM6eq8;@HOMH?r(z#Z>m&__R^>jo&}J
zKzYnaz`fa70v<3JkCjiOZ6okkt^^XL|8n`xkSdXhm9|-n8w`G4oIBa=U?<{KcdYdC
z?EIb7(T%b*bP#O}O!dL#s?@M``b#R5U3MG9#$;-6fMuj6t0OnJZAhP6n&ZI1T`@#-
z16Al2aHQB$77DjN>GVbS%QDL=6WfE+<MBd1qrq#-4CS*?DR}r_5^W!5GfC(EA)mQ$
z<Ahy!kEc%<ijJD4jNWoqNd=;FKlw==cL7g#8)$8SokKSE@@e;ZSW-flM}z`gbw(_M
zngr8J7fDhOWl!N_5z!oHMa7o1NcF4(X1eE9R8{AnuABglVz}f@yJRRX<>sU@dWm`A
z3$9D4Aqv->OBx$x!g8q6oaz{`>}b#ZC!n!v`i-Uf!`9mV(+66+P-oF}d8%Y~Nl!ig
zXS{r_wOWZ3$1tuv4cTM3-AQ=*(|0BHWV)7Ad*tofkBzSlwin4%o7()^%eEWIH419D
z0HX9`xw;GHyw)$eUQ(&P+gA^4HCdiL)~`l(A~a`B)m#mvPJPV~upA;$Coe}?K|~d7
zcK9#fL&y#X*oCz&Hpz*)#dq7x3UixVz~=?*%=d8}_w@-Q-)c%JqH(RQhGdT)w}W)M
zdfTkeUiQ{|GrUg~%nqawOH4+KdP~D9U8thap+SpH+f@$1!0sR0<6{D~dA*YJLfbic
zC-$Fx@%hcEcP-cQQh{m_g>d*g5oZI7moI>V5jRSXt*GBS%k}WM0l%B}Q&d%_3KiXS
zou<-*^yI&raaA#4uq9)Wl`B1AwhEgnON3;?%{x+cS5^J>^UX2pg&)NiBgMj*TpM*r
zn}dg3{GJ!wq@;}UI)MV0s$TgEs^YcsL&BNhDnDastTEP?B64w`Y$wWEXF~lRD4%_`
zEu=%?cyW|3D(kuYyU-`6p+O-8S;!l7%a5AzU&*k=PAx)PseH@a$c&SH3vrPzc?$#e
z#b^0AIk8AcmN?W~6g92}GEXSDr9zVMeB3JAEG){!$IdKN(N-uY7;5&*gLaS)WUv_Y
z*+38g_`U~mDMYI8GouApQ~rno$M3v6ajcQ$vh75jS2_K9>tR3`@X8w3tc)Mp{3cPj
zvGL`2U9sab!Mb}<POc{aM^b-EXBjl?PWpYz4#ejfr~G@Z``BoFpn3>TT|GFRX(4-3
zWIkXGE|sxyfTe`!cynxYh#vo2l^ASHP6@A_BuyTDaVx$l_!~3}B)71(84bP+N4LBR
z3A9BBVr=PWJCeswzf$&#=Wm1xg+d}IVYvq?_DdLhF35FdWq5n#{qfRy8Q?@N`$#&4
zt2)ov^6#!rG7mT-b4_5mCLT58q4yr}NhizLliYf8*7lb)UgZ}AqNTcZzoFu%i6BbC
z-B7elWfALv63CbRC0f|O#yz?YoJc7wWb}cEFL}azA69VAef_lWhl~QaX(GC8y&Gv%
zRo!QZF6_qNJJ~Vzh<zaJO+UI&^(%<-^XW;lau7E80sR?(NJFu1BLWZEHC5x87D?+*
zBQLydv2SC%<ZCD*XJAHIAX|!{|D#$E@t5{|?VV_yO;=c~qF=l~BlOvCQt8R^Km?}4
zWof}lH`GeGE;I{-J$Xh7$DFCARQ50eI~VIca-2cV^BeX|@Z!w5=Xrx&!Uvvp_P0w$
zV53E!piEjdlgSl5p@|Q`?TSpUh%e|OF(!$n>Zar~O9&?3AjA5@xArj)kb1jQoM3k)
zO>fTIFu|Q#O!=7>Hln&@p&@$q)jxDr40MS)^S6y>^l_w>*+xD$%SDb3SzRd~(k4;K
z5T7F`9fndQc2+c!F9`prxR%u;Td9mv^V{KdFTvFVOba3S6oiUKUBz6_)qMj6lUE*t
z_@F@=$~gTXXl7i0vU}SUU6ArSg-~<YE+=mEQB>ksi7%@E`EcuxhZ8LiVT&cPOY5_4
zEAH1!dmAm>qURc}TXs&yfdJ1G|1XXscDc)sS`J+&xe!og{bunz4Ll_kW9`l}X4hpY
z_q>ZM=IT_lKk07V2edxol1`PfOa$?ndK^Z_{ildk?YHYamuI;-EC^j?hwUOn9_x&Z
z)-_xVLy6A!!-`Ds^K0&uPpC99n5wQ)_RS9=vz|TOg+d?@zLSWuDvSIpo~_>Z6XqSN
zK-e-F=#t|dK0Kl({;$yWFctri+t)dw1skDFh1MOSX0R>-_8TIHr9{8gzU}9g)wS3D
zh@7=mWdWF!DgL9)2%0s-))FaCe7%J^{YugA5pOc(hH%4Rp`~)#OHR{ZB7M^CCmR^=
zP@S~e()rutSR+Eky0;LyMQvPsay4Fw8BmlwZ>wDnLyZ#?{p7JO!HP$)^J8LDb*h$@
zp(quJ>ISV*Ka@-l3MwXvOVByhPa2+0197eOe`SKZvL`9M%;Ka&tSasjb<90MY5nHQ
zcj`StL%kqGN@N|LuYGNl4XQ?kF%Ot>CS>hDjg6VnAH_r=ZpF%Q?yJbFVCj$x@c3w&
zcIZGz9-Y`tQBnvaXGWDh3sno)@FKL(XPFj$Pl{{?{N&zW(%b6QVzF`y!W7=~xgOeO
zktHAtaQ#vB$jIX@PM^I6n)7Go%!23EbH1D3?5V$ri<<B7)t7I%SN??$SG5W!0I^(a
z*S^myd%$CPS4XEUy2NyB+$0Q_1enc&3#$1=J7leUv{r=L|8-Up9p|}D5?d@@1(_HB
zHo{Ja=9_!Wn?$!u;6imqH=gGi$|m6(c_R(hOD&oPTSBi`K*8f3+Ql}>B=c?KjV>nm
zTf^L8Lu94xTtje$uH;f6AVM@LpXXVue{?9J{TFK&AebX#T}99K4Lp`E>{#g-V$YH-
zYPWJf_^xj3LvX*38Wl)WvS0x>z-*MM{4`k7nWcruj5=%+*3t5$uV#mwb#aswrPj%m
zB0h?SH{f_N#%2{5r4FMcMfXO%u@G%PRXx;%Gk)5&0e!1BkJk2drSx-mRK6o6N^$B4
zbu*DQB)ff?c_5-yXO{F)>X4lSx<-cb&M7aG1v?Zd+V$z=DdEIxX{?cR(XzAAaz~pY
zBM+;o=A`2<s;cO=;<W<HttyoY|2D^FqGKyWUB>(y<bf&w{Dsff->LSui-FU0g3S!L
z0dsezJ6P;&0QqxQ{I;k1q<12Ao2l=!x!H@gv^jS;Ack(v88|sPDJnILEeB5qe|xO|
zEP3Q*z(0Zt*z_*YbWodO&waJd6>EzW8$Fzy(RdKw^zqUWl|-2|r2KnLr}q!swv`PJ
zXI`QO?i*;*{8ZJm9KQDA5Rm+wP*m-=&;O}Ik!Nr<^fj5-J$i4ZTPjSY*2}$~3t3%P
zKQ@rPuI(6HN>-qHe<h`oz?@nRiP3qNGxVl5vgh~9+g7r`WiKL`Uq3c*;x?5&a)+}M
zhRU*i&uUp#xImp3yQe9VSXX6|t@kDXCCFte@9tiV>lGVNsKx56-_+%Rh;bq_dX)Q6
z0lz62Ojkr|hv#GDPrVJ#2$mU<t#+eqPXYxv4!wW9tHrhN5o<|yi_RKpNf>x(JjR2q
znCiO%uToK8ZcFpL!xwd&+3W>>R<tLu6HF=#Af+T@1+n{T%VNZ>1LaRBOU-vHlLXHH
zZgo$$&&~ZhnwziVR8v)x<ZLtdu-N8+Y;ac6hXQwm4a9z|!0K|LKbrUajC=+mg>sd(
z1ljD6p*sAxw0`6KY~0`vgZ~hgJ#2Qu_hs1mD%;nee}=u&t5_-^t|p`j2Er-m>QqU*
zsdOH3crxRx2xFJ<1cp5b^+-~-{~!tq<bAUBoBLb9vvPc%$fC}um3w{YQ*{&Q*wJ4X
zfFvi~7e}(F=`cNkAEilJEJ0HuRu!r?b!l#H@5{i6^ZUkI{|NucRE7lwPt(S{Ntf;1
z2JdX!tOt+X(bq+K50c_)DG#Nq%|d@AQI6V;raW{CA=K!7*{C@mreFZ#^BcMNH(>1(
z$mHF0L{T);quG(VzK*+XJ@MC19acM<a{!p;nU#!eMD;-mzbL@VS`jW!CGYum7@KEs
zbXcq5JYGNNt9`15@Nn*zm}`dwUGUZSUqU9+C~xQQG=yrss}Y0zrC}FOG(xeJWJCf{
zs^#fjhW90ImX)<ctHRMk1!c*O5sQby`}<v}{X%iFN~<BQ71sp)FfRQ@0apC5U-Lz3
z6$holrB0}Z`3?y2`lJ?_z>4IRcqN4n_<A7fZWhSj3-tSTQY+!+-7XhnT}(};t;Uvo
zJzl72od#3v+W6z0WlP?&G$EH<<EbkmEtFHMb8ug4rvrn}O3rUC^j8lC{F(^fT8E^_
z{+gIF2im|u4Ku#B4oO(j9(|mF0+)A@_Ugic2rAA=7T@hu7k6#l1ok-yQ)SLC-XE|g
zU7@NW1x1*WSIes$S;k>ybb8#=(-&6PlquCmJv-~mxyEti`2^Y^E%X~iptc+GwbZ6Z
ziZ^({RthX_wrTmNo;rK}uXm=eE5zgW^wZ9#tRT>_eZa0GD%>ezl@NFGNga#s)eK0w
z@Zs(b3l)Jm?$1nF_9LLicPbYH<UT9=xdEQ}xCjte<NZS?8{^u$6$7{-E7s8N%&J<k
ztWh@QGVg=E$5<v=KwJ`N2_bv~JbDvw=5HG9F9AK!{2+XAfyxRY;ABWQdvZ9;D=vT$
z)URcNol`?GrbP&qcK_#Rx>CWa@p=-1&95a^u9qCNJ1$gz-jf-we>bDMY#x$V8<4v0
zdn5xup40hvhCRXys0nzy(tDa)wDHK$u{j89Nk#1!?OB6^V@8K)Yti0I+t<#8-H}*}
zYrM3yIXvf{Nm>0d&8R*YD!0=@yNDH~6;_n{3~{+<^a^F+k43YWu~dDU|K=7?3HI?v
z91i!>VJXm$x{-57jn&(+@tj=H-HB5{*F1cDlZ>PTvhfDi=Ocl9nzi2t?U>TvUu4=X
zLeRLV0Xj1A`Pg2Lzu~R_9apQgU^;DlTVyZRM>L#4y0d#%`Og_w4De<~ZM)F6Aw-~d
zH<B2XbuR3Hr$Js{A&}w(oXeieNLy+}?A9m)@`?arwj*sDuJIq0P3u|(j>|szZMWQz
zM=a3dk97c90`8IHLpPhO<0gQ(oIJgYrw+fkmA(6G-R}jXW8j?bWT1=4rtA&6>VMp1
zJkwq&0rau^Id+nv6zzOo9@?8)5a>HF^u2E%0u#IU=hc7S3>5ZZZ{9(m8`=O_b{{w+
z@_(BJ{ntrQxKt6D`yPm<7zPrQ+WCQs%SLAtfRGo><jB&{poRgcc)r8w$iwKYfe^+l
z0x5t1muD{LNCVL{Kl?mlQsb6W>Y1Olco;T}HxgkVfb{W519QXNmJgt$T_V*>0wm0z
zxyxGej*K;|9PUe-TK_FyWV?{Z?!d0DfyhpoX|1QPJlZ8$BjgUSQdT#YKv*0EIc{pf
zQnFqV@)$cGv)7t9W7&Q;P-^gB9kG;-f22G61Uv*QFlxJ$ApQ9Cq1>_qqm_~M_8DYs
zFPdhd{>sP1v3e@yPIZqHOFyOWk<|QfgHGF(Ua<x@p<gaDz`K>A3TYemgR}xVj(Wc&
zWdrr@@cL_mNFc%fl4X_YaQ;}Yc9Vwriy*({AtR0JB$N12LI|Il<KxrLqYZ<lk+P%H
zeVPibJs%uUBS*_HQX&;YsZ|82q%#*;C2rd3VuB?u;xnV_G-Kt>`y#!FG)Q^PTT8fX
z;Py1BrqIQ_skos7YDlZUC{wur1CYZycX8`j`J<&S4Y^NiCHl;VvG*Q79)rlDgu9?c
zBh!gGqUj`4quj-eZLM9#ALnF}2s`-@JOb^fySX3#nR`TRd>l5-qF)giUu}FPxmzsF
z{D9xwLrcS+U+|ECtKj|sw{G?hHlD$|k}Ksss(s@jtv38>79bib_{0d=Q1&p4np<iv
zZzlKpb$hVKdnIGo8w*+|2F&iTh>I9^00mfzOKwPHBsfoxHum59;Bvhyr9!+$YKq%U
zCxy{<Z^p)IN9mYHkw5dik4VmHsbFPg`@-?Y8X0$)cwP@WqGHW@BgFRsA)6@E(W8AS
zq3cqzb!2<wi5FOlEUBTacXR1YKHcwAdi=Zf%^h8iw@qssR<EuM1jvr;W0=Z7ZsRVG
z>{oX8;OL0)1hO`oWV-qVD~)_^iF>E~b%tv?9)H6*+=qDz&lW7}2G?eOSo(N(ljFKy
zKZqhuyD(5b{k&?h6iV!nL>qIFWu!Uf)6-GuS-FQ@{PP04q12d?-g~fxLfW}7tIiW$
zrpmwJDkfcrbjvt(AJ5h954UX?fIXTaqS{|N&5t_CJN_iPqGdW{bK3nU<xy~@$pMv(
zQhIg!kgHSR5Z2A*DSuCg3ZbW<Zr-Y^=8k;1tXDj5jk+V;qI-ZBFr^sFYDoonGX<4;
z`<&d0>Vs5GI)N2GdPdsPvQxD@Q0#Q%>)SWe0x)oDL9g5W_{=K{A3>Clk6L*e1qVdK
z2?2)J-P*2{S~7jUhJDoX#{O^r2g$8%>7dKxV8H<oiJ^pxl#!)=_A_OP#XNZ?IIGSo
zNe)xOkbD@5_Fy7~)OpI6<InRV8er5_y&F~F7<=Tu&)9M@y3kdw@~XK1a&2?<gTlWj
zZ{*A2?Wsoh6k5km^Oa$zOAqc1-Fj!}A@(_=rdRFT?j@qC_s{;Jb{zX<gtOS(4s>F9
zf*8RLJ0;xUIBW-y;5Fb&3O@1J%A8;6OaZDzolH}AD(+440)o<vSGGINEdv0s{Bmvl
zg4Dh?vS(l6pCyyRH%xH4U&Fx<-jeGX&!!eI!OUir9A2@;LK4YBZLX}X^>BIPQVly$
znukaF{=fo*_BU@Yy2x}?1OhrRV|vpMGDJ|Tl&g=N(a^|8=i0v`<l9c8mJk)arh6AN
z#0ypj^gI?4Gz81{e?8k8cQuUtmuSes)*t<~+smYnD`h_oDSA`=&{;go8`VpmpF!9|
zmke{WJ<tRj`)v?brVnQwUokB&?q{4eS5mAoWs^JnY-O-KzX9;ebWv)V79{tEab;&1
z@~&`+Th+H8G#lK{{b+z_qdl%tAr_fjhRX$cX^L$Zpn@xove{A9{L-0Rt5gSFTBY0S
z$Q;r*8``u4_|0NF^$l@hqHYMV5jyI%lE*jq4LFq4nS#K0f-0fYD8m`GdBenV9baB-
zfc5S^$uo7hvIco=^$CmA%^SNSBUEH>I;>PTh@-JvH?Teh?!}*jT&gY6qPN+OHxeNW
zqf86%w?~8;LvCi>gB|O?61bIJgcI%Htm-&-8b*FPnZ{%}aNQd>Kh5!h34bWLtHQBk
zinUJbgoIdrWZ6xV8Z$$WdAEdO`%8d0XOKXR4G>d(leJ@WUMhwS?8z6*RYtjwh`q0k
zxjX5kf)Nxlc8;%Xs&w$2W3NAY$c{s?V!K8AK`usI<Q$wS&@O9#g%hp@kpzA5ob7z}
z!||kr%#$UlE;Rmr>rag*f3|Qo)_}`5Sbla~@!1BMP<~p=RC+a-g`BAz{E}4G;^v}s
zEzy-6)48Migk?nn3dr0tySf{D21v}e_PSvoc1g8tOJTz+&#|an;C=PiQJ0*82*47L
zLmy_Y#Gc5&j>uZ6A1H~U-2XTQ?|$Cd;zLK3C3QY8``oC#64{dC6-t*O6!PYsylC2z
zS=4%}v8H4Hg~t_G8dgxA>*m~9Cisli0YpZvI6M<&e6NIMhb=d+0ZZsXkMu<6drUAN
zSkap~-yD|`?0nH7$;jCkgGU)xTX&jVGq~eB+UQzc`0niaz*>K|hJ5!)n-}H3BcD^8
zvgT~RM#Q=Ka>uvxX_(|{=z2`blP%(<4I^LsKzjgTw{hWvN}VnL_`Qp^h{92P;V{(b
zm$Poo-$RzIun7gjAfudN!%AbrOO*$0V||?6x(3&n&3?=$r+57{a7PcLN>>i4Ybc_q
zLlwmQ)r_grZ0Z_g9ywpWh;?ru2fZwt=CsR}_0SsktG~Ng4_G-A3_5`)7~W|>Mhh{N
z&ABhM7&{l96&1uc?Z6RF^{=RRmx{hD^Vg`j7kpR_D1NKwPWrMbkTqx7tR7dKGE_NN
zj1yVmB-3K<Qq<V(8VYp%Qv9@HCcJYVdsy;%@VCsX)!;`Fq0_}fePnJx0BZKaE&cZW
zM%5E8%l+0&rgGRhP<`>`;)|6n&n^U7Sw5cuXQjMbALtuc*Iw+j1o$5Ktd%iDmQGpr
z5Gq<MF>=Tw?Q2HvoTV;7;|@@LWpvBGby`2Eg^sX@1#YME5;7ximRUkV9KS#6z`70?
z!z%pUYSl*7PL04qqE>$#rDy}28ZRo^r5Ld>DRB3T6E8=g3r4I+c4)rg#gjhMFD<-&
zd__hsE@UpZ%9_0GUXEISY&d(HZ3a#UvshsSGwOdrB~NEUv?kr%p2kIG0|7a}XW~0U
zBihoYn8h0CHyb<xM-p9H3d#Zo>ZACRxU`Oa81-b5+j+2+q4;iQC#6N@8o45S>QRyL
zF%E}L+#ypQ805SB<?gt5Dkw>7r_n|ZKom{HJH(3<H;{!^H!CYE#s124E5a{v`XSDj
zTr&~QvT>_Vti~~E?Hnip3Q+*&tWl^-5Mq33`u%-jt~A@+vUcuvmsRQ^TVZFQI`o&X
z#)`Hkv@qdEJ9_q+J@exKL?v9|enSYox3x9ue`=LH=(Y`|=l=FA2;H0%+F0D)@}T!@
zZ*u$(?3aIRB(m)`*>+=mn{G=7eq^~0{}*d0X~!B$d63Tt&pSI@K?Z70Xl-v%8_0N^
z6dfoX_b;XsU>%Jk0IpED)bv{~$zcwN3rybHqIu8(3+}pK;tqvD%natfL=PFySl8UG
z+rcO~1}M$B<BPQ&yH*@12*MCO?b2a%+G0bz^<K!G?#{pN?(3k5_d7jx0Hq&xEvUvz
z0Bt=D_Ux&~O90pXZ@v0|+B@@qHn(npN3~R2v{kgU^qjt`HJ+McRvl=~Q^XXsiV{QB
z5X97ho>p<%8fpk_)l5PoR3f2Ow1~8Z7?W@mC6d&Tn7I$<oO|E*AGn|U!_80R`8;{{
zv-a9+?Y-A`t(|cz1zDbvtcLZIJ9HV~>94)b_%aDOmkIG}^5cEEFz0?_-f(AK!`g~*
z>C`n6wZH2h%loj7vtc!cc6AlDtv7m89lOg+i{DM0;yDo8h(QOVCe=E(%+I69Uz9SJ
z2BrLc1WC><b_!RTZb7oqvVI#chd9Kt+IKqk^=Ysaer6&^$sfR~b))eY!b5=FfNY0_
z4LhH*@_{spz{8^D?iXfLMcprwm-7KA#io4L``rX_u^5dS;3!Rdr|c0~9ATck+Q%Cf
zBRX{A&$}@f^viB7SWIUPD2$Gl>8#i+xx~W?200LM@_n{@Rjhke+5n#YWB{-2fog{@
znjpB)efb@#$Bo9jxObs{_|Z#c2370MieDSa`-cTU>4~dQP`c?FKS>(h_z_O@d#A>2
zypHHl+Zi|6BhH1z>>vvmKobJh+c*9+n<As~8OtXg+?Z1rdHZ!@<VZQpwhj8d=0xD!
zNh-Y|X8_q3UgcJ{I`5}RYBL;oV=(Y4s^ri9#O~+U<z7tVHIk!$Sj3Qeo1&0_CAhRU
z#8=5JZe!(tfZ<=68LgO%$qQ6FZrl3!7myUo=cjUOH?}}j*P}WHZ0(Yj{5N~6Wl_?w
zx%c-D9_rcIiTBkD+ENMYoo`ylnj+Z3(J!Spz4~;XTw|e{N@E(<W1$Glf!O{yzk)%j
z^zP+DAfvwXFg81_^PPK)h*<c>L+&@F_r}-Hk2O<YHQLtScQ;T?!Le=m2hYG{$I0Ai
z1S?Oh>25KTAeor*<^k}=Z=Q0_v7V@reoY}?^OCnfHbtFY9l~kHf1^R}z$nR<RlZb4
z<4!fpUp7a9qt<3GObdUv>k#iO*KqMi5d6XZ(Tz=zIAzbS(!RweE-=|;&Dap#g9%}Z
z9ja-$m{R6)4jnyID5%L}@(+fPH;F(&zGG|%JiK34L=$-7AP0vI<j4Li##J*@#*KUd
zRvBmy*_GwautE<<z5tWSy6ts5EmI1I6vql#6Q9gJ|B#<v4aA?LdNPpqu&*tP6()uE
z+;b2>l+>JZAexSk&#SO^qXiU5%JjvY_z}BQZbe(9e=m}u*Q+`)Tf<TVXVETDqKr+x
zpzAL!HM1wLuJGbY{m?<B7&Az?z!_qb=+eT*tB<V;ug-;i{AA%VA`b*hx?hc)0u4(U
zM1Cx*cwUaWXTsq~HEM8e-v8V{my^>{)#5JY_Icw!nArUEfjhW##}I8pfBQBHXmVwE
z+OcLC&f0#sT@(HIWsKBQf#|3>yZM{pG`E4K-x7xrC$Ai5*ba+($0WuF*Vc%kiFIdH
z$z!_sqH==wohWW?|3J-9xbiS3`fkg>L2=o9uI_QapKFzmJ~oWdJ0Bq^7K6cbcK~Mn
zBkndx57Oe6cxUF4WlM8*aVA~rYLUM2&4Fp0F<-!G0%qjXpD!PeY)R@n5;~!R)tUH2
z@9m;+=D;tVEn((ndhOR~Jg^TsYR}*m+s*x`>I0sk{_(o-$YP+(oyc^PPtJw77kBvP
z+)y9Ys+IiJo3QfXU<*A}Q7YOLZgbby3}%p0=j+k!;owf!&ZW45*IvlW=e_rzt-k#|
zGCcLy03>T!4^hksr%l&5Ft36;O><Q%a!xv7FtcJv!*6h+(&DIC=G+0$G^OHjXC-`n
z<~tVzS9cRJio5hqz8p<<JSajJ09ze~L~=L5N>se*e$TY1*oQYo9b8iDD`t7e-<jfF
zsmm2UmpFPmdj0#3UVkKkdj=n`>&I!JLPN?*7}+N5_Yy{<&8wFtt4cH*Z$ucQE^i-?
ztK<RGw}(q-_y?Wq$I#pyJSz@Qs`!`XNX7CknReHvZCk!dn*9yu<v)E&@n0N`e!FGh
z2vYRhD1E9YkaS_$h-6m^gt(8oK{kWx15?;}*)G0d7n;0FG>m@QeRLoFuq&oBcR{hr
zy3Zxi!MF9pjfuJ2fJ|bLw2R;8d}2@8a^U5ir`tylyhS9_<$3Eb(4#|vOm8SioAWXV
zbVNJ1N4oKtyFR1rNSpP!Iy!B!IuBK63UXaK=;?(<{s8e_4w!0J7+K}bob~+jY<2N&
z9;`h2hlhNfQ3xmGuy>3p9GX5a&Le(nvud0ULHqhTKjaR*Z8H9fyPf6`v1fYGuDR#J
z3wfAI%WBr;ql0JaBmrakwm`CZI9Q=13;rUrcKhfnFNj+A`n!y(!;7Z-s<BDygn&Re
z?4EK3=}TD+Z{ZAJ=LTSVezpCbusxsgac^~|^wGVK&hu_Zg~p(J)c9%{w6U?q+6fa{
zTz`sKHw;rHSnbluUYMzN0Q7^`cVnsYku;}RR}!y~4vEd)yD=^ofD9z6_^!KyANUrK
zBn(+%+h7yU3y6p8(dOZ3kEHL@MW@t<1BeovKFdk6MmL;-q%n|KXMr=hA9AafA01@G
zIeGB&4$cM<nXBU_1CpK2cL95CUQUYnEF#?;H^jZ+x3uvpmw6CjK*1)%#W^`rF?OiS
z7)7K_nBjz-G0ZP5^ED&&beQ#e3jg`pl~gMF+=SwH<e5~IHf3U?Rfjiu`DawMd~_Lo
zruYskXe&d%>FZ>;ErR@kozip3Eg1k_4$}5H`lG*@o1&~tF@gax_0SzRLCTf|MBDP5
zkarP19~Jq2IV9czG~%9^<%3K+@u{wI$O4;<`_>JJ6q`9hTH2K6n-kK}Pv9o))stb?
zx;r>^Ys1)6H$+FJXHei-9ziCLT-e=N*dMvjwV_5EGN`LuZKp<OGm5*D`Qb~o@WPj^
zZRcHAFQ)UT90!%GaHwcLStH)vu-jjT_kK1D2oAtKXe5Ssvxr&pVq^r2F<*);%9>oO
zr-QV@;*&gy_%y3rvgptYf6t#<W$w(4fLO6NSgbk<mhOnnIzy%9Ye4ixW#XZT@Osjt
zebpYnNz5T2WLWNRFy@Uh@WmDwly|#fXed?qoBIK_ui%D(w~^q}2c;8wc7!)iG1Z(D
z?TDLtY19gAt?`|&t3%vcZLs&MWtT8gInDtiff<^2xO@&-3n=B7-YzmyzrN(JH6^LC
zxmvi#$r7>{;F+n6TaxfUQ!%lxy4R$eR?FN<*-kK$dn8z$pSulXZbtdc=Vj&P`f$#q
z6ZYsJYh6YtbCo_d&Z^yNd#3SzhRoM@8e~X$9%U<EzpYcbp`Uck*KX$oouYK<_3L4z
zh-n%>`oewDT2?xQp}{J>Fy!+1&|9WIbZ~+}EscG*IUKuZ(7;($AzyjDPaUg5-Eyl)
zaL*IQbswN4lV~RzuNSBoA1-23J#nc%K%D*02eT-su)AKl(Xo1;Uu7klO->!8hbvOT
zVCdP^@VCp~E~5PXaDMcm6s>|%k$6~@j{|<%w+dasb)h^_JwmM%Ru|#v1bkgd{ffS}
ztCZWw!|hRS%2z`jV}w?^*KBAo%CcJ`q1au36J5{<Us|gy$?caSO6I05J|amNFht>u
zf#g@~XO?9D;Q0a7@|u^ITJ|`Z)4UI7s{m)Z))$BaoWM_aXD{)OQc15ZNTus8uAB8k
z`K<Z9)h1RMLDodNOsj8uEpd<2NKe2hlt~ZsHTLBgxwc3!$m4u-kJjJMtgYpYU_yiK
zXqJgn(e9+>h4XhNN^tGH#I&!3@mBfCH*dR#X)8yVb3*1SYyrc9z$mSL9q5oA?tB<>
z(|NJM@GM7)s1ZKRei>V)#~4Z^T$vMNNXA1zdIRQutJk*MyxKiR3+`$D&`w*71|yTD
znoUaAZ|Q`pWT!jR=s!M7={HD!Jw@OC)^^|E93l6&Y%+`dXCudT<cncPcu}j4OC0;^
z{B_mB$5V@<mI9RMJ3bTn+1655R|J(uwO5u(KTPPE5#G2-zh+2pBENT13JFL0GiHs&
z*GdWT$KNl+KATQEAk7tySB_rmh<WBstZ>#E;2*)7!x#=+d?@R|VHUu2trW*O0iIcz
z;MR^SZr-=Qvi}TkORj3cBznDfkJI&cbSXT!Xk-1{Ge@*dk5@&>OltVJBi|gK+`zC4
z&r`ga?fMQMcu2^@`YRw+0A>oY*(Tr5Yy>~8a5k4EPkM2D6;}4Dyc3B2$(CSev%Exu
z^wlfSkRTC_?2&g>4QC&u<{y~OUyeew8oHyVJD2mb9>_{_^*=IU2QS1RW8Y)@8t+}R
z<^CaLpKdtMd7!%AvkIXZ?8q#0Pago6ul8&mT9~i=IP|M0A!@$%%uSFqo}KSZ`_kM#
zY>;~;@yGU0uMa()YNZDbjZMue`T8TbB9BhuUFuc~?L|aHD8VA5FM}@s2&rz7|67>0
z^c`U;QVOe#hq7y8#Hzpazv)pBKcH}|BU(tiPP#UlDLvz|v=Q=i%?d2Z#|rQM(Cf}%
zB6ob!SngT~Ux4?C<|(NU@T{ZFY+wt_RCc+)JRQ>Io4)^t>LO{nm(hO6Q(b|&-_zRm
zC5ZP$NPR#io~8!%J>lbSgz@1UIzxk@gCv0Yo<Y+g4XMW2hB*1ffC---?5*aFNs}x|
zNq|<c?FFC0YP=_e1^-9}cI?~B0eg7K2K3z<{$0PkQG%WngkhKiL;jXsR%^LPK1jOE
z`hKdUTmo)s%Z-YgR;MJBt3X`y^<Dr1np%hn-Ya#4fADMg_H8_t+XcIx?kh3zVN*6j
zDN@FmffFdzJn@6eg<uLBdR4!nD_Vb{_bb5yVWPRlncKtIg2!qi8*^Y%VXa~Z?#K-O
z$(q4J#`Jt4yUS3R-Um0zbl=E4_<NW#dP_L8{o?p_)Q2mfozxh$o=*D@DB(PP)m5~S
zwDpePNu2<UvpVkX`rc>i#8rbeItiKt;`LIbmPg8I421%BQ3J8WwzkR%ZE6C!bX3F%
z|7ygsA_8;M2U6k|*d^d5f41xB-jTGKb!XO7l*WMUdc8aaKU5?1rPUG!X*wMyn;*{i
zf}Io5UcSh)TrBZ!FMJQ2F(5e76QgD~{RK-On?%V!<Su>idAz8fbjXwS1=~SCaretX
zapp)Gk4>&OTAmRDSQ+(P&YQ+l{-&_|r*F}q(&Dj4-$iO&Suo_CWIDYx(&PJ@{RH!<
zR{PIz@06wQbxga~*<_O!4N4<D?YCY{3tIn?q0t{ER?D~`qGi?j%Y<GSffx1zLC)t0
zZyGEVv2dDBN!+O$+fz=9kLiM{vl<oA=fuM%1Cy79Qm+lXnLeNT*1DSWvT5{vAgP?=
ze`jT9au_3Nq?CXIB3*eu$qvh3_2|wmLUKZ$%bLsqho!wWtC<M)>@%(LckLCHpX};~
zr^CkAwE>SEZxKHtpSB5*Qu!SUInZw5iEI^ga&%-|7E9P7`H`ViL(ZxC1F%#FU&^5-
zV&^JK1gSQJA+BdPRmz+qMf5)2yj*V+d-VGlh}u(1BKeWM=bO2r{HaWEoFVJQm%Fgf
z<_0UMX2y}`wZ2$x<O(h#qGFS6h?YH@0E^kU=k=jie)6(F>|16B^;7Jpb+tPK2o5eJ
zq<-}DEaEADy}Ia0VDW*&{A6erke0_d&s2jj+&`60)J%6Fm@tZHE4R8I;A3xKIY)JG
zpj$;XX{SlmUe5a{vhopCgxK4=lbP3m`!d5i28pN3yj&S-d5(?hrHFWz5Hnq>GF^x>
zaol{ZTPnUEr$+K8`BRSpXOj&_A(RJA#%?yRq>6}o8W?}`)?7I|D)u~pwRdyIbZ+_f
zU7JmlZhXX-#dp&9lSSRUYlzMQ$V7BoFCZCGczB_#;nGO>27mX2G2mx(i~3z&e(99%
zH}%kbr05op_NhN*ht|g~kbX^|p$P^GI$V4YTPUO=R6J0umXgLv-G@*wYypPjK!$Hu
z0c)c$!FPhh|FP}Igy;N_U>fJ;$Yjj56;94IhJ75P%45VGVL6V?9n(zA+JR`*t115h
z(ehyYTEn;>Le&U#32RlR;e9XiqN|Aijz<M^CxF=ryhEksBKB0H$ee_0%iXnG0|~5b
ztK431B5UzReq4tp)wc@Y-Vvm7{_rQl(&bh$kgRF~EOF+9L8?)#4(VA7ztc=1$t@If
zys>+<afUq=5FwIxV?sdPS0N7n2kmU|ARa$%XoZq5*TrUbcRuRY8p*OoPOB>9Lh*Q&
z0M>Jo?M#2ALeq&9+f#RCmPI@RDQw<#Sf<CZs2*n9{AL@agtK;*Z_be>Z@o?iFvTM$
z%|T4CL}luFb|MV>Gr)Xt)h!h6yuBX~R~}0w*nru-HFessZzZ(>*14S2p!I9hK=53Q
z9}@ZtYdY6<C_3ILX+jbo6}KEqT<4^Kl~Tf#qYal|fT)4No*?A4U7f)IYYYG4h+oD9
z%8Kau+vmZ7ky=x_I1rVZD=jn4%bEAXSx$3>#KF(MK?bv-aLit0Wqa3h4j){!(+ZSp
zm#w)UfNMb1xTa8<6yf{^AL21oyYhhVdY2wTK_XcpbM_vg3lq_%@|o;Cch3w-wt^|2
z8AF{2PuK0FzN)qU@$(-X7He_yP3=G&<XhC<h-uw~d{_e_*s4xBreJTsHw8qE{iIjc
zN%*ieZv1I}admz=r?H7?@an01NB4@J{e0B-jg6MjjZ=I9pK7YztQY=NSs_v@5IRp<
z_6+_USn}W47hLSk++^Pj7gqMIeMCZ=1bMJUf{E%P6$J1ft`$L_LJmjAnBIoIofiin
zVxtJsPhhEP06!9s56vK){D}T}*N!+UVt&RwnN6WQ(IZ(KsK-5^(hN&?_GB0Xq5u|$
zbPJKifR9E&k_t8iX_Y1Kd?$)~4|)>S4TXz>2n63P7lVvUX5HBX!(FAUdSnqT1?N)G
zeIJLfXZ!lX>CpH`C#oMm$a>Qp0+5Q{X^v^}YxIZR+yHb?G?DKy7k@~H0^uQf-=s?C
zkO7pGrB%x@;Zn_W1o-|SR^8?ZbB>s8Xn(<ToCHBpAa~Pbi-`HTKP$%MB<H@N2yJgy
zW1N0S6j^U5Vy!{U?0t0<6$VrMiB{a3C$$0r>3G-Z^SbQ@ia&;o?%C0>bu+K|0#<6q
zgrJlI3#ab$4Fs+*{n4rQst<B>%Lv%Rh(vF}5uLwxt+YwMx<SX=y9ZG1tJs;Rv7zn5
z<!nBhrXzoQB7pLKbuXxrns=#njCUf$ENss~j0CsmS=GD7+`~vMA>*xIi^E4@2m6jJ
z>De^!i?*`0ly?iAuQWXF$aESJL5?})&pZ-8m+gSJ0nFmns#sNZTr{j3!H>?yvweJ$
zj_xqGWtC(Tr@x5Yb)9g+k*VlCPG?HY>Py^%n|k71ji3ODH(^@8gA67FpK$;w0)sg(
zBMjX$Ov6-N$5tOO)#7|+eemZ;ElL|8OAMUP^dIaN_Fr(}0>l;+oVAOu(vv??0e%&B
z=`^q*#9f)ZFsrmq!+Nb}u-y%*B>75P+R&j^y%yDI<TUC&QY_v%n!4VGXx+e--!HLz
zgjqWL+Bzn!9+3ag)9E*F{fX!``J$N_PU(dF^WOoYvy5O6?G!i_Ruh7rdz=HFW;+3-
zn~BsxSxfL~bN9@?dBfn~_)>d!LpJfq*Q@W*jlVz^){Ir9DJ!mp#b)dp3<5I{Rf?K8
zJ-m|Iv$GuO2p7LG7ekI3MFjTuGoc@Xqt=n3I~Z_7zLw8zQ-TChIt^9fBwMw418!J~
zZrgCSIUe_yGr`D8H=EV9rQFw3?^S-B-jfZ~cKI|+>21wy<K0#6wFea5IafUj^_v#T
zRu{LqXyMmx;S7MeF*lh8CVzvui;+48L1Yb{K=q@HD<MRRh_I(7yaD@4Ot5O-Q?G;4
z%^mpwfEsj6wM#mqQ%)QM1^+P(KE*ISJ*^7P4?ju?3-Rs<Lo`KPvSG{^X9^&)@}>LK
zVQ#Qa4NO=_UAg%}<*Q=GVx?&r_Er-V`is39fFI)yB;M~*<dAu|a@WTm>AbifL&B<!
z93pjcxSJ-a@F?bxrddG%qDKbUyu2ejP9wZT;hg)PwOJnnCWg1e>w%O@7iQKDL+j!3
zK22Ail8jfUeB4+4L`%YO)F@7SU@W8T(zr`jegulq7y!$9hOS82(r$8dlZHyt;Zrf&
z^8^fRVob3O@gw4@w1+La;@Y;%6@xT-(IK<-D)|6nrenC?zrnz?NjWt6U_BcO7WEf#
zR8KksOXd;s(T%auuN4<vppy9oa;guSdN1!Tx(RC^_~z~#1TpCrf{$YT?3Y<Ji?XqG
zbarnE&|A+99M|b?WHYYfiQrsBw%_1;NVj|=EaTz3MAPt+E$~m%gPxj>a@caO*PZyA
zCBKG&6qS9>iK|kB&Sp)Pj}FyDJ*`U163m_+;h5*=Mn}@bLC#W|=J*5q6;uw4sJ~A2
z@cjqDZJ3@y>E65N@sKwxJ39))B)Y=ETTr=vQ(JWZ?bvs5r3QPPD8-iJdlb$utOzOI
z1xe%BnF9TP`twlrZW;Zn)uZ8%mcWnV-b&iof5nc3zrM-sk5@Lo+JFuq8fEbts+WBS
zz}`ns8L=!WK@YW^DfjTofo=M2FZrIRFmRjhtrHTi9P)F5BZe`(L;t#61fc23-cO;H
zri~Ja4e89r#-UA=O!=;P?oAW*gi)O~gUnN0uBK%?k*cbT4$e*cv27e+K5+wDWn!7p
zZffA@?_Z0bO{+Q_Sf`0(meKr69i2~B^s~#1PQrQZ5vNn->_E)wvpWUyAqC~-MoqVC
zgA=?dT*Jt3p~P~3AeYDUZgXl^Ca}=U;g;u*QVhmufNE9&d~SYjIHUrisi!K{NLk)U
z{r#&u^YVi%0JDWaDlEg@1)_~!VGPB~tW)KNS9H+7d%f;;Ag+DSt4PCaB6K>Os;8sl
z!`u9JvxQy|=_gtEwMf|?xLyaG%RuNfr0@%gv)TP-cU3Y@JhE-D!kYe;qeOyMK$cw=
zr~_NawF75<^m9b{j$p0=uy1!LtiMDa!lXo>*c6%oSVqtI-FO`I@7Not=^%hhy!eN0
zv=ZW*L->+4a<86~5RDwC{QLLyJ{9q~`*GJZT)Sl80pn@^ehuuKvOdd=er>+Za?tYQ
z<6Zunl>~n8KY#uI*nd5oW$sQPlfNm?yuCZ&OQ49zY{B-1{9h~$NLy%EVqv=dk^27$
zxUK?ZDG<N~ineV5g;7otbpMKr>g@zKb>cU<I~R9~_pdQAUQhsm20o_`?+9cGh`3so
z*b6@bprqLU0ak&iYkIKr|6`>8TP*F?oqtDCW2&4l>i|d?+%KZ<Nz7H*k$;F(qdEOu
z;2=%|(2n@`33u)QF^C(V&n<<$3t?Q?<HU&D%_9UlcR$d%M*05@pZ>xZpv|{AHPSBl
zH!!@B4Rr2?#op@iPt@SQMqq-G7;sZBP<#{MH**27H0kKfUkvBfbJ_1%dVH^|0Aw_>
zu_z*LUMBd9e(Li6W4^NkW&lh96g>lQ+JJF)7MP<Km?EpL_OzDG2p~tNsr3RNpKUCX
zo8yOhWA{smMF(&=SbDca7(=Q9v8Rk%OpUc&4e+;Rgjuz==NI?B0!3_3AesKca6Gvr
z<IZsAg--tldlv+E#77o0K#cWB&vxZG1E(Ii*Jq&H;HttRo>ldkY+BuMv$t5a&Z_;`
zz+DNg#O^@5GV7M&*2iCV-%5|zLhyKw*(e~>Cxi*q=54t8^`%N_OgzSA`-RS1o<qu|
zkDFZ%T1<iaNzZH>nk@LCd;w*)?p+d^jleDAUfl*(zoH;oSluA}AhmD3!O^<t>3Gzp
z+GmaQ&y$kdNaX~Bxs^^loobehFAh~BH%bRc3)xp?8_v`xfT%LF^eUuTc7RpGoZ6UX
z`ng*D@Oe?ss;zQSv6;%d4UqMexkCC&rCn-lWooD8FTI=nK7Xclk_|NNIfjG-GW`5b
z?f24ptl?Ir*~%5u#i&fW!fGo;T|5QM^gV7S37@rAURIyG5p_hO<O#rZ-Wi_Ad@|FD
z?4ouV#Br$E+??}edN^8vsO(}zc4<C{S3S{bKk#rtoN~w+S(zht*H<zBv({}fl>)<j
zT%PWf0vror`MK7YUA?oZ?S9HjFX2?hbLWTkt-LU96xAlpxkMRO{9y-s0+GOgHkP!q
z^~PaVDKk^5CFR~;`#o9Uj9x%1Dmo$;v%Cy^^464mpQv0WcTKc;@iK>tYhIojVTg#(
zerf>3=OGj>1AKp2*B+^J1~_<9RG*~J7Z7pcG}3=WQxY57*#ePeooXJIf?9ZEbtrRV
z{mk`xp%W%%aujC$9w$P{eSm~GR@<O72jHSAV^)B35jcANim}5EI!a{Jttsy`6ZCo)
z?%<7V{(9xmV5GlGXIv~u53S_R6jC8p&f9Em+1UF&zUN(<J!AoAk)8k;Sa~J6$PU$}
zV~i<7%MES*`DaKhmPOF2>{nEZYnn^uPiy*-bQOlhu`-8RT#GIVR_i$hm)}Y;*fR*G
z>0u*6C+p~+U3T?R{`Z{(fEr|XWmnHvur>~vs4D82WqkY1Aj7NSJ_MA@h|!O0@S~Q<
zl~65ma1xy|Z#j$&BgBbAs{Dhy&q=_~R_t+d?Y-fe(m^z<-#3Z%vo<gUvdhT8Qf`fu
z^vp&VYBN*wn*OrrF9Tn)oBB{QoE(0&*L&NhN7%ecEdiE9s^5CeX{(^w^p3^^#kts0
zf4lXsy9$|R3i~qdUX3-+(Y(8*?nj&GmuL?Kve;6AH$2Wi=;vBgDZgTQD_wDPmeljL
zu%Z3v-cH6-LV3o)ij)tT(yu3FxkGfnT``}O-f=8IK!+wsHo@UnF$H|gz0Z<{7GVcG
zC;EY@WF_w9>m1U+h@tf?0wLJs+Iit*WX$<)P(-;&5bg+&;AlVsAW9qpM4-y|zQ*I8
zFIXMVJr}mD4b*!8J|_X5E1~yY9(P7sP2~Ym)tx`$L_o2bA-uw6R{SVXi|R?Ns~*1-
zLfEm%68AsM<;8I4_JoV!4Rn^jFd$LPaek-Hp^Hl46(ma3tmN`bK0Rg`Z>7`>*%+Ie
zIsLD3M9l8fNTNHA*r(hjqDH24$i?~uE5z%_T5LFo**#_PIlyAT!wCMx0w{siIGlV;
zLpvd(>b00!aS+(IcUX$<OU;5-w?}s=8G;#u`+xC;iD#vKCrCd(6&cfdyqSHV$~RE_
zD6*1|;5MNJ85VH{_SYJh13Y#goVwF}UuxFme7%a(7bbItiTT6-z=`vNaW)|4LuB?l
ze6W|LFTq|P8QBy`e0IkO3($@iSo%I5vwHWY(ICg<F^%!c8~*<dSBCpwi=WI!Q#En;
z*472Vq}QW?SF3`cEEfPf|F+fV0z$6Li;8+~u8}cm?d96cCZEng;SzN+04+<&O&+ZX
zcMHvU?L&RTsLML-B=9xq=^X2g@@76h1u32vM{$htY2P&oKT~$b+I4JD)P<HF`=hH*
z)@ZF~-DVc#fj~O+3%1ODYJTx_@UXJ74=gBsF3{X-nOv~9+NG-xCQuKTvv=@<>y}j2
zyL=GJBg&Pm2U!;_nShXE^P(c2vFqffG*$>%=m}9yZn37MCN$p>o3S~NfNK{PS2qm2
zaI_;T({5LSV}zZg9(-j@5-kaU8eNDrN>y6u4(f)4G3__$a#%&FonPr@-L#|Cf8dPO
zt{0tiNZdXEPAS9nvC<I1XT2>o=WqbD=m6&~t+vZVZd^Xu6U=kiWWS-OM0f<VExZ7Z
z(K5r#k~QwDiPc~u`u%~M`vAsKW8+qv+HPxrI>Lb3-c0L1Ck|24)W=%UJan<=vZQbB
z+6T}{N#L>=cJ#1f(M@FRWhMB^d#B9Pwho-m;@y)m0YCvr?i>3)IPrwA1y_K6WwEM&
zqz!EGZL>rz#Rt1mPvGnh`FHIw$P*oJyT`32Rx_0giSMLhKSFog@G}<ZmfgSpw<b1!
gJ?#HNO4I80zZ6XZy(gQ0?zHLpRjVuLOP=@t3-tqqzW@LL

literal 0
HcmV?d00001

diff --git a/tests/benchmark/assets/gpu_search_performance_random50m-yaml.png b/tests/benchmark/assets/gpu_search_performance_random50m-yaml.png
new file mode 100644
index 0000000000000000000000000000000000000000..0a4613ab07f3e0b5b140940512e9e915c4b093a8
GIT binary patch
literal 66119
zcmeFZXH-*L*FUPqf(=ly(e-!)rAn^>v4Dbrq7ZsO1cVTJKp=nx;iwdqCLKg-BoQI>
zBnk(R5<?FpKtMVngcbq`f&a#G-skyrzua-hd+)dz`@<fPti9G;v;5|7&b6Q4GBMyi
zAar2Ijvc%=u3t6Vv11Qp$Btb=`}P8V5#MXHbH|R;J8oRnyZbPGZb<jK4Q;G>(Nk33
zghc3+9SWA5F<Un~EOJ%ze%18%<Uei66BS0Uv-VWOzn(6+lFer(a#cje^1+O#%s!uw
z)Dx&<MQ-J=F6cROd5l{E(Q9FR{4Wj3mUY*S<uJVw4wgBtJ*7chXSMicPCWr0|Kszx
zwqy2>U-zG_uC8s*bo+DWG-A)syGMr)<9>bP(!PBVV8+{@)4L<kKks%N{-YlJ>vrGH
zQR!c|5swy*|GK?=>3{98P@`i^)=HsVboBIhf3tOqlT$qxq^ehMV&lhZ)D_<yoOHE*
z*=Ui>`*S7gm)FQgBhX>*NiHid&-r83Rr4WDO)Ewo_Td7*WHb_S8Ufp14`zHyO;^55
znlV4O-{z;tye}WYRqP%`x;DN?#fd#bgZqj!eh!;%8-bS9Tbs4=vD*?9?0ok6HnWTV
zOL&r&`$nO+XPC}h9hDr2fzg<6>}>Mle!CRAo^f`-&_{(VJFrKXb#jvdK>-y0yb&Ji
zW(+m4vQa=zs2K^RK1V9Kq(r#++h*J;6S6_#O^OO921;|Cze8IM4JDJ|ATjB1Qhx-h
z9sQs;=g>M`C5B*H)NMBZa!jB`5&wSOpGij6lfG9>W*H;>KG@D1m9??8z?B_h<0|56
zv;L|@9s$PT-)^RIX1+hPB_;O><60SR;y0Eoh!ZvL8$PMt$inxX65$sxEu9K*69Uu&
zB<xK&Pg=~IP>C@X4p&nxqEhcC5(|!}rKq#Lauv9)Zbk#C=D{%H7mRcuec)62RK+jx
zt?m}Voxs#6TM7Oh6KnD9x-M?yl!FAd9hqxibs~*aXY8F2D&TdhovT@!@1NnDysJXZ
z+CHUNrffZ><37wRzc;Vw#r|w}om?ez0X?ff8jGB{f`R1WYqC5-mWfZlyj-;6s=DKc
zUoK93X5DtH9U}SNMiTS=-F&;&{%M_vHn!g6yq7bUB`0X1@h2rS+O!nkeS5|v@96PM
zX@lEx1WAc4v!{b@l8{}V47$eO0TX#3@kNsAp^%p^^#6Qu6}Z@#s-Y3a&*hkWbZ8(A
zzK~0K+?U$-L3DhH!VY1kpolU9dqdVs_9#`&q(eG=Q7Z=5@+vL-K`y)17muGqCJ}Hc
zeR)ND8i)VFB$Xk86bdF1t;RwO;1e--mYnwYH;v7n66|bzZq?XVQJzG&kTKM^ly=T8
z&Xg2^p6PW{ism@arc8EM`=bBZs5p`A3cp|>i4X-;uJi)eYWA^J`wSzmYCQw5ku>nK
z*umT1(5c0YNUXB}yR0A=y+mKmqz)&Bt=^tt)b;tTOAqpDy|PPFpnY-*Fh}Oav=*o+
zK@RvDFta|BBlBXNh_RQyA<V9f<ppQm^{9Qg5~yTA<+N>BT(b-DB5qO=D_2FXQZD{-
zE{~M{>R3?0>%eyFc3b<ayqc+Kl86=yUB1Dw|9imsY`-W;+)Zsr@1?c<DYXzeGAj(T
z+e80SPmgrh`g@cx+8>f`4d+W-sxCg=E%YXa#!5Y`fNZ&uE7MKe8M`0K3u*Spy3`lH
zvp5-Z$+tXMd~a!-cAsA(xN1M7?1i8+?qan^S@@^a^4)pRRcU{#Nng<|_r3ksWQoF5
zh1-1MWzakN0~WNE)`|;pP9duUDDBFr9dVFfe$P8{w;0Y|IhbW!J@c`e+di$Lmh*Ou
zL!CHX;y18<&WCc26iupXrrR4OOYN$7yXm30(2`0K_7mNgb|&RYN?)enndu0-2h)k?
z5TZ68!<pK(?@emGm0pF8mt;!g>#wb)+Ngxw(_*f)qY_thx&x|Z{i<WNz^#x6^TeX1
z!#wLH1GyxQlM&w+G-zsd4s93YNz@a=bGEu6TcSN9BiVbmILSvt2q$;_@HZpx_do1^
ztX6X`mZ4svYtdI;m*Lg#r-Zc@i!XEf<St^mtmKG03gM=%cGgrXW7Ko05=|ma<062T
z1Vxp~m7tTf_LO7VKFGzYt=UgGw>kNt23C*VMBP-Lem(Q-93qB-25<YgzR3HJ=gyik
zZMnw@QRwxOAg8%5%^bIY0UeK3TbH&78|1`iwQ5AUuKc&Z2*E90vLcDZ`4AAsRhQlx
z62g}c`{f-$J58(3<&h?1F1E1+d(rXTLe?g&s4E!*!sr&Fe3`k^eTkC<u~>uA+cz@=
zl12)YhsUQs(68zp-${Aj-LV_fwgFO_Ee$M&TLg0)_tf)2O3pYqcXqLz>e!f|Ely$i
z&^)c0Ee$wfS`(DZ48Au-T1gww>O<)dCRN)u_c6+NA>9WJ9^dc%%YT?smK8981@UyN
z{+!*n%+C$r8wLW)A9T7q+7skrz=U}*2ue_?`;b@h<-+`9@$8{f1xxkg?<UJI5C>fT
z(9N~kp0hnJDGv2u&f%4RBGLA%x7XW1J$_7Z`9%b*xLv0K451I0)*d*GVD|p|81H(L
z)cRnOglijitk~5}9$zsR@J_C+D&=f^lgBc~@C&$O6neOUuUraIqBeKQYsisjka4yx
zS=?()XyxOs=EFEPl(<<B-fG}ma})kKc~@d-1lpiN<wTkIvO<=<a??|hZf2xXKe!e5
zTy0;pZ|a3Tv5zCsefwVswU2m~;gX`za*~5fWpS;sY6x@BbN%}(W6vO%q)aYG8co-D
z`>(4Ofv&M#Kdi!6yYMlBN;#0Xmc^V|U!6)jg?Seps{_&BE6_<(l_z3S`k(Sc5>01o
z7GJD279F1gV_x>g$7VcQfZxAKA|Xk(J+tpRuLSv9FCX8U3l+D*Z&@~gosUZ=26fGw
zvp+}}F-I+l&4bIm;9o|0njGn<n}gq*`$9kW1>a`Y(Gce5sh?VZ|DZC$7~RyXpn=o{
zlK}Vm*Tr=hLEa=ahsv-~9kY};**?%AZzA`=UD}54;R}rt6nZu;Wxt)sb7+&0;l%K?
zp<8v4vuxT{5@|N=nw=0PT20u0P3i(x6ZTDitF1?gP#{}+H_Rp+Ei+x&fAiy+6xY55
z_|=p81<Fb7f;1u&?%XuUgO}?wc`+U&ZdJi~*Z{sztLikL6+2*Fk`!-BXcvZSD_ScZ
zi)1NsM1HJ^imJm*ZLi7ecIWl0ueUG!iHyjXKetbP&l6q<SfcIAtqXq(mTYWg9J~>=
z16eqpHsFiaX*PvFywgMW9Xu2rd-+1!mRMTnxZx=Q&%|*|ow9lL<}7i#<sd}9$(2bd
zyb?5Kf<1|y&s|z-7R4C`ak1*RE9*p;Y4BhtF~7B0B#5UIRaZWh3+F(E_x_L9)0(@a
zQ?j0TjjIDHiY>iuWT}KQ>Rx_P;tJc9^8)=<LZ$j*iP2cngh5k}f<_57tK@+XB}|_^
z&VCat+1!@X`4f0(NIDyp4M!ctiM8ZYA9w^9X7tg;(mK4#h|2M(P%OT8lqO$x!?_H~
z_H*faJK}crWb;@Jt{kPGl3yA67e)dBvi9-~wZfYkPbc3t3K}b8`@w4U(zNFnFn8_c
z=^9l56DW{P#jxLh1e@_w&4_C>s|FI;@fXb9u{&ykX?x~3>Ww`(N~^y78@Gy_tS9+B
z`weK-<s#5i2Yw@6Z`*wk)0f|CP;bkcYq|fZ#6y|e;C}lvb@2C>cKmOK!^9WX>0$uv
zJYtuALyF<jIVfx=q+wn0UBITx+OqMNe7rw=vs_Ch&9*9WOV@6e-j^GPukV8_-{5VO
zgu5+GcWdbX7FN27s|<qR=DT$M>)gW%y4+zn!C=k$T(!2|zu!=ZKxZELomF`2ugwbn
zD=60j9l%U)_L<mDEL5vQ1>T$}vkohJpOaFh=`>bj$>wzW06@~+gK*)Wl0%TP0eB3-
zWR`S$3?E}!%o<32)7V5AQ9%$~NypQ&`tlZU`w9O>#c-0%-QXX3@^%7yQLz}(B<b%Q
za66vn$V2+MU!Mm8XB$X!?7FZt8Do0@ZySxy^hapqn9nR{<@p;IzTF0x<jrk>Nl0xQ
zr9Er0vJ>>P3|mS`68<a2zMeFm76k8+4)m~JuJDUJj;XnO+%hgU>B<x!+Eb7Y_uP($
z|7pmTX)@|RAM9<C&R)R0;W+Yq^2&$1x$Bjx-6)xmdG7QB;GG>2=}lDYy^#;#0|2Xa
zR&Cl6g=p<iIK1!Blkg0*uFPW!fSoW7d(j!o($=bNc6WoJ*GNwKDc_|HE#%W}I`g_d
z9fVq!oshwo;K5ll?DnI-<vr3}%@3hTy9YTA?o*`RWrkrQ(YdpBNPC;xP$T_9&7-7f
zn0(LK`Fav8#yH{1(1)R{`^hnn;I`rKrSJ4?u*+}fyUDVPPXA5;v*qeZdp|*&KJFfS
zX^z#C9i{cxCDDUlxPW`yRur*j0^gFV4?rrj1hMv#$I6WD&s{*iJ<<^%2j09L7D_&K
z&%x{t{EyMd)vW^jrdHvXFM7P2EctV*2*k->lG=L&XK3bWbmyfK8+Px5@7x_)frJAl
z0<D6*XB7S^kJ{Wi;qzSrP@$w|D7{dqd-V;WzY^c>W!`sXJ_hx6tloqcm#8A^HI?FU
z@aK^G`T-G?wOsQ%)RX&4CI8!gqgPf!x^RDsaII(PZW<qr+?S;gb-1VAc&%ntrqBDy
zMcP#I0GXLjnk0x-PY?^kGSd4xe9dO`$FRQ=<p<M<D0Jid0VU?oz+aTBG57h#K}hpS
zA=4M;reQWOkwj%>r97jt+(bPOgcWr6$Dea~BtNBzAb1gGPdaUM--uegLvSd!Un9S9
z<UyFcH9wa{=^FQ_2GbMO!uCe|Hq6O-(uK1KE?i;gB7%T>PRhDyq(S&6?Noq+N5yD_
z&6X8fX|&Jq0wS~z_<zC4m_^%We~_R*s|OaN(s}2XTrW9o8!a?!_PlnTdc4QrvWf=m
z=Evit1Y-l@M&CP4{nULB1O47w3v9)^zNF||@cV{JaLqR^q=FduO(@Rsdv!_aekZJe
z5j;xl>r@3jWV~;EzU$(C-Ub>sVpAwuN+?E3b1mF(uG;#@ubrICC|xkOwo$at5YOlu
z4|tlHQw@O92;|9vB#g)2_dz8%tDd~uZR&;a1`=RDF*G9c!x76#OpPXVZzWz%dHPza
zO|MrIH9+Qr+M%0eEqiiVw7=zk2Jp?zdnQO4lI`Sj`J};jl^m~N6;y1C6}P$ug-LgQ
zx<pBZ$nd&d%%@fg^$ZQad-wY9Kew+iS<SfOpVQlsZsaNlfE;tMOwLT>;#<vo3h;j<
zRw!k&!|B&bWSe7#(Rqh^m+;Ll%GF)}MA*ca)hN1HePgG5plV@O?2tqWsbpc%-p0<`
zTumtHX(bSreXD32rTc-14c(mo{$OH@VSByRao?cgzqW%aJBc%F^}=_GcgM3aQg`d7
zkKISf7X|v(iPXm$Pq%KoNb)la2rYJ83-*x!Qg1sar3tvbNY-6OnTm?84Aj0&dYLV&
zYHc;?HZ>}+H1J;HB;ZX3YuevxN)#|TWyfVbDKaUh&@kuQcvI72r}j^S(^YcDMCpo5
znCA!Z|I9;4@V9fmg#6b}|9_AD`G1tFnx)|E`Sg!H??=3QNru7~f2mnHKnZ#M%cZ&h
z|I*&^e>dR>!n`m<%hOmz`?;E$?+%_lTl1g6pVCUQ()^c1i*YtsF_J;ijG)%ieGtad
zTz{wHqKfmmy!f^nvACIci=dND68_F%*N~05_^ps8O9O<t+tg9Fu-OOeFS`w>lp4jK
zN%oG-Be+b%YbA2VsXNEFq17o-S)S+H&jx6#zqV<s26km{R)5{3-#3~ZPl{K0ouYZQ
z{jyTdjk9_LNmpju*T3bW|52G(!LF;=Df}_M^?}sX)ND5?^j~}uy<G069*yi9rMV^8
zbe=nVan9DVJ5>eQ?nPtty4L8F-5179R-R}-T;>;ct*yFC{CMK9btm)fk*iJMgmSy;
zb6-W`kWTlno|JSsZbRv^RxR)kuxay3@(1bI@hQH%VJEJ|9;RB*1-j<aE}XrZ*?G@=
zmHb8#eD)Vqeq<DZF1Q5*@9829JAu1>l_jOmbF-I7>+k7EZmXQ|m-(vA>lAnfW_ODI
zh}BkNxKm1yQ_79n93CLQ<kh%XW^azNNUZ)BPuu>906klxbvHqm<G-O7DFb0HE2lGV
zwHHMde0m}1=sNXjNZjga`!<ad(e6bMzQW(1puuW7<?C>h6mMSi;9#4r<5oDEhJPv~
zBGNMwzGZfg*bw+jN&vtI#k7C)y?XLqH`~eCxQ()2Bj9nvEEuD0;?X?*UTz>vVg>4^
zh+LOVW0d3S@%<aS%^S-jo8oY+E<<N_YM6kU1U9voYEC6}{oZPw!?;Yd8B7qo@8&6$
zVu)mcN_l$15QPOL-uotCF@Ok5fgQeYf-4og{Q1Uh<jb)p-@Z~SztrbiJOw4PO21|%
z3hZAJweS0^5-;KAOxM~2=fi>kk3i}<q}Y5Cv9ejdJ7rOYqhOiwA`PIL3QdwED_Tot
z)ttUQrdGNi?4A8R8GAtQ&io__K-bW9MZs7q;u)2G)y^X9Njr=yD9sCcrP6v#t)bUB
z%^SmX{*|lL0fO<{W&*-v@Po@eF87f6>jxDHnIGb%4Nv%`Dn^Q|!Oy_7T<AB>|DN%l
zQCiBKc{?`k9z6R;^tXKSJg)}5IORv-bIN5OQmjnwaXbV%Y=oNb+E(Uh8h@>xyQyKy
zIk*2#(d}`&e+pr0|0wN3f_)6q$=4zrg@Veu-Z;Cw9jdYUUb>O)s?`oFe~ETBw(K#6
zIk)YEDHQZF&AAFnIWN!p5XRk4ByXKA6UpCHbVQhXlzJAD*@{MFo4d+KZ~p~cUFb%b
zJme=nd$b&}z&vJ%zFB<Fw*;_FkzdL4t5P6JTjjFcu%-|7TzrGx-Ln%V3Y|+SkyrYS
zmj?muj9GFcpk-!RGdJMTF-Z;0oj!J{nP3kPR4dHA5qv*B)%I;)o=@A-S*O{z>3s~M
zMR^r{g?H!flmTaWDlOXWL(A#1JZFjju%DmH|9^EQ<^M94Ub}RFj2}P;!oW-vt>}gM
z)+&}!JdCmH)n7^Ev$<Q}jm*w6{cae@gyzx<#o}}-nIK4b%hT%7Ts%G64gZCj8Q60?
z_Si};gWS1Z9JhSDzI#POa{02&@u{#eft1^${e~eH=}jSt!OnY4gvm<Y_UTQo$iBn@
z&a#dPe?55jxUx=1I$xt?U}AZCgqmh>;@q^kqAx^uA7s}Ry9{#;y6y`o$HcbQNXvMG
zJ8%f{GA?Fy$j6i#n6cRkv&-21CAqq~W(hsx_ibL9O98zvLFE!%ct(+|P=6tYdF<H_
znla6r13tD>>G7j;&(O~-%U7FAXH7|$hhQKC=Q5@94Ax6+6NiP^gz~R2;q(0!xj}ol
zH~(5;j_wMb7nXy>lTlI{Yo>RmOiU?hrZb$}Dn-y0L#w{(BZ6H8X$RS_eL}9?gjOOq
zKxuJZa>-up5X!=B;$u>xTEXH#vAOf=a$Q{a(}m+ZHDdUc3-qi*nqt1_Q}1y7gO)%K
z!@xz~!my0Y+m8hT?Rt8xFiv7uieBFpn95Q*g_|T{L=|RFClxP8dD3S8+E>+XOiB#5
zCu-=5^QAKudPzE(F)XR(BNiD=PFvCHO@7~6F0EP9N7h60M3U*?$(H>by}z<L45F4)
z1#rRS;SK3Lig1sQ&D|{EQN-p-sU%&WQ6wptF9%hp8AG%vxlo3m3QZS_K=1qNhRD{~
zvxH7ikjoa&%K8U_e;gmXz#M6z=Yrpoy`+<iSjhznkM>uz6G-N?bk;&I^~Yo)(5?e+
z>QlREhoaEQuI2c&?#DyT7jCxHbrhK0I#KPlx}p7$b9u{NZjW;VJNS+7I^wc81wG51
zurZ`YLm7+d)x5k7U^K<%z@J)?2{@&K!LFJEDWb>k(Ab;j^JSGhL~gCZ1i(#Z8k+AW
zrH69>o|YU5te^&USSOQ3<o&Hsf2>F707_h20X3BOdAxg2i!ll{v>JOn$t5uNQv8H=
zy*=GFmyfcZ0y4rdR;ODX81S^UCe0d>maZCzjBe9FBlj(nBG>U(0mxm=?RUzi#yDvN
z?6+`Rsn$0T5<Y_vf!3)|%QEwv7jItmef!X1HQ$&|RWp2QMV5>Vgkjm*mD&8wM{zYU
zVFT@6zijw3r!UK!)d1O&9I!Gr8XEeyt&&_OmOK}dJxH<b=2zw^ium+&!Y52sGb%wW
zNHF*$r=e0l5$j`=mXY@f(yKr9NN4zMr&_Z@<N@|N!XztXZ-R33x!GP9VyJUgfvB9N
z`KYl@p4ZvRHYa8|3kvl%(e(&XPe_JslnOqg_&TE2ltG>`$y}O&3ALt8Z3j<LGW#f|
zBF54A-t)b%-0Tep#_fR`$m}IEA2*Yf`6cuKLv!!SV$)@3(TBxI-=In{+}#SIr9ty6
z9bz85sundg{iyThxnk4OaQ|D}<Ll*1eoAaL%@2gXoo_K$!(gj}cUEuY@<1vnHn(u)
z3{CMK7|*lUg;qT4mtqaFI3y>#<Knz3L5BcOLx?HJ8e@%X*w%POpHcY$O3PH*oNfJ-
zdG~UynkTf)H_TW+GVwXqV}Wd;cWq1r8q4hqFmCy_GgJhTjh(`9)C(1Bk;v1rq4=q(
zvp`W}5BF~&PHydRkXC^gNI#T`El;Rj<UgV-p4fUc@cc^-*Tyt=)iwZd58C$OHoloF
zxM7EfMbf&Y+|#kotO=E_XqJXO>-J%i-hG2kzu&z8x~zCl26mUaE@wo;p$!eK1*j)=
zRw~t2Cn|8TaxfVa5JIoWt-jSkTU9-q_dx++kjwRU5D63xl{as%Q|}ywKFhx{ub+yR
zCTUZrb}wo_On2s+-z;8?+F$wYXgye2KC8<<uKW4D>nWMTk^IVxn)w;0sD_q2X%ynT
z0PnGK@mh0*zu|-eE{fpLr@^hdvo(5u*`<DSGROZiJ*7L@Giu^2zlW#H0kU|tv&n}~
zz2;)l56&vyUuRq)&z2tSG9$<EB}=+ou)CQWTxCjwLeN*ZhiHCT-J8r@MCs5pu7ID<
z=@`tP*o)nfByMPberwvISLSsd@ltO?OmdA>a^-3_I?k@Q*#NC*j(14iRZUv26GvDT
z#DY8+l&VGPK=h`uy7mccPaU(fdRXeD*H&-|^63<d`9<4e?+R{(CAurI`4;JwPGfP1
z@VZ7Aa!yuC==u4hz%>I@v(c?94hE1CwKCQ4Q8X%cBOR$0Mlr{}93-hliKnePnNRhv
zLKEo&lMfHQYoDjJZj~|^8@7}AQ?$`8kka7#?6HJ7oN}<RU3b9?($L0Uli^i#Srgr=
zYo-poXgg_<A^Ef_DQJRe-$-Phk6ysEta^N)wuQ#BLdTair*lCewJg5x8*_HXpOfUL
zKc*~OyXe{Ir&Ndy#NmS$-Yd(^Jo+?=gV9&}9;hB$slyV^vxd>a=O?!mTHQBBN<-R5
zp=GcJ6Ngb~JbdCgiIaXhlP9jSJ&b|g05QhO|3qAiKR}jezJStt3!?H!f(NAJ2s81_
zGkL?iVto8I8aF013%Jd!_Owlw+LLf@&e~c<P<-m#a~g$~v7{MrhrPLwS@9=kl=daQ
zcG9=#O95FP<UvPC2j<>ANm_A3vG=~4C4GpsnV=IG(*_>>8^io6Z5E5`LQm2Jg%6hQ
z^h?uevQNRwiT!mV0aY8$Ev$L$fzO+JlLynTfmX!yh!z}fT)Nez@!>AT9A=9v7?Rv7
zd_}&kkZ2a>sh=`Wfrlh1gTYBl`ri%sMQhPA__us+ESTN|I29!ol)JQd#$mpqBKRIT
zBP8z$yrOd>b}4RN@JyddsK1A>eMO~qF0xx%KkB&|fBIuG_>1;(WjL3VIPTiNliKX{
zvVqh<;*VyF0^VK8s4ASfcJYYPQxZY=U~3wuDEr<$y%aOXi}`0{VMN>0{z*czl;y6H
zp@cB)SZRR_H}Clalkk})_{3H}dJ?X02X5dShg@(<$LnebJnYX<5(}U(EC}WEO5O+V
zZzZXaGEK*@FR~OXXSD@;f`^vF(BG$mk0I7(=Qrl)I?WuNB|p)XflSa)2v2xeK(2O?
z89Fa&%Nz@hn{9BgrHVB5%1{HX>VXxm3ecp4=<Ee}`vb|osYiXDUNI6^^cWOM#88Ar
zOTuzRQWU5^T5zg*>;S~Bg4agx8QQql{9-Dexc__5!rQhv+Ga7k_Ef+~-D$)%KN(5)
zTAwJs<oV<f<DSs$>im`XS@6ceq_mk)2T75B*{oA)!I_#{HI=n@Fvf25q`hH`PaN}$
zC7`w#(+Rh(;BGRSs3O_96dlQ6A7#p)Za$8)mOC8)y2vL`s>vbo(;6$lDXDr*vSU{;
z)PO2)D>1|9SIup637!ok)?_0-dxO`Bo7}cDew~IzvOgU<(!s4?glo%FY!KSE1!8{2
zIGr(z!yM8LCHT4ZgOE@$yeu!okL?n<CjiYF@Ua0WEgtYvUl}wc2=27SS3tmh6Qj!%
zeNgQXE3DO{Af@KOX1{LILa((xWohm_IQX2L^-x+nWaL=;rmWj1P*zjOafq|$Umuq5
z-=e`=NWd1+Zg-3#Da(ErSsE(TnW{L1El(&0@Xo`I)U;mGLw!z|^Q8Mz?9-_{`^?*+
zLDCoSDEU0PJX6Yq;MTpC$?un^N+HurI`5yK4h<3JtbgpWA+I+_H&g;W4=_<{T7Nw_
z5Y}LEX%wmiKYq+>6xu;N=$JS_<eDD`DoX%nGuF77P}66?Y@7$zAPgafp!8W#hkDFt
zy!A!-3UR2|!#b^p_Onc(lAr~&^m5N!=~z!KAw8!ezaV=ezt=I^*w2HYj}8&Jvu3@i
zQlolW!^~@o?O5F(pdPK1s_2;{Li^0Vb#JgEXhao(8GfI_DJwwusKf~-$A1^m7DQ13
zw7nSfMt(^>srGWU``hq)G}>H%9k3d!d4qU{4vV95w`R~=eD2=fx1Mfsjh<sARHe@6
zMICB(BFDDXC`OePl5A==E@Ia|F6|ua(AMSGz<)aKpF&qQ2Y(UgGz;LYi40X{dXQt?
zQCF6A@zz4`)~1T`zz+k#E2^FN7Nl6ntQlkcaG4_uz07rux14nMZ=A0jcn473MKDA+
zu~s16se5dZkYdL3gs}M&e;xOK#WtvZU7K_!Xs`QhuC`3X;tT8qY^NVb25uH+j>k}J
zat9WAmsk4&*8=i4k{bWMgZ`SZxYrWh)#o5TzR(M$d^~<>jcjYTr}@sBQ2p`~Vb6I&
z9Tj2@U^KOTkj{jqe-4>E)v;+dL@r+%>)G|^R-8^?g4f2=+Z~%3n{0kFzE;U@O||Mt
zblEqx8AkA2h_Z5MeAvxV=whk!mgb5ad>9{QDOWMXht&lju3II)m!Y#XK6Z+Gh6r1$
zaSm#He=O3{^{dX!e`3k6>{1lBrfC_qZ0~B4<6uQFtdF*OVx>Vu3FRw(A}v(53A1Ar
zjsCqMNi9~Z%qK6$dhP5IiNfH-VxhCXZ7wn`sU%9fa5pmeWOWJMKt3@%&2RnUh?=r{
z1?msYRoPf+1V-nhxo-OjBj@QVSEc!Csw2;ne*BxXRRjQ2HWCNC=cZb+`)INFkmK7p
zr2#BI<ton7$XtERqGe~T&T)se)|UNl@yRH)fmvb5{bw<`UTU!tt%kfpQ`vh*Y0vxZ
z(WB6AijBc!o?=stTChi?n&JjR*<-`FH|Wgx0wyyU){sO6qGHW~HwD1%EIVAZ9ZCzr
z;fpuL>PgE_0Ao-HH!eB3Pa^<ZIru;Wmv6IqPP=N+RYvJZjmWKy4und+NnFNI^5mY{
zrQTCC;~MU2W;u`-Y%&`ZKL>Af4sCOV3|RAesdj>YP&l>|w&p%GRiRdw=YuT9F&t(d
zPb6<5gGonvC!LKf&1a8v&HI<G;;<8?<sU%$GVx)%bynl?TpAt~TS0`b&2rXYi9Jtz
zJZH^aT8tUmfpF(cwbYG%kj$~HLA8*hNxGyJ9IawAbgfl{N3Kwj^q>Z{A<pTIl|^7S
z4-Ytpj*xGo=1Up6-b?uKiQ*sb2T!8*rAt1wMr#51695UH7kazb2PkkV5EH^gSIXk*
zsM|4t&m^c~IZ%`(HT!;21);?oDXM;iLWB43)bflJ?GMchCpb!d|E9Zi-+tPObGw#Y
zV()dZfwV2(tOd+K&4IVc3%wjbzJq{#vH0T4Yr&p}aqlh^EIyvR52O`dUHc(J&(J3L
zB-K&NdeVk;8L6ih#luc29x9lGJ_0fiRHa(p2cPu3d;08DYj2VGZe4MX+N+v38_&>s
z1ozN%0B6LE1rE@mkzx&CJU_hlLTI=M3Eyc$p8b%TR~>@-(jOUC4_1T2Qyajvah)p@
z`-2os35Tcs>cg%lUy9uW;gIY?bb#a%*E(3deBXfvpIV7R+Y^amC=L!#Ir=$7iMfJB
zd0uk`efM2+A>%6epA{rxgiUXF_gsWc?bfuB+u-Q+o4O)Ya_da3&s@JrPfyoEZ#Arc
zNeHKE2mYeP0WwbxQVOWq?;Vv+HCrquT?k5K{B=mn+q$0gg7lBc$j4nXE>WWDqodFu
znev<$g3n9Z9nK{zn6s_Oq$cSHx<+bYsHZh952^SsJ&FE6Ewufh)5b8AJsy115Z%DH
zTWkG`<D@v9Hz}ldFUY92#oR7qexX-L$WxI2?c`sG48Ad>RrqKBeP;<RS8YWOqE^vu
zKdmjH+I*^I<d?Crgm;P_%p?-$(VWd6#)e;=3-5m{a75qYf@zKN2d};WsNizX%<&Mv
zdYNVO(3yW=US&DiQ}hx$NMHIxIu1t?y;Fzq1o|Y7k|RasN0G>y1NI#ad_Cb}hax#4
zO7bkQ`98>Y>Z;rreC0kxOAe2s=S!4&KP;(NeBM})D#(YGHm#cY1kRe+`vs73!8rs-
zNx(B>VWHPHYAkN&1nF-50Z7TN&*Ap$g2~7;mcuNsi*Wc#AS6bZF&TD2cW4wj@Up&p
z&2xUXcmo<P5Wm05Vm{9fAC*Jl+DxKQCb!mROSvhau9_1oN)R#6ajtPeaM{W&T9~6B
zs%0j`&GtH}`LZ8}1pA>AVLx=jn+7jP6!6^RBlA#Kw-!Iop%@IKpW<Tmh^%2rndlF%
zlw7sZ11FO*5yGJX989+OQ<6s#$dN+vI6oZ}B8_*dtW_2q3=M^zlE?(yj|t`Dm?{Fk
ztr;|+9i|HZuU%*)LDGh)A3&qx67y;C#Q-vL+phGZxqI56Co2;UE`9&VF%-;AHXQoW
zxZE56D8Y3)jO46#A7EN0<)8PekqhJz<~Uo^7GaDt;L&I2sAL9tUa&7}-Z6CAZ6)~S
z#63zM@+P6dLyr>}ebAZ%i-J5eHC+p?x*Po9R7Ggse23y%*n?4|FWYlvWS~m>euCu4
z*tbt|XpY+38pT$J!9h)2>s|Bw&#~11E~={7R_i7$4@ET-2uE#OV0L*4vKf-{#Gb9k
zlJUOlxyBk-i^TU<%a(nC+hwE!<jOjI@6=TuFV|=%m+Q4g1YR|>c|lht!7Po~%*qAK
z=q9u4M_`Wofm{cJ?Uvx^?~JepR_O?BV=iY3i4Gkf)10QAS_x(h)nEKHxt`PY#jM&`
zf(J?K>YAJjKJS{D8h^r~Z{h+k1mxxA2D(6BO{o7T<V(bJM?S3)B&<I)>*az<bj{M|
zJ8QG+K?_xF9;2h1#eV=zH+pL?B#z%bC3w;FMV8p6)9}UP0)FhfLg3=jT?7Cci}xi@
zxsAPh-Mu*$GRd1wZ|p1hK1P{5gl@^K!eJ}YI|A>p7tiPOzpho`=ixD(Pi@QFo8^x8
ze@C?g&+8Y-p$6&{gAO!+t(rxP2k}u}S*c}CssMiVyg!WLRe5*`WAA~Uwd2S-CkM2`
z#yahmo5s1K1r<KQ^<df@RT$*g>e@=J)n#3>q4(A7lSU-+B^!e7M1SgEy|nL199Q_d
zou~qY=8n(9R`MFe9GYJn`cDd)z8AULB4YM#3^(<$!OqTHN87`{aYf~|9LXxofJ4l3
z*V4?6sTfW5(_38pXlBP+nee&%EZW%)@T;{-_cP;!Vv;8++TSGCXliUbt5T%l(9NoS
zm^R0trB6P5&y}YivsCSIIyR`MJZMOQB*R<%#Kt_ti@vTrj9feKhx``56Gf6`u%vVt
zY6y{oZ>Y;<Q8XN|ol~0AQ$Mz|=Hk*tA5+FJPHZ{+?sa<M+ks9ZX@4lzuU164l9$52
z<-m8%$DHveD^V%)Hs3I=yd3EmT4K2hoC#P@=L?82m2l*1{&d86ddhR1>Q0U=peH7n
zx}lFrR8aedfgF<!)sf3gzI+H0TFKjD1Bx;Si#<FD!Rqjf1*Vi}4??Vf8isHL`avf1
zzCG7X+$_J~`3;#LBn4q<n?6Q{5ic-&Tbn&o)cjT_(7cem^sqreZ~J_Dv6<SO7{cZ_
zE}seauF`%9BfU~3@Vo{XxlN7*NfrjyU6|}9)5_+lQ1bbq(->+OzEc8dX*`AV^rv=5
z1w_@%qe!ZC5wGoOtqfi-y=PGkiLlNZf@mOyb-sOn<)I`n9^kC+XnxMoky;<X>-8j_
zSuP)xE}|E!YUUbVduMoU?~3F58qyG;`O8~xL4T?`=`_9bHwVAD+d-`MJwLJC0YF$3
zmZc%x&!(wCR6NH#+vbW;8)6(CBJnuDs1((xVj$i692KlrM@Z8g7F-jlB2wvZKsB7&
zoHd=YqsOX?AH=PqvwnmS#E%g2xsO(ZsspwdOI-+)S<}@Gsh;Ny80es%W>U(=>BF^{
zQRpWVtGJ3FOGr{WqwEEdL8^{im0o^d6K{ZQAlU|eeX%#Ib^mj$Ct3z{Fm0_$V=6)f
z=ol3iqiKAoasNsTH91Ut3X~rW=NV|3pFyOmJXf5bF>~d;|D9eW0msvdB_z6fmn>f@
zI})1r^zP+>=~u(Tl6^7X&g2=`&$@mcqlwgo<;;!9!U8Nk_h|ugy6CyNWDU^%#Hp2P
z*Go5+FLxmCvOu9h-<2<1L)L@aEM1KeIy#jc9S)Zxn|No_C0=b)tbsH>A}#vfe%8qU
zYn|1=w1|a7(t{4wI_-pqCCJmB4-W#QrTz?RbLq<MW`N1GyrpwiG{J?#tAxOOtD5sw
z)C>eH0uMJ2SX+Ho!D*e!=yGkCRQcR}3U^{ETKw%F4@HsdHJ@bSgY&Ryl}MK|hIuiq
zLMKjUnq!mZH#c8{0|jRjLjhi%*hc4)XKT?zQ#1J?YE>7?Dr=TP=6HO<P%@^QouO^j
zZ1*8msG=mYx;PMa+c`r5Q`%aPSnFq#COW}gb_8fs#b@khz-Q3#KQN)ZvyK#NF-!8P
z9XiB*-u#a$>cT^rH7}w)n)8z*MANJ9daAL8We=SVBqhEaA{ET<#G==}Or{P)aCz-t
zU-~kJG~MH{03{0Q08^&oyG3y|RQrt9h2C(AI&2h~^^+>9ZSRFl0NxfTf(-)4d^#f1
z*Kb*eFA1}#Hn{#sj?n%Ot?@(3pi^~{bJVfUxwXP<l*z8uBXz}r=&oYg5xU@x*|DIV
zQ@ci`A2rFu+af-sMEt`qS5b0HY`J$rz6@0w9@p??xwdJuJj-&Ab5Czev(6J&IK$l9
z5A+VS>(W_m`4I&0T<^c|s5wFvPA*Rh;zqF}kshocXF5Q{$M1#ADemXm1UKf_ep$of
zO2Zq#fb9r|RttbvOS^BKSHEnRftWk1JhE<|cbs&Vth7V9Y59+DJV5`7LHnXh^h=2A
z-69j(_|YruH~WI`Nyt?+I~i2t!p>39=5s`kc%_Dr=FL4jc7I*Q9w43Fp`N0yJGMO`
zhyG4h6o0~uU<ar8MhifKceC!E(i1&N87s}j%C%kE8e(Cl)LU#{_2YLuIvs=sj|T7F
zAt^BBn$91AR!>>lAL<d1-;_%H=o^jPv(})nZwJ#S(?bmYEPk>M)3RKF4NI@A(Tt&l
z2(?A~#r^Ik-SNNfcvQOM9`1B)K2WK{bg~SiO60y(n{^N+ee!DAn9cxmMmrG}N+dy+
z96Z;iy|kRjUJI3vK(XBA$mQ)~@}u|{3&&kg?YxNu(j)V(qcxuXD^dfU$ou2Cu*0}N
z{Y5xaF_p{uCY-qMWSjtLzRTooBee8Mn8Z;wd*QJTix*m!i|{nz+w2}$TPT}*4HPHJ
zMMb+f0Zsz|+0O)ZkVw@Is{Fu{)JOM0d?Js0F4wmXvD2R>v+nBJ?JeXXGNgJ3(5xXP
zsl&K>0bpKDlpv>OZE&8xnQLXnxB%O7aqp9tJ9>$q-tc8o!L5$#fiP~Xwnkfy8ZfAL
z#Lw9qZ4-Md-t4vXOydW8Vndb3M4qAPVCLqMY@SyWhV{@Si)<HxwofM1UEEr=8^;ZF
zGe_tLxgpiZKJ(F_QlDYl{dp!pV`RGH?u)XoO{ebw6f?9of5j}PH}6aJKc#Ul>G2EZ
z#e)?aHJfw~9ILp}`5+lDpz=rG8M+0}hm-=BI5JBuO&AA^vqQaPA7r#;l#<*!T$M-o
z2A%K$Y4HF>lW5#P%vh4Cw60oicy0qZ+ET*Nok4OTf}FFIStB%0tohEHzlt?|n4iTO
zMc%#?!w0M;7|g0KCHj!GK!_k4x`?7#ZHK5SKlY$f_Ir&cd$elEyfjCfev243TU_Tc
z7DtG%Ns3y3VcPAaHb33mz+oT0keCBb3*}D;N$cJuZ02tDG4D3I;97SDa?Tfc4dsn|
zPvkR`XErLH_^v<oQL!|l;TCVquEwmmz0vkAKF@$WW-hD4iRuU9xW$L{(E>xT8EP-$
zB2%@&cD+s6Om*q=5X~%qox<AK`s+P1Mw@@}WqemeFv~r{>?nEMZk8F3O8BjgxWZDj
z)_hib@OEwg51?f``2aa?sQB&8qRH;CN*A+peu@d2S*)k>wB7gZ`=I;xgiNmFQ5>tk
z?rP@2XZbOkkXpfsOEyn;Gz)t%WhPd}Ot{{T+tm)eem}e8<+{E`Hg;E<B;7Z;`uD#s
zQKQOxolaW6FBcY7-Et{g`8<4gEwzt?vyWm6_Dyx+pB=>o4;V~jLvuNqI^`7k%1)4L
z8NGxLqOoeaRLa0+a?4vR=#Um#&=^oX4ZTRbu`3v2ZP0vaqPiG*J?kDVv3Y<})hQ9o
z^r|1-R~8Xp^jeeA&G@hK#d6l(d37KOeNqu!GZcJZ7X=XYHcdKWw~}&r02z2k9>LWo
z0-)H1n1A2YmTJ#P5Xb#y^#i_qhQXNslxwjJpG8mPsNi!qazmPS(N#jz%fhcOR}2@6
zrHEFp<n=5o;d8}H9hbFq`#-P%j=UlH2S+|3SNPWQmc%`T7wbt_&4*@vZPN1ZDgjki
zwhT2GV|G5+ILp0)1hF$;(3pFaX4Up@O#r*hcl4m4jmrezSp>5pu2qk8yU^cgAkWNe
zJbhJsNTngWC!=_zpm9{Xd9#z1+5by9O2~N3V7glCAfMK&K2Pw4hvm9^`2|=pr-QOr
z%|nn%Vz?w)4mhO?ej1<4{io(d)20SOREd1zN)I585s$NoYs&$EHSe|-2AO7Di5e0O
z<oD|%Y1Be`BJXFCpu7%jX7%K#98BjSo@R7cHqET>?cs553|JL(?OfhubS8*P4ig=D
znma`|hE^&+zd=eT_`1;(FDIpXe>1f+*FY_(Ku`ta4s;#OoBy<8$xrwoAjVl}M=y}<
zMchyFIZE-OaXcsR7#YMwKidC$ELVTFw=7F_#ua<OKO{~Pk@zAmiq>-twILN6_|)&4
zAU}jZGdUQe&zzul`a%pz1!i%}J?W|halN<c+wHTiCjllSV4<$($^{{hk<&9HH)g(6
zuVVZeG|cXcd`>GVPRn+v1JVSN5shgt6_t_3-!0c_o;5`$XsbY${bw_R7v8T7^-?Bs
zQN^MFWjt>+aqLkZu<KV80PZy;t$eqgDBqY~zDx9_cQu>VCvm=WU@gbyFeNJ8k{CvQ
zpT*o;iv?JJcUGQ`;1#8wmB}Gv%gqwKw^duZTqaeImygeV0Uv&$l;n}J%Ufg=sUh-A
z;r%1$Q-3V?f3hu0kCXrwvEv(Xa9Fu0o2#YIFM*DCy1&^;(h6jb`g#SKCC!==0y$P_
z_@F`Ue%<tj;_qWiRYgU`K07KsnhxF^-sfK7!l2mjosG%V4`|QS|Mn%5Qd8^$5U)Yv
z8q6l?=@7^%0Pm=}?XRh!)2!CPt>u(GAteX4&5&K{wRHE_Dn4RQG@rl~1DpZ20&DZq
zPv48US}PZ_PrS?`0!{9$6y8OCP+Q!S-FYzeN4%ce4G8{zN=nsjqJc1v5x)%99yM!c
za5pVk)=|GG>K+G6E)@o$NkSF_cqzuQ&u_8cQ#Y!@9+iMy;wT{xaFZJE(Ix^r>9`)A
zvcL}**Cf`DStG0CM4qNSSyHQi>)mFWR({j-7>55=Nm&FQeafi}zHJUII;-b&nR5lk
z74BPPkFC;Y=)O|J5^QeZ3Px-ClFnZxrvg6nHSldKd^%9$iAQLkpMuQzxh>`XAoOCM
zwS(_mIp{F>%-m(i*sPw;K1tx@mf+kGT%XUkdT?y$No#YW4*7oDs#nR_AxK`rC;h_o
z7lJcT_cQ4}j~Y6P_g5BgMeeTz#`iV>IK&%a_EC6O?Fe2sqT`{)EPfsl@s6htnf<UG
z-D`=fAIrn~A9o+coe<9e&Y^XgH1H+VOGOQ*KeJR$SV(l*QZFyg#8$TS(W%zW!Z<@+
zqbYGG8A_Idla@Sjx7L8&Dd1QcCU$XF`TgfF@m>6pc{%%m2R|x%4sgTLrm^2qu5s(P
z=GGozb8Tlv@sm4;IKkZA7o*VPr$D~xA@qpG<yC930Pf8yI6(2mD~u*lRA;>EWy>VJ
zSO?Qy$rLd1d`zySdB1ehy;)M^%7A@>b1IWa3ruLTo&2%{z^3MQn&`-Loid>nVtdqf
z+B}Foi@>ZT4NsRHWFBNIJe2*oAPP)Ky#=Td)D3}KPodv>uE?n4B~ppz+OI_A62(47
z!5~q(H?=RioU9DL#Hz6me|*o-%1L!OFxhoB>QTzw>@S%tx-9YYCaP^-Ww?CRQ13cn
zH50dBzV2Jt4$A%A3jjLj)swUi_+QCu?m(Xo#I4rVQoJULnIoAm9uuq`JZzAZjVuAD
zO!bb5oG1@(p2m{HY^cSpx96`tP|7i#e5O;OMTcamufj4CoTv~`h*x|G1&^tFhSmtB
zDfBUv$1i6%<Ap(;YI6L7Om8ENf-{g}y5tJU;cn-HyPaRQ?%X5xwsJLM7kbH_bHVzq
znL?vXb1XinzWM7mz(M?pcqZN0TuB)C5D)SR4S@ySq8iUM=f2gsI&YzKKMfRta?f=&
z30fXiF5p)4kfrwMoA6KDlW0n}v@NBg0)_1qLXF?cI*HCd*jm2>GvF!jR2|&d6&PO!
zL;Ej`qpEG)7m8Mm^jhX?eAS|1u-g1M6_Asu<C;{_t>E6u67V)hwAT^faiqH4+`uvQ
zq@k0OmQ@ULWGXppc=ff)bWL#}y)wa$tT3xpz%>XYsIoR=jZZJCdPZ6?WI3!6CV<%n
zmauvN>s*=aM8%AuUBzLaoB@TXT<BH2Yrv7EW8r~4B65krR1$4;#^Jw?@Yt1*u?Q2M
z+9t{Pi_Nn`e@m?RRc?5#I&B8qsxDv7Y8Q`~mv)__Jgt<ModQS_=datxztepp(B2mc
z5OYcEA3Zp3Xx_^Q_@ktgOLY<g2YZLl7TmDfC?P?zErF~jWFpfhHfv<pDKAP$`Cy#Z
zE_hp7&Fhc*|NNKZPzv7WI678R&62ioy>KJS^I}F@0R|l0J8bEa+fzaInxQ_37w`<)
z2f@<MwvFo#u~u@19IHJG#FDkx7UcY&q(TH>C0X=|{y;_*ZNN0mqj!@TH2kquTb6ao
ztF}1iTcO!&;My5_%I)H@#}mEmKo}_jHKStNTpEe4URvPi)kzdkVB1+p>EIz#>a*a<
z=iI~F?>G2t1wb-WK70JFfQ2MgEtxN1AS*xAL>QJ$+-@)WnW+c-o2hH#dq;K-wFN)j
ze~kYQx;${XqC&Ie3<PavSIG>zuSccUJRAvV2?ZG$z(EB>&@RIYn^sDwDI`<fSgN}i
z*f2S$Kvo=)j`K3m6&vo3`l{k*qs9@Nwvj5>AatE7;thTubN~`pMKPGjmeyD^{L!2w
zsu)6q`SVE&7)`qljh^Ug9jY!Nbjd8!x$KMT1>z}}mpNZ~V!81^eXqn75BqeSqxSc*
z{=g{ZBza!Ai&H!j<n*!g*6uxY>WaEszs7#Zxmq@z<U-vQ3a^XY-f&moAmAe<sRBl*
zexd)^`{)Y~cxy}yQ@nAV5?G@P(1_0rubT9a+vkx!Z#Qm@czn9~mATk*W5)4X$FulI
z)fw}kc2eOdFB>}{;5$1cj|0PXOaGfbM;-6~kv_vm^y8T#;!ot#mCJ9KZs*WK?Cjbv
zZ@adC^i2iGj|-ADa379{!Sxeo%T2DUyqA9--$gV3bGp9eN}9)TtZyjSumXZqZ@y2B
zRH;`4e)Xsfc#p^D3l+Hxe)03&O4%%P0VbkOp@2~of8xbq|Izn3#;RThA$EBk%+%pP
zDoHF}e|y0XfM4x1)X9l6{)V&T2wKQ>&+>jwxss+W*7Ku%`1Akb7lo*U`6eToD0=ZQ
zys13P3-0EC{-4g&+0IS(T7b`~WMaSQIBjJr?+ImSk;8>M!J7VM4<?^gNUh=*pSKqt
zw(v16c@nl_r&T<qlgva&VIG8BfyDzVGN;SNp$eq--m=}^6-j_a`F6a%Iv%|?j(U^i
z;FOySZi%%&cb3I`y1-0WJc66Yb>WAAzA8<&M?iYPf(qcod|=e)&_K6>=Xgm3=tNFM
zOS}*cD5$*dAVu-7RC`s`5n$j%k&ObqA&%AR*KIY8<(mnuE%gY&SFpvVX*Aoyw(V?5
z?@vTyweP|P$}EE$Oa<lsF*>+Eq0c_~NFNi_YnsgYn;X)#whw~bK&X4i){{h+)vw3A
zPc2ldCt2*PFwO*C*Z}l=)(+zcy9>jT68#r8aAtgMcqk1p{$+lFu%zt(@dP**Ix_wh
zZ`L%Ox3Z{#jW^M)2Yc;<(6j_8A)fW5Rb+7ZdQz9#7&%0{z?3dm4@S4S*uM?u$oW#|
zN`e<wpKW4o58RGosvKYV1dR9N6jHj#e{@}DaQ~*y0r^Da0tOdfik<osktjt|!=u8r
z(iqrT*uaxj#?_ct{?}`J&$vw%4dkwJ3A2pFG8tg7!+iDNs(`mYvthWsH2(4_K?{l1
zu2FMFG0z0TkgD&Q+#C*Nkorc_!|u<Pz_*<WDeAgg#_xVgE{{c9gMDMc=-X_)gaISF
zJyQ#GBWJI{u6*Yq;f!7Ehfexbk>)z->8WLn(0RdIT_Tcba2kk}7@8UI@D}q9c*0pY
z+(Tk;sV>||yF!{M6r~Sz5B`x+btU`Gt<HXfNy|vU(awo<G=JeChuG_W&>B{?62ytU
zKibNd)a)=gEn*ft?tG`gsEV&<$!%iFWyQ<iGVMs)I1C^s=c)_l;`MX#@xcMewFiYQ
zV<W|?dyy2A!&As7OHZ)|;Z&{}&zl!Uey#onZE%u8cAVM?k2qNdz10ZBQ-+-<!V>|Y
z&>Z@0|Jyd8K>cYQ4om(<3pIe1n(67j)91nm!8+m{KX&ChKu@$1tAoc1MX}#mC6kXy
zM?W&#XN)SlKk<u*MRc*rBq;tS=c7*YCSmolO5V`4chHlBt}7`o7kX_{uzlu-e`f9F
z#TCQz<eT=1-EmsXUD<@R{hTN(BHX-V^Q)}imIwSA0Z;w=5WwcYst6YApHo6}z)K`l
z-I6fwM7cPUY(OWqj#@s2>29i8@j<BYq3&kBC}u!oBDG}8*z>8J-xB5r7a+P?f)8H(
z{v!paa2^fY{6Ty%mi;3Ej|Ixb%jV8&?91y0l~R?YjYAL6#cDaMfC)?{*Aq?hv+E70
z-PAjshZQRjPm4^Laa&3+t1Nu**SqV$^&aw)LinJ4y7>ss<wUu2Pg~zvPN@}t=Aa@4
zC^P*ltIqO<tL=!-!*!+Ei2Ri*t6K`C**E`r`1hQB2l2%@WUyMmU{|sk@!dvj*guhk
zC_29%hTrJf+r>we0Aq0{bi2JT&+|QbC5|QnXHo>e0&g(^B@Bevt%DH=ij2)CNt72l
zu$8}hyBhz{)aEsLyPw+5uexXC4n4X4;}Cqp^C}^9p~xkK+iSijtAS4$AN~{uCM*l$
zte-q1q{*9q+?!N;f%{MdETQQ{RVqCP>pF_75Pe2=q1*z9J4VWxF^(`3A|5D?8}73(
zy;<vrzWdcO=_>>1<jlMcGzoU1jM;@#4iCixK64Zv?1yMYB?4|Urj1q385ciprX`hc
z)=DIt?+&f~6L`r5N}E)Mr!&J)$Z?eMiE>>t_Pl*)@LHUY{e{9R<pNXBZyK)J2+++z
zwJ_~D^L&5NuZ#z}rKoqEC-@-E(S8OKn0n$5Ewlvi`1^!U$!%wz!{V}g(icw;q3NVP
z9wFLOU%U=UdoIDXDx8#DD0Fr{QBgE63PWa!N!*(dyO;9}E!Q@OL?OtsB$7Vy&eUO8
zU_tT!VeZYtp=|%Z@w!!tkP6u=Z74z%SxPtBB<n=RRI=}lv2QKtPL}Q{S+g(0j9ms}
zONto#*v3{2GsbQPW6bXyx|h%Q_xnD-=Qy6{kFUdVbj7%?>pZV>dB2wT`}z*NZ>Nq=
z#}%8(<X0Pjl1_Kqf5H7xj~>;)G4ht{Y^0aAM@>t*G*{QG0q<~ABS&mDb)GpxP`iHi
zekzj@uj?g;NfeA*c}&+{)Qe`ndj3GgRdyba@uOVtofd76KhZf7amd}Z{m|aw2YoXS
zZ0~$52<Bxzt-qpozD=y}`AgY}mf7L^jlCv=)Zmdr_l%(gXpgLMPgyl1;N!F6(p-~#
zv(oecn;x@>1F9*0;ritdacI*5^@Uj&8dI}LRz7>R9#Vt*DB-8r(7E1qcb5YM{P63_
zo@sKkV$UhKoz?eguAxaG|AAPCD%@IK1HMVo;j#T~n?W2s(UkUGsMRsu-Ijf3?JkwI
zoST6v2;uz}7j!O$o5((RC|%txCi7T?IqRiA>qd7smok%hHXKv4|Gjph*KW;_wp1=`
zL9@;3z~c|kfgLf@p*I_U55CjPqetxNq@w(+Tce7{1Y&nK@}}%zYzP4*B%tc&k7c*S
z*jxJqm6hJmStTjfq!v1|A3JS4=6Iuz(^(C5LUk~2&WBq!XugN0Fts1nI+~jlL}?CG
zWB)XK#UEN>J@C`er*{VaNM+DtjiPzTzm1@=+%-4;s_Dlo=IrSVS#pBvZCPIIfMG~%
z*Ke1!uMg5byNM@uaqjA8<wD!?0aD`U*fqNV)wD;ObrSErIcQCZvL<b7YeW3C^V|tH
zi7P^a6A6YzT4TsLd3p}==qpUTP>!g(_nF`~ikbqxC3mgPlWI`8Rd-i~JSy$Y+wItU
zDKQw&w6`kI0i%bPVdadHV%>q2DfxP%KaEyX0aN<1BMd#SNY{cGcBuqh{3#ch(^NcO
zoYZg#lz?IESIH7}<+&ie+Zlo-_cK@akjYYQ9lyt)J(oRM<oeNDH-8$g*&8u$OvH$Z
z_gFxZUw`V4ya3EEg|eVM?ZEhA`%>*yc3u8PW3%9;ioiwEN~&WrUg7rH-1Eb$_1A$v
zXT*wX47qyA#0_<^zNG#wBgZ<N*S11ebWq|;g2Z74-Z(~p1J>XS3FpL&zFRJkw~0J_
z&WTNBDzTdue9qURp{Tg5P<P<zpG4wp25(@g=13;dj}Ytm+Kb%f?0GwMP9)`{V)J=<
za?xr+jSqJu%l)S?wHM~A_H40PP16>#N%~j29AU|aiac&#9?k5qV~^|m`=NXea9b{|
zFK5<HBY=0QMP$)_YNa}4z6bWvR>IFI5A`I(S^X@@*HKePjSrbc%gUW3IoiZK-dZ*5
zE7IwjrM8g=vWFXzJZdHUV!GYcLQh{`C6z;!>+v8hBi-2dMGq<tWy8eZS{1rm$v>34
zk!a07WbX>f_3iiwY%?8E>&bTbai&(qjA5&i?&Ws(4{SZg2PnxN9}VmA1%t*vM0Qr_
z95KE9MF=S!nl&Q;{c<bqfS%N35$h(JO4u^i6r0?q-LBKUO6&2>KYDYB^X}TK1Gyf?
zHA!qQ(3CijQMNAb3sq>#BonmVPBBQ{0YaR`(HH(8o(|$a=BITnNgWq2p;e{x){FCp
zR)s&Ovm$iz3WD#pU0wbuNS<d^!5&jrB)Az5nNL=c&SaS9zE6voLU04e{rs(`>MeKK
z(FfAub@}}mPG^58Qm(n6L+OOh<@t@|1H|V0(gPQHv2nYpdBphya`S!|Po{^3)9Szg
zsO^=+X72J4i>iZvat*<v{O>%}$vOwd9QELs12~+Vqi|K0Hl{lNV4KBxMg4uh&-X)P
zrA_9r^m~{4nuYjeBX(LubKY<ExGy`}Ls!puOMN7RKIzA00@*onbEWj+rx~@CD=AO(
z<2zsdtgLo6<bUjL7i?^dAM_q`$fo2LWOUWNh@c-*5xY$)Yuj7f`Lbo2)bXqzk@$NR
zsUpeXfw^d3DN<LZ9*^2-Wg^wklltmLu{!2dG`et{@nay})a5M(W!R)AY~uCKl~J-G
zsV3WisZj@&`h2)=S>T^f!bRYLna!oA{Q$|3YmB>!x_BK1L48olsedOwS?oWLRq^t>
ziBlXV4rQzus%G9hn|W^|FdCZ&pFLmw`)Y3LY-ZjYoJ<<N7)jN1>OlxT-WoZIVB{1u
zg&Q0##m9FWs~GyYNhXk;os6>z^(lyxQku~Z>aIv82zKIwWvREOyQsNEY9<9sarOA4
z2)?0)&>Oy1G)A2kG|*$0h4QuXTQ@{?vXH-JHO1KZYDi#nFH7J4N;#w9OnCnEuEtK(
zqQMwa(-BgE@D2D%Kozdj;JzK)o7^HR8>`;XkrCxKvH^rg{Ghy#4T5?s|55KkS3(Lo
zM~?1;&45l`a=I1o9)k8C2tVOoW}SEuUCC3Nfyowl@fbq!Q5w#27<oH`5iOYE$}}oK
zpGH@9S4A9elQ#}UUnMD@r7j<Ovk&TwPt-5b9djt~m;hs%io?<|9IYic_>2?W=HOzT
zlKvFuR9wYI`n?Oew&2_S_vAYc0tyA17Lm&EmQ0GBCiRgWfp}8`?jOLIjfdhP^uXU+
zC0#Z}eTRnKm*4J#R)pW?F4|hvegmtTrampl;NEz3urM|^s$-%6&iigB#Z=bh%|8D~
z?{fx!b{MPbn-QwO0;HB<c_boGgOu2A*UuTye!js48hi|``J|p(z`XoAVpVDaaaqx<
zxa!NN`?c+W&Hu_U4)QQg>1VB}Wf9PdmD|!6e?TD_7529VEz&n?ct%{jciB*E-?n+5
zwLx@J+}ef*(oW@D5?YN)a9@L{$Jta~Ldq&rUvd7iXj5ydN9e+sFIu=t-%|@cQE4W$
zo6wV0ofP}p_O`aWQIo>Mh+XGI8}wF%(xGc;;zvBW9F3IG`Qc<>P}e>&8bnu?ux#d5
z>LXU!uExpgcIew6EfUUt#dbCfn1_2Prp~<byd<_Y+KxJcUKw~>n9)Le%ZD`J`34r~
z@()%7M@XQVi0Y9`cuIX<L>e1)jXRWxaQbQNHOF`4(j>+4!DUm<K;E25vXp9MG9JO3
z-2Slq#}FY+B|>&U;%P1)qUK#^d7VB*M`VJM+|&OGU#~&ofIT0Ag(ZVtnw~c{IF`22
zoz9(qUS2-+hu6*W{znu{GiA!h&SE5_>G7pGhsVF`G{Bqltw;gd^dhIC!`RuzvSr(J
z1g~({)z2J<o1#DTcl$RaGl<6b;^0GQtPlm2O)rgXUXF;??3>k3d@YNNiV@2<o+D(6
z?oCN*m@lbDb&VPL_v6V2mC9xP6kAAH&@~j%6;-~5ZPQh7GFT+R7YDrw)#Ate^6xVC
zzn*gSvVbH7Hq>wJ@_P@BNvoI-b9<VVuW}8G4QyB2+#4~^m)Kh9+Igerk$R9`$#zaF
ztG;PfK(HvZ3fA2dqQ5EW=&S>gTT*D9tvCfa+Z3-;xt&#fPJ8tg9+$-4y|KUQ^r^I_
ziU%FBZJn?0{QlM!0yYt_jWmMAU}qbm_&j=MG@DzBKT<_dZ~9Ai`T0YmAs(#g3dC1o
z^L$=h9CnEp3XYf1-`!OooaL<L303O9MA|CNI{Nf)-+&q5ShoHK{MG-ELziYtsT2ZA
zokBf<fY^H4je@5bqh;lzQ-bfI`PE?7O9mBl;#Fv`>0xcg-IVztkIMBD;LFbP5Xd@~
zcyIFPfO~a6LNsP2c#@xy{b=;L0D>0qbIq$7>9aTG6RWY!yx4KwV+p<tF_X;FxxGiI
zn9!0DwRFZ*g7ZGuLW2+2Rci~+TVX1RW=76Gs-BJna#;o3aKa1^y~ENXr?Y9^muCVv
zK}A*CbP0-oa>ePAK4uc!Z&OXx(=y;*2om2nCOsV8RB9)vCZRIL%sxIgBpuyhg%-qc
zqe<9rzMmOdL~xPG3dV+@f;O9#7wi1!A3k#e+JPJMrBS_q4BBqjg>+}tL_+I4yw(48
ziIe)x3`@w^Y0>Vmm%10`;46x!Ti4^SCvy+$o>>aee=lyZF0@s9?265CzuogbhXG*j
z$s&NQOzBw@#}%R%SDo0HgA=&W$nUFCF?g4UIwQVY#dzC4fI(GqS|WdFH9f*-&&}kW
z+J~A?T~t$FL3uqqcLbC4;cV*|zhH;HYQ~^resL1n<KdkqeR9Gz6_F%Jz-1V{v16g$
zq^u|L(1q8mXm(!J$%XXDPct+EKDY9k^mro;rQsVcb&eDOd=Jml1|sj_Rni;kl%aW6
zsuo_)yN&He*5g~A-N(+M8*M5%zT>=?Ftg?Lq36_u;#I=Jr;k1&48AD(R?=j|wQ?<u
zep0&KbpNsT9ChaNepKG#tOu8Ejg8Ui1#c*-hkM;mO(*y@M^5Cr*sZDaq<dB8<Wn2!
zOrd$*oC0A|pF@*q(<9w!>7n7#MU2AELW~YV9y`<D8|S;HVHzH{QgESnbP1nT?PWE%
zFYS9BsXtJhoTVEAuWqFkPHX`828pb>Rnjep8%)*Ap{&Wr-9A?%+omz3#xC9hu1xpd
zxzf2R4wP(*Ae4)zV(!q5srVHIynxQNihHRgcmLq)R^-L5OBat|IIih?e*2?H3z<uH
z8n9Z(t#Zz?XO|0|>+*3|PG(}HNY2*vSPh-(LWA~0qVNrRY{VWSboRptU3#|z<m#%L
zi=-ccoZRE~xjKoqZYYBvZksR7u6(Rj`D%akvr-jW%N+c%EY`&G@=KTAPvvcQU7^7l
ze=N!m4ra@6l{+|71@{jVR~J4hWm(ddg?pY#%%An$^l4gFtoR{_U6(Cp`cOxj)}7v7
zMdMw>@4FFNL(sdgy?<`OeCu$#zitm5)1voEYpX`0w$Ac&$(y&K!1V%Vm+0$xJ}%#z
zrzrx2h6O9WqFx7>KF_q3RoY~WIkRZCg_?JS(5IH7;P66k%)I>D3r44JLaxQb%Ge9A
z3zd7^{~RjPp{ixRk8#Tj8oGYnnq8iiyxcrfgL>w>dfx86Q=qFpJ{fmQHdp!DMb2WC
zS0<t8Na-$WfVQ8pr?Nw`=tPqxRc+M_QC&yMy<Ka^E>u%9R)(}*)d2nF&mDHkz3^dp
zbPL(pZh*V(wd7pXOd}}QRunPwz|OES;fD=~ki_|8!u{RFdyb4M``p$g*`Ciax4+ex
z_u~}@QR-}KT$5<~A2#vsFP^FG6NLH?_+CT~H4E^C!_|(2fMM56)iVixwwY9SAX#E$
zL+|ov&G$``@?CLqrS6_e3#Pwg1*o-^I%}P0$umu^7bu2Fk{(KApj(##AOXD>Bn!6R
zsFW~IoUA(gkNdL{@Ym66nwL|%aBUDxA}9GJ);QQcgKyR|w2N=4%1Lt2soK>znI@Td
zHoV*3lm(IjqPS9F;7{Q~`LI`3EWMJqiT1VWv23=gc`cJaik3>?G=T6~NcnM9Ko!rn
zdnb;Oql%0<|Kvklv<f-<<_6(sb6_uOe#OqI^DNxz1IcazZe&!q-i-+){Mb~{ysGfo
z!8Z562=2j4+#44bEnr18#MKRiFqH&<`aEAQoQUIqu?PYEMfN%5wU(^#iZ65(u!;d$
zLI9v*Yu^qLs|&g2J*Ugwb+$~5X?|IlixB7)E7xnvNL7?4Lf39h8~U_uxZ|bG<=pw5
z<{S0Z8}O%EMDkG2yv>WXhZ^HJ(~6MP>*vwD_L0)Ph0<v0D^O#flzGJ&kDlxYM9__d
z8#G*8c%+JZehoDQjiXrKhP2);?ieRdxt-K6ZhaT}OfJ#sqL+CM?m;YQ#Lg}3s*l?_
z7&6Cd1ltN|#6DPZXR&qip|1Y278ilIDe~-QN^*bzL{$v3;i)0%Z%X|65UjF*OJ)bw
zkZWS<(UUm}Qj<GXpL~#Y4IEl~@-yMIOBW*gPsqHX3L3vRBh@+NK5)az6enXiQnM>1
z;TK>ubHgRgcbauVW1(+yg=ZA+LyRrX_;FQ26{Y&TMaLX!eASQz8II{x<__;QKmZ^f
zh3AFcA-UTb22=b9O5)A%XhteB$4<0HcrjL|q_=o{Lu6R`WBzc{oWc<j{O5f@Z+L!)
zM-ywE8f@bCKdG);?|TKmz>9syZv;0jXs^mNSw^I6)!^A~#Y~4Pm-B?IPTOb`qstI2
zU_!4d{q4_%F9O<&hgi3cX5IQ=Ul96@o1ViVYj6A4yIPz`eV~Bd#!o^Z)Y$u<r*QTY
zADri|abHJ{;9ZgER{B8O?(+1|7-a>cC(eI?ezk6)t5ZwLK0~qPIzsArZx+ZOOHdq;
z-2(YW{2WvtUERCYks~(eyNO>fGRKfi`HL{z!{x$&fyYr~uHGh-gtR~AOQHTyKm-7Y
z+g=Cuu><tos1)O$NGeryqY6mQHXr-}#zr6W<2nMI3Y$@5$aG?G=HDn^8qGraYx|c)
z_{x@B&8}yATHv);`1%2i1~#nl3m*Na6Xr{GX=gy%Njaz=0k`#ceCs6tb>$#-&sM4X
zt&Je0n`w^WXUgVTCEqTy8+e-8B9e`IX3H#LOFXzmO7YE9h3j69T8Q(g<=CyJJ;V35
zke?k0vqdm>LD}#JsyVUj3SZ$yal|LL-;2R*26mc!9dkI+#eD$V+ncLiW)Z*tFt*HL
z;3?1T%U~+)C=ni$7Ft~P9!}73x!#|7t?<0Xy}|NPlP)W6BS>8|Ki%%h3T@`e8BS-!
z&DH18EB1eR0oM5izye#og09?U?oGG{H^tsvwdcPlFu7vV)*|>*xoGgI%?=+_mGOH}
ztzu_V9poU^Vg(bPK<!MLFD>=Y^Nio`Uj1~vZx3#)UT_^#)>I#H=YBgr7(~6`wzk%O
zdH07Xk!t#^CFOy%gHcuOZV*E_vKFenkU4tvoq@32s(15>_n^C@rF-zhqeV0uTyS%v
z)lKQ=b5xAKOkzg{pNTQlg&2%ZDMQx3yTg7+Wma#>>wM0jnZ&|xiFjVD2bW}}$(rdC
zeNV&5?CP)SDCuT>p=o8Pn%VhE)Z3SMhEs+?pSdV?EifWUDer*v*!mwdLm!I}W8AS&
z^k#A${s49op=SGz;9wsnPuT~%KiJf3|CWW;#qZC$Idv{6qsNbM1p>HUvIW=ig#Q;@
z->R|p@i{Lx^<kuN#sgdQ#T=x2=JIbwm+*vv+&h+O1LT`$VEkUYe6Px$(xKF{)_VHH
zyq&LcznJuE9{_NpFdVaE4e8ZI^v%&!l*VOEOhU?FMtVeAm+{lCAI&Az$-J<h#1(_&
zLH&Rs3_Dv&z^nTc8TF9hKGYL3RW-a4YKfT3VlJsP;MFPGU|Vx@?)3z5YnMm-<=7|t
zPL~N#Y8PtV0>w4ZWcuYMw`<%~G4&IVm)l4)*UZ3f_Eq^B-q6wm>UXR1dkcxC{3Or1
z=3=Zt-KMF9XO6e1R>I1WC#zC5xKW3_Fwri(-%q($Ks~iPl!DQ9>-U&W@}ncWNBkXw
zXL1R|BXeDd&DnoJbW{(~=+l3T(Y+IB+ZbKqC0Iaf9dtmp&S6Ay+_~^0OGEWsFypUN
zPx>m3GzU@h=1<6{%*mk}HRUm{PTxev(;}t|vfTGKEX?UtA5u97s0ksd)qLdnpT^hk
z^vSNF?x(Bx#VleHh4M`;EQcssHBivebmR_7uI5{?d@QN(50O|Q(+~9(-|$_k_)^47
zbH0sP#fRQqz8T)m0GVs`vw-;v=S_OTxMu2xpledGui5)s`S)B~#)*P{qGQM+WQKk_
z*!anRe0%u?fKr6&p~>s*opKvJ#Z2|y!1(<>)dBQA(vzW9BazecAFICxagWPPNFBk5
zh`4IlJl+nT1VCy>M$T1|V=4b>R<$<^Hn2HTzV&rZ(7(OvNeO+te1|mny~w!?sS(P(
z`78i67;Gx4!#=Yl%8errd_~R=lN$+gf!1t;-^^CSt}DVIW8NE8sIb)++?ArlS+O2B
zKm9eF1@WJl`%?3u2C|qBz1wSm54)$LhF=48N-^M{m)_Om8LRKa!gUiHzkFX4p_ky_
z-c|b0>)$%fFW;=EOKKOQfZ*Sa75o<>>MWdf@VA!Ml~hkP^z%zds0bw)I-fK2{25n6
z%>Em1xBr8;dx-ZBV$~gH1^rHfr8kR$NaT*3{&+lU2+2Vm5X8n=t#m2*m49QQZuMSW
zi{K1p9q~>`vAKo+oq)^eN}9x0Ty%vKOtppyN=zJ5KxgNMJ)|-hVPUg+^FGV1y5r*^
zU?(e9=0C4u`f!<3vlHcEyj1n8Afmg_e?spY_FK>!{%uw{QZIBT55*pW#ogL2D|{vE
zm|r8=2FGT<M^8;oP^~Ta`4{(h*4@%^-6)R<`oB-<9t0<x{Vsj^QttCt!47x67-Hni
zDwg*Ln1k~L{QG>nEuL$do#x6T&0tSP<I3p2%{QdzVgrUGSU2vqvbAw-wPE-F=)+N6
zDH5pp`C*usf>^P`51&F9W>*s}r4}|S)ou`a)}R-1v*X8zz6An<orVgP)_8=G2T$v{
zb^S&YJ{JAHqx}Gg0==GZV{X!aVD7=AS61T&J87G8qAThyp#%`ApPQNsq=#+NyI|2n
zxbn*LtTWtYRnHzycrD}G`q9P9n5MfYJJO*`-bdTLovE)?kB{257bZO<`x*<v5Hu{{
zLq|>P_iVasR;n_KT=8-O{X3d3=_o1e&Aoo7_sF?b6;ROr+SZdFMSw6Us5~>O7Y~rU
z2iKY6g&$4u#HaH;;#;xn4m8+s<J`e4R0gx~d&Tik_e-ki4G#;?Ih`bCzNayd75yiI
zz!Z_1X)0f~btIit%4CE8Bv|Na_CFr@(M3#k4rTw2OCzxT0pP3cJ;$r<ca4szFZ*q5
z>Z;xJ>ANmDC@y`@pj$2X?Sd7)LN@tz$&(*%RY-yb#NMIEQ_;H4y=&%`*`cM;SgweH
z6+nS}WF+Y)ekLL+th@Kxyuw_h5z49I`=?!9?+7>uMl$Mwa*8;40~7(n97%3ZNyAco
zv=Q=~06JCc*si0AqZ={GFMN&8yQ?Xf#*HY*r>3zew*h+Zh&lg|)XbD`WXqhADiN6+
zAR*FP9)5J?Y)+K^u@(Ih%YCBSqG2DjwB0XVLWc37ZOVb%cA*pV@-qFKoRzhbaNO~F
zm(_tbVz%Ii8b)eh{0BQ*dTTOG?YDk*#tJUbYqPpBa9Y#Anu%Tq<&6U@Da_F1mB0W-
z{O0D}0U~pAnucp)yrsL*nAD{u?6;aJnAn7@`OKL$a`N(^#^$gLvF5bE3QU;US!1?v
zfHf{`rNP>HEDbq!j8=Q<&-ROC8^wBj^(I&^hpr`8Z${sWVv*qpzIUgjOhU@b^r1q0
z6E|DGW%E7JBDzy}qX{RQq)470H>#3SAQt6@mczsuQr8&0L=4&{=*+6@paxs|z5lnn
zJx-1Th+}7%D<aV&^6WJ_mCq#;SA3xW?aSag2;W~+AMMh2$y*-MY7$|pXQtJ8zb~^o
zBXBF4-@WKrUkHFo3~krV0qrOskF(laupDFV05CiyvFAp4(`I#;)K8LmdE~reG;wP2
z?W2zDRMt1IzlziRPhCvknlW>&MRuJcTQgN&vysPieUVX9%4B%c1C`{tUZ9!XFebGo
zqrKFTZdzj=P2DGion<aT5O|K)Z)->p>)%+Gs(>>9UEY(k!?t^JO`WK)imdgyEi3(F
zKZpGbH12833mK)n4u5aLIbCw?#s|4lL*9+*)cMlWpl|hj=d5GyucfBCq1YSlf`QP#
z`K{>6f@EUJ^VeH2UyJUwh4QVT%d%wbVsyWiX#cUH%vWt6PufgT8&Zo3BT*@M5Z8!$
z2`v~COzD#z(c+Q)v^fV-6-nh97c4$o`i2h+;rsqV_*P#9F{ZIS`Xwq~_Z#7MuAPG)
zq1l!r^(qO6REo8~@wXx3Z}&B+p$5t|0nDEXDg?7Nu!i?Fvy3+{N!;+QiqLJT_X7rU
z<m|!v3XWUF)*tq`k3R{d%=hfiq%29Xv<VLl@ADSEX3wk}@a;)yK%Z<`OZ2D(D8Wsi
zprEkciVHMZgz2o~wJbh_88TLqg~5>32<FG~N4%qqmJ;zb(`avGqsAHd@<j%ubN>SY
zW+o$(%<Ab{2d&jtUk;Z=D?a~|v1-zjU6tkD$-T!u6n(AP<lMYrHz8V3Me7vetu(VA
z39>ooQ#Og^Au_`;ntzgbU;p_!TCgBuY8mdGliYi%b;-C_OHFWM*pzm3;^3b^(;|fC
zVmOrjoIasWOx$#GaB=OqJoGIQ5)H%w_VLQ^V?${K6t6#$LstPaqev6IgHKXe&Zx&<
zVQ3$?I-vb>#XKYXnPRIZ)2DF-gpWY$%B6oZh)TSR{c@%o&=Uue)9!G=T5F3pPcyI9
z;|&>OL?WEggr;L$`ym2>JuWuDBBT)59cU99$eMtw`e6G#rZ=s;v}BKd?{dvXAAO(^
zgz2ZD8a*)!`k|<Y?G79;OO-+A2k$9b@WcJtPmGiC_ir8>8q>_2Ij!f=`?6UMs<>3)
z2Sj~2vf|<bOVqdO2k%)C&Xc|oNnQC8$9bKt7k&WSBGRcgKyix~x+2vU%RKZW@qPh$
zUK7Yg0TMx1oFqQ`cr1MSbN*79gdZhoY3lU*)_@|tE#2R6%pn%c(*GjkC#vp$_yT={
z5y-W>ac4cgt0p$hBG9B$(K{xc^JHsK84;aS!zwKLv4Pb>9`QV7scLOY6{2(=&^bBf
zj9I%$j_8XLHNTa;kECKqxq8|zjec=dCHu(LL2;%*02GLe_tJL$t=oIPfJ#o~^7;wo
z@P`bw;(L~|iEjp;z74HinTco0haw_9*a^vLA<4mDFg{-q-rVgVe5@Nz4MSySA~u}A
zS`J&5m;J~N%hfN;tQYWC&^aiqkaWWiG>)E>R`5cOQhT~7)vT1ZAdI!%v@ko7SNX_d
zU<C6WMxUz@Q*=AWdRi6=V7dG(Ha#Jz#AkfO<w0G3Y?FC#uxMgj*eVOEufM9tTSQ(-
ze-im2*0*oGkkW)~cnzSt?~D6(!tGb*(y_9R({&SKs`<LHH4w|sj?RWn)UUUQUn`;-
z@O#KNrASIjqYj539$A@*1Wodc9MbR%bYeZChMEUX68F?JoWBpI#V4cHG8;T2z`K^F
zg+JuS^7QdyeeT%Y6_PZU-sN=4&uIjH_}zheJZv-P%a=wv4~)6;KAm>_zRX|^^TQU{
z+#YqHYQHwcWcFc?@ipl>e3XLw(nGLG9RgeD8J?@_hQ>=d*xdQjIt&MpiOq1jzFcDt
zwKik<FVNTl?I0f+$9@SzPhX&98R7HQD76lpFxZAN$1P9^dAw!_Y@z+9U02teCidv`
zvto$tOY+>xPxd(aLy;21=kC4hVB_N%<6!iPMk$pjN!NP6(0Qh`xu=sYuv6?)5(A*6
zEYZE?No0p@E?^J&;s$50(guDB7)+{aEHTTme6`%#4#8-r4<4f#$!P^u;DQUvevEPr
z%BLhUO!%8h7x%`EWEJ_Jtjx$!khLS!3CNV0H*O5F%u@@Ci@h7K(I@=;*HvBGA}^)q
zMLs|(T@$d77%6Ib$`9wTzyWUtaoiYTVt{TR@Ike@e0V;bc83!-SUPG_P&QVRMDbbJ
zKY@!WU#~YCbD%jAc9&yOJ@;IZf1q~`tce6mpqYvdc;;%fo%iZG9+vOFIbEc8HTnCS
z!k96KV@ux>a4S6PX#bF{YVR&U`VcbgGbkJ6VC1s>0oFW=s0{1X0mXDjW81r6(NoTq
zm(L-;>VRatdDPV-QCvh;k<<B(eK&8+q7C+AS+s$xl*0wVOELAii_?N9h+9g2a)XU~
z{~V~dLGFjG=9=d?QMhD$1HZts&E4~2R?I`^<k>595(TiJdvhZZDzWWuftpRvZxDJ#
zG6d6|b{$#6Jlsv(hqpVK)VU;|N6zO19KxAm1A-p^+ez=rQ_-}zGEE#4UW?So=O2f=
zzwgysh<#KT`)Glwzg{$4(QvmN&7_N9Hz(8RMmuIVUZ6WSVXbz%1DJ(AyBs!?*EMRA
z-9QNeuu|GA*=V?QC-{7f(r|L+He*dV@Gc@gF6rvkEy9<6a$}nWxR5P?rP^JTPYDZ$
zD|CO|BBUR1XPQsS4Ox`1g*{)*o&A1(Y+(t5t$~ZsdcY-rXiFLmkabB?iIPyrn7O9|
zQYAdx9xbiUm#XS_{p(a>Pk(61E7K;viTzN$@P69p{Q)QSZ+5;2M1wx08EE|K_UxBq
z4Ee8fq$K%=p#Kz8zdbN9EN<StVxu!3t>9i6-R<U%IiAO1g1fpVQ#|L}{d5fZY+0BO
z@D1jle(?=uydRT_$ts0y%780Sci!d-MDX_&{3u<{CsK+_Z|5X$oWJj*K7u!0ma4SJ
z$1JErX@ROQs3On+gR?@$7r?hls$Lf2j>ti}A#;R@4p4Sl4lG{fgdx@i5b<G;7){Ac
zR82f>{`it!IAiAC1*DW;j12XCYX=(a$+9i$BWYm<_co&NBc7Wm=H_Z~fI@M0<S<)2
zYvXl40#-NFmXtr11*H6knvXR{EV<5p>d`=2+Uyt`TCdiICXxq$X{=Wvf9N~e%(Z1;
zPTiQ}R@FsN=_u)RaS6q^yMK&+F0$w+OVM9S`0@#I<WNZxBNT055&ZLo=uM3C8`4pQ
z>uT&oR>eg9%6j~3)c8Kpm5Bbq6wx^DudW+({ka!+q?uVka{*~v%tIM(dQzB_%!%f~
z7fbz=q}~YwHcsc2Hd6XvT2*zaPL^2{CO*g3^36_))*kl*o|TzVNojy;X=YYm7<aeU
zu357*AbbOr7Bt8j%yC7&r?`1zZ^rrpPexQv1!E!Y&p<brYl{kTQ-kRkT0RWG3h0V?
z3E=p7+lHt-kNlC%mkGdu7vMH6f@v0Y_d(nl=`!8X*KO+fJ%O`G&sS}q)mu9Pe#1|B
zTZX@K{4W{aa7kE6_WH*&YJ&GMei}JxVm7N6PNaDnj99-z6jK&M5(hh0I;*f;k>92~
zW&{K~I_49eID8mL8#d}@nB5%Nqf;vG1`<O}Y^1d*{%k$)0?eODI{g-(PrFZ5&CcqJ
zHVMQqXA-qnbBR%_(#6TX-KhB<Iavq5Y{U&NY=Gex#|=wX98r4K3)iPPh{sHeN(L7I
zV}T&d<e0P?O1GN}MtgMMhlN2{QsK&5%jWNny|brVezJ@J-QKWPheZg453n9Lr4lv`
znT#yhvUrUwk9^>g4LWD<25TnBEH88=cQ3Iosn#&lHkWXLJ<L`OTc%5CJ)}5KfWB_E
zrngQJ51czg7Vl=XBrh{JCtTLun_sc~_qCBg_&WA&u<mM)=3T2XheVv!&Z=Xj>)S8<
zR1qJOt{=O1kC<+s0*2-e8GGvXo73EZZ|DN5AT5j*h8Z~DtijVpnUYd^6ku_gsM2(D
zOnP2>ZT2EK{f-?|ejsX3P}55nOs9T+ynIQ(R;bH#z-d_se8~DgK2%@f%s&|;D$;Yc
zaa{BC4@tri3r-q%JEF_`k$|mt3Iw$QvQ1p?SpM5R5Nq`CK`wM^1o!WQTbJ1(`*Lev
zW&P)WiWy-0#<p)~I6ZY1!VeyQ2k<9)P@D`d1`qhEXCB1W48HA#CXN6x5+f?s?hHw$
zhLkUN&yIhb@QMH@2r6g!9fm)K#@JCVYvQYCk5N2%>@-xe?$_g$j{5mKY^dn~%yW+9
zZm(YvzGT8b$JcxYf1gy${azltc?z6io4SC@V=i2KT|H(@xG4Bh9i`hcCD?J5sl&Gq
zc1?42y?50&qK#sMkChuXx?I6LKXxQVk3}s6wXt4V<)X{d@ZmvjMekfZ?T&&R#BlGl
zF$WJQOu~PF64Bq4zV!~vkA~o%+WUsr$qt~2m3yN5&y-bOi25BsiK<3GYLNS)JSsz$
z&z0s`ZqX83i`}$&IVW5tf;%>OlZ!RQ*C<u9!;z}Y`*XQs&D3vX40FxOP6L~vV`*qp
zC?^^#F-P?|*nFx>ELk1Tb7J<rb}~*FYP2e6pdY;2zO+EUxN>G1NRp0$cFV2KwCp=y
z-b0BqzVe;T;d-QOo7zpICmL~gl`2e$3SFsUy$0(_m+bLf&iEjzuQuXfGJe7=|NU(0
zY^y&l-(Lba1QNO$&AwRHoF^&YgWl-uqGrl~cV<mxqpJab@j4LfJ#)Nqz)gH9(29FZ
zjBh^_cbil&I1u=|&O)?vaVagorsC`;pd-=5-j6tWp>Aq0w7uf6{~_^5o&6>8U*Pvq
z^c+4)!@ITJ@!|Kw!=q{>ueJ#<`!LjWcy3v8saN_Bgk7f3rE>Hx^(erWL>fQDLf_R(
z$KU+vW_gDWI9=i4?rR>2Thl-^6eNm%owE1(%E*xY4^~0dLWpiCBO8jA^n*ek+ND0S
z@^TRF$<WtFbtk+wlWp!$hWhTZ^8EJdJ>~0_HE%Uo?-C|4mysH<s-Ca&9v&i$Rf0fp
zcvbzf9YzS@cn|1amh*U`{CnM8$LxcfHVg~=<++1NhY=SQQqqRy;9)UIl~b`lMw;9&
zO!0RfX>L47of9$JX>e@}IT9eTe{=Rt^#Ox$%2BLoT2(-s@Xt+ka*0hHIOSS=CyFk;
z<{u%3pLBQBNJ~>GXzNfj+nk|BOglWus$vdXTp0Wuh0Rji)Kt-YXqlM{m-$a?U3H-1
zg<x6h{)4!{i%+%))hX3&3L(S=%BFI|xj*fdSXWE4`(+Ci2f!i7W*oxw6=>5Y5akz~
zYHkY|0?Q`oCd!SkQ>3opO4A7wY_@Fzx$eBtb$_F>z@oEv{eV;8z)x2{(_qIA_2Fwi
zrv2Hb0*n?6TGh(KRL5g)dCsG@g<>F$@PsvgQOAUGNOyUR+I@x~Y$)+S8Bw(!sX^y7
z8y8*U{$DW&-}nB-AWSKHeQ5unc(cqEHTbw*eb%i9%lF%z%kAH#D4X=~QNNM2?(~Q~
zZ<{m9jJ~UovRqO1oq7F}fH}~|ZMF%|Y<m1G9<5AzzM*S}XXN*OhZ}Xyl&}d-d><K!
ztfWx@23EK(W{lFNt}g<Ikf$NefBa!*mC5#O1#X@TnLyF?oZgLkVl-^1Vk#7FsNg4R
zP-g!$xs$Wk`LV&pipAvWI6zUzkODCdQ{#!sKK4}@R8rtct37!VDhzWAQda4WCBGPi
zzMgi4XRq4zid|;~p=Pv{T$Cn&%P8p^6$XO?!saX7Ay0^W6Cjp*x+(*|1eg<t-$!ya
zyIzYn&v0isgv!eT6@YZ{w?+R*lAB*Mp~eDgJwoJq5_oyRG&{?LHFysGtm0Vlft~V;
z0m6}ex0Xe@Trpt<@4W|h$<KdaGy7U`z)Z*R_Ncf5M#Xiz$FK2Wl_|y9keB1Y%ukKg
zYda4s%B}y1KaBn2UPh>uq1q&sYV*cuergC_6!@B@=wQ?ni;7$O{ZgfUWbBI2+UloA
zqLoFC4@1#hy)o%JbRWysV9lLm<-H?T+|y#-ts}O$!PT~NeF!hW@!kVu4+v7f7D5gt
z(dwAqY(pb={P=~G%WQt#{g}nvEf7OFk&w!Y*LrM+%?B0~=DbT!-tc6$>(zuwi$zLr
zRwhSd9ZS9k9GQGcIA)bFrm__}sT^SGtMhenX`K#-_4O|{0ETT+^R_X^{buc<XHOxA
zvAw1RpAB-6>VkYr3!)X@2g@}!Gd3^)&J=31X`e=5N^<Hwu(P*3{uAx*RfI>I0^Ogp
zdKb!OWaGVZo^Q$JsZ9~Zjk7GPf8NO6dZt>DRn3&Wy;;*-i0Y(_$7PSGrS+Pc#MQHX
zem%cdwE7j5s(ZjM>`Ynv)RJ5r+WkUV$^-L_w>}?H6J4Mq@P38!pU3i2ZM=CYxH!As
z3xu4FL(kRtJlySNR^9F@>5NEoz<i<VR}q7jrzSU#&#|!n^C*@~9|ZqLrvEhbJWFxu
zB&PRe|4=4l9AqUoOa5eG>OWs)=vk1%d-H}AUieU(0zWiV>dba=RF!Q#n-@&q6E`A-
zm#BLuh>K0`t8KiG&C(DO#XU=9>*@eb4gP_9b~ydlR6<m23UI<QvT(hJ#oIbMh!_&j
z<tU!39?;Wohyn7DHI1Atb)@|%OX07_3rXZj(F#bpIRf?qzy~DGzoMp;l9u#zaYu%R
zv0^d_$pM%n<lTDw$CaTHAouR^;|HnBTMGZ)JeI<bVDu$tl%Mr;n%(pmFFuf$#0F;c
zAS0jp=NR(wRQ>*#@bnK@p_cp)?3bddoWIvdIEQ6yYrcno5OwSy3Sgy(EGLu~`??yi
z44YZKo2ua9$y*4GnJ9b0M)d~RKvvSc;%W)oSTEfFm(l+Li-w!+$n%c;;yx`GNCITN
z%_V^^C2Xz3BW-^C&$^45HIkIdnjQ-!znIy4Vn+i1q`I92u`=@9>fkhFdR=JPVk1z6
z6&tm(JpBU|qwmz@QGtYslb6xL4oRPZXs_;{bWvkY#GqHF!1%u1vcZaZ;T9bL;rM#*
zFE7A0HSn8KYdF^4New?){{8BlepWCVZCRH;Is3-nDQRq=V$mefgs*M4t?<rSm(%K;
zz!AqBiKY@p>;oF`NRZ}P9bP~@B;HvbE=w);T+Y>wI>;gdMw_l})1Q<w%^K2!QNR-m
zz;gt*mHUd+ZRNh1rQB;f$7mm`IJ@@|j*ABBBYtZSuE<@Tt&A*f$jR$?JZi*&%!O6m
zLVjR*0}jS_Sk-t!ccX0rL4nJRGM3?=A?cFxq@&QJ7cL6O!JlFX3)VEsR_~pDpxMOO
zC(3gQao^=d7S4CM<YBPGK77uxEeb%s@&$ZzP&<k~=#A4X@S!p<X86;}<G`~*ldmpq
zSQyH5_tCy=)IXdf#G3LPYWCHAYA`L6pFA$t_(-mCoSC+4Wq6v0y^B>W<0Bi(2B1Yg
z)QCFf4_$p!Q>)>GmKywVB-mgJ6+hM#5GGFEvfL8qcfik26iqczQyKDp2Y|r8cAT+o
ztdF4E*F&b=(hf^eR!$*e+Ad9ssQ%+6Fsfcv|L-t9s@~3b!uc1*cP7+4&6VUl5quX1
zUIKlCNBk7!$sCUiX}wtD_BTNc^d{+&3RI2b+!YXURDQ?K>9dERIXYd8TwlllgwI9L
zgVDZ<iS}hx*F|$sy%#aJTFGa|&lKOkM5I;ioO5YX<9td|t{sUk14Cf59@2F9Jj=9y
zk+z}kbT6%=Rpp3tDjNGA(to@P;?h5p_migf`-ZmQ<UJ(<lGPglk%|SQCd|BuMA`DT
z{1J!Gy%nf@ONg^tS95C>YIk`aW7e=ZgV!i2{?@8J?(MYKRaACy1MR+;)i<(-eaWP9
z_(F-!SH6FNbK6hOyyfQ`K=ku(aQ;W<&G~9em6B^+I|YB4_$Qc$=Ls%G^T3T+siVaw
zaBzZtDY<6W4l3h)?(5;1s=8U1EHzG$nDBKZ+Ji6>gnMJ?QM!Cm6Tl0fNAjt1zz!>l
z>;CYnw(vkdh<@db(?T3T0XQ%o{Qi5`J+0;acO>q++`1(Xn$Z+BU~OkK6mzaQ#E`ze
zi~zXJIxL+14tCeR!x^V7Z@r&8jp+WdKRM)YQvlS$4}KYA5r;1ylM^IFFpmEv!xIg=
zQfKk3m^_#9@<#%rSml0B@s(R2eYi~^ol9p1S^j`TJV7n@$7(7FG_L&yTM;=JXhnrb
z0(h)4Y2^fL{P4?3$pLB4(DN&%OSHcw{ol&|mh|zbNH5?Z&}?H+@@(o{)YuvEoe^~5
zFL0y9Ela(esL?K*?}CpZ?_mUOn3xI}cF-#<XXD-*!xp7g_Q$e|1gzfM?cPzP4`6aF
zXQ1JR^tl1AdF;2{mEe;*Ry9{&fvTgu%BDFY3k_lNI|LNz+aHr^T@&hR^r*bWLbFw1
zQ?@mKs3R~ZfGB+!9VnJC*kPQvESNSZ^1kW}n@jXu0NZ^1$YEHKoqu_z`V?BBX{#JT
zJK*x&sH$%!Z@K8*){cZf&6G>bAZVi(QukMxVz;j(<cZpte@T^Kv%(EEdaNp>vo7e$
za;hYdtsh$S*XfHIojw0VKB+T&Y(>5)Onx3D3SQhH3wDvA3K`%0cElBS1{{-D=yeSI
zVGtN)<4pk8rPuLgAj^L@L}sTov?~SKsIuxc?3h2kH9uH~AO8os0RDfa|0)kH0yhs!
zoAYhB?_=-j#%jqZJ4;%3X>VvQp^B!EEof2H%Fq&!BnjT<E4*c)-mxQ}<)Cx9qy4_x
zdP=1#mwt+^``@;ChtTa81O}M^t0hq9LFoKb*-y>11yo~ZiN37M*q#LcdO2{|`BO+N
z6e=&==<$A!lNJkmZ(ZQksl+Y6ddE&U>gzFJpOLNsGl2ev7)XZpT<cf&hSD@5iw6H<
z!)~)reyyxd+OnHx;Z~kMrDt>ym$U`=0$v;a-&5>w{2wXyIStMZOkUAOLriP$aQWtB
z(WnW_%Wob~KBf#~EDJx}yC|~e2>G`S{`+4Txm%ssqM>tj!1&0N<v9&&ap4(3mcSJN
z>h9WVYC{^X%|6vVk>b*e1Cp>WSU(~@&5VFdqs{0R&L2R;!vjt&`<~$NfHuO;C|X+a
zbehRmzQnFlox2|IMUdGePZyVbLM*%qcf!4si;;TUZg!hbbywhs6~ER)R-9Li%M0!B
z!5=s9@VfymA{$~9ZkA^rQN1MfNma*F0Vp@33g!2vE5ag_H~nwxJYSM=YPl=(t{{-u
z&hbc?1IC~VU)}8;u~9DVD_0~mc-61?ODT34emET({8YUcn<S2k<f85$kkOFj4`;nG
zSV*)Zt4pPZ2Q;!uejB@Dk9cg$-$wpG{_di9eaPj-s@M4GPCaT65{<?aI!m~}Ckq;0
z=9A4ppZV|6>x>mJUYEd3c$(fDf90qAP)mOw6CQ`?sL7)@8(P0dJg)idk{b)(IQzE(
zE}O^ls)yyXtbL^}&N^!gvHnbKW*ZWv(EZ@!ADBPWLwVkMRM6N35RA1dqmYD@c;Nfe
zLOy?bWkn=vL_X9ll9lc<$nwueL%00%{zE@<{iSy$fYMz@g!kUr043medqq4eK{ovx
z$O5<vfUNh>&P1=_wD$oKAjJ>c-eeZ=NnPoR+WYOa5ir}dECIrayZSA_4{2|NGgPr-
z&n|7iAG1R8uS$}foBiy<Z<9_zHj5#LuyL}nD;ky;-4|xpXgpt#WYQYMVDi@V%R3tH
z->(DrxGKyF+Wl64HW9&NAoI{t`+cu}b}lmN`O^^cO#@VS8Fde&`da6Zw^qfo9{5oZ
zyR`vfSE!>rxBWs|iv@gS%$=g=4Ff*)ZkJs@n9BB6l~TD6bc@?-X5Fe~86v<2>6a2&
zx?$^|$Miv6VNLfSZm>RwsAdqq<M^uU_xvM66EvWT>ZEqX{Vd-l$V%s$DXO-OI4oq0
zB6aEAnm)Pddd!ge7NF4=oh&qJt`5+s{Gw0_^%ojtXZstC?l%1!jZ&zebh(c5*?edI
zvCwwz$dkVzDKWc|LIPIvB{A$O{IGT9Vz7GeV%ka}mR!;KUj%425!MD^%QFA{9DA<l
z5|7{e4miby_rhAWG;@J{ApBExE%0FUD4dmqu+U~T-;&+HV7A)0OW?+>HG~x&n8F=j
z{cQ^8waNI1{Bo<}js3%~1V5-+q>qx$Zyq>B#jMApKGar#&@jNZGK<r1z}0iIoSfl7
ze(E|YB}kBKtgB02B}6qPB$N)cwSXU%NyAu6gt<7mI0+~#LE>Bh6F-Xd%jG-r`XY$h
zzB4im|BGsj#=VfH#{N+edQNOGph~66``!ly*l6Nl5Vz0i75|5~c$}AvP+O{TwEmWA
ze2%!xb8M@7jk=v2Yw_8b^Q3-prWN;`SViC_W1t=nz>;e~Qrbt*qn5*BqHBH>IZ-V9
zg>mook*|5=ac$O67}y>PFa7h9eOMxCzy&bR_l`Lv)3OQ8YM}PC?a5W4f2DS_O~|4d
zIk^>^7RKk>A3%c-?%$P9%mjJhilfEz+Vzkdx*9r92aF%y8p+f8?DLm6d_`i6%&Drg
zT)14A<Xe{WPoFFnbg$d7%Ug7D-;<3HODlTytq;kuhZJZ}!5LD+WPW74xgLYeVBb8U
z<f=yJ-198(ps=$V9=~bFt^!2qUY8Q>YT}vel|DCdaWv07`%>=~M!oH=di?oJ(J4^s
zPQ9#{-siKjA2e*k7Qo-At=#O_AUE5)|2z3h9C|)x7n@!(u~m6KBzlF{u?OU7=MhP3
zEAQCO?KxDH#Ms5w*7zXqzB0CS#XKH3m#TFV`LSgniF=?bIqfK=86|*=BAa_7W%n!k
zakZbieBLi3lEJ&)C$xRd2jH5q_3w%Zy<rBRavfL{WHI^x*a#VToP!m5vP{@#Vz;1N
zU$pisT}UMM8;bl{M2>FuY$prOtuOrtoNLaqiAq4;Vc|K8xWDmSUC>{6&eSpGNy@wb
z6`m8eJz<eg{2Ey4Gf<$3dO^0C)|>W*udL2DK$uQNOZAZTKJk>ud}AEnZ0EY9nBiG|
z!?T+jH(^SM<Dd^eQb2nO9zE&izFQ;XR|qRfDucvO3hR4(z<}|P0Q}{pi?Fgq%>B4B
zw%F&A?|IJ;hIu@2+rny{>wjalXFLCe)uK^IS5#%GapD*4Qhdx^?D2H()u^bB%gACL
zJHFxw>1IJpl2GwnU3tl%-=AGn5-B7N6}{!_4rH56_TGcCNv;d}*V5n5+fLk1kU5DE
zNknlVpLN6UVq0eDVu%N;xknk=!9y6e_mj@}djz`Rl=9*rN(5Os$4aRVe20hHTj=mM
zt%QhTn_c2(KC?#8q4mC_E9R5`X(=18XH97cP^QDBAG<5hn4h)0;%gFn;6pp@9!r_T
zk@^I$Tq+Mhzft@reR*m9+0LA7dbgupbUbl#u^u2Y(*dd!X=a}BPRDf1cI|z~QkBB{
zWOu#|vo0taa!mPQC>zU_ePpizu5RsI@d!)aQ%pm8Y3fv4Qax-#Guh-tm;v3FQ@Wau
zHhR|RgM?|tcKFY7D4((U_2}|_MCHmXL`C<j-Q{ON)`v|9dbto?6Uk+Q<KAYtT)|ZD
ztZ){vMu?QwN3-I)U-syX0+1p6ml@N#ZFfINmu5wEvkn2+1pNSegV5fa<8OFHn?MxB
z53@TzKl96d&6o6#`&v5dK~(hrr@pDO^y6PS&BPg2PP4x62-v3cQ0YY82LcQGL{-q0
zzD7+8S`!+T-7()FYN4ND9~eVv9>M4cZ&TsEJH0~@#H^9*_Q$J28?5K}3In5O-=4cJ
zIYkod`L<fbVLI*Rlo~unE}k5CoCQ*urYz%^QvDtG;y#|(W|URyAumq2pv^3*y+RcH
ztR8ahcxT37ErGyw0IGwJxt`5~7a1u37BbHl6c+cd--JugOpP;AQA)2DaiRcAz6y^`
zmTjwObGxz1QKb(+(AGIv;-!!c?vUM{oGm+a<27K1_7JWH^|-c>(%&D7b4A4mdt0&V
zAnq34Yx?GO^I{0z;Oq-@?chZD$kwMmF8vLGV)_9DVi<RKRM-k%aaGk}{l5MDb~q0X
zZ9ziT)}Q=OKOALWX5rnwSnlIM!~^Jqm7%_bSx<2B?AUI=@cw$pDErbUrPFCG`=Q7(
zUMoo3AoTaGUj|(SvgGQU$M}^VA&O99u?|MvnU$#_9i%(eYztRrnHhHM7~TQS5USrz
zF#5j#UQ;x3|Gi=)aLm+-RWVbu%A#3?d;!#=0OG-31*GODfYf~JY~}QZG4F};C5*4e
z;)V-Id_FUH8cp6LM!|oP4-sn++Li{0k^Pj2&K?{i!YN!X6?@WYx6&rR_G<AZYMhyw
zDrpiSs8r?Xlv_4^Cr>3A)~#%8U2ex-P>OI=a?5aAVJNZBzc|1|;#V%J|7H)s3bVPt
zweJA(rm4<kVdT3ZBEsMN!+D%WE@!6L_8Dv?>mjw*<m^HcwpAnvWAQB&=~=hX7#V)|
zGBCxw7EGT6DpEe%lFgUyJ6g3Q4L|>zIJ*&%#CXE_K!ZzosE-bhb$_z@=~g(;nPUAs
z6{YQS*8|-72^jMRDX}Ua$D8z~(?vh_a>GKSp)HUZN^HEMd``Ui8V>0GtXe|7v2A51
z0V8gGu81!&Fv+_1`gHqQnp<wsR%yK*J6Ku%qp25Ox&NM(OwfKQq!Old05}^7L=#2z
zxMQ{xd%HnHrS#P^f-Hre5l|h(-Jgn(h^$lF_i}~Bd<pP~fBD{C^yG1F2C&HQ0mWj<
zgVD>XMZF1}R1kT347Jxo63IS(M!ryg`EfGN>cp?tdV9&eKu;a;9C)q67mjf()Whsw
zb8HlZn+44()7~JZK<S;@XZPi3JB&dXMpb3DUG1+zAG>pTG{kNzD*{;k_Ja}yyT*_i
zIlIfH3P$<2ZmPBgg!|^IXG;SHp1#JHuoV*L5uTSQp#0xkIsAN&auX6C{ad8;AwI)H
z+l^qAuGuL-F?Ved(;uvvJvcOpu#9*O3L%T8+0jA>nj@^TEeP#pmO~D3%V63E#k1lC
z+||iVuN5Lw&yKvrP#JT;M-@55z>)kQ(=@^Y013>a#}vjh<@_s;F+SZ+fa;%o*$>KZ
zdJm!Gpe7NHttisxvrBb!+1V}@hr&8Fi0gtv|K7UUdb23TR=#<zr539LS%j~^zTDPY
zpn$H-u)F+84dg&wEf=u5rxA%Jwe5fKR~i&}C&-O{gnwy6zw(5J27689V0?uCA<U!Z
z?fDk;4p0tNhvhLP>I)aM_V$HE|JE~CxaU{e5O1tX`DP>r&RAxe<3a9ed6M0P^$^IY
zQ_XAbA9Gq!0);f_eg)fiyBApX?som!nl$_V*_z_wOP>Vv7_4H%M>^9AbBg<7l|vl?
ztH=`NF=$1i{dOgc%KQS)-<wz}xE4XShZ}Zs+Rhi6)nKXeGFlT4COFg|m9XXN2A9hR
zgv#ol_|^i&(~c9}JKN@CDvW<EAW~qa@iaX%KP?@Tc4HqaO%i+oyyW%I4q&(Y32O&X
z4Qv}x%Nop`X8Vhd+WJ`fN>Aavo`5Z?>{uR)Dx17}^4AF60h-wGx9Nm-Mbe!DcCM}E
z{|RJkg9aDdW<|k4(7S&Q`TwsSZh$&zde?>NKjxUT=3?eO(08+)t&vHM8PVcrl3h~F
z1&%`>yS0alzjC!5dt$b#Uypw#0)2cepEE@!eJ|uQC;!=!8jpupUve@^?n+*@EK!;+
zJm@qKA=gZnkyrR|;%#i8;UT);l~H))9qY-&3js@2bKZbWnBTGRSLqDZD}Qyz4xR(Q
zdU!`I+vmTwvHzWqj%wZ@UlQrXH$^U2cwQgG$~cwSh<7}tsi@60Y*}}9tOg%|Jq`Q3
z*P&cZz&w#>e7mwpEl$gCx6DC_JVJzw6SNw+ns`H0J+Ui``>)@7AI1JAaNFX6;_>yY
zx7)1P4p*_4=qsl5^#v~@amK~(Ei~T}0{wapkEAGJx@EK60T=Ua@KlBi!DQaN>&8qt
zcGsR#tF|kI84-iyhkFah%4$jimaS=}{gsnKw<435ZD{0VqxHOo(_0=;P@7nIT>7qo
zIn3AB9Ir>1I?`!Fwy_!~Thp%j&$z;#BZ__R=xFVH_t(wngLWP4Jf`rZY^2@Uwlyio
z=b**iA>!T~t8oXV?qTC6lFlKx=!hiM_IGQ=eT?f@51jmUA@jqZA+_O!nJ;jK?pNv}
z^%{;{{`f8ut#-5OHlJf+ZwDht(0Xo~esvS~l0`v-F9Eg%K36v(G2@czTf!8Oy-$93
z$#Q)heH6Q{DL>Tf5#DM$m|!dnd!nxK>+z1Jf)13C*JE$}fDBeJJZI-Wmwy!NQ6m|r
zC@p!pxYwN1gYKp}=l#dOu5fPry33x8MLJaMsg|gcm&^3WinBp3-Tsrgo$}5Q*M|Hc
z-zzb^9Z7;(<%_-k|87mm-v5WW_l|06d)r0bHryb%Q4nde6+r|6=}kaIKtV)MdWrNB
zs&q(#Wm`asNC~}n0+CJv1eK=rn$RO41PCnzNC@pLiu<?s9p4%Eo_o*u?pb5~!AQbf
zbItkAw>-}~=Q~mG`fy;nINl+jzi&IY1m}JSH}?MXLnxB7)&gf3&vfF~>j*ywY@KW;
zr^}#(V>Jl%*KiajLBi@+SK32?Z^f`t%W>T+-lVDBobeA8LqhM63x5v~)MMSTSrIA}
zYZS0GV5*-wd7Fe7ZV+0Cm$3b3BT*li)`ovu>z$-{KULgbHVt|xRFupLsxIqxH~5|f
zm4IY>uKs+%?NYf|eZ2cjWU!Gkk8csL#OLH1puS|0d~)Rzh?K6p+zp<fPfDAJHM@Fw
zrEl9If9!+bM}7zc(h|&!OY;h<L*4yst1gneb1wJC8<Wv9YsK}<50Ze&%-pE4HZTZ}
zAc#ObmAY<!8ey!|)$oE3fWAylnvRDY+G@$OBvt`1Ncp02pL!ju#lBGEEw8Zyr{o(k
zxaLQ{M|QuEN@^IJ(R|uvs!{<It5?zZ>#MvsBa)xDsBb%9Xx*r|Qk#cvdX+Z<OFDPB
zf^G=`0w8sNHUL@9?=;@?TL-m8zTiNb>I8wVGc+@zyi-^^{`@tMz??+c-i@k}$U|xt
zZ`qt+x=iDkX*I_zarTF&PaXR;cR;II>=w{xhzO2_(*oGp8fI)N=C4t#)<4@RMlOZ(
zTyrl4nP50kGjl}T;({DBoE5()`2J8zjvRIq*x~l_*GES#0*xPZ@j2bE=N<X++kbuv
z^zq>P|DK;NYibawgP9XJTG`-O7UQP7Ha>L4B)N^zCa_W>GeZJKfn=`L$^<AS027ov
z8S&(%evrY?_W<s;w}EQ2=<v&h84A!rLiM_796AMAHtp-WRYTt6+MbsJ3ik(Fl;8R-
z9rd5Xt7`&tM4dk6e4Yl}n>=3x6LwXblImS#!!xWj0bV#5ig-F;X~X2)xy>aet(QEw
z4cNawG*^0rDAhI@mC!f`YR&gTgz-=+Z6VIVl#}~v;1(Ge(9g&w!^i2ooL7B7*&Unq
zH@j|<%JmC5Kne9o@BIQh@$7pIOQ{*A3;|06rb5c1^_r6}VWv0xEr0&wH+n<3@a7EK
zOr1p$>tM#hn7Rh^Ui3yuLQmzQ)2SdP#O#%IHPF#lL*jO0&fZMdnCanFt8#@rwScD1
z80BptofFc1ApNH<%M$*=%y*?g3TM*2mh0`;y1BoNW`4sVM~4}Th@8nyU*)Ty+6qZn
zU$1L(Ji3=xr$aQK^|sU`_7W1d*JYi{z)={sU%3GIM`fj;>SN)aRB?TNfh;d47tZpE
zg_-X4b{#%Ylcp2yoqLfh;>zr5Jtv<lzQ4k%Mp|LBJRm>~KNgdmX0$1bLsnS1d<I)L
ztP1;zR!ZBer+A+P){zcuxRggkW<(sYKD|>uLgu*An9flDpUt>PL&NXC5$rlrC%(vx
zL8ki@^{15_gOSPsiF(Z&STf9|Z9{((<zB+lqaOg59@<>>`@W4||K5|kp`fTC3DK7e
z48P7sUeZyV1dy@kwrUVn)(P6s@Z?1&`8TVBG^02wdT{#@kAi81>r#&`C9xHrY8#A1
zTHi#OxY_v<yOH&VU)<Y61@%RqZK$sm-W5=wXFMOW6uPw?bDHzoc9pWBf69UxP+kff
z)T+d}X;X<VZb>GO)^Ofpt!utmHFhe0-Mz@5u-SAMT;}fa5QNrDTH<9F*$oSO>9dd^
zw_wWfb7ypej(ZpSNI`}NRa0pxf9mv<%SoHYir|-)g(;kC-x_hD;4|e!Au)X%zYQ0Q
zs|mjDwSQAfak%4T0dwzoyY36<qo4%ykkTicdm~b`3juGA+#?FuV?;1?jmpZWge)*P
zOCPN14kPY;o%;HQX;q#2T1aXAc=}OT-4<}~C<!knkv4&aLcR75nQJI-4I6Z*OB?Nq
zR|L;rsY$JpaES^+4vMn)M4Oh_)@1Ifak<C2n#G$DFGY>DSU?JIS-?v@)BSJdeb>!r
z2QcEru^}jSKJ?NQ^o0WB+j-Ha!<B5-S9;dTHeVSusO`lw=3VaNBIg)}itA!AIz#Ml
znJj4}Pu-x;Aw>A6NV_xhqTu2e1o@H*#diW;3Qy}`DbwrkJ4(|)-USzg?zh~mwd3(w
zozBG6j&^d_-+qASQSJ+?qqe%3O`7HyhohL0I?_8M{cPar*kUA4+NDiD<@3%asU0?S
z`KSya!G?3qtpT?#;_P{Ps!8J-w<6R&aN2291X3rnE|C*o+%l}LN0KVcNL?G7?apal
zYA=2uR0^C9oE+*=+|pT@O!0scOmKuZ%)Kw5rD%gN!k9sed`dy2UM)6yaw%f!>$YOP
zRU&!c$%_Z9O|%$xo?Wr~#RR(|U|hK+TAB?iHN|@6(XKFNVpYx(-G(wap6Up5klz$v
zkNs%iVbSch{@AHwD;?m}DmT=jw3TF3-PdASO;b99Lfq31KItYe`&uSdtNtIN)vV#s
znXg6yz~pIiPsHlex*t_@oCI_qq%G7hv;?lmmaJ|1)|O>xh7n$ObIq<1hgOoA8xKLs
zumtLF#YMjVs-nTS5{P|9TKf}G<)``un?zA<hB$)g@d#3R3&q2XF#NnTbiiXFesh<4
zMdZFb%&{H8)$@f8iPf0~UN#+gS!g1|-!IE5#~2<D{}CN*C<Z0LX(hyWNX&;#zRY5F
z=jx`t#q}F><Ixv>9{%=0(QSDFbYl0%O-uB)Y%(RX7mh_F+=doTKig>V2$+757Z-lD
z6R_qtE7Sr93*8V`k_HHzxgsC1avf{A8?l_L9L}C-Va7kR3J0GXj9R{}*A7OmL0$Ul
znL*GDJ|&OyZJmiC3)78**!Res@wd~mbgPZRTkPh*G{#SWqn8Nij>_99oN;z%+N>`0
z!R;hy_B#PX(4nf%ILj@?64SCeb*XTVw2WO~oB0|0r%!Jk+ubo4ua{5Wb4eIgAgZe3
z+J)ucG}>F-ii!>SG4V1O0ZYWS?(y}k%%BNy(5*)TGWur0W<rMAy|C3`iPH2+23r?C
za|c{};bdJ-?NwK6$h{=Lj8M}VL9(iZG{NF$5e!jMn!MSfUUGKsA|bvX))wj?D(!;a
zRmi8}zy5kZQq3rGBn9V-?8MR%zZi~H6rKl3Xmd@PPViffh5ULu|Bg!c6wYhVx>0z7
z5nxd5e}v{(wc;}p87P$8CU*b;yaoOflYV*qbmm9Uy<a}(CfhZ3=7l&J$H5j8N_o%(
zvHUD^j@$dEe=ad>7=_Q=BEdJ?N_7JC%@^HOOmK^{Db3)Ps?qPXDKj#qr7cm;*Q%py
zuBREL*128)Lu8i0jw8IzcE%PORoNVCLP?kNcubn}QfyAd3uCYpl+GD-h!E<yii@{3
z?36<-)gLx#iin{aFKnWc$<8x`rZ4f_%QEs8`+QKos?-2$)VsiNM|L}4{L4;G2npVS
zEC?!sE8Q5ti2M*?RX~gEc<SyRUv$hV{ZTYF>Y+-+N%g}=>6`N}fxL;_gnTI#NFf=2
zl%&^S;M#W{RKQ7i(||h(aj{M|YUQ!s6=ucD=6CNs=Tl&KKG)yR`REguqu$EYped~I
z6v{5htkaWr>v1ElRJ6RP-C?4ZiSAh`*qZ?}gM-G|jZT^l!$)bNWT$!Al!Yxgw|z9#
z7*9}BoHO-KeUT!kY%};Gr?7VZR{0VEm?Dc5&aecRMs#84)Ftj1)1^{o;PJanqAQdc
z#DC_9;8!mkaPl!l2rl~Q8nhD2==725Pg{>b6`6#emHMl2#Zm*^fsKqZ%q5<@x)$ys
z4DwErk-fGy_KdI~3pXH&;WjzSjUmoq0I777{Kh23e7X}2{%QBHom3t?Sl-b!CEuZT
zH$V$gU+9SMTuXFSQIZLXWhydq54_#1W(smOmf*jtm>ATF&p?0{mUQCwyQT?<k9br3
zSta5wRdgpE!of5s^HG2kG#n81je%R&yCO)gG-EOu!`jACZ1)^hm<RW@=~Qxzp$axe
zNiNyfxO-(o5;Yp~PhUbAM%UeZ^|K~&rAew%lYdfvUPMtJcn@cgx7UXNSLZQP0cc*1
zjxU*6w0YyS6px1@Qis@xqbbP*r+}BHKPY*hnVoF^0E<d8n+8IcL*>R4XVnig$a~qE
z&bbly7z|PL^F#i$vt(|LK4#(*JlW>pTejj})C<IYYe}xPDVz@6CpH&wO_)q2lA1f1
ze=VuJMR_;>E1vg$-4A@mvq{q*0*E@gqx8p*(x2V}jvmxoI0Z=J#Y>oUOO#=EhITl9
z7<BQW@=b&EUV3go)dCh~MDLjce(PvNPj@8UI@?s?rbwy0(gQ);ZO9K)SevNjRaek#
zN<kRGIi4z*bbTtlXS)0Y=FKFEEWYlwx`J9ix;^B?BS^0tm!lGF7LY5dU9WMK%g*zU
z-pb8a>)>TMQ%J?t&Wo|7-e?w&kZ7jM8lx5Fb)1WE5v*c|6l^d$%Fn-3X>q3gJ~V*%
z&W28G+xKjb`tGr90qPx-PFY;Rv{liDl;am_zQ2+TJy%T$?(K*@&&|m-&!)ZJpPp@P
z^<9)8%+`+tjPi|1tw`ixsrd~an-D9XuXYXbWj(hk?j3&-R=*oG%Hn2o8gTM}ee4X4
z%+d*XW3^?pQ`a2{$A*kt<O_Zo$h%CLN#9Y-w@V~*i3t7xjvfT=rQQj1)~kC7lAV8m
zbmVx8>dgs^f4bQB#zTAD4R((UR*mcLBLWcjXG%bx;rT(F3=ze^lXS-O52qqd7o%bB
z&Z(o?vu|7BHv*E{MyyJowpD0$AT(y1w|7c5Lq<hdpUrU-hU(@uuCb?8ud|us)YVDw
zQU4|sto))!A+p=A5m$v_I_*FhO5Gl)$e3kp<l2Q2hRhaNn_k2#%h306fRKuPMaTw2
z5O~$@VUm@e8`Jt}&CP93l>}~?GN~UcG)^(@v6^vM8}}=Xk=2PeCcyeh+P5xEos7-%
z*qK`cYtMWq!%()l9{Nu8hr{5S$h{wFXa!M&1s90TlW%D=m6+9a`)W!ArcqaXF^ZNR
z`Ojb!qq%BJ_;5DuQy35?av99+C_0-kXJs6QqD79*4RihE6A$G>3ul)*YF}2LrwDj2
zBrDOet!}?+ozH#s4x{Lo*;)H4c!O)$Yt<pg=LwzYq#a@-21{y{gJK4e_HV6HfN*-s
z2(eY+73u-C;<K@n;-g!19QGT-z@S{QPv)Fw<E*7ZJzn*Bx0TnZ5m-Uo#1j4G0qE>H
zq#WYHk}HOBIf?Z=%OJ#lp-=zqTAW0#?{X+zid^Ugp`56Ys~KVLM8QoOoP*H!HYyAu
zFwjLc?%a@`*fzy)=u=S4osDB1;;rTvn4!hCw$fXRt6Pd_Koiehv^%#KVWnPU26g{=
zO~IQ5|HoH$5X$KI5lvL*0Sjf!Lt+gbuPW|v={EKOKsAvTm<u3P0BJ-6eA+~2ot2JE
zj7Z~(=(xhW8Sv+thF+ODbQ2$mm8#mmh1v%^ZxMMh>Ce{XBW&*-#M+md_F`>YBs1s*
zNWVShU`t+^(u7d{r@tfa6Edj-wTd}Q#kw-#moP``ij0+I_MdAHZ7?vssb*NsItHz#
zig`Af^>z37x*jpwGNH;{*fT9Z;OhV1A;JA;{ci^v|LayoQhU!J%h-UaRR@ACAbRc2
zNg7k9(WJe`-X8FY%dJ2N%+~h3Ri|;Nea3io=JCYfjvvn+r4H<Y6n#OZH@y;MK(lW5
zUJD<RFw<75=}|T)Jfe_^EcQ@(xc{r?AOJJ1y3r$#=DBf<n94+d3_OAIZfE~w;xk5E
zH4Qstvv9W|CtCgvk=5ESI#ZN%=TtP*eP|`S3qu|@D;4)=O?;%EsnG!f8IU0wwq_us
z@HXnyYcHp&1yLYS9)vot7okq}OkUldZo}TsN>aXYd59=!SGkVq4jT5LYa=g*wz!Qd
z@Av8m2LQN5vEXoH7b)eB75)`Gj^IF}^U1wBI!6k5G5qJttduAn=O|ug+R39*6g={Z
z=3ZJEaQOxUO?+UAizwZG<E;W-f6yPf{BlA`{hshSP-)tF(ML^piOhYFtfDQYaZ#QA
zD0qRiCV4YIS585u*m|d70H-6H9^yBj<Ek=^aTUNsiTEGL_z_7a0I{?}TpRbe&i!b4
zve)e!Q|{CxIQ*>W;}a$dSlfkzFrz>UlU!3~TYhqN>eUtAbWyBU#a4Jp>*<E+GUtnw
zX4Kco13>5&FEc>qpo}v2uuo|f$!yUb`iTP3pO^|qr>2n9*AkBax86f#Lz7-M=F))u
zQs>xnA<|Y`)?V>M;Oe>nAg-#@v#IRD40hSZJ+v{C+mHqCYEpEG1h~EYX(dlHIl0n;
zq_;`)t_CG6m<2P|v|Qxf2T_1!kpmc1Rj%3g=O9>JEAOY=`jbn-+>-DU%c1B_Q$6FR
z1$vh%R<|>`;=pvZ?R4mmp^=QH-k%v;_sj@zZAms5>_Mbi`J67S07Q83sqz_Mh$8$Z
zwo8tYbX%E@myo<NWac?Qy=l+E5U(PVvE%(0k#;@|F^8<&bAvr#m0U9(qnJ^#8Le5R
zHCrtwQ6Yelx9xRVN2}t?V5di9t<*#hn%|R;eHhh7#ddWcN3|fXY)edl1As|j!0Brd
zXE1UB<@??cLJ{U9BGg4n^~E|*L<6#2s!HnLs9AU=e?4(9-nYn%99y|q;{#$k;9>S+
zjT0L8-qIGH(`HEl3ZIf;zD0L)SPeRZk!C|PRRuqLFzgDC<^5MaGgX>U%r4Koz2AsP
zIdc;i!TwZx3<E4N?oJaH5FelUY$UgxdbEE6W8GDrG1&_49)lRJgZ?jb0kCM}J7O2Q
z0dIQwX(=z;lilB+04Vh0TiU?*sq7AQN=TTg=r;a8_Sds+Jcf__tS_vxF<gl+8HJ=f
zz-pg2UB(=>`DM~BJB3#S@XVTA=bH>Gmj;j1@=;U{Zo@;$pk2zkx@=G`^R575@)@{@
z!#!&e-&@fcTnnYi-pJtzDho@A3P*nQq>V?f`_q;8N4zf@t_@#Za~?mt=X(LyGChek
z_r3-`pis(!0nX&sWPsJ*1N{IrLl02g#<DKo$~O^i($@~f>qvt^sw8jv&!qcMv*0OF
zA-6xeakoJ0&tUjm7>h8^-z*I<9POuwk(0K1-5t*@lR7vp80RzxoEi0g;iCXM!oEq{
zp+=mQ*({%`26~2pEsQ1iQVjEk&4Bf@--o}OJEHynmh{<g=f6wCN0o)uTR@>(_fi7h
zmMsu_)B@I-xqXqnWTl(?{df*K1!(7cA)ma5J-+mB{6dhoOdAY-To(H}v@p|y?XM2T
z$Muc(%0cITeV<4-A>{Vu=Lvn@^g#JvqyYgi2T|ono^bWG?!fvQNS!(`wNbPWJ>5x|
z`M^CGx5inQhIKyCd5Fh9GMPTz*WPi7?~VmGr^}`$QQg%Wlu!#UDqf}D5A?3vxu4Z3
z4>_El&nUng^EIxOS2lZX_l1UCf>yxv`7tiu%6-jD6e&ZQYJ%qzmZ!fu!*&9g@Jrm{
zymp(63vaOVarsxrPc%ZqJ{a?3Y*iM__}~+^)CaRO-;X)RDZd{LXF0C(>p`l3DH^!|
z)}V4}(y?q&CAmtDIc`vgG8$T+e8D<!kDiIO01B@1{M-#W#M9lx09jzm8P`!A28`$!
z0P?s5NgV3fEA`IKxPg$0cB66<AAx-CP>w|CCxm`nCVO@zqKsuF`afbRWAqbXN<z9(
z>BoO4Rro)i(0w^<)%LS!9e#@0u+QQZC59=!^(a+s)UHTSc+&fX%m{rI-IKjoU&m9(
zb=2S=Z=?GDPmJ*|MxYRwrK)c?NwC2ezIiruM+d3sZcgvetl3Au=z*@I|COfv#oTp`
z0N-+2Y5ktUF6_z%+6m-faww>MNgW#ddw^u@klrJ`l`rGUo5R~}PRL(m=|pSQ^c2Zi
z{j-r<zt29Z<9?*92y>;w_MYjCo++D{Ga{91a&iN}<d96gorb#h#es2eB1ZF%?HhTq
zYh(aJ6hAiqeb|6T$e;AnY&77_iabV+I7KY&G!B$TOuicFJXL(KzQzEZm36_L+Zaww
z&^+$s{qyCV@(aMw^zXsLTwnS&W|F%ji0w7&YwiAvU)2f6xR1Avx(Q`H@d?^yy{Xi}
z+Oc1z764TM+xenoeQlDY0$nv>uGkamRpFSA?#;#t<;&T|{}b(Jg<EzMpU@bs%W+O&
z4cJ_DfBI1QtOK_)AjJC}EISIYNRd$(iZS@>c@gd(AxK2&0Xwr?0Cc_FZR7)(Vl;kQ
z_aExXLGA8$g-9Bjr4^+A<Ii;D3(Wk~CPrslz*y<m+yTVUqj+ZQ)n<;of2q0AC;w>Y
zf1>6{3q>Wrq~zB^Kl1$QuaEx!q>>Ki^xs@oFuhHgEO%el(?`w{I_&*U@%}kNU*;?P
z!n;0VYvoOUjAVLw=4f*QaDEk_ddNy#N0Q~@I9qm1mQK&+<|~KnwFzTwz{S|wdriIG
zF@>Z(lAI!8=H4OIp1g`YhZc{3dWQW0a*r^pp#tn{3E?qtOmNyf3b58$wVzWf<lFE|
zGrbR&c%e|@nw^1gk33K@aE-(ZUF7bm5cNsk<M9iBush0wc(a$Z`J=1ka$~Jh5_WDw
z(WR+8L7veBX55$Z5weqVkO69Ya4x{LPppFxs6}{SSQ1}ThkA75W52z5fPY^*_tL%}
z=-G+%)=3}?dD?)xb5&HGRS4yra<+3laBd^<JCGvKK+-=80?UJ}v`AqRO;w7ioq8et
zq~2=3VHq<>p;w~fg#zQG8c3?4T}lPoE{cC&6y0t^oPn+NsT-z0sQzAR>U7tVsuVS#
z0#^X%g0*EUW4*02@(gmdQ%@+0aTYvJNa~COtc3!)%xkS^!iV3B>e-?6R$>xdNC3aS
zhfkfL5TcBO1(F5~_|JD!u3lOgC@uzlKf~HK2UsGuu4w?ADX)=n<R++v?`Kl@z5`wp
zFui2dH<H)G=6>hAj7@CYx7*w-C!6<B<4J5%iwI%YXeF%i-Aqb{#srv~0B(qi4*;!f
zKjVN|cV>q&A(F)OuR$4B?K5iCQwZl*NdR!+i03o{B3qT_{Y6!y3UL-AMF&lA8bte+
zwWII$JkV`jijkC+$m{5e9YWxFE<PsoA=I#%sv}#}_y=l|;tFZ4_0xfz36DD8d#MTu
zXq7h^@UrXJ!;B4@-Yj@*2}|agXcCQGeaBm7);ztHoqKiI;}qNLVmk^A6mDh<mA3ku
zIc_w?Hw|`eS+7%yt|P(L@=>EM)^`GHG+S;=(6@)!?GC%$+ch52DQQ>?Tr`xrW88F0
zTYAE?t~)5Rttav9D9R{+bI%7c{{}MXe%BhC(5?L9lJn-~v%2B#oP~rl(U9{(%;r6`
z?f~9uHS4ALhoP~U6H8Or#QB-tj?am(D=pk)4D9B`e)U~OiHfy7Btn-*7ZCZfFi^)V
zxgDmuAay&S4C?XZJKP;^^$>`!)+yc>>vT$RTx|GMrOrk#|3A8Krz&sa<&V->{E?Bt
z%{xv~bke-(iW1q*s~C5=Yyc2$BauT}K7oeGil9nz?sa;sKPLL!wb`Zo882BQj=iLd
zgj(0A`!$Yp)cBl<Z_t>Q=6uI6tG0kd)HHZek^9hjl*Jw&J4iMW(*!6=AZ>$*RNi=n
zmrR3xMf>$viJ0iPT907TqSvT6-5P;qKD<^kyR&F#Q2|xRUj$<wAKSkcadud{rfY>A
zbP*hc?DiqAL@O+I)l75x<C9GxwUXtCI)2RsADvbaufhuVjz-)>b8M0JX|}CO=Fw+)
zqm1kY^{OYfU>n=O4K*Gquo4iO2-&9(6BzRR!eQL<w&#Xqs;`j;ID9A1-tr)MO-?6b
zJmCXlyWbQ)u<yY}I+iH_YBYcC9OyC4J@C`Km@EEww!f-PtK3agJSoQG#j66veO0Z6
z?~f)#U>*1(1d3C+$+6zaRmeKOl{RB|8R-6Ixgwq{tn0vn*nkKXBUOmgMpG}-Nn<y$
zY4df_uTr=`LQvN#2SW(Jp*WuHz5Vo_0`6qFhx4Oh{da|z4?Lo?V62OGr~9M`FpfW7
zUcywA&B3|vVG^*>c_&_vsg@@{vrc=U(JEAGTgHi9^Lg#n!3=tJ+l(W*4-}D!(pD)b
zDD^&txe0<9y##p4;?k$h4ILY-!o$r;IuJY6$~)6yT15iJU&wk*2mcq>lH|&th0}~&
zcf#V|H|1m9y&G^{@16Rj(mA+Fo1k~nY9FjqY}HCijR-y~O=t@%uVhlW$Gu{Jwljz3
zzOq}24qFg9_x^iqf=>FT`E5vTPEx`p?zX~f3pLfKV$sR(D&(F?&Y>is(-vWXuaU)w
zHv>SWM2<}wGe^nfk~-Wu{N-;7G^oFm*-oOqt;53<F5qDF_ecOu9q8od^exIbMy8`<
z&T=v73{Xi~Stq`W*!k}ChmxU3+h@+cFK^49!4rJ0`B%HcNd%YtKC8~Y;Tv{IFs|6X
z%Ckhr<Ts@fH^>vY#mEe|yX@r90Gf2BFgbD3`_}$cS+F3sAwc9&;VQ02Gxcb_==8SD
z2(K)J(cc(z>(8YccN<E*+1~x_i7KQ$tP}7(5k`vuPwDp*BZ_9J=CR1z+l}|*=-xD`
zAvfZ?SH8xw6UJEES^(JAVhzEzD4emzCy{^hs?=j!TN^Kump1PtDwpUV2YqH_&^DMB
z*u?3jRV#rBn+;EA&%PfpFRG=U&$X#LnfM+FXDc*nWf$(WQz2%NOmAovX~#)~8gLKG
zxFM!&U_i9^=aKt&Z$Igfz*P7+WCW!B=EJ1j`blJW38weQ?IcpUcFOz9hBswAiewUb
z?r>Q&J16z@X6Y!TvozzXm=FmfW*!L<Y`8RD`ROo0HkXR|8<8>Y!(62W4C@|)5<;9+
zWAlSPWlN7Oi)81M0Ib6AZ>(a&q%_#dW2i!<a_x&dhG3HURlsm_BYA;ZEM<ZQOvYlJ
z2hD<ib0jHkBxE54Aa?>>s~jIe3at?wSrz;4_*dLmvAG+N%B1WDJ-eqdg!haIM__&7
zPqZIuYam^ptnr2y6V_I3?uh&uEZ=Sf$Gx9$dZ<V8T6!%P>0z3t%aACT_ko>T0j{~D
z8_h*3x4&M31^v|~fHqwVF#)jfMqEeqh~L0wT=2l{{(-oD*Vk49--_jF;X6B&=O<5|
zlw4k_V^u<|950v-LE2V1K9981zu@=e4>h2z8M|rM<D<Ben~chmb}}bd8=du2n0wj4
zwco$~8SCo;iZQ>Lft8;Qm~I<@H8UnW8*gIonzyQnGs*s}q_i8t;dN<;WO^?}rE^V@
ztCw`!Sl9S!F-7A9&wz~dko}S<NBBs|AJcs6VXZ3*WRH;hi@MunMf~OHCR}`HWlirH
ziFN7WFS@0dt>mj>7z5nx2FS7)EDeyy=TzfCL#c;aq<*$%ppGlHO=L$q)xC5>dq=<<
z(zk+WE!aD{tJQ^v|8zO7M-`w)gi(zpg|M_}?rpJ;3dy?pK}b^n2xAlE1bE2Y-DtC7
zS$u0q4hJ#wNmAu7EAE(lz3KNvu>&Bd3a2N6q|BWqP*YqXNw1wCUJb)!&H^<z5rrQY
zRWvnfaJbDgt0YJ5ATP^mQx}(?rsV#Grh7f4NEn<wS9dx{s$el0Q+?6S7#}b?30^E&
z6@M|9%svZa-|6Tz{iFa(RBUR$wHUUTGyce%r$5Xp^dXl|4F2>_q@D>2Z#mMnvQ(xe
zm@&3A0&qLD+%e@%xZuvpkOoe^HmEDWL7`x?q{vHTANrSd&7|_gXG@F*YfU(SUW@>e
zQbre3<ixR2H|+Xdm3L6ceQ#yy50Hs5BN(H`*52AtNM+L{RPlBgfnPFp&UxigjaS4s
z6}MUC73THMffkHXSe0N2&Jc*RkUM5-0_?oE8tdv>7rLb|O>8C1UQS=akOUe1&>!!R
zIC}Ugfd%W?2rF9)xHRN9XApOp>zh)m_FD_SxjcM88s&J&(QI8(e=^i)#bqt5b&_GS
z0BS@_#qet+_I=@0h(VdXS*m$5Dy3!&4o+r&#le^5DpN=bjFsV6g^yRuSq;9(mc<*x
zx!oSOmmH%5)0)k*ElV0UwR8c%N}*(_;MLddWGjd;Wz0=a8&c?r-~edbImqs`7y!y$
zH+9()k6Q)2fPxa71oJy~KB$!}&fTzfpC}Q1>smpbdl#boRsw^@-$GbJ3jGo}SwMj_
zL#da%n3#B0{cnc7cEo9Y+{IZF1XbKDVDLo=sbBH0KR`n8JxWh0Gs>z^1IPJ@RhIb1
z1em6^5x0V6AHmvj?3Vy&F_wG%?P|tPj?`;Vncan|vD{hHN$@P?O@Z+wxY4TL_aNie
zZr{dD6x2Snce`_rFJ`l>-X2Bu7indB04C);>_sSIuu2_kjkrDe%T>ZmW6inyEZVkx
z5QeyK)ObICOe>S5YRL5u)w4YxFmm;9tM*=9_`tACTZw2tJbn=(6#7ys#g(-4aXl2M
z6;t~p7k*`d;KF1uo54QD>EalK)C<*{l=2^ZA$x#Z?s+d+S&}wQxYqY1z&~CYwcg;g
zy_OaCoj!*9LYyMN+U^ENp~kUB;4JwIBvp^l9_G>DownJ;@@CvnGYSKc=T`lv`kM_D
z{G$?Ippj9m3LF<eNxyk56%I32ULF$it1o<wcCG4X5fj(exVG7|@)Gw|EYh_Ux8S8k
zZ2k;XiB6b5?Dmcam#2x$E6)B(2`fv9W}NsEqfCDjF26aSR5iZ0LtrnZ&Llo<Bk!qB
ztkA}Aw*$FJJ2L*6kz?0@QIx3RZ3rV%fnBYeD|?}6oS3+5R-CvdJ3nvlv{JTIpzV=?
z-fvYXJSnRL-piS%NMT1w<ylRG+b2yy5a$^~?$>LSTen^S_lsS9>ZcJ=nLCUUAUNWl
z+)w}Cck?D~l|w_29fJ-4hkLBs!MCplF~WxtEIJlo)*=VzJlg1G-#Ov2Na3=|j1c7f
z@-XJk`i2@EeY1GsUZkK7XKx823_1I7ww@0~N~IvnQ(~E1jKxS6R+j1TR;IqSF}DDW
z;6wqNmEJ2LO=KUw-IbLcQ{bj_B=hZUuuQc9-hG7j5!FFhj^bbMh+T;F&mr2bf)k9f
zCEKH&U31l{X6)W0AM%_ajB`E9nLEK(+ArAKdwF4*g5hi(RS}OJ_39Gph$in6*&jse
zNJyT1<(OdD)%Q@GvK1Gl&s#wAur+o5^1Y<HQr-)(VN90Aae_Ey_20gyr*5xo0{3Q_
zp+9TT$vlg8T!WB;wewhFq(w74Ds1|qLr%R|FEWCp=aBVk+xzauDz%o|P-0vTx+1p@
zV8B^Gc2%4GNY=n-4{oD~sXx>EoQZy}_A;)v^BaxddY4t4H%_wFDd+UHQZ$;^qwmiX
zsm_L=C4B4a>GeyLDW0>))$jhoTNxU)?H6~yzm-98UMW8_3m&V``U4ai^^mDe{jw8M
zw1)i@k@_d4G@bHLiy-v<-E3EI5I4n>>aAS4o374UdRZ|9=~X9_In*>*P@I3;`9B~|
zr%+bL)id*?Av9>Jy$KlXBa%`R-~!FUIW+HddFbn(48R>qq_Dl#WVSBD+;r$@DVs={
z3=D6xx6FFU>7vy(M|)?bK(I>R3ehB@^g$s6SDBlLig63GWG{v!c2)e;gv+dYWCJd~
z8MQ+l`c&-rnwnqM6aIGoLmVSV*zQ6=YW>wv0NKvmqCTe1TzE2(y~$WvqDynH!$VDk
zTE>{zoP01ODSxcZJhuw1<Oz*2+D|S%;kD@=*=00-_B4ST^cck_)Zr@|j$9{4uUcfc
zNxz;nqryb$CxJ|mfQ%;hbw@-8z$Lx7v|!3v#xtCR#kdGeQ<oHQi;kl6TR+_ZeYTmV
z<Q*kDwiuUEadD4gc%Jf#>i^q?fTdN!>R%#f2y_({x|bwMGLwUfO}V((ach9Los(&8
zJ=aDZl0K&=&ffj)flG?B)wwyp;-VHfhg4aOI1;Z?ztL=Z*;C_VM2MrM1rVAh4u^b`
z5i3+SN7%a~Bk+bqAklB3fQO*gU_d6Uh3RQF;O4!XMeSJXu<-Yv_Jhk05g#6!kFD`z
zG(P?t<vN6J#Yz40i~EFfF1Lk03lWwp+G#1$>=7om@@M)J`zI-A04`7LABL%z7%T}g
zL!uLEO`2xk8VnJ+`(mIN=+GiG6X`no)fC)}o9i?XW(N@iUCWFip|kJnzHS3fCnYNY
z0tE4g39#NZzoNkLJ(FAIwvRt0KWj3G**C#3zI}zSy^qL02eIza0G}W4fA+%w*cJgG
z>n!`!z#d%yfc7`{KRW^gQiM!z#=R?|dxd_|Yo#l}=c2|`#szGl&{(1etzOU-bRWC=
zC=0XzA(9G0`+p_w9%KUV$?gA+{T_Sm0N4MMtsw`${9pa4z404;1M`{AMmBoY#{}pN
zyL<mP`%7f$0O-ddOHzz^4@kqX1&DWSy0rknzjNma1H!9A_<3*eQXR?-A^$ZU@EpWR
zVzoCG2Gr5kXUDs%+E)*`nXRFBNMWIjsX=Bl`jcJ`&&=-MC(V?&Xuat>-*&QElXnuW
zRF~fWz$F3KTaRWro$o9^$3adPnjCai5iK+SR9E9H5fC3JR@lMp0jjq`i&rCX7e%!k
z=L^BsFpVUuK`YeWKRKD=2yjUr(derZ!D=yU&3#oc&WbRfyGf7996?t!@3xcK3#cuB
zLCx(Hhipf~3$FcQZr&jaTiHQ)2R|D@HZ$_Y$@qKI6%KbF)g0gicfWL{$4H21k31)v
znZ@_GPQCoO9AeJ4Fx*9KT)AE9aVb5sb*IA$@iD@0TS+WU=o|nUiWRkneSkIYEDbi-
z#Y{|1691Kyt9PR<^v+?5z@L~35p@W6LX_x*C&N_49ioD<b~cOlqQl1MM;EP!GD?_}
zuh5d>S#=1bjJPOsQLP~oh2j&MvcmyNsz}+b%wvlW&w1ADI`81_r*CklktBsuvT`LP
znDytEM=;~sAm47LSZo)}#3ooEfK0Vt_^?|MFe9z|F?+meH!V)$Atx(K^QwB8o85f{
z7W4Qp$@OQxy!j^6e4EaX3xZ>|YM*?8Mbd{RG)orGmze3E!y@`hW0>3qlPFdP3kwJv
zY=E#~Dg=3HTTjOj{OA#vi6??Lp>*eK;(htW^YUPLh+(O@3QfYiG5%zpo|&B08;573
z{)_IfSFjiD-xTLdU@mT}2kr*kI(F<B2bF@S=h5Fh?)3|S25d9jWAHc1XF`RR6;{m5
z$B4Ilq+;!xy_b7i4I~hgzh)hp<mVAOAnq=YLOk@CeUV(lp5J~$->V5nzWcAyPV&w1
zeLXCBS)d}i>d9CEmF90-mCatRX5Es=elsmkSF?bFNE=P&Cf+Qty&!Z5nBNityx!~T
zkR;C(vss`EFD=W5u^Ii7V8wqAfSwhk{Elr9dmK?SE&Dx^P%-}*6~3>XfTf%o%?vWp
zcP4rHjSWpFwMxG^Us$e@z>kr4t&-B8Vyx7kLyu!O!u2#B+d^t<F8tb4Rd!(l+?vma
z(df33pfPsZ7k`@08k?oLeVUyp2+4H`wGJ<EcFbDsImVvxqv|6tO9YAW3hUdt=OVK@
zD%qP_y0)VH#v~RyA7gb41J4|1Idz=n0@d~PnF8ob`F|{ef(mMWE!#?5>1_Z_bL_wF
z|DCG9Xq;9uI8l&E1#&E<a`D;b_DYMgN3FDx2Ip4;?A4GA{7w8gyZHr37&#gcw;I2i
zFBWD#3GBnit!zY{Z`{lZss*y*G<yEI0Ah^&({w@(yb44o#tAZd`Y$iYhfVfN2b)y4
z1OeeJXrksvP1~tG8}Hh<kf1Z8S~}|1YH9E1(882g6h1yd6=e6Xl{NQI0MymSyNRMr
zr<cVVh10eJfYZgv@STnRI~xaXNwmW-_(x6?E=qTFaKIl6Y#7R`PC1}(O|Jvust1Nd
zN8NX{bvI7p)i2}fT>|jhq?V|lmw>e|{YdiRh)dEXd~*}|D=dUa8DxtOf@?&utMaze
ztQ<}21}}Ns#e6r~E{8RRBI2JSLTY_z9{zXWZ^_5phUeKenSYIK$FKSSUo{`EeZ-l~
z&Ybkh%WlrJ)g?d=_x0R5+{^+<)W5lq|JHpy2d3%Y9SCN)p7FO57o4m)bGR9~j%7kf
z<i;=VsRPK_V7u^(xF@`&1~dHPP@j`kAdM%~4%h4F4IrFDgiJ)(Y>P=1wbtL>vme)F
z5%{JiFQ~W9xro}5;_iEDEb%GW=wb_x;Zzud)*QA6jg`pqj_X^g=40%5kS`&bE$vaQ
zS#LF$7~lWRe*>xkz+hq4m2sv9y1F;f2$RUP5s%OiB5f;JUz6)qab;1&#x*bbu<E}p
z%1FR%Nc+6QPGgz8`m<&za5ypYvCSFu$v4wi+r^SX1>;O{Ac=G9#6D?hfDK?)O>j;G
z54kRyBKOe#p^@fqf9?3fqbwR&<JmiRD#t(Ul$T{#2U!&nw)f%=Ah9L{5cq2u{aL>&
zDA^txbQ6g=0Vw^7N8(S_@a?UHt<kUQ_U2@f+u|E8C9BfYlPmM+37aqlwiEj{Yd3bG
zxr#rS-b}4P^D(V(Waq8js(fT!C@V-*OPGlKu1l!SozlHj#*g}QP+G14|8|rzERC9{
zaU|hnaTWLx)OqD5PpD<pkGjwqeI2i&rEMn#(<h&o;CSH1=QP$zz<Bn%`Pr%gryph9
z%_^bcC4vBt%JFp#^b-5o&{Cy8ISN9OPAJrPcOdQMY_@gPNJ*@t^l`=DXR6&Vog<gB
zy=)wBr}^j<=5T2PHEaBAu#hnevp@q!!8~62Fs&9gz<xvY)7H2PuRadGN-5iNxnIej
z^L_$I;TZ~ZuhXjSg4~Qw_}$^q%mJH2Z{%w7KBftMY;%F%pIz(8+)k{6-<&pPIsSe8
zTIk*O?Ut^&mJpEyWwLP7dt1-sy;lV_qu!WX^HwnT|A_V4IY+->Q&J}rshEwQ_F3sf
zg;6{%9tx<vcmwXd_1$b>OLbvmeo^BX5ZB7Ra#xyzZTLJJJZ)hG7_oNGu8<hEruo5`
z#Zqx@ovzb0d~jJe@pQC}2y2&gxiQOYk2<*EskjcRe7171ttr2hn8n3b&&(I_a)n9a
zI~UbpxQe0K={SpuAzW9zdyzvO%A?ll?az=HLi=_&;WF$QRB#i|uTKuEUZ;5c+P!z!
zGlWs<x%hnBuhU};9_tfTcNQ63H8cdRPY!iVeXRS5nL_&w(grMpjDkA^F7$v{n~F1`
zn6&&?pSVGqT)i{~iI-ub@G!KVM*}Y61ci_Bqvo8X-N*GP)2;p_10c^G=0O-jeGI4@
zn3QKN=B+E1NHCMWT$bt8OvY}(L_Ak{wzrplV?2G@{s%r#2x8dmaiCEpkC4h)0mFo@
zHJ>fpF*U-iB)d<ywNo8k%(U<ge-%|JH7#gPycanG&+H4!K5w_WNxc{3b$8Fd0a-HY
z5vG>2zFLr^3*KAV-E^IG_5fpwsykLgi$aFsZOH1a8tE{q{454!la7`$<$t!j>R#kk
zw+}A1IC#n_6an<_rlR%yb?Zs+xL7|Gc96Pg4-gRNSwSf>b`Rxw>d`mF3cpO{j_|)5
zDrFXeY)L2ZfFiq91DrdY{$j}U7-4oPT(f*@$AC32mreh1H&1i=eWjnGgz4mbpD6&~
z6(gSGNd4?Q<TJuW%FXnmxK8<$y<VU^wztg|uLFP-<Pzt)nM<->>n8jDnn@i5EPh;Z
zPGdhCM`B!0bn(mT7Htu>Lb5|MPGgY^Byg^uXBu4q=RYr%avgSbvt(5x{b?S1n#iyu
zKL)MdwdX=ebx18$ijkx?8TxAvjaNK3Y3-<(u0nild%H_}olvdE;EOp|Cu<~*J+bR<
zP`urVO0{O3+I0mtb5i+8$uUI$080ec00w`u3HQ}Xe)5;*nSY05>qu6+vk0!>0Kg|j
z9P?=YI=}W3f9I*xYwd<#iqlB_0Kiln?^@;M0_+^(blKAy7|_xLSbvrxvHy;@N_jEn
zvH>)qpO~23vYZHaiPTWhXvU{%j-sxGk9ZwVblt}(^l&P?UpY}5gxp&q=4bKJ<+}c}
zFCL6*#mE1NIjQa`q+3M@(PwLCPpa>Y_t~P>G@;hF{dqu|btw#7|3;2_p!<&n3QAIk
zUQQH5t^nu&vi}|Saagd;yaUYrYZxqWz6vqSE_KZCMs7vW39W@I+{DoAG7h8u6p2Yk
zhKG(#?BiTMnXdcDj)Y3V(s?hNfnp`QjLAJuhn4OX&3W$i^kj!6DhDUTKvR@_{0VRS
z4!K8+_1u%RS;X%tmu(JH^Xswge&&;=WEe3VxmJ_%f60L7+s}Xpj5PNIm?0SX;OCuK
z*0wj~U3?Nl_t|dYjG5}ek?Tz#!(USKL$L6x3rFHZ-NJSi#WR9fMO~_F#L8+Uqw99R
zHoK7<aTS20p=3JYdv(;7Ck2=SH2!1*4n5Y%bbxyv>9PjGm~ZdEK%LIO2io6cCaQ$C
zmSc+^bPBQDPew^+?;``~A4My?D_ReT;cP<2BOU>7LeeAn)*ggRT^{{bRn!USZazNV
z#}}J$2<xpRHQSuN*_%s1ttq9=I})Uc*3C`@%#Jjto;a$g&X&K8)A&UHfx(M5_oe{z
z6|*h<bnwN=niRgnG`U;7K$;69Ap|*c<-jx9kIXs3e(gU7a-V)Fwj)3`(cin30iXZx
zP0T>V^0W1>r(!H)PQNn0RHXdU;mIVoN`Hk0NB;Al|7Z9^KLj%xXH!K^0PtT>i|kUp
z!=<;ofwy3F%DW`cCFf+y3$qJ6QrpVgs#YVEI!edp=YwSgjKz;&s&RoSWIJ#b1clBK
ziI0B-Zu3B|S4XE!^{cKu_-obVhQcEJ<-Q(Mj^Pcqh4p+fxRPP-smJ@il!p6RC3423
zne)l;05F@P4lrcfZ+7ctRV#NYv-y8=x;t^kch#NOUzPX5eqVGT1)k``;c_sT-JadU
zq3kLHra@=|TsTjqDeow66LMcNio1tCU>aubS!0gJS+<Z>kdJiJNaD>Lr{{Ft?(P~7
z3c>K!>2vH#;NcHDhR9{>aO@goSM$JuOEm%{!Jq+Wtj7kDFg80q_+^ErD=E_|71^`0
z5+Y(Ep<;)PUZoBQMfUZzMfsmMx1TV(+e*qz&x@M)t|*KYC1QO=(s$TJV;kdxX8iYT
z{<%3LQ|trDqO~U)Bx}?6LJVljw}bAXQ3;CR!<?$FzQdQu4@SmSoosIVAv<;6fKj+3
zw$$=%&Kf!|hDYFwVWnMVMf>?0AMt>AKT6<mHE7^&#ZLI7S2S>W6{5Ze3&j_B_ZMDf
zW!1vYPwG26Uc4>x(&>*=4X2JDy}k11H2*v1pK$u*zpR;m-fukh@%__t$1O{4xADL0
z)b_N!@bt~Y4TbAV2I3QAHa`;UPgg@^2>~ZLnv&!{d5*tsw<8nO_Rrk}XxIKzpop%!
z+Wqi9=F5iQ3hdvV^u1<rh&EPFGgsoZXXRx=r8gO)xSFw6?WY`VlFzrG!L|B0k92Rv
z-4)|0_y?B(S7MQAUtXQ50;nf+P{(K7kAIFb<ewYmIo)}^ZOY3?EP6>!>^Gbfbxu}s
zia0LKiFO`BVlR6oB?sH;;a<;{&`K?he1uKbg|@l<os+RF?X!qSJzAq5yWbo3B%gI<
z0OhnUcNSV$r_dHY;&y}E%DnxX3|Z>~IzwEXx;UIOA>9g<efqoQZ_gcMwS552kG*Hl
zfk}Qc<bS=nf4Z#C{1j`>IMD%B<Rc~}s@=Nq?uRFh)(f06z3|Y61Kq*J$KEjyRyJzt
z;Yfgonm2^BI#B&B`@AB<<?#0T+~-cMr?fvkZ)M8;SVA0;oI4y3_7IU7#^15}Zs-Ry
zL{p@#(CC(LLpstFY#cIW`xB`=4w)4*zaZ&XI+DD&Ma<&xau$||8K<i}JeP(GI6UnN
z3q7dRDCwt=|EgW+!zO>n=vQvbqhv_BEF%c?vLSc{8rYOq;9_TtBVV>%pL{?y79d_u
z=y%8~N>}`_;JYTBVuBVjumjFPFFLV+&h_bEOBvzLx22V<dHC0LO+TW>x6fTMNM1K6
zLXY})O?!B_`&B8}AOAwk8q`r3_yS8GPt8zJcDwF#{#b>(I_~xjr&cCqltRzr%Dif0
zBJLE3Z*5p@S!txqe{OR-N!H)dz-zi+d%I7YQf7eTE*K`xT*}>u$Bmi{&#%9X`)oJ;
zyyW1p7%thpmtz7k_!}C!E<LcD*9-7S(~*U=6N_YcuDzBvYtge&<$svA+|M8k7ab0C
zc>GDImRah3SHyTF<(|2~uUvqFfy4wob|#Pqx}0s;sd8&-*{0vNWH@3a%i#v69J$*e
z;5V>^#bf?TgSBKCm+NQcKP>BUv^qs)FD;C?n@iiA?qRP%s|cMMw+nyiqTsH4r6J*D
zH{Vob4d`$dGJz^qXX#%fPY6B3W{qBNx(;W0joeVXYjpJfLrplWi{0p+6c^_FTI>}&
z4xd--neR*8YvJVuV{HQ;gC(vx2te8s#GDs>ojd0ExjMq)#}K#;LZY19sW+m-rDvW6
zz!FaDr6(k{b?(j*>y+juo5N{TB$Bpn6-w}bRrwbA@n`;MzS_w+yR7(?TCbCq0hjA>
ziD}y2ro!WVol=fkb~k0to%|&+2YtmsLi5|Nttn*18fB&}mvcdj_@Sdy>Gk(^*K%y=
z((Cbli=R)&Ae!HlKUCmfWqggCBFtz-@*h+5Emstc?`6B-owH^vdRhe+p?>Yu!uc3@
zc#7A#zSo6w9?kjzKeN4G75cd{fq+y7!P*)+Txiiyn2^CaxsF@qZ$KTFa}qj+^K%yC
ziSlzpg{hehep27(ojM1wX5&*ey&89~1<U?(T;ty$a~Nz+m|yuFqz!(*rUOcb<#w7=
zS!#(;0f?04*Dg61<izV#P1_IXim?-=R`o~@piV1ID0J1(^z@@w?4N%RFQr*JccwX?
zEOpFLe@IZ%OBsSPM!xY6Sgu&^*XdqvzL9^o{_cX>C4u`E7w;ThV0x+5(82lpTm>2`
z3hBT&{aBBo#2l0rd;SDCU--`WvXuO-Q6nmcOn`UCSUv6zZ};g@DpQEHR|4%x!3_FC
z0;6T?!p~AcpLyV*aL+;5?UaeLQ>h^;`b%tcCvqO&*p#byQ{2ow(~XQab()zi96dNA
z{7{M7e#P0?(78m$aNh$8AlJ@+j_0&(r`X;6pp(*(UmKKXRqg8fJ1B|p?9-PhW=i%O
zM~WJVb9Qol8(rH-4K3wYnR>;De_&Klll0GV9C}1W2QgNjeS_rikuDq8-|lCu{3;=y
zQz}PBzn^cFS8GvktI&!B<fQ&jpme#%d>c;x26+u#eLZa}uUomoX*kAtH$!Kl9V0DT
z#a5Cvv+O1Qq<G%-sa=byl={^JLwR5Y-h9P5nfowkHea1HZZw$lFZu)1q9c}@H5fL~
z@GXM5?Pj8@2O#v*o?y|6L6?o+OjOI`mYp(dcF|ebo}OcizEQa_XDVu2m#%w?{SFM)
ziwmcq>*{dZ&2_5_%?alAHm-sSH|&pdhO6S#lX5xxy3BOrZ+(Of(OMZUBa3#rK<hC*
z1~?;g58MJV?FQpw$~2Dxd!8Zb6^=IdrC{&CcHp|z!i>3`ru7;L=uW{FO7oaU)tt9u
z?~#t+gm#n$RZz~mJz+4uU$wTNX!qyhBdJoBzY5-KQtQXT`d3jU7Xv!V+DH9wa7qYD
zdeV9yk^1a&#oe;Q<3jp@;_@i{9*2sXY$F==E=Fu>X5pjB^X>vD4|Q>2-c!XqCy{OW
z+ZMAsEoXQoO1z`mYn|&0A3aKHpUrj<`i3%7*Dsih4)guJYr#glJY#t+E5_~i(wr8%
zH)u2>W84=pZZA*(o`qWYkD5xWm*SeNuhgG0_t0anFXqC9%AIYC=l|DaNRJ5O)<oFo
zrR=05jHNX_%a`?@tyxE>gF4UHw3|1~_dcRJ9+;?iVDjB<g2OG&z`K7Txvj?Cb3ClP
z2WRS(PSP=BG!}CUhQA6;K0y?$8@~;N^%C&e+uNO#P*1z!+AnG@t>Fx0Klci++`qmS
zsEb%)=kKH(p44^^DAs$WEN*!XF$SL;@JFzJ32ExRW8_nq90ON3lY&6yD)C@qnr@HL
zAC}WzIz`0pi;W#+YxTHZV^CP{V~p+0O+w{hmvb*HtNkN!)kZQOE%Niu7h`iaHN7+K
zVF;auf@+mGZp|}#Dr_5FeyEp41=+!hSB(mB$9@O#-H(iDi^)sSy%d`V`Pe=?k~^2S
zJycGb$l2Bl^xfXxZVfCi7i2i6?p&RrUYhTdW$C7sXk9Ab4qq*@{eH^UyYtY&`H}kv
z#**&qI9OgL=<76s1>^FWGM93@%XG8{f$RH+7gT)gFJ$wTQy?U)_w56<xCd$!&<@s{
z+{II}SZjpb-O1**#RS~=xHe@Dd>7Y7w+%Zy=>L{6Gw_5{pz!aK5JqmEV47WmC`VsF
zEe;RVO2;kb@A#G9X!^CJ&x{Ls11I0^d)!BwfjVyrw+_RR=aYLy3l1)mUmqSh@_*o;
z4erM1He0Wc!ZW>YB}>@(zVO{@Tz`N6JSi<VaDDW~kt4dq>ij@+;jQhsjZS+!Bj6W&
zz6cD`i6V4Twvve!>*gd>#x~#-$jfUjLCIRY#abM_yHbg1P8QP%?0TH#Jey{WwNWaF
zoNsm4=lM2jF&sI3H^HpDI>x0{HOY^V{F5Gi!;#ik(XTyaQ<^8OAjr`xRy{8}?6gXc
z^$onrYm}6|DELO?4k770W38*_swB+$Lyeg99jn9xNgCC}dljn%F6fK-Lgp<zB~6E(
z*Q3I*#3d!<cc|g61eRNo=+WNg=i+Z>Y9Orl2ANWu;-OxG84-&7E)cnqir-%%KutA-
z_4RrNGm-d=sFPsMTh#ml{BGgy_5-)CnK=oue_k#hC!(FbH~l4_Y2Jzb7*aMB!Rh2t
zXA&nT<26pqUs}pU4x5fpsp>jsyxY!bN2aYz*;pm^-|vo!j=>jV=a+`Yi7!Ug!D~Js
z+)oT*gPpkgz^q}tGPRm-dlZL~05m6!Zog>QDNylQpkVc=#yD7{fUK)p8JZp9VzT5x
z_XnnE#8zExa92FJw|p>0?n0jO#R6rnZ~iVskI^T>dPd()4&Q{q*b5*!ISx!91HZuS
z&jPM9SjPe^Ps_z{DbF`0|D(C{jB0Az_PAc_Q7I}a7>XX62NhJL#(;{VND-9|Az}cf
zi;z$PM3G{lDAKFaA_Rz(fCNbdlok*WLZlNqp(7z7A>nPn^RDNP_ul6>Mn=ZYhn=zZ
zT653&|INAgn(BPeUB}IIR@zp_tFJ)G0^^GW2L`5CSr(4(QA$A#BP@zzUhKETlHQ#u
z>bOlsj9w8dlt>HplE|kFR9d}3m3+IOw<a>oHKVntBD7oBMvELv6KT3kH?{od#%_;;
z$Nj$U*$qP@0PT_0HFft~2j+5PjcI<reO-a?JGgaO%U?G_Vt-gAK@;$w42pap7rd6_
z^J!QUKVYdTsk;glmHM6>YI*_2GU2e{%marC2t|(lr+~LQZ#uBd%rT@rp|4#S1FY|%
z-V*ukFb4tZNNWDF2#B}8#?YOoy{_%><KZjgUDWdZe-G&DpEVH+?Ay9%ZPV#E(sOoY
zskoeU$__-wA|Wrnk(nItVU;jC8F!<ukRhB~g9NmB0Y2;K3*8#(bHdDx6ROYlq{<xi
z<?_*M?L5>^Mtj~Vvt9ZQvcer8ep7ulkI)`cjx=5f@Keptwp|{=DpTz-ZuBNpDvhN^
zp-qK_L*Gb_zU-RVB42W0wfCqtQJVx^)YvO`PGG3k>^m&~ptksse@OsK9e#vrn<&Mf
z6w01nd$!-lMQ>?j?b&M;N^0g{?}-hcGcQQ|b3f)jJ;i3C`;4bcbYNDo>`D}KExV3m
zVl**GZ?F&_v{v?gt@w3yo>5{&t%xUhVMa>{N&I)=8y8b_<kMl#BL_brEj_r+qk+=h
zvR!!wd%~$bt2N*KKfoVpcSaCN`-$=n#ZIW=v_}N}c7uy#=iKHuPgJx=<J6O9&xYXI
z6V_hRrVP`#g43I56w~uTSC@@_2DGtxoiuU){DpJfX-TC~zM$c}PWJ$SBmLuz2w7k4
z5;#W*quU7nOI2mhhM@y3AkC_J$o#6v)4RnL=uiarP47F$Y*Lc~hPhYwCO~<*kIiop
z=USo=lo>sz6V_jv&Rd$>vea*t9pbl!$2Nu#zd)6pyJ#Whk`P1^0gK-aYnQS~>a(e-
z7~5xDd$0?6r%e0!M`FthyFUDPycWm4`vNoy)wMXwf$xL$S)Zw3G+Ftj2K4OKXmPcG
zFu1$)L?L*rI(gk8((kMtn|)!*0rx$YlSrhcQFIF4doi+`;*Vwmwx@$-s(g<qXy~}k
z76G5B<VfMbYyXN5qloPrT{FH=8DL+-@lOE(4O)R9AP)GJLBPf{(n+_1_bZ*0Et$Mg
zsHOrsB}yDaP?OMj!|+lo<4ze8LOuwmP&M_OxfVNfYGygNf4I~6AZ=<^if`vCQ`ywG
zu-sPJE9V_1+SBi#3-Ve&*YJT}T#vX7OP$YDxA9tkBs?Mf_Ox(XoW97@Xe;4AK6MT_
z2#a`K!SSmCi(*kpdW}TtXJ4u2sYB36hg*>uaX-bmXf2`qxE2Y_440csskrYoH?9q|
z$pIcG0s`7eZ#YYRc4zU$fv-(MDXyPxC-#i|DMTCtwJCK5zVfUgo7T{#?&cv4wcma`
z?;T?u7~Fo>gnHD4wW8_IrC#%2W!;ikV;Bzv<SkgaU1SlYguUfkf=**bvzuNsCxXRh
zGHy6b_p@EZa~^zJ-SP;;+d5KTZR*vL`0at4e3HW)096L$;-HA)+xjd0HqP=fnI!Ib
zh`0sR{ON1<EIHCBLeVhae6R8o^@i9bRTar4!8u6>22Z<tQYqnIZoDu?>~g3{)b=&)
z-Q0>lfp?Z+|F||wmO8X?yjkY?fhL&*0YfdClbZdVSRI&Ge-xR#ypxn3dbw|k?kUsb
zFsg>6PJU*o{W!FeX+{~3$X~A4XC4_AtfmIhochD!V^k{O%N8_FNM6ra0A?D>>FaV@
zi?5~@=2rx+iL^e)Vbx|Q?=*0FA9zxbR^<T~sq?zABc5-T7jAS9NGCrzv_0zQR{HV&
zYdG`Hf$J3nnf7~>d1H8jy$+@;PoCaCK+@UPA@&b@3a2@Jh<0l@@_@>JIk_v#?;T&6
zBX@$jXU0&ptcG<Xa2~;{pHwdBbgFMvmi*STF1qei;A|QlMMCxyWy}v$jQE0-Dg|XQ
z%t|h)%?a<bnwQi|=2|AV2t-KxAvo4=6DiNztcu9x!_9)<=uOktW@GzZg+>(0RN8&v
zQnPc|n^MI?yYd(b<7!_x6Nm+Fc*z^5b5~7Hg9TUK24!tSdb)1zF7FKlv_Rl~BR2|Y
zpL6n2P|bNCCFS(!S`k6tKiAk3THn00aI|zfI>1K84_Yds*O$~#IYRb25e>BUMmP4;
zU66JZSiwp?-%P}DKg2HIC9fz2d7LJl|Fm0<pZ6Q0+NHPg6i)Eb&cEhEcrrF+H>&PG
zIBiupv}G<sYKRST*hRI?%`2vLuEP6^96I^Gg>S+Kpx#Vg5sS8!S;~V2JIXy#zV$b2
z+IgU~?_vVB9T-q)uT=b{3V|S;Zg;Zn*q!dn88JN<Dyau0f<p)ntG8@ooF1Js()h8j
zHJZ_ADPmpf{Ka1=IQF#L(oe^O{XYhzvOz{M_wTa-=N^608J#gbn5Y+%$>pP1gj;sc
z{fHT~CjW;gG&?lRr>L(5d%%yZ$!|TSj2YOi99Zrw{o|o?)%BHh(s{L%su5l_HSH)D
zPA+xnTGl+8zu1kv(LL61Z1*4)CSRGsE9f2555?`8=Y@#FPN9k^d$XIiAE%z#4cp5W
z1^>h!YVw1MpP2Q|Gnts1vZ4gqvBsvynd^3E@qOR~_@m4bvCj};i{?ib;p{d<I>uLi
zjL_X)e(Uc@`gKn>V&<Oi)~1I>`yAV^HXlaVZdde4EE<QA*NWG9lMHNmQ{+qtxu?gH
z)9>I{4*ZM$b1?tO-1W$>K{Q5-8=Jjg&15nOYpggT*Bhl`Qdl<6z~qxYDPT=a(Gv=&
z(1wdc$~QZde^wot9|i4*iE|SwDdxF;Xtr@X4Spy%hs$iZ{`G~#Bg(;0tJ%BknkFAf
zx{?fCnd@#1@MrgoS^f{0eyrP{!n}9%JdHK4uUq>8&80$LDMjnct3E?LL>A+N%}0h^
zKfD_q@-M}CNN#k@{tWb)jM=Vx(&Law2DRo|U4f6HW${KLj)ecHMbRT^Tg%@Q2>=?_
z`c(R%r~N10DKj0}HlcW$d1fIEo7rF(8SvK1EU#Q;Iu>Em!T2%LARhV+u>qTJB&_$k
zqRj}TMbe4;wlfo)o*T5OFEPWub>0@Kw35y{11Y?&#ScDpRiATRklj_zv+YIF9wU%@
zLlZgBy*tj}roD+|o@;Kijf23tsFRfx38;_3HVQ{zS_&UF4Ui;9xh=uY1}gfN$a(Ye
zdPn1@>KIvV)NE+B8hEde=^PIHe(J)e4g}RBYmY|M(FIVsW|YB1z-?xb&xro;FF~TS
zAXaZYfZo~BnG41J<JqH!9BbmPsY;><ZDysSP?SGx{M$x~WIZlmMZ7A-S}PT92dB%w
zxC#~0^`(X-Hc!$U<aR_xs>?T}hU_2T0dkF*@{N0`M(XYMOKGs9VWqsz_q}q`5EVv-
z1uLh1!*h%F<3a4?r+>hpl*WZQ7nT`8YYDKC)t(a4+6pm8+=bZ8b#*zKc~vZg!PZs<
z&&-8%i!P7NGq&^(7@9vejkJQ^^pN1&q~h^IQ@NEE3XiVA@!IH`B>S&vcRJykyma+U
z&I2xQc04iJeK+&%{U3^POBHXy-~jd<@mtDzQ}r!d)?L9^L@J})al%jD=t4NuRqYmu
z+NqV5Sv;8hNxl&Q>2}B80rZG@r2-%l%@hL_K4XH5W-TBtMIBCqG4sYz)1?wcW1b-1
z<uQIc<SB>|A9TdyokHynsHy%TltGa>;o4y$uY^Pw0N&R?ui#28?QFQp;F<yrYBlqs
z!FbjS@ow%F>WC|cbFWXi1qi#of^%~+fdviM?a3YbIf6|r&4w{gS0hHN!Lkp&xX=Pa
znl;47<}gR2zWt=t5v}fcHIGlqg-Y{}X;@T{I07(%A@O~hHPs}=J%l`q+<h?8hh>E8
zCawa4B-2XOPk(&Oh?I^8YU2|qM$54W1+G4a9~^CkeX+@l0v<wy=OLf{tzR-lX;T{8
zX;Tz8_A7vz8YXz;f$mu^#yU`om;*pv>0(AyTsq<|j2+qd%zzr$k8pZ28G2(+14tW!
z0@&pJcH}%lBAezk!7}X7?D1=Rdze<q(+CkU|LsgMRslWDbT1mc(E(Oe1sJR7hFk;|
zivJ++9}#~ecPJpQNU1tERdl>UtW_Ix!<l`XPY-Na1rX%d;oZIepAVm$<}9F@1Og!u
zq9Md+A%b>pQ9U+b%>C&|n|srQ*5<eh1DE|+dN=IJ>7)=GX;KTAQJD;m0(uHHEpm{t
z3u4?IE1qiy9V=FEAd22+Jn0oHg_JXTrq|7RJ!_MabaH~=m~&1K9?9$(czP7uaNPux
zQ}N>6%U0FDi8}QYTd3}w39xl)*v$=uUK9m>hWZH<1k7IVj6>O-6R|rdu>{^&hKGXd
zgV|e6{T+`7r2#xnbV2%N%ir6E37nl?Fd)4CA<jV=!j&ecl#ftN)=toEbh(iCh2Pn<
zsk7mfcl}R6pz*X)B5(5^6#ZAV>0PqP9+-!DN}L?Q<-DC|k8c|I@jQZ}9GUjCE>)=c
z?sA->Zzm>7;zqQwJM#M*+hxjt7U-nrF*1mN@6Kg}7S+M|Qr_hmXaN-v%i94$XSXfK
z*WLh<>%UOe{a|pUa~}Vqc<+enW_@Yb)A>U`ic~^z-<=<x5ux8XJ_?|y%TEA}(`N3!
zSCvo?s{l3t_^C}`6FEHbK{I{rT!_HE`;LTdC*yZ`o6Na=Anj5$*%OKbnACG@Aw0(O
z0dp{<E@8t&RfX4USLql>gLTZp`j-P~Fnff>eFf82L}flE>!gfqQQGNBh0#idw`$O6
zeUWXZHRFrqwUg&N!jrp{+CHqip!4}};{Lmc2Fj8X+HEQAm*Rd5Aj?db)P0ODRId)D
zb0`S+Jk?Qw+NaZR#9=JH<rKc;@#Nr2@Jzq6ATd>OnpwUm5%@lRodfwe>Asnr<lhO@
zOn}_JY)q**`K;2^3zMceZBrA;f>fJJWsISyc|^J^IUXV#*Y3a~!L@o`NyN@R6-x*5
zrAz)X^9Y@!Ix#3}j)ss`FZY^m^mOn|jD>{Gq?1UEX3fInGxw^glA_pPAV>W=Nox8+
zb)m(pWz@rVp!&7$b47ukHvcZ0O{*!u9?HTY_{K}Z>{*|MmsxOXpeD)rM^WWPo6=6G
z-m|0Z@4j6h>_SP#75{|2CppE=CbC_hX4(B9RS_&$*l~i30Xt9hCQbVF+?*__5L;r#
ziD1u^9dWKl*nf-YIU7OotjYI#5P}Ah$m#u*q;nbcA-Zav<E6&As_;m~O)Cd9(`!Rj
zAPa9pd-+Z=rOITc*?$3Hw6v}B(S3Kd{PwmAX&SZ%>x;9D!5b`IXA!`E^h__UCRksE
zL|A`AE#VHK598ZA?F6SWB~0+doPwgsac-$*MrGz|FV{N^uEYl$jtHDhJ%YI+J`AkO
z3gqL$@9VNz%ZknE3(GN8o!zNu>NfpeA-bTcadDnu*O~XO@q1zs?tkU1-3v}!HrZLL
zIQeE#R-C@WW$Mk|M}3t&m(b{h!o`mnou1k#kB?{U>r_pC1F9?Eulxck8Sn;BZCe)p
zpQh#Gm{W)S@!O<3i}nlW?Lv8Lp5<98!?KqJd$nW5y!nw&H2yMSl*6%*M*@;TLbR!*
zK*4akMClZJKvHQso#1v?wSd?ha#)O1xJ#c8kaT(;&!<r9n5y_8aLaN@XVS_(`@Cp2
z>f!M3wsM{icJoN%o=?yCU{6P57ZgoHp^_SYG3QH~@2ZV0)o%ihL`RdZWHnXg)Yb^U
zR)tR^YKxcth&xT9^%}Wz+SOaM6_KW1wT{*gf-e#^6kWq&ZleWU{gO8_fRCSEV!a3g
zlFR6E+7$U<_}`Tz!S2<QNfe!OG3TKe<sh6}@2WlTEQxKe`7_do90K{HMW4^>?KP6J
zXW=`H^pT%!k;dCAM*VB;$+^L`5rwzu|6x<DKxd-2MC-06!n5ATGBo<$j*Y3MkZt~e
z#g(WTG0(Ur<ik0%N|KdIFtTkxO9I<qxKbqXDY3}90vhR)cTZ1cM2@@<W*cVAo~zRT
z-B?@cG$)qsxE#p!w90Mbgg;HWT|{qI!H@gz%2Asft}=JiYv8!%;knWm+V1ovjV7zW
z<KB~4Upc6HxROY&*|N&CqMgi2%7|H=TrAeY5PA10mup-rH^Us%t~1}rM19^0G^f7o
z6oh*e&2+jDE2&I%SN9!{6EHjO=f6}<^>A{pTN2SEO-|LJ`UVE3c*8LE^6+U~F`+UT
zr|H5S^;aVi*jnf=u-@eZWj^Tzqb*~KwoS4J0A1eI?K2!hTwCNqS_>1yjPh7jP7w2X
z_M;*9aEk4UOrXI4EGm^^?$1+X)z`}10Opr3qIeVOL<QHY90n{ozL{KuN`=@sKJfb%
zdc^eP82Bu0>Vsux@GUC5Oco`|*7A~SUv&Ix|I=6aQ&czagC}})*ESJzFq=rY%~E$C
z{n&qYcJlaKb9^qN!wZ9s%{^wj9)=hiD$odP5b;8FLb-Bdu9R5_hs<QQ);gYxqV2+!
z=Z4X|)$R9Q`@%(CfsgXj)mt_L)6Sxl!jR!Yc1^0BiPCQCn?<8v2MyG73-^e2Lq6uX
z$8IQ;$J>B*)j^$mVbZ3tYWXedC?QX$TB@HIAeKE0Jc`%+yC$xk8cy~&L&IV%)Etu!
zM`nTVt*vIakn}m6w7DdD+gQ(q=>)<v2nLY1Rh|u!lXI8CDev)_`5xB_3NX)B8D_PP
z79doYa17%|BxyX-F3(zY4D#{yKL@}ElWs6B1BzRJ&H1-AO{P2NstP-?tRnbeqrXYp
zW9e_Fa&zvr=#&br)dNhrY|5oEm`sH=3KDxNZ@}JbDX+y?YrNb+(A^wwBtlxj7jN0J
zwP1Z;0GX2|5B0c^_`>dsEIF|TvvMtpMRtFju6j#ooeTy@czZS9M;tuAa-x==h2Mn|
zC;)$p`u7YOJ&X;;?Q)$-#GPbYa}{?dVl7ji#PZj54j|5$G@M=kfPYy75MeT_Zw28@
zf0Ip46<uEh%$fcBO3>)`e&q)P3Gk^o_5Vw?@^1w+Cr!1Q+PYp6DP2jwH1ie3_1~x1
z8j()|l3%~1^5ABb#DB9TbtY*2BvbxhD)eD85KZ>{svB)#Q8t(p2!dNaAO8P)xT*=~
WSh`dh9#&c(sm@h{D+QPD2L2cS71Z(o

literal 0
HcmV?d00001

diff --git a/tests/benchmark/assets/milvus-nightly-performance-new-jenkins.png b/tests/benchmark/assets/milvus-nightly-performance-new-jenkins.png
new file mode 100644
index 0000000000000000000000000000000000000000..f03b24e647ee5c863aadcf4abf78e0ffd89712a9
GIT binary patch
literal 45034
zcmeFZcUY5I_cn^%0TmfUlq#s8qM!nTv?wAdEdojefmo>m(tDyJ(xmqqq>7YC2Pqj)
znh2pIHA3h}OMnp4&JH^7_|9);-uI91ob!F>I=Q?qjd_x1uf5k^`(F3D*W-UfTWv2Z
z4=W1`%iinPRCHNbb_=txY;oGX6a0^iYriK8%Mq6ADwl7%CVm^vD<WtnPA=0>=Zh*&
z@SQrf&*#ePCnpZ<^EoVeEBCggmGc)F(;b$2CAaJhIDG;G0s;dn^@nzaFF8Mb;b>1H
z>2VQ<M2}4=RCH>QB$AM#;10jPp1!2Ycm4byEOdbbU?Bf`J#Dz*_pdkLA3Rrtw*K_q
ze2>rWpWddNJ0N@l#^Wou&}-P%z_XX{qISXF$=3um4^@r5NwZi8?xmq>h*BPnXlu&I
zL<FhOonQOQpALqsNEt62$XramoZr=uN1s;{OEM&1{o{v!v+3m)rs`|9uPVQgo5Fph
z2VL_zGi-(|E4XlPZDXolJ_rr*R^I-{7tD9~?9MwV{HoZ}^+FZHXSe@a1UKE;J1%&}
z3guExa{JlFOJXNfPC!4h8)=Cw%yBLoq~j52TAH<Fjy8dvo0T_t588>o_Zu8`@U!%r
z1AAQ!7lH{PcUsZ<*AL~T!m-B{+wzq{{_Wlot3JEcx6PAkQpd5K46;R@&K1t(W*blG
zFoY#S7qdV{E|VC>B1pqChhn2+6tCr*6fE5lQafmV&huUB)}JS|ot2d%UrL5U&`QrO
z71twTvu3oI?Lh~YjlZ7so}J@<o=e>}(_?ioxq3htR?Ne7RWL9q`Y0>MA2X3+eL&dA
z!ctfXl6MM)f90Z%7(6))gRwl})AA+-+Z68~9VS9c37;T~#*85Qq~5eoQ$g%A!y%ub
zycID~oRh4b5xxI5TXQE=$~!fMl=GjQ%hgPQ3TURlZtMGn`m*h9fk4=7f};fTO>7Hl
zNbZ5RE0@V;JLCEFPG;@j^2dlYQ}&OlANAR7kka2Hwu}0WH2hdhrRllFgqd&4H3R$h
zA*b;@3VF|0Ce3yR4ic~L%Rt3_W8}WQ^t0m~x-3)|CBn+liSOA?Nopoi<PG)xo+RFc
z$=|*zXFb$8cY0rdcic{_zScFGZijD{bs`EGhDq5k1aoKQT36!_$(YB{6X;zSDs~p>
zVMZxqq$254$VbREv;XD~Lu8F1c!$;nF`i$fu%_NdE1>BzslY6n@(UG;o!fat`mL2Q
z(T6qBoX})aQIM!1=DqEeDYz9w2FoxJQsxntXe5;u6lVGCj(dJ}nRQPYb>L+(xA1tg
z4qKUFx6TNi=FMnp_}f?SYwNSUa@Vw=kkVk~7h)xB9AkM^1fL0;1IQVp#dC&!OPQZB
zUeZ^@PeP$`^~kzdh@12kfxG|*{xe+B&|<95ZnyNF2;uFGb}NF6f|0$8g7jy!e$;>U
zCt-j=zR)XvLGhP<ap0ZZ34T~7!;n+lQDqu6uB9$*DQe)?%U$UMJFd^%Ga`Egi{}W8
zvzpc;U1B`QHSoucNv0n<&Db*s1iZ+I^!z^F4BzI7L>HYt?7{Vf^cXH{5w$zq>RUC1
zkWVj;v!C^)ZkY^){KiMv6?=uw!bv#R?(UiFvNzXVYn#t*n5EXosj%pYW?Qq=D=ow(
zuTv`J28wlqIfkWncfOpfp<Z^_(S>~*6r7Yi2z5G?aPt0^OKN<7`b~!>!Mpp&+ccY@
zken*^N!E?R=N{XDlk1uZE3r-Xh*Gl<bGOb^pHgYM(z<Kn7Mm&SDdfAGzSk@K?xy!l
z&{&ZcLUkFjkKR0b*pak*db`}%UyPf156y$-v$FPl&<l#bn!tOpqGOWeDF5YWleGyl
zgzmok-#$4ZFWBbxBt&$mvT=+kF$OXH#YV?Nze$O6bL`<}l@KJA9TZLslvFiDols#J
zxcYaNysG%jsL*d^7{L8Pud2<HC@rOTpgB1_u441*j2g)H_wpf00#ezU@e-?R+vaC#
z<dzp?<LQM;YQsd^ZzRSF6%7k}qHxAVjog?wX|r0Snll~L_b{<oAZkr8uRG94&yVGh
z%HQ6+z>$t6D8DeuTAgkcoi%g84TpWJnoTEftkA{wjwCmsJ?O18Iw}cs?DzoGM0Vls
z+KBp25Mf+-`R#sR<{wM)`G*fQbdn*g96WJ;Atl)g)r%Iq$R~382ZgVxGY^5rmEwK4
z#|KWTuq<8p+qr+0K|~%9mO9IUQWFdM1d$vOBab^iKBE$vt`LneHXqv;Nb^n3Y)N`P
z?tMwukL9bi<Hs$38pIDrJftzjI4z;VBG=H#%hA5~AFl(e^#0@j`Ij9FDdq{0vd<c*
z&xG!Lr}Y*;)MGww$<fZSYv3xK<$q37!)-Z++RxFN<u72uALjwfe}OSBWsQI8RJk60
zO494pQ>X76_IL02DMCCjqci9H7`wEftbJlwZ`ROCKa2BUMiS<*gEPo!`?pO0s$r{g
zN%W5(^CycMJE=N;EROyeGQugGe}8L>&0B9S_b>k=ZvFm~1yaBbS&Z+`1c6BLJ*55q
z(NCki9S__+QY`2Szl+rwvj;&tgZZ+)>?Ga@5%*Ovw4A<)q?0dnpSRo?+n`bt`Z3w?
zqha5j7|*V#6(jF6Q)w1DS8SJqq;#lPH#vpas4zjujIF|mC<%FX<P(j2c(DWPsg>X5
zzB0wPOOo7=V#M={+K#VTChVAhn7Y?3ed66ju8Tnk!B@~ARqsi=O?CI<G5vw@=8H?y
zy9j+XF9{J@uh()a8ZwlI?#hPC7@IrnjzTQME?sFtoeP>2?YB~v{{k6DdzB1ejIO)3
zJ74Vj$ZvUrXU?usGH>m5I5%BpvH(4A|908@h9{Qql~nSX61HD!VoH3dCAlUD!V{B$
zspbn8XZwzIA*Dm8C2zy1jwv*iD@`~G&tb@Qu7&7$_l#0Asouw4?K2r6XR9GR_blkS
z5-RI?u_<#tyPeuF{xsgO4a4_Xy3GUYI+=x+%i8!kY|8%Ec6~m(gDY=&F7DDgDEzR-
z6jCppwmM^fM$d1_TlvLXfeH5TNJ|JJ@s`M4_m^Na{?u{x;6M#t#v68*yT{5XO{gte
zQ=&ch<Wl&*9VPS?%FV-5hsS*u0~eHi*GEr;kA0}xP~2#{5HA=TYO#%Mr9-1i>G@q^
zlt?zgb7rR+9Vz~*VHg?dvxsn1anc?EIO9XyK4{MJp2d*K&4o8SqjJciXQ{r2!2}a9
zxuXEKN_cTxbKJzOQVW^;YY$sEq1zko3Mp5uBHFBVX~J>!0jblbZ0tl00oA~?I(dkL
zIz9EWF)PPg_ejXK9*M12Dk9<z7Ik>Qx3}|;qaUpjBX+dtLhh!|*ynf)p4f9`2mSJ3
zkLHMZ-6b2rP)x6M5~`bV*hoCjMos<;C-{Q&0EU{rI;~K1w&Q-an7zl4#l@3(3$li@
zBSi<u7RH)S_lXz^D%V?Np4Q;<-Ido+&r(q4Pe!a+kE0c}#;4AQ$#st%8sYEqPrar?
zm2F&s%fA4tia>lWMVTKWd*}@>M`+fjmba-9lb-A49t$3~&8#0vtVEneK5|D&ePV9G
z8_9lx<?)(tH8vngi7mqXB#(JP>jVR*aAf(7FH0r2=BT`1F!`s;^P5(RhxjK1%i`gA
z;b#r6Vv;r76Nk}D>0s^4z}oL5el4}{y(y%8&~jO_S;?rznSUy)XTrr^X1NU8uJB{d
zbn|7GxRN_tN)mQSirn1@0G?oqAS`tvWWBT3pvly6hADd}#u-f1>Gc_+cJVOOVi$$!
zFWMAtt1{QM*%vqIsfWf_OP5;<KGavVf4$7XIF1cY9MuiN^mE=CM_+X!>Lp)uTJ3nU
zm)2(^gv@)JB$zfY%A-afx#>VXHexiXy0hy;>l|`yxO_d=BkD;eNv-H~E9zg#1u82M
zEee~{Y&)K@pAmNrB8`j7^Pt12G-OOON?)`vYvK`mcn<6|RliAx-l;8A*B6E6vUL-G
zA2*V3bnnf6p?(xmrgx$KgRbA*3gGr69l!Pm<JfW+LWDpYT9(IBAhcxUC^FCT0wk_(
zVmfQlU6<#EU%NcGM3oZKtUcnk=f0<6dxH5CP5)u2SZq=tmx$c2_(fslFhL5kDyln!
zao$6=x&<(?JgVQ2;@=R488E@n9g_5?dN>8B)x@!T>WzaZXS(U;iOfhms_wwKTyP;u
zbR)rP8udNTF;aHPzm}Z6VSJ?8y|u0gc4s_5zdl?xtzNrD#{v#O0jx&9f%4V9Is|P-
zgP%}o;)D;@t1OV+5512UC$V<vKDcO9MOV2dU?D9B|KOr;;kqvGb=xmgbpUP36QaIo
z$n!q_^aZ(O0e(eT71`j~!?UnhGdcd=c()HIiCx=*8XWRM;6~xZKIIphqZ?HBz-O72
z5(o60u7l7>IiWIrj$k%4N51Z)K*A0Mjg@Ne$6cWtc0WcCrp@9=9lH0#Ck_a&u#+Y8
zJLnJGUg1UXXfELwM54>5>{v0{c|zYAC{fQvIErpH6^hY!s@XppLruFkjxKf4-`<}*
zwOcKRdjr{r-gO=XI5Xx6Q{LIF;_0SLgi7PsdQ@g=-fw-1H|MExaZK~sy_k$FJoA=c
z@YdrzwJ0nvq?~gy>%RYCGg(+@^a$jtP#x?h8%Oy49na_X;Uz%+Ri1CMYr<N#%U#K{
za=(yb@~(Tv)&k;XE{iVinEgV57vlE_Br>2Sw8uzvY>JWJ*&fpwCF&lR8DtjEx`mK(
z=G|N3%J07NXDEIWTu9shy0HmmCoz>Jb<MG8fSWP5;ZH5eYNn2hT3116#bl`INtJ0K
zf|*eot?_jGxAQw4X)0#<=58kL13i<qoE9+|_$;;L-LwmvbK;;F7B8mpAW2)a{`!Uo
zr#hp=14K{DiKz~=(q2TW9hjjQG~;vMil;S{=F6^;855P{YZxhdNfSg-1LpCW4}xx}
zn9+g73-*cJIP`r3A!Pw^L^P4nZ(7@0(D|Wf?o2N!xhc4W7lkxU=|2}`&&pAgXXqEI
z9*6G#3iU2IEpr>oOfa!Z!F*=9Jwm$Sk3MxqQ0H?EKNIbRlr5ygWp>$)h^sVtwo0J4
z*-7Je$Q_R>l+qcum8zr;R=YfnX60yeOUQM4^=pPf<}o=-$ukQ_BW$aB$L<L!zrE|E
zRHo*00Qvf-jTb}IGrZt1X7;J;MrD0^rzv}Nwm!vgOM1-_<h;dMNQ{e{_FBw+8&(c?
zKkDJWhl~P+hS3p^MyJBQ@$;(Zx_TieTaTZFxr00L8?pA>@yvN$-2~5~Mx*WvuhO?2
z!Po0wCVXWarS&Z^qfHb_4b^W-@i%4eN<qj;fsg#+X}<WmD6AMJp=E_`p8S1JGBfoX
z@kSsiH+9ceEY007B(^s)$@b-6X6hH&KkE1`Im!6!&e)n*o4X?SLeyt>y$bb4%hH8o
zF<W|NSUC#64pr31kQsxED<BiLqi<`a$-`kS+qCes)&RjM<?SwB6pC@OqeJp(oWX}-
zWe`UT*BR#_$D9ucU90}X)zFh!=3=|z1)Y187JDwlE^Kr#3i-)aS`Ww-E9VrgHFA(3
zo?QXKYH}l_pfqOJu}nFo*w)Xfd8TP<{fxLh_eF}qNq7Z;#S;Cyi5Ge|+P?fs>)R&`
zrX~XAi|V(h`0oT$GXkb&l84Yq?IV~e357(*+~-5D3|Mth@NB1B1wjfL57?t<A4c3=
zjHDX^sn){E5ghRe8>}CuJ>lQKeA(`7j&o!_4Db-0+<u*x_<VN5UGDhZpxb7Ueo;KF
zaw5;@tMwx0{V}!xmlxGH{X!4qJQ&iXVhHlpdVV+L7`}@VViK4cW5pibGb*tP(-vki
zlxFT|)%Pb=u1!d^p|2daKosZVJJP75O29E*Q4f;{+%uir-L+WtYh6KTz4$pSXA<Jk
zRaD2UU5}#Nb81Ou95r2zsvzw}TLiIbKZs3n^auG}r6*Du*3y;Viu=Z>#)XA@$6I^l
zYL?wEcVD8*jg?TxI}D4+h8_^zMAJ{lCk)=oDwr#ZRrflb?GdUNiTNJCdYZ8xuu|=a
zR3WfG`x>DKVVu;8)T<W9S33-|s^qY`$q@jl)%?ah&7Wi-gp^Zh<<zT$psYekc#FR%
zmeSwAnTJE94&D{2OPE5U_Y3WfiNSje-tO-L;TOOIyY~})7j6ItBe8O%tK3<9>%O2P
zr0iUkd+-S{xMs}PF|o?T?}ldupCv~X?iWPPexWmMjGl}j)2O;cd!gr9zV$CNj&J8h
zUpvh${CVGiM1X{#zBAn;5;CMFq#P0k!7Gu4=VgN%2IbL<&3Fyu(=0>xr(nkXsp)hq
zr}c-5{L0E7bLLNfaPR>B&<Ao+-?mHwYy|rdtCHiDq8ENj>%mpJcA&<ZV%~`BDK0@_
zWYUs5A{h}-cWXJNarF>5?SYf=6BHmJ#e;}c9WYf`HjZ@HIW7nNE^pmxy=(0Y^!2S8
zL^VJX4EXG}TOQ$Rv`bbvN#keI0+Hl0v}x1viJ9wJ>r=l_M8TW1`<-77pW)K4c?71$
zngo7q<!tuNZs1*hGVJjF8*=J886+zZbUsv=*A62hE6Z3pOyn4i$Sg~tx|{6W?9JP>
zkd*VGS?b~A^k_wLS`;gXO=&YlOnt@Vr(;f3gH6u6zg1<rKLherHs0$7pWneSOmBA^
z^q~22mrH_xjj!d87(YyZfEYL^b|ttB+Q3if{B7}er3YYv!g{Boetijhz-KN%mRwk+
zRvlrJuuhjWy44Uejd9*jo{JVGyQNpJaRwblmh`wudq<v<dtA~_et&v_){w(BFeC`S
zyLkxe+6pW6Sr{9(HC=i_0h`7*a0+c48Y$2ag_)WzoV}cXSD)(QQW3LAsdpZ-&`-Y0
zuUVqFL7=@E?1@r*&!L!j7u$wzIb_K($<D@cih?_QjYd9sHK#+vXE)h`=8*WzR4_Up
z12TdEa?S<?wy-Nh`9&>t`AFA9ylZ0P-18)l9ZDn!<^BHJuKPL<7Z&ukB8&b#kX!a8
zF4ON}-VSLKu9o7P;2xF}qJ1+%5^D?G!@A$&k{qp3q2J0K5tUNe*>!b~I+~yF^ESne
zj+H|D@u&Xnwnvew)lsOJhlH%Df*}9k32Yi;Kye)XAbEW1S?>T;Dumjz*0$bEem#NH
zzo~?c(E7ARsu~ji9VX~n3DZq=ZBxyN5xDj&<rfJ(N2}&U-`TRs2mFFFs_o9E4q6SC
z54Pm2POGkbyUut*HskWH{S4cv3)qbx5EFh#Fhh<+-7#AYtjYIlza~_7H`O}wy1Mux
zzcU{!AsFr(G^;~@0^*;dD*ctmMMwrr>>?*)R7KtGx0no?TrfEM^$|)mx}?Vt_JtdW
z!XcE98J$4lsaI+BX#5_u_LD*>o4_X2Mcc_K>54kC+vx|npO)A*&nAB2F-XXoc{c4M
zcj)HXv@C(&Xj`$PL@Z$0ckU}W0eu=fBG_8Q%%zR8B{@(#h_jq@XdR77(w$J8p_vJ<
zxVsg1>QdTJPx8e_)A!dFH*$WPKqi<AD#X5n21-0n>zS!_ik@+;*_q5G92sCJf?kOh
zq-)zA5RPnPlm*lgS7mZuZH-~$P*CIxRCkWSwGPwmwHHru3EwPC7C`Ektm}Qbr}fDG
zr|r=~K8c~vX8ILVABZKNm}4XKB^#1o?;l00&|gvBFk*KYCc$5yjhzXl(i!6Hwzv)t
z(v&)`D3D8NnQI*Tik(0l+r^DnYg37m?Ly8A5c<wR3B*fx=0$6sf`#5#6}0lGnN$lg
z@z53D{vPqJADqzNQS+xM&gF^YulO#uT_U=~bZB^;X~;JsnGqWds{x(`1phn{Lrc(V
zxwW-9-xb!4RdFovRP@=MsVJrr`s;iZs+mZxS{H@^Bm=FdNL0_$B%WtXhzW8FKV;?p
zAm7&{is|C@**!sYk#UJ>jIzH9$+<FgkT;TzgHKDqD7N3uy}2=UCgz3GDV5O34Pwis
z{wK?>Gwm4lpXO$c+#Klaa@Irk+N~-RtCWjHDZ{FC!<!NixA+s+1#GiAEAtvDU05VE
zuIbx(fT#xnM(iArho)*42f`@~eL9``t!Ga?3FCn)>No-T2X-PceC4*`EY1DZ%gwXL
z#w{L28L)&b?_h%G2B<i?&trwucI8Z)*g=oVL9P!$3n2{v7|yYA6i&aPA&~vfa~4jw
zw%KMF&)`VDU+I+}JFgUcj=?t_7Dv@!>G_WEBv%(cE#%C4uOg!X!GK~J?p(h#cG<f`
zB8My&^dVNo?u8$zCH5smXgy5AemIS{gQ)z1P<bXWZ-dnv(4TI?h&K~SjSKkU^2qF6
zKD(Rg;fm<VfGxJbiQ14U6-d#;84eB&W&F>;(S*0LL3b$h_KG1GnREMrQ(lXEUcW@3
zw8|ORc7^^FTR?WE)-sme)|!+h-BRxk-RaT6F^;~4`wS&J85esiUIyDShY^b^u2wiF
z*E^YX8KuyD|Hd~Dm#rcY3kN!Vs{H<=b%~?Mc>n-!kIpp5sw7XJ<uxhvw0-PHw`vT<
ztVxBUDl1ER5UFlpD$DX2opVXMKq5M1=th5FZ+mfT7bVRgdh&OU_t)aSQi{eM5qY$n
z1-<!KcU$OqpCV%t!N9h`!+)I@mEZ&GZhL$iK-UXp$}c`y?sQ?}xaqaysbuf${!`Z_
zA2na6J3EuZ*UR_6zS-0F8+Sc8+mB@+W?URgzU&C-(sYZS9k-RPH1bZ9_c;gy`FmRJ
z!#w;+ooF+4Xw8MLvB2Nfd0VzB0WLSv$1n-$TYJ+NQ+=#Zjr<`*O8>ysm@`9MJ8q0z
zctgGO`I2r~7-p^K6ZGwel`2QvC4A52dx@u7bn8bi=QMvM7%WsjGjU`iTlJF&SA>+=
z!L?pP^y33Dh4R%K{R#7tMtSa`Vk6NFK|49~_PriEb}L#$ZimO2#;f#Q4vgh1N6dEu
zx~G*&ttzzts$Ft>(Q#>;&%N}sOHN<%rOt#fD&}^vzwAKH9{{02m2lMid$JF+qNY>d
zq0%}$v%W^2@IOjU=%o7o8}ZTJq~e*YH8JA_6;zD1ekAlfy}Pf|%UKgT=K|M-g1nw)
z=y${IOf9H@UN<O@CQUPF=J~BvuJd(TX640>v4g)+Byr+key%1Y%+<W#D>XbUU#1H}
z`rFkyul2)e^)WkNBS0{SRGZW2NtYa%L7QC61M?@?cl9Z@)Pp4GxSMQcJWqZ2On-z(
zQw==Yvaz~_Y=w*YF-yH*mL{HMmNaVXl3Q>gki><JbBGQX!2CE7iIQO(bBGaj(!rdn
z6vhBR+=_cA8Xs?RU}Zf5kY?yWH0y{>>8K{f%zFV)91fePdavfkDV=j}GqS<p7%kGy
z$@AGA9@$7JW78Vy{J7fQ<yfH?>bd_K)2T?x+iykF-M;MC`1C#ZcXN97I%mcneL^hH
z^-0$L*SB@<ri$qA+eqJkIXv>^_q>1aMtjf0n%>bB^Zn%@vxUVz3Rn|str~OMowjT=
zhvybs23$g@ebR-iW^)U_zC;Fs9f$<ziW_83R>vT2sdvk|yLq|L13Y^sv|<J}mT`k7
zRO=?xVR#ceI1^RqhJ8N%ky3Kzil`>u=o-jARUXDTYNZsAURgE>!p+fpX%n8pwo<t@
zV@_u`=~IR3X#NfK@Fn^{*A2w`O$@zg-TDe*{b)qwcP&6BtBZzK^?kb4S%TCZ{*@;i
zDSDJNj?KNeJ>|jaY7WJ<^!-9?Qj9niLGd&JghACQm1&&B66q&y`QK^opBV5EMRApD
zYk1Ubb%ArWXM3FTi?;vnYe?xU7?5iH&tl#0%=RCo_w+edj&|XH=DnYa7p$nR9P-(1
z&OzSx7aTvR0)F?AOEI_IkGj87-d^nYZ#BZ-Z_6Bg^du<Fr)Usv1in7nxLoY7h1zrX
z@IT-Gr^3Xg2cVkp(<|up&vk4Tsh2;O{aM-%v2r}sZjQ?E4?lLxbmJxq+k{2xvFnkO
znb_^`*#FX&`%_c^p)SRMYW_C0V9Cx66)1~2>@Dp}7+yA>&w7@X#rV=cR9M=NN00bD
zwMjX;k69c2+3xQJh#!TR|2HmsZl7WI25*x$@i@2RD^_d03dleNjQ6<}ceiKLsYL5H
zkGfatSIK0VBBUNhRTx>>{*nJ+qo8}dxC?8S3-(B#1zw-AmKNn6X2q<|$be$gU;Dy?
z(@BgXz?+rbf1b}R+<g(0=51AGt!9)cl|d!H;LH!;lN$O^mxZ=Ig?IeZZvX<TNBKpV
z{$98dO+w{bF{)~*jQ6vnCY)0Bp&ARyM8Bb@{kHv7ivDT3&qcQ}{>4uZ^@)xZU2CM7
zP}BxZ^^?X0#SdKYwC<5(>|8^Ou}$*a4A}X5Z55Vt?3I28dMaGN42A*~v!}3s93n9D
zvPBAQ1QtoHn3>EfQQfQcQM7WoM`a_Me4X}wicybaxWrK{O77k@G+8@R;a*>7>%(V|
zQomFizzWJv>s<e#!ur=z6C-6<6+-AuvDaS;O{%<uWtrvk|G7Gc9?USk8x|k$hlS>R
znv%sQh2xynNj+RFd}>`^tJ8STsJN1-5kR?s_2@&U0m$@*;D*2APD&$msV+R4cv10u
zC0!cmVMdhysoER@6cDWkgvS9|{j_~!ymq_0&m*P;q5U0|SoC%B_RQ$)>cKe=beZ4J
zTpvqS;C~u8&q<Z0t*!Yc7r$ccp=E&hV#|y|@rxgQ)O^{YFHCZ&n6)zjb@;RmR`g;d
z!nBZ4^p+{!8BzUDgVj19oZgu8%bI+|6v?GHcmJqtcE0nw0LOvyj6MUt1G^;mCg;V6
zZe*mgLkDFc3}HnHl|z<xySc7*%&#%%A#@6311BdCg0g0;!B?;hD*79i?!17c3U0(O
zHjs;EVuTWB#*k!{*3wxdV--#}Ln4`?7D6Nao9z^RJcQPd8~TU8rs)i_4!tf(WS`IO
z-W;GIFgo=q5yfW7RlWX89D3qo#1!o@-oF>UFxXD+8(_2&F)+W-mn|*J)z-P+HhE#4
zs;wW+U`k(;e0F2Ykc<{gQNiE7oGjAz*INEiu<ZC71<MSI=ti)7sbI)8C1-f*G-?`d
z;<qG7FBl10*i^3I#%NaG=46BUx~8!7Z@8(=cl*c>kZb57WRMqsqYx6zk<%Kw0F9Sr
zP5`EjF1_Z&=SwYsQ2P2Y4K=~|P{^7YLypq-rO%F!IaTln3>=^<bM-g52F15VH%7$C
zWBnr+y#=_0$4AG=R@}!jeC;L{FTT$OurQX3F>JgrK|vJoyTtF+Kq<yrdaO)P!a5~r
zN<k+uM@2qGPLXM?D{fmA*UrKWa+63?$>%N}3qc06Y!y8ES6UYbfyq;Q1DMK(Z?~+3
zl|qV7LlN?v{D#S^3wlCzt+N~>COw5ggLT2n6YTL~u?SrYsosK?sNECF+XHPt$V`s`
z#i8K1nTRc?|KW6~i;@GHx3g%aQOIh~T3!`E*nMcBL!S@aq~Fcn7cpypJV!$oh%4Hq
z{)4!}i0mn>6akMrBxo*B<#rVMM;dx{(fHNhC|a07FY`YIJ)ywhfkje`by>1LOnOOw
zAd2pV-0kQhAg^ey8z%*yZRObtByFN2{2;23eY^iIT?-g-)oQptr(?ljR>e!0<$T_M
z4Cw|YexQslP)$~$X_6*;Ba&cAQgGA=GNX2olk9}WRNxpz#F%syN;6v{|L2iA%})Uy
zuuc2t(^&cFW{oS0q-c$2*KYg-FncySgYqplxpy=nNcg`AAUj3Ih~^nOWU~g3<EX^o
zpRn=00>)3+n58P{pF$jJ0zi*OfT6a&fA&w;Z6cKa4_r3>`rs9{V7vuqg3{sNE&$(t
z#%Gq24OQba=l+M|>nHffRJZ>0_P>y={B-r^my^Q9G&ql6fBShQo2B>f(RI?MEG1l_
z+w*Ia9Z-AOcbRSb;`5(CF2Mg=lKv7dZlV?@p}h4w6x2TQm+|KR4n@q3e5rj-LwO~M
zLDD=xl5?N;^A*MSd<66C*LpPa)ZmSlH#32A#Q!J<UKQ>GQ1@=Z>5h}K=4B?97wp7H
z-TtT&5AkuVQjifcyKY!l4{eUwnO0yr=dGP*RJT_U(r<NN*cW`R*ak-|$YZm3DzEGn
zpHgZ!SLm{z6GONko>H&#E^UbuiK6-1f7hj4cyNi{E~8k396Ewrn^|7HgQ0mmrHi^B
z3)*Lh6}9)RmzH-FjkgWXD;9JLK{eKtwq!S?T|Q?jcgRBFjwSc8`Bz&s#<8l3HVhsz
zy<uH+slo&Y$p^9yz&jztM5f(^l#__I_Zh0`HPH)2n8Di#OF@;FtGd=VBtfbnccKSD
zm8Cd-TsGcogd|@dZ2oFTkxrahYVH5n5Q3UL%OBe;L@v@q5o5%R6fQ1Itm|2)d0^Bw
z^(T9Q{v^cIL5UpIB)E}|C53oQDbv4oPR*6sP)NIJ(3Pv1=8`A1E{$MubSqlf`$jC6
z;3N+%e6)kwu6_A5^JfP!A~l$E^j|#lH@|+Lsirb7*?%wn)(37FgN@_YdAP;rU6TA=
zeM*Z@7KzLezZnWU^eO3cact1EU)pgeGzOGu-lX!ky0B^&gv*9w;tpEpRMK@RUDRbg
zB;D+8PO;pt+iKT?m8K+M<%d2U4wk-?TQV#-vFml+iJM;+OAK7=S(dBmR)|h_#|4yj
z@edN^&DZiSdtF2R$Hm?9CFLlF`Z&8+=M<%pe;j+>s|I0JieztRsKC=34eWwKCI>dc
z!lm@$RS0E8_n+g8bC&C$@X8_(hAq_(9aC3@Zh{h4;Y~Z&c1t78cGrY;3VTKvq-CX?
zW;IUX#iM?ka`$AEZ>qk2mYn!u|5kDm3%^Bl|C25}qw^e4Jx`Y#*?kpL$cTA7Rr@Mf
z)=-zY5hxS$EU|VaWv-ytRYqKCZfWe=H~Dxa1dx9_j>~ddlk|2zUw%YugC{FuR3mg>
z4rzIMIS-skKI4_}34Q<`UOm4`hXXGaqiuR=^bBeB4w_c!4ZQSd@1%k%Ii+1h@gP2-
zbqc+(+#(kLs`x!D`$==!QL}QY`njvCkxeM$jUl@9GA(Es`qjY;{K|Lf)D0eU42kPF
zQv3_GIRCWXEg*kAY5Wl>(g|&a&H_$Pf=<V@j6-vFZ0oYOmHUxhOlV7jQ$(VJ!}mHG
zj;zqmaaf)Ao;%JSc%galq{W&huM#5j$;FMsQc5TH9@?!+*@(b!k3Tb=5|wXkOpthw
zQFkV^d3{-)=bmT?gRB^#7X`<$?$tF2+hB$^MUBC!s5_gmHzq_N{1g}4D^lwudTp6=
zz6uwx$9_kVLqVNNqitg-)nE~A$xvGV*M|F1m`(7LaAM&HA%8aN`O^E@WMq!d?wEG2
zTh*5*YQjvaO-K84&e&hgxqm&n<9^n}n_{4exFpg3-I9V*n~oX<o~B=>>Mv;*T`g%9
zfP^5TXZX%=P3pE?I=g2YN;e5rtLN^Avi|bP>!C~9I$M@2!*!gU);&krMJ_Tyqj3oz
zFeCI1h6-x@o<+$sU&$2+TEY&{^f$Fk8dgIXhM@Y*h&2hqBDRo=MsPj!YR2~^sjfk8
z3c>QFq&ZM1{V1$mgT&3QbPAvYg3~OPMmmBH3Rij1n@isuth!?t2_VWXC0puO?NC&}
zas9+mqsxZwNKRmjEz=eWI5G}BQ{ff2;l2_vL$5mDc_UDyuk<ltoqU}a8;0K_#ULjg
z-u?2I>Si2U;XBvWetcGMve%`Q{sd@uw=%um0SqS;GPk<1QLR)D8-#42rsD{!7d>R#
z3<N$rUW<Q=q#AV)jMBea62JPlPgI1OZ`1~u02NR#C1C&q{Si?5NbKJ$nx?EApKiT@
zJaf&6Y$#NIp<rZGlP(v0)a>98pO~Ui^~im%#npoa&UhkrkCc3-t~NNc)EqzF>ul+R
zXNNBIAF-Ms#v2V5*5ctKJ{L|#FFrhSNMuC>$v*KZJc2r$dedCKW5L^r2G)eMAg{`+
z`m+HvhmU2O1|+~>$NUb~-(JE$-*Tl9x)yKLaLsIBLo|boqIGuZB+OvDQh?0MI@bzH
z&+TKZp&h@m|Je{#AZ6(ESmsk}ySp|JZWPTW{4hfzek$Q|G@q0U|HW<4oFmobxZ1Hb
z&+scVW^M3?07Sd{2g_=@uWpE;-N)%wE+-{Q;2I3*@;w1AsE-@!vnqnJ9#eY)I#^yl
z$y^ezX80|OILusYk`=dCIO_JG#<TgGjTfT9CY`Q8j6m8%uhJj)Vdu(JnOnu{fUQK%
zyI}ngk@IgWVx{{1t)|xvihCZf9I0%(!4!Yo(HMvT3t2>~oGMzheTCeLnp>rP_+E_n
zSAMbLDqV{(BT(tVz!pAHkm}|MCta{v#x?A!ul=UGIw#vU|Mr(`_^FK(USSy^wjGxn
zJ3BKG{J6M$Hboz$WT$GCM6gV7Us$?epB%&o=^35Mj+v`Hz4Q4!WjF!0dRMr?*j&)-
zd_cln@frPX3B?52_$EPjG>Q~LydDM_TqSPMifL&K|Kw94=k`spE*Pa6+%R5ss<M5e
zimAn$WmO+^Ve{WfFty^xM;BfK3xi!-ocyLl|I@yo<rY3R5Yob55vGr>fgK~3B)Sr+
zP}T|^8?7$khXCh5q%Lv^r;ifsJ#Bf#Jb>iq<n048`#=lEPC($9q#oTDp|sefzT7J|
zkT`>K*;)$oTcR)PG0fbs^4kpdZ2$10W_-~w`eOk8c9vdxJI+c`qOJZ8*|r)k!xn&8
zdNNX1>4pV{<)cX<kj)k9n<GW6h0sMePWQ(&oUP8-r;~a*hHzS^98Nz?GkT?S!O*Y0
zX<3h)gGvN4%K2?c;WsUK#0V~iu2bi3LUZ2v><I_!RzefYA^s^sbc?I_m1U5|F4_b2
znMBf@;-4h~W(JYpB?1;)X2myepUh$&N=L!MWw&*j=)RqP*JC<0S11b&M4P(KtuwhI
zED|MPgE=L+e-GzII0kmCkv#DQ55sY|gMxlwP}%(2on~Mg*v|yBY7;mHmRyQlfpmCe
zQy*Dy^(D0S%)YC5^@|K6ct)-ftHCMpV15BqP1FogF!e(wK?R6yAU5I#G4FA>LyYZ8
zeRCAw?=qum|8*t-)XDLf5(r1JauA*@Q%l&wJ3o{YNPNcnicMUf0~ib9B!U;_OR#+F
z%M7f*kD7jUR*TX-AgGoFnOB5K4PE`J@09R+HI$3Z6c4}OTPLbfRu2?jKR9wG`bV&k
zxbBpgyvv>&vu}K|*`$k@(=Ze@;JQ1{eJhCHz5j~eydeW1+N}f0AuVVUuVbO$aKz<(
z=V)Y<h{Dcz<RcNr2fSG=wq(KlEc?oddozzF2A-LcMi}kWXl1Z1r`FK^KXw_B_MvxE
z^3to~DPUocOm!A{uQyG*^;hjGdRYWpwDkl2N+bWfKN)o%-sMnvepQft{2R93@yeMI
z&z?@bl3<1f<)YqF>uqL$7C}m=J5Ob`87Ug~ODI~}nbcbqpY4J&V+8=AO{i6OFWQJ5
zhPlHqH%+Q-0{lIpc?vOWW6LcMs<8sG4=gN;v&-h;SAHzbAjS8gHB)l<uSOV@(GadP
zNBM=uG@`Wj+pOX<kVFw;+Qam--gBO0>e>PIB~p!`)ej`c<niS>VnX7@64_Uh>ORUf
znjJYb+>N4EXWpil4k4?<K{*^5Q>*FwvAqQPbBwOWBbdSLLen@D+43e1KQ%fpU$}TO
z<QHzSPQ)+3c_aogJ2Br;Y78l)W%S!;CdsIT0*C!NA0W}&sl$(Qb>V>pVtus!sser6
zH6!ME=|wL?7FkrNAL-{>G_+4l$e;oEN>^eYEpvTv3%7o?J`Dkr5$`!yd1=Ze&1(D<
zAn)C(lJ-Tk;O+qhBb5~S?1bknB!LuQe{SAebgb~2)uMu*bOj@Sq%`#3po@YHV;VVz
zh8eCp6}wkai~R91jqa%Gcp-Uhjf+&m*OS1C#R1IAA6B@KLUQo^v42;ZzT{GQf97(;
zr*txbHrw}mS^6X+--8|m`e>3FtEYYq&YD;_!6@$;%0u+t;U@#7_M2jcd4|dO0O&q>
zvbicww0ho(soKPu(M;=CF8Hw`#F?yCt~_<LFb^E_c6zW8BgZ?^!M4pk0wdLZ&JVS6
z-Ql<|c_Le~ad@bPO&((55%=b>)q}e;-)70c+=L2>+BV=*xt5INq`e@!xI~{me_aE5
zT^`SE(y~-b!hr36<;>0uBu3_0JgWF2w2;1LSs=o6)J$4L4dHtr^JL5kwy<%4zQc@{
zQyKI0&L{OAO9+uxh(X=#yg&8*6mDpd(NEUUO{I<d;P;s><~;;`fbHxSxhDd|@05!r
zzgRB4Jb@dtCABo8yey58plbfkoHQlBBTMBvUo-Bry7#*|?g5lh$J_PXzEe*U6zDzu
zXZOFlup)(w5&iV?K`V@Rp1tPz*_#J;64>wLWlq$r-Uv+C16q@?E%d!o)f>jPrFsM!
ztQw$GEwcr|z0ADu^Xi~8h1AjyQjWffF#Y_T=KE>G9k4?$S-sQN<|?W>t;`xL&34Gg
zuzN5l@I3rfMqj3^{hx})-YppRNjl!QJszZKU0r~i{1Gia)6%vu>sBvRMaCXjpMoxW
z3^WKP21kJss4iTC)PGJf?}N;pyUeqyTzt&E)Qut`5v)Sb)uU{*KnJvUhLCvMijfu-
zHBeTLcwlei`TegBu1EtJ@i~vGASoQ~@b0#4S_!irGCtJTX73#s<}BnQZI^sp^dJV;
z_L468UOc3jPpNWkSN!8TZnkR>F_kwi)SGkyiGqFd{(<mLct_tFI{U|H+kPMICMyNR
z-fLswhB803Rz=Hgy#no;(7sH#e1r*s0N12xIk};+*~4*_B7Mj$K5w5~wZh8{po@LL
zbi<v`JD$@gC1Agj2W<7@p$b8#w$*LhTkStNS6!z@g8{<_{$kW}5_;bji{qi=!Rk4?
z_9$v`^YY^?6_oC9p#AY#9hc|GDkJZtgd{<tZfrT%h=cpjy`maqmCb|b@zid(0x1P#
ziGcBAaeVdvaHG#=!E?Lg*1sa5HtTwu>C9%ebF195|1~U-+nf6KFud!)M`lOYo`1gc
zpGmAg-v9p$*ni<F{|gx~6OFEaJr`gEG;93Snzi813GrCIqU%$ck!cdfs5Q^+-$?1o
z5$xDbFhc>DRuR6GGH;*hFW;Tg-S^}>*(Dldhp?ec=>>^V*rj-|#X}nbgRi!bKkVw9
zPFeL7cDen~Wm;Pb$9Km9qNIorXw!)EDAWqJ%s6}uxmHj?JBnOqTLGZ7rRxTz#|Ynf
zsQr&lFcNJ2;x7a<{=h*@OFwT7(kK+-`P<4WrFogyWq1@%RO<u4IJXPGhF(}djz<mO
z^%APHt>11eq4LIouJsM)$5gnix$s(X*4W7ey70h}JAHQl#+^P8B4|d+2-ZY&I-hq<
zWQ=P1!fLc+o>2`lv3<5o8VuoE**<DxD+eogwI7KgqAYn!q<F5<m!dU0ObMRn?PR;?
zRydJ4ew6=>*hdZ&HF~=X&4xj5isIw8N0~m!y}}!{8*oa_Wyr}zL0FUE$o-8C8g+e<
zTliH8v}VQ1NIQVqpMjl+4=-ypKq{XrztCsnP&JhHv|9x|6CL!Zww1e?cn!D;C;x(%
z0CUWd^5=gvU-9zCZL(v0Ke1yZ5tm_YL^JA<yC-c@G*4}6%{sN-J?nc<R%23hMCYt>
zT851k+1@Acb>islqI$zYnhs+BsaMK7=q3q1Fwa^<1uc&v9|oxsS8NNga+K=p4T(&T
z>pT{YN#fe3xL}BLjZI%z4)kw-eX#L{&Vgav{hHl)-p4i!z!T;z&ZCY!sx1V8vD2~Q
zjQuykV&m5FAdv&YU)jFdFf?%x8>A?~w=WKGZ2S<YlY2K#!FJT7h<K_e;sXh`Ll_x7
zV@H8`P{Wt8`JlN@fdleHI__r73Q~>(byaSGGyoa6P&9jt@#OWpQLCI@vdlZAvB49X
zO27}qLbZ%~(Q#^H)#wyIkcd5%XyMvbQ>W+NY%1{CWU=XELQBM1?-^?~`H`SNtmT7a
z0pC|Tclds`aj1ehw;nlgOyhZ3kS#(On+(S{Bi#FO!$S)=S%Pm&{XinckG<wZW@9Rn
z97OF=M4aQ8eH-LT13%J0SPP-PF5AG-PqLRNNSG<cD!JK9+s<&k9><DbC_`p03c7?s
zsy+jkrQu_y%rYe;BH$o(VRED!_?TBnXYYUYVU0t-=Et-!b*Pcb>t@2VZfZ>SzZc_e
zS|h&qVTJ85+>^ts^tb2t$)De3%O<?%6xjncJ><PS9-Sxng00#Yo>b+)D@#vFRbmea
zpe~06csi=h3<!t|a%D&F3~Zp768nBzj@x~;?@Kowz>;2gJ3YhKzYU6S^}YaCWq{K>
zS$ZBdLuphTXGfp+q9JJOanH%v&SmckLkods)GGb$kA0WNd=vv{y2{x3rv6zlKwCf%
ze%$fut$QT}T4<A^rEfh4D^hP2q<=Oq=(4{#5CgO{Txgev_pOJ8QKT5{7OrG@yf#~l
z12a7Z{Ln?u>>wwZ0Ne=VRLwO{-KM3r<t9NBJp_g_zJ4ML(z19Fw9>p45j<rvwyyai
z)#|Y&(+A%&=HYMg>8iLDUH~W`TQI#Y86`pf;E;*Np#>pCX+>_i(>1h!Z%l3w38TOq
znC_;31N0DN`>#`r-+Bg1B3|>H_@9rpxLUF>9FYA(Sg^~bkb3hp2Y$~KYiB2=3&4Lv
zqP<#XcZYLkjR5W8dkFfpWcS#h#HnM*-nrFPk27pil`EG=7O$DzE0Fq%f6Lej_Sgyb
zfJa1Ac1qlY(!-*@o1Bek?H7}uI&WunUAGG_m0WUkct4JHzEFY$1a%K(4J~!uy~yxQ
zwg!r7fQ??muF&xGQ7|VDKvY><4(xfZYFOfgJMx_?d(U*NQqY9Tlru!;d_`=fsh<qp
z)H0O67y``UKd2cvO%|FWS;La02~k*&+ipYqzh~!tjp8K*3rQ{yAzw5M?87m4B3XBc
z4wMw+^N>FTns^BxH$33A+_+8EG#E~QHE8{2`F_q3i*vicJpW-raq;R$Y-HAJ=fLZa
zs20;&B?D?W<gfGgOhZeHnOlWjJC-_n9IV!&0HmW$9&4OCm{19fnnO}xF3hzmW%hf7
zDZj{FG2v(olZ>q{Jr)A&GRRI{@|0`|_nyo@YWCHFD_sU6Sze@~V}FA7d3;Jy6Qq=?
zRRdus(L8v1ZeT(T0kgu`1DVj#iJJDRlwmSJb8}_Fy=%HDGCj*P<~9~_C7WDXiPTe7
z`qEoh4}%>*rArNNBRDC#2|Vu&VR#C!R0WmnYZe^1r^gKbcNA?I0hGo|#1Hv$rp_gM
zGb^1NY?*1zrWb-fDc7i(IZ{GnNFY}-9QibT3z!+&W5#SS{5;*_*YDjw*Fs>N-_SL<
z#!}@M_;f;1+O71ihBNfR^%U9ITe|{&^#0ubuH^tG?W`IK^FIjVkF;D}s`$0s6Kap4
z3B$jySAG8s;Kn%Pzac4Gmr{-{h!=+#dKU1KKjeJ!sv0$_O?qQ`*{LUQxu({t;u(JG
zFMD#LAD``c7T{U@Tjpkrb048c?Rv9|sW0nx#Lq=ehTCU|!V@g)AZ`l%T}KFJidF-V
zE>cKkmKJ?UxT|lmFBm0em_$i7k%!)g@6%#3j6cGK$RBMxp64*j2-N33`>|#sqI6kh
zP6n5KA85`0N)0l!K18tM1^eyQdezaR^>@%s-&U~ldrzudN&u7uz3F)>KYv?@#2M>^
z;{{`~y)y?+u8<6|fjB^_&7Tg@pfA0A2%o*;uEC$GVjw<Ykop8eH84a^AHAR$x-pD5
zV})J4C%$+>h4-;FW5JV^d?jnOf+mK8@HKY0^5Q1NLP``RbZB#LO+fC}Ta-<c^kS4>
zrZ4RTrow_zzJ_~{N)GnsizO<MMiHd*oYi-qg*q^q56~Am5<Ma|)?_CWfN{(xG3i6S
zNnB05P)uZ&Xwn$!k#TZWyiiC$#=S()?MYSv8P2OWMa2>S>Rhp@%Q@Vh3c`9gp{w+E
zoOxAV&%$xQsSPz+pXRs*fXPjIXWj}&PC`!4J-w>H_scE^5ae|OiyM~Zoe56Oi${^Y
zX*4k*<a9B;6gffhm5d$sO1&B0dF5Qcq{qq?Eg@i2&ZbRDZUkSK*cwCv1To3Y4%fz*
zxViXH4bY48aDz>>GRS;rjyvaah4!3IhLEMaE8w4+KGYNY#ILEd<3HgX91T{Fz|M-%
zD=UXuNVNO%3sSt%N*-r>)}~uXY0hGMaE|~Zg$#kopPxw0GlM2fWZcqO7k*_sM_f%1
z_0YniPuauXGA!qAHqN*So9didqwFO3mQ8%8&$1yGL*<QUo_u<{m`eY&gW8HWBVfr6
zA{)*`HToqA3^4Uhiq;JE#V^hc6kq)LiE}4eJC?22N|+I5LJw@kn3=h<W_*^McEDJb
zSs4n=j4%!qmz+%qY0W&!^8D8oItvxZ^O}z4l_`1PNi;G!YOWr)Fs361onJ{H+kfwT
zS*Ns6dYGgkG9*U+JSdP{jM0OKAkGh?0>fS!dQ!rqz<wMt!ECmyTuQt8VBV-uUP5Pb
zc1|oX9MT;HLno{dSl>mDl=V+9JNOW5GZr`IqwJam%^9Bn^_wX%4|nb!_hiH>)F30-
z4!TMU*0gS{VaI9S)Ythn#YoaIN{gk4x7&KT7WE+WR-k~{&5wCVC4#2np9r@X#5?Y{
z{GR+`th42|v71ECJ>3l#v-F<hiA=U_#a>Odpmc#8NKAWV+p?|p%mL>xa1awVTDRs6
zrSdBtYc;N{_rQIcUyKQbNWEQAA%1w}418<(#VbiNo%1}1WyQ!Sb`j)s!M-x&M4#D+
z!V6t`0hgF?iZt^B8xs98z5NaN%;n=C(MsT@Ax~^Epc5ZaNGh8Sg<(qXP|%MtF;770
za$76T3uQcOb<TO>JtRS<ceb^m$y}&za0*G0$*JNz35Yj2hs2x=9S{lLPp<}R=6B6j
zjWq-OBT=2!H>k*$Yq^KXjOF8)y&vq#TE&40RW>d4PZc=4lTYTomo`*1QF>R@2o1@|
za0c7)NdV}Z0TvtwM{V!S8nd2kA_5g0whZWV*eAt)Xm^7Pz~jYUj3Y>M-K;^3*hF1b
zhq|gbk~!ZcNN5jpzKiyPCyPDo(4o-ih4Z~#vu}5fs8@Dk7TIfQF8vw4_v0>t^UA0O
z?iI6;!?_roHX{MA0+qxE7)I6<vO3@e-l9VO6U0W^B?q;tua-LWCgJgyAKL^VyzsHs
zNBym}D^DD3{h6HGz=<pXE+k!OlL7h}FpeNW$Z`E|_MK~;Is#Ti?X07Fo%W20T3hCi
zpY2=nRMPyI;!RMCxo+sL{`oLw={Uig+H(;hAf}*LAv*5AuV{3_v&x!U*WV4uA2Tg*
zoGo!rTT?_&f+kgZM%l&TwX4L4-VD)dgRt#5Q!R=M-LGMjz;e|CDE8CTi>r6mS7G}V
zl|Z6aF;a|lYLqz4__pT<9mmGVu+H>wn5*|t0eoDaOU+H%<UcTS!9RK}w8Sfjn~Yon
zLM!vG&Yrd4$v?Qbq^C~u<l~zJT7sA%7i!gGhg~XdrRrP%rLG)@>VEC(6Ss6?dP4q+
zmaDxHhz{8rNF7ylOOKkfEc{~bvcmQ9b=}uDG<R{(k&9kl=;v;0@vV--#5Y2n34ChX
zT;Bf3q>lPLLw|72u&7|n<_{X^M^gJ6JDF!A3Dm)_hwJjI9~MCGW5wSzh(5KC-B4?1
z4ED()*D0wWa9!^j)N-BP!2&|-N)1iE<$zK2J5sSVqhlE`>R#>x^zU9I4}=mAg_U|U
z2`e>OAp*(o3in^uWvY<nGqar5C$)ewEQPD@>TT==TjXLvY(#0smaGKOdSBBqF^K|k
zY^H6MGRZxv&|a)M=tbknY#h8sRNSUBoubfbzy582-@Nb_+g*1Tkzdrd=?DrkK!aN!
z)lH0SU9AmBEHKC)#w0yrwrK1LKG^6Sy?PwkTTn`i;Hg1MJ*AHFBd<_IX>L2dxS_aV
z_t#koW=S=rKh}I7c#cA>A2#jQ>T`OrP(cZ3lpx$@KAP{&dyc)#C(7(--t{bf{}%JB
z^TDr<oxD|k$8wg7Eci;HRex8KA?4s{g4yc@BaAh>%7nTwoYJaJ^v}|nU|YR(#Wt1f
zj$VpbBk7H^+cwi79xg2<xxc==x2_hZc_G5zw2=8%vnhc+_VKZZpod4}9v}NPNTt&G
z%c*s8%2RxmgMgL68e+dEH@D;-8A<ijpo3inFTDw-<$WZs&-w$^J+7W-_xC(xT+MW?
z?DUk2hD^gHbb5K4*i8-RgK@muqnl9PiPj*?$ThR0xRlJ@q_i46gPd^qGS|XBX{ZM0
z`TdGxB_1?$z<u1I2dzo4VXC&zOO9hN_Ld@pn7e|?70fNn0%_=A<5=SS+eZN5%|W$<
z1>v8@E&TN#9w*cRN<l2tU)3a3euMtK5G#$T0b4&FY~uG6_4lGKHKwR*NrSI?{}#yK
zQ-i_GE{Lx&vhG4#5r0oJ@-f@lf0H<QzjY&70o!-#oY&c>5B`I(e{g{;dH>!VQT(Uo
z2%euGO2~4_3e40`FXormPx=r}--<W=q$SAu^~dwxeiGUL-d|E42NbbS;kqB=*A(1F
zF10+kw_(mw_U~;iKk3mM|F@p*Q<29DoD(u8Bf@qnLX%+#_paMmZQrr8FkB^_(ub;<
z^eu(vV?!`W^Mf0Qvr|_!@%J!8NF3}e?nK*&;&bK3o}~>O;$0|Lh4YH?c5lMXMw))Y
zgi*xu(ro;NU_v}R{N%`IuJuO<5AQIMmb~DSn$(%KGeXD(vOf`%JxP%$yghpVtxm??
zh1FdI?>POz+XiKjua5hH?;o5(%aW3^o~eGQagTYhXmK{)ye2rZN;W9a)v$lE-mIz%
zCVq4rt9Wn<sw>tAKNi+Yyp(ns{n^MirQ>0Ks}ikE@tq3Y=%F1w9spNTDCE$6JNhwc
zIm8C)Y}*=DG5jSDV_7}`ue%M84Y6Slpk8d8rE;m`^LmN?4KNzE=4G1y!F##IlNO)X
zvo5xQ=YQVPTmp}F(h9}|&EE;T8qW@=RR$FY;L78V^9I>qK=<3%XPTqaZfllKDgkM|
z_z@y?O?04U7WBK-XpLjfL7vcMdR!Skox_?RV!UF)N^#Tv5s=0&i5TVOZ}kWX3VX4V
z0-YZe_)jMg`x=(H)l0h|yI$<-vYy|Pr`m*i@VE_51d+Cu`7w2U23dD{kVv|gY28jC
zx>Uja&g#ZGdrpHJU6_*SF*Q0W^C7NOBIn*=hE$O93-m$ZJ;mna@j}27-nu<?zcaxc
zHm4kFNtp(Kk44~(v&sffuxPy2PFqJ6`+2iRJ4Wxm@1Vz?(ApQEf7#6%G^LKC!O9&)
z2A}f+W_U8l2$KpSffKRllAtx9M4*`Gtlk$!p3OWOyl>N@TnRL!^I>VX+|bE^#sKz$
zHJvv-pCxv=p;q86&%V}}R^ImgFuE*C?zB?e95R}`%-#6tB1;diRYg$L|Ha;WM>Uzf
zd*9Aja4aArpi&hJO+W=iX_0OLQBjZ@73n3Q^mfmP*bopAP!NKmfV4<QN-~H@Ct`#M
z5h68GLL`BNkoN8yX688O{C?-W&sxuV*YmFR_!pW;vhUrl{kgv1YhQgs*R%c=Rk=N7
zwGUr;603;j9(_N-t^5Fh({|VE{B$`4&+4&j1b^1GN69*+|D_y1L7p&YH`I+j@@b5#
zKkCauxq^p#@hNU3fBY*q5>?cH%{swh_HcTlN_nBkL^xATV-!6BTr*Ky$WmT<*sPC&
z@t8$!w$payNT!`-i5#8fe$T<6T2cnuNYYNBp;WF#b!qFG)>Y<em+#pmaT}4imt-kx
zy9mnl>77yRH@}zIzt^D|)K}g8ut;t7iH5J(oTol!<01N9Z5e*#!*t5_CJJP2$wIzM
zXW^+(_IJ;5|GAfY(_7IFGu_A}oPjyvK+OzPb8@yCw`q;Y!dD2AV_&~N$o3BA_G;G(
z+3TejA0T@N`^j;x2kC35v9HI2*ZRN%dF&R#PnZ4kf%rKN6qfd-RAKmHEMfuP{e}o`
zv_a+}q8jvyNk)I+%eq6R<z?F&k)RaY+eM0%KQQ(5J73;D`G7B1Lz=vnu6M^uWfxaq
zxKzqZels~2vd9sns%$9PxbB8me`I+1ySK<#>D~+*um&d9YdnZ>d|If0eu^=5bo)qF
z^bO(&KR{5TXk1j}i+C2=w<V9tNuRacpBoy(J&Mob_MVyP(6%?LtCI=>DnoA{Jd!tR
zRXHaJZ?*G497XJx@g@dsJ;8tK9>FhR2?}7wuU|z0yT|9wA@2A+N{n^S;MsmGaCwyQ
z=07nPa+lh_Z(?81YG6m~9Bf4(R!a|VJ9&W52B-a+o}rjUlfpvnB^;s0JDiJDHL1Bd
z>zJhZ#?z-f!&Smr)%WbT*NS<Mc61(27f~jGe*6S?o`?Q^#)8jIrL5G|Cy1s=mesUX
z?CV3UZb5GPEP^om1uJZOazL-bDhWa9PsT~!GY;F$|7!O5_e)$!5`!4L%P5R%$ckwk
z!-rK-XyGi(0D^v->=FUC=HI$}x{v`!R5dgotEjzr4jLh=bi|j84r(xA{sJ0N%#O%k
zx<BW4KY0$11ZU;=D%p42TAF)Vp*fPh$LvD&SN}#)`i!d%fom<vLbYmzDY7;e;yP+V
zwq)(_YkO4^#>EagLj_Sbb_lThWmgTzB%L(x-zcxo>DaG_-$Wo54LEq#Mr~_FGqAy6
z76%GaPA?7mSmvINqEqy|>9)qQY1ZCM3hPvylgOHF?d2vdqi0W~Ke@XTd^;*Tf)bzA
z#MKL;oGDa5%{%7KZkl!1^pshCtxo4%8dZ+nL@U*Y(A(rc2yTYiOkj?9jYrVyvvTuX
z{D4ImTg<!K|6C0W5W6QhcY4FVH22rqv9Iq#{7s<XM+K-iqnMTAogC`iwQyEi#YAzW
ze|%prLoY0uH;65xcF`p|%sU+R4BYRA2!yL?C)R>!LBT7;L^;+ZQMOEvi=FzC4tncn
z&|5^%Tapya(<&e7apkg$8U2d|JR0YBiQ#9QpT2f9K12}hl5u$(#wXAAxY;OG$apid
ze|;mpK6-xAThqZC9SsvmxciE=P7}_IcIc0oOn=KSdho)iKOpadf0m^7nZ=Z_$~N@0
z51y<GHyZ;tTO2g|9qRQ#s4Z;AY(Qw(lQK~B$+arGksg6p1JB&2ldRF}e!nT`8V><t
zx*cNjUp!uJ*@Y>JqlD4JxAuD72CZ&Y*}TBMlA7mWc{{5<3)5^+$H^FqQM&ihcO2C%
z^AkMnK2h(ScaPeUHL;+g%+53yXp@_tMXJ2V+&9x2#BK%OoU9{|l5_WFPkYu5I<oBi
z3>q#mGMUL>2BvbI3N5TdIXg~Q9tC?1M(73}os>G4=J&M?&6w#Vb6TKOxxVyGS+2DC
z0}~Fr&w}3;JV7gGTFFC;s^BM(jR4vb*2Og3se_rApmSN}@f$=RUb{u96Uujzhq}wv
zAgjIhNar)c6?K!whxlx8GO6h?+htT$7xss9NR%9LgJ|$*Ax>t$uujRXaY)KHWk!K0
zGfuMGT9$w9r&ob%z;<MCu5S0W$Af_fHpsZ8Jh^l_rM}m@9UN&c6R36UxiNguN&`o^
zDcb8Bh6FL#M&*wORptd!o=G-_?5LP#MgfHxEyli{smiWnjv03P=YIdrRXQ1P(d&?t
z@_pfxWsb2g<nfQ*IjAb6xVDA=UR5u8PyMw9M3aLqL~qiHSln5u0F`}GW1o`%{RW<k
zJC(cx^z!ms1OnXee1+&ZCw51Le%HWtT?2a9E~95nBTw}Oa~4KUyTDUWfvgfww*(!{
zSXyihv<D*`9C7?Kfv;0Mp`A*$Co3ds%5msC0;nKK@6x((9|(lPxQRf~85-S>!<~MF
z+i%iZiB8#e?{ZK-tO`tw91=}>Sc!B~TZAlgm%#k@CI3RF)T1&5@rmWl+_0>?9-TL+
z={S>VhS={S1@|1x!6JH!@3(9L4%CwsGk=y!pkL&g&Nt3SzZo&Ld(81#YU%@Xu|FLb
zk|=<lJa3+S9O0t+wjlv7q8NDRaJtig0WuC%FMbmIDsR4lH#o*b$ALwPwJ0zXY$dx}
z^UJ<z<{XC#cwNt;9q*iZm%d<yyIjv3s`#E4m5iwFe*|WE6kEC!F{t0UWfToUq?=Ol
zpafiytCOD9kpIgUIWAG}0HVpDe~b3KWppk{CdlG6p|<{F`m-aCteZk(-i#DpGXc%F
z(tMJBqCN0)N#%e^t;K2j`;VJwKL30G4Q+$*^LACL8`mk|`T45w{2jw21Zp?TM9jB|
zWrVq&DZqVL0h-<A`p|;hJF%4C%aaZ#4%Mm&+h>tK&62;k1pf}^2$OrE0ZvXvCsNto
zm@S6$h`FG?F?BMsOw@;lJ`)WDo95#kX@(Rl0~EZLft94CQqA2>t{82=a^W&UKw>DT
zKW5RW#nn|uWwmznVR3n}xtWEsjStJH-=@gd{huske~3QEXASF}7c(7qukLw)WY0Qh
zFQ}aplfgP%UQ`2TvqhOo)KwKzTTH=tTzk>JQKz9HrqfQSn=`;BpWn#V#Tb8!f15|W
zQfYN;*Opp0OTJfI#m4T{Z^}hvUyR-BCPlaeMK3O|VnDJ^;Sk=;zS16??nk<%J>i6J
zGI@3#EN=sGAe<Z)>d(S~Br=%R9W$-SXQN^4H4yK+D*;rWJ4Agy-qiDcE@~TC<Ym|q
zWf;x#{>}waH6J>76anq8x&S9Cw*R@D&F{n>@o<h>q{c5~pDD2hE&tLbJAAt~fgN?Z
z<r`hZ-a{_bR)86D|HPYDwgd;lx3ql311Y5=6y-ZvB70&fSDs_tdLJ(3l0raN(7Zv!
z6wjz8EY|ZD+00&H-BUT)-hE@3+`#M6)7SG^qt=R9OtmQDgI7QG93(Mp1@T$ot1b3#
zzJ)5&hpMySEEukp$*pt%^-s5wUCsFhQTziv{N7zJhR1zphcu%N_7TLv)~8End6?^V
zb(d$0xIqXC=tOt86Wd}IBM}YJO#;&+byqIHsY%zhUxOMWa;Y@~cSI^`ZYG0WcH;u-
z>egaB!A&^Jk_2>G4XT}9wNXOw9vcGoSbst9c--v0RfSMhw)}Raixq!LDGy!3L{7JN
zXkV=lV4+&!NtlHLrMBjHl6fM6l519u-K|7X@V!WkkZC&@ti^#yiKGGVFWUV{I8cJ`
z;lg+NhhV>Kj|bew1nyuf%l>KNOYNS+#R0Qm(FJCnnmKmN;*^+8mL4W=vX0Q|n&g)A
z>{b(@Mb2~1&1JbP&nQlyU1z^lY4DMEL+DV~r)U`JhPuZOcFbSJcN3qNz0bzILC?MD
zVamV*Z-`A^JT661&stE=WVs_M9c5qTO?F)4OTgbh8K+awT!9_JCUym<yjo`~lDEoN
zd^s}UTs6CCYu!YtyLNBzED>|_;c{!e@gxuzA+JX8LAlv%!Cm$8y^v|DvLzZyNU0n*
zAD){hms9Z81tMlbae3aP5O6$$H?J`vRo->cw-+;B%!YL=QZOKT-G!JDyEQysZnutD
zF1fhTS6P7gGAoDK`_Q+;(?HM48REY%`WCytKKlD?*arTQxeQsz8z&%);xbL&GPKAu
zV${BLVGPdubtqQ2E7L2-uk13E1`rB2q1x|ia<A*abLZC$b%*z4oMk&Dw(nYr<?At7
z0%Anp<+FD8Jm%|9Ru}WO-40{h3`{kW+?vmJoP2o@^$s4X>T#7M1@)yDu^*}P4>5JW
ze~~{M85v+<T0ngP9hHSL^#mj!-h|hX+!k;!aB>@y(wF^1Il(?t=#7EB{Wv%`hJo=O
z0FQx)qBbO9G*56>4m`DXvgjv)%)@&%(s7fK8x|=-hJ0!F3w-$8Stjx(oYn069s%7L
zja?s`(9wPuIn4<5%Dg61PcUX!<vxhu7)BFDQ8X45%zgMsS(^A6g`y8xz*W!}uHF^C
z-88-x)qc_$YQQZ)ZBE8dy%8!X8zDkGv*o<!wKEmz=hr<;wW1RPIjKnq)^>eC;RYFr
z@sQraoqn|f3({A;AbYEvW}ASAN_dUEb2f1SH2PqZxy(Wpc=yG0T!&O9FDW-J<*o0F
zDCy}fG;v>|{hD5E5VD+9_c?Gknej<#UAn6FfLTQIoZ+0o$KZxDXQ(*gG}^Yq&<*Yz
zV2sj2@Sh*@tyjQk_g#@vIiUf{0gf7ciacKhHF(a~ug8JaUq5wX<*9)xiO5m-sUa^#
z@Ux;w4zcUsFw>M~qKa{dpdLXhI-;lxS@8N$@QyFi(}R@929JZ{K=^%ZmywFd$kyVL
zp&{17*}42=pnfbn&KNF?TC#tSN|gr)?b9^}lZC0#+ak5=n$J$3jEQ1RT8UxP)m}|9
zeL*C2Fq)r+obQkKC-5i{s$3#2;s7bi-5yD&NINh>>hAY5@W%+>8#8k9e*cQ7W^KEN
zt$ezjeLbO=?M+$KFlFGXX+lyPF8BQ6z#!J}l1=!R(!ooST(BLj24N=w6_K}P7fI;|
zt$NsEQ#=?%7c+2IkZ!qsl(upp4T=W^C1Qo<nlA@L88bbNN4$33*du;-nc$2YeFF4S
zdmvA4%|oz&rM{ODJ#j8KRKxab8k@Q~(o(`f#=Cb5h$Z&|41){cpX&0vDK(2bUP%Th
zC+6C#7tYA&14Ny0EuLRTEBg9O7xLNs79~ejO&N~ld|}IgHCfKPc9S?_8Yl5&gnE)O
zd6AzDHJI3;b|M~oR_Yo!>DGZL^(b=!(=dt`c2Lc8Ve^JAtTWGJ(!7DTtGiSi+DyVt
z^&|sigW^lVCtL0+fI8+$L!iL!TXl6q4mk5ARI<Gbw-{dG5)QO%6#Y$Z^jqxu#eZM<
zJYc!<Iqu#6zY(ffsaup)7R5W|vBCB~4*uIR?<<Y{55D<Z-#>5BvHwrE=xqM+(w9})
zT3{jgk3Y-*nfu5n3@pzY<GCVh{eQrlKNP=*P5xhCF+9w0EhDn@P}$?92ck58bNC-1
z?*FO8TD<+?#k#I@jJD%3SqgX+M1yCMd!BK?<3ey%4R6{C$uK4<5GgI>Zl*8q{`sEp
z+_~4tSsYE}F-pvxCTsX?)GL&SLR@UU=cpUL>xNS7$I!xs$mesfG0x-mTGEyk>s?+}
zo^G<3b1V5nok5biuW9EO)QKyNIt3P`A4qC}rJQ+?6;|#jg#UPER1%kSq?$)KmQ%=O
zX>PaFM4F$(?pO`dLw1?ussVD9@a<)gsdu)Vm;C7?4F$jg9+8A|<E+fI&75%*O{68g
zB>0QqnS;QD@AnCI3%orL#~`*Z{sN#H0Hy`^_S@7v_BtPS>+`h8n2<&>xGIK(L#SyT
z;B%SG>@S^YM(lb>d@C2&iIYcBE*zVyNfS-B0>9nGBibcyZDu(nTK1NoYLdJ(N&*HY
zyDOr4Gt?676C?1|G^>iJAZ=10=Wr6D<?04=Jf3GSGl$fH_MUiBWr%rCQxwBHOS8ro
zR~L(rYcqC2p|`u(zOQ(`TLBB}Vu$4dGqHetn9l=CYyulx1>hlgBU~6T{!{lT)p!6A
zI3m|d&a9=%frhwU=MQX0JQMtq?9)viU-f9#O%mHVe4LbV@+PQJYPNwO4BZ;@0bFJg
zsLqbXrCO<RHj18$4bo|9xEox(*U}1$TA`yQ;^wMCl*s#94t^CYuyfFi5#Sp9u+cHm
z`*%P~$H|*>rKpK_KaOR4h(98x4%1KY)BVVY=#<A^=JlB2(C7VEznhtJTe^`a>@<1M
zrRqa<aIrpYJ^y%e|G8=ZJP$b)gLebw&ulbs(mP8A`G-yT-zDN0q>~&H^I#I9`r=cx
z!c;YMjzNZh>vjN|d<6Y|=a>hDtc!DEKJK?ol@%*i7(X`W|EdIO1~!YQfH=+eE_dyb
z3_z1ty7oH%wE1ha?S>8OHcU*H$;4IJtM8)=jn)~h5r2|!J4P5|5Rl*YMn(ojbGzV?
z38#RbE$zf=e+rG%e<|n?nqwQ*9XP)@Q=aaK7^C6Esw4fhbodfgC}px0s$lp(^U?9C
z_o)Di#{%r=m=U$K$M(r{yZ*NC#I_jBMJHv^h{h=6Odm7XyonNp4{w>ifGsOMn%Ujl
zGV}2FD~MNQQ9&g`o9x}k8VhqNZu*KRgHpBlxeB^W(}j<XKapj(u$y#!x=|acDAF<<
z$FD|#bQuJ}f7_2o=I|8kGh0Hl^zz$+#}~J-AP@97yKcb-cK77=XW}MC73L9_9}cpu
zMZKe)y9>9~KR#Nx`0c3M0^8$z{C=Hu1ZAV>BdYkVB;iScLO(T7#!~iOUdk4}U?=Z0
z-<X%%LLR0G97+^x)V8lL2+5i3`gJ2De$7#57_6Sf$F=GB9g>5=nj2)cQKs4yDBhxJ
zPv*5xIt*f0gP3kXcLW#Ap8!CUU`Yc&vpeQ7LX1-;A6k(kza4=e;u?G}<52;41Z2}Q
zkLLnRrXa+=#lckg^4R5R7Rt>VavpFHf!$f$!KtsHCXWIZ08)LOcdQmGFq`*e(9-~W
z<*~E|t20v^Ig_4kcZbdwL%iK^D(<EfRsRm=%ds%9MCW7b!Q};uID1&+VJ8mBFlXGJ
z#M+*65YyS;uH?F|Jbogky$ZC5R_GR3bMD##Vr%hjSFEg6rk$dJ_QVi%{`d+Zb><~3
zqB3#Km)TGd{ZdKIgL?oG3y;UIS$+2PZ^i5=pxPjf^IOq{K3!H0RJqlS2imf@S|mu+
z)=l@bKDu;IRwlV!_*0lP45He|U0FJB1i&dqE7O8Rq7FnG3AVydz@jeTwy!n7rrX-P
zYn`9--@(~6|D0kF=mZ2T=nv(aX`PX9#;^Jswt-2b4g$7N*MctT@FLw7w+4}LV=pT{
z%dzdA%Td$&M6?(BLLUOoa#~=CV|J#IFP|}Q9JjI^-i+61n%631+JHH{7C@GP<pnA(
z#|Eo_pY{ybnK-=eIlbm-`V8B_TU8$NXl>^jFba57KpwMF*q)`!-33B=&|MSP>48u4
zb=J+tTC9Zm1qrjTIg3=(NiH|$UgF;5D_E56gHq(*)b&p|e6~DF0!tCfNqadNsy(1&
zl5Z1${@E!{thLnWrhJx@q$i#+X2}fUv{z|&IP8~GMJ0J?=m;#WlAZR6gKo0CprKRm
zwEOkB<MRc{QN5+Fu<Vb49GJQG&fXDxU`N=+`8u9Pa@fbtKhx7!9_Un#z2V}EJ&4)G
zHa0#y!FrNLMQQ8qffC9OB7CbLkc`^nO(*&bg4|7xbhejPP}Xh)>AUXoeULbHNRR?I
z#ix_pB`Xz=2F&*aIIEQTd^pm6;cDsO^0{6{2UgBQRMtQ-`&8u|9xt3mjtKq+XQ^v?
zPVO>tw%gXq7+cu4ko)L5sBVP`e1zXtt~cnVGe5QvsmkmhB+tuQBt7&~fv(Tq0hc~$
zKJtiC`u<Broo82JId+rPbhlUvw>w9mttjkz;WctHUV8<92I-C<uJd!By$8ECqw=&Y
zE`Z6}?CY9dNy7UVbPb=<em5qv*)ENo=Uk1;1Si{aHAFFx@c^IjLshnSz2Iupljdu$
zLk~ZQ8dR4F8C(yaSRdlzfIaC~o>e3|h@JdtVr4x&#c$`aGza}k&~Jt!!y6|WDA0+c
zO-r^LzIXrGfgoTF))x6ZU7|i>zw51E<U(~!vtdi`Yc49M_#toXoE9x?h*$KB-T61G
zF6fMa4%{rJQ_|6Y{QYp0(DNgmemdKUD9lrpUkZtjuHSwc5BO(4;GdPCg#moGfprbH
zW8M^t$!~JsAcAzfn8baytB%f@|Hk)$>iSgy|8!z|7V--HT8<s)EZqymvyd`5lU9|`
zcWVZ3q3B7@1EV#89jRO%hR38;Xz{GJU$*6U4@j+Snr+C+Kb{d8V_{Ki3y-%HJvgtZ
zYi7T8d`?e2xPjv)BS9JegG=jN6VYFp0bibB1~#<w!}$Z-8%nX@x(fs5I42M9Q8b?6
zoxE&%UX%k4^*L@C1Z65hc6T0tL#4WA(^T1J&}g|1ma8y_Fxd1eepXlhTp6>YbFBcQ
zbP=~@xPhu`$bZTTGv{(BD2dG6>Zk(`IJ3vjXYD)r5Tqvz1H`<JszPzNJGDr;^I$S|
z=5h_J^LW%XU@}^=J<EA2)nN6U`1}xen2ElXa&RY9Lfv)2`M7D8vM?p@RY=D{Sx^&F
z%Ym*CMBtwQJ3@jbyU@#fWn%7ydU+AcXce-t+hdPTyO^(AXeH|1hw#-HipEfNsifxR
z1t?&bQCUgR#<4c3l1G7oMTyHgP1!;9t~1A#%W@LhJnhrAozf7yea&k->VR$J1z`&y
zo47ktqvn2iM=^S0qUV#vx!k7U;@*89U9UaVPIU`)=bQ$UR_jUnOd$n@#RWc&p+<$g
ziPthB49;Jh+1<B3c5P7`jWAk3<Oi2ur&Dyi>4dXDNqoSfY%fHeL0D^II&Xxrc6gOK
zrFj1o((qmzJP=j(bP@31{t^RF!e;dS<tYm=X)T1&4m*;Qy2q7N_Bn_?lc-G3k7OTR
zY!5dPU)eCaKqd<0nF@xEN7W6NzuNNbjyom|R6O@Jw-sc?bQB52GLz{z5z4dYipq$(
z_<g+g5Z2GdAxY1_eIMMA7jhJUK9u6eGmT-dl824v3WMv;1qP|fhc9NPfK=<chkq@!
z+BeMbWTO_w{AK>q<x&?=_W4|{lYg!3ygRWu+6n0BS`)Yi{X|_VN<rut-_hMXy4G8;
zef_Ny!DE;%-nNs^Q*v_*UG?(wF2DZ3e2m1b{SqYf2#})$F6i7I!A(!_zLGz;NN;*t
zkygD}XRH-^`U2XM&(k|h@N<;Vj?|4VS>7b$L7RoT3Po@YrLtlL!R@(5H|zR$bI%ad
z?sDEH^o+|@d@%T4!bdjX^M~g5=fW^kq%}A_(_M5&C%^jy*rWi|oG<F?etZ4kS!DUt
zIn|ZylruG<riZ~IFj3EW8Mhz!brXs=_p&?Z!J^ckL^jtAFJIY8WZ07>D?5J@PMC#s
z6j%n^y=82iUVzsl@K4L;@Si}7upuj(K4=k5(|`tXBL%%TVi9S#jyHAq9Bc68a&_xQ
zR#x3H!$>UJ88<ADf9h0Pux0qyS^~u{JjH?(FfNE#tqRvF|H;D=OT2P%!at+$DC^f^
zJZY0e_?H$%F{_BYnL!CRfCZKV7}OfeEF2kR>4;H<RXCYl%Y8Gnn7<OW$bib@KMD2$
zu3F5a+`EBe_Q)?|R^l}{{6f4<;P$d+SJqHVenAI@-0;qkS8ht(0l;S1lh@dZb@x&o
zVLgzx0kj4A<}zI`%-M<{Ii2L#Cf{Xdy!m!sBI4sn*8zB*aTaNUu9Cj1C$z`yU9&<o
zDn&u<5fyq&DxbYCu*mWptG>`eWN{`^b)-Y*R@M(hvyd#R9l@8dE6|MMAGPT-lZtI2
zK=`5k`->!2)6FEK(W>l*#hR4{sj?>vRyyM$4l{#0<UMg|E^>*yoIrVq>s#)NU0^C@
z{W-cK=(dUglr~?-3s*W0HRMeeD$jdeZo0H<cOEW>ef{5JOH&wI8ZBeX=!IYjXGADV
zZ!J`h?U`S6;PBA00_y%>voLJ8Jdf(Wz)+qOYVeMYy7E5t{j_U9fMX2!Us?kFcwvaT
zecRL_>RI*ALrzi;yS_baoXfe^noFvV2UNXSs?i2av)iVwO3!;3%H(j|Xrh);`Ng^@
z`g2RwM}B!z=tTFL@EQrv!d5xSGG&}4i452s#SYvCfE(p$l?liRzh8Rna)EFE8Je5X
z#4DpbBXVZrF72fY)zMk>kWU_PjMH*48JF;WIUMz-E4(_=sm`X`ws7`Kn5(L2@amV2
znmR@k(Wj0CHurFkf@?Fr)f7EltTaf>t5PQZq@%BiG+5t^9EyHGo?j$^rpw^=yxh>D
z#Uy3aYbXB%D1+R&*v7Dd7g;wJoPm^t4ba_1uYJVmK6OmQ52#XSxI=;MT^j{RRYcj?
zlwP9tc*Y*S<-Uh?t180y5+KT@dD$CsiN`*<GIRLH3C$;m4}UaB2)e_%SLLmG!;74a
z>DTtV*CsdQ>Kje+fE>*DUtD~n`HNMxvFqcbA^7(ebt9X2+k{ka8OOV;_y#A99I>e<
zD><GXw@sx=`#S4?_Y`GcPwZt!%=toawTijb`@4|dE9Z}wY+af9s+7Y%6`z$)9~uwK
z-81BMaMlTnTFgK5Y)Mz`)bT}o2%35%I11k?WFNK#?Gd!V#A1Tzjl)Om?(9^G)0D#w
z<Y8R7ZX4lE#lF<`gBdd}r!QrlV3vSZjMN%L5~w6_BE0TkvIyhK@ciOT+^r3weC{UA
z%{$h=N0lAM`Bvg!W?6a|0Yy1<8Qpx3!vXYsw2y*m<HyBJ@N98!^5&Fh`9FOd4{b$z
z7vag}C{7_<%0cduH-A8KC1U6zn_`J?p{Or0{j-VvmcgzLpZ)heH(L&cRQ2wt3&%~b
zn<Q-Q?*&B%m47vze)7JlJvJW(CKXnQ^?N{{dKVAfal5B|eQop;^3Wf|tEIiAeTwG@
zR0kveQ$lzAB`FH68f5NHiX^@5H6oN^8hd~9w~B&_CJ&wGp5Soi43iP{EdyNOIG4Kw
zI%(<f2GL8uL%SjLEW-9z--^cxTGEeV`WvvAOI06d3$-D6NO(z`&iYCM>WsAjWa_ST
zN&~ylFWn3GMCH5C_&HhwmV2orXr6WIyxrpX-fT3WYyVe{`Ij8>PvPT=_VhoX;s2Fm
z{#}&#e?k5FzgLc_aWAyMIn$dMn#Ud2cKvHsU3$@Wt#EXDv%>()_w~aHMxzV6u9dWf
zhW6$r=F}>npD?PAR1X5ecp3L_JZoJJsC#g{t%Fc=MiNz-Wu=K7z+#3yo||r0;kj)B
z24z>z=r~wJ$r7m+G6mepGy25@6$ll}U6;j~n1a?Tv2m}=QVu?KhL#fm@hSSG*C}N3
zuXIWlv<I?Xftn3-=+p-Y7I!tfV>u-NKxHZGwu_~09iEBQz!mR?URx5Tg?Lt@PeF3L
z@bOpTM$vR>NQzV~53hhrAyS^=xB$cBhb0H2!DSbJWZL+dfK?EvFnu{&eUbWIKDAL9
zfD9dq&7n}DiWBln8P#X>B1*{}e1IAgqs<GtYRs6oHGFXknzy*as>}%%xHvHBZN)-U
z%~3@<MIwShsON!=WE<ix_f7Kz*Gc%;q+jl^oG+1id9Tq%9Ih8_#Q#E9AEVg-NoL^!
z5Wf6oS4B}LUe=1ve8x8!b{JJyV32g^LWP<Q(H{idwPNy-ptxbSX@^1@FLwQ_w>l!m
z;ajO$sh^M#nb`xbLtOPm31z}br(Tf}zTJWzvx|W<afJ_-GjS$xCT>H!cZWZF4`f-5
z+QGhVQMr47=&#q~b0{=tw-=qODu!R8FYUH;J5}72`8=v<af`QHtbG@kzK(sCV<%lH
z{gl*F>edlo)#Dr>?TsUO7?gCL)_gpjY5xZv`W4W%{oSaf$;F}ueQ4xNGGo8%D3wut
z(m{70cp7mGLIuye9?o;S_?)}C@9JM#fNt{SdM4wf;oz~xNGIm*oZ`N?yWGYavt4f$
za5yj|4l*e;8;Q*Kfy(71zSEuWlZ#eh3v~e!$f}G-QSTbhPk+nS>_~YjWIBDKUrjiz
zfFnGm%l4IF$6QEvMtp<#KZ6T0MMffw<6DT<OSs4_Xt|1w#<6&V!nOrtKHnmsgJP32
zin`3nrSp7EOa2U%=G{_nHjaF!lsX1&0s<pvWt7Qn`@M3JhGn(&zYsRg$~^-4-Tr)p
z<Xw=vhXLY-y`1B72kR<Smld5}i(FMYk>Vk_E{!Kb`#H-)Bg4Dg3SChMh-O$a4TFM3
zf=xijNkbO!7+<Bdn?XZCEhIY*F_O~<NcZPVgm~!$WimgaF-{v1rx4W%s_*!MH>HB7
zfh88v+Q4-=wgV-&jiLt90#B)B*nO%(2aywcE<c^xPLZl)3f!WBs^?NlF3)@kjr)Ub
zw8k|8T<>ALB={vn*U2kcXwLJc=!$Uj_L5~Hu8AzDc0B9vbYRPQ2gbbH-B{d)qiXR-
zKyWP6<!7=H=8}#7l}kQHgz|@|5E6S<f1p8N7F3%A`5BwnC&iOlv@vaBba?vna_!!%
z9Vfo86`YR#1WySbiL)CjDOUn_+7vNJR|%<-iZDyDWY?vtWQm+My>9Lr0rp`zATZn^
zHHk+z?6Fg_*&Ykc2zcD81a9LsOHSfE0yc9kgGtR}$eC|i`g|Z+2BqI^#GT0<ZrUCD
z>+J`gf4F@1^s7UUeKqCISAhy}HG~;6&7Sa!6Vr2_Tw+HUO=7g*@NS?T`Hg!N6Ky^(
zgeTVx%C$?)fg*_ozVkqE6k{A&4|E53t_Hhn_gc#DC3eJ9FEYVFo2S7}viA^3`U#i-
zQclmbA#(?7c-UPloN{nx@-<gL{od#punSPOKa4=6-<gmHEI$ERsRH<Ll5LBiB(Jna
zO#bt2&=g<&JL(B>g|s;{t%A9-@8RoK;jLdoC>mTLO`jj=g!N^=zEb)j;AqVfwkJ8u
zBw&A@2Ytz!THd}y1V}{0iF`-mF9jwqlhYpat2#JMwspT{bRLN6DhS#5_i6H*dKRww
z_VzUEIs|3_T38!lsTQ?e_T5(&Plc^YJ}l!gSIh%vM)0?!^lB)cgb#s(`DEP5q^Ut4
z^yc6u7#i$@E;OH2AtzH7H1%Z|oY?iC0%jmAhf3vMU}%LcJ<VC#G}?TWcX@db9tB0g
zje$Zn7)-_s{&`z#Pb+1)#I85htJ_2m%~C*>;YhYs4R%9U*dAeVCK)|!RCxN#p;sU&
zw0NqrfW;>eJ*xT)qOQr>Y`E*Z`J=PeOmF+}is%OuY(BpYPZqH9|KyX=F1`kj(_Rml
zi4adrn=f6uJth+W8I3$~5+KDW;y7!m1ORm=CC`&Xbmp*u&>B{<K1mP%OZw@>&8G?3
z-@IMD;UXw%2qQ}Qqw1^m)bb#3!{2bs92%yh0Pxjn$V<D8muI`L-H4v34jL0l1(o3q
z{uXJfOahT+a+)>W@rccx2ey&}*WKl<s?&w_!}O7HQNb+j1|jQDxukaTQz>wj=WmDx
ztf)R@X8)eG?e$cVdJ~~O+H^0D@l7zCH*yAhVZzqzI*^j|Al9?5I}Rp%1L!=(a{~&|
z^y=_!E1gew`_g{K9jilG6k75>Jji4>Bt6oIjC#51+kMa@-CRvyL9GV@aco)R_a}mE
zsfTr*))CxTa5lSoim&$Zj0--Q!nRi!KT4|KGQ6;8r2s@pfc%tUzvOI|z;D=9!bur5
zk5SxAhj*opvv)4Z!8+WtcOFpvWM31V$i0<DHwPD|P(pDr35A#0WHX1ooVFlyc5vU#
z{RLlE*-9=qRQ&eKeT0{9zh&>~xNuyVZgHZ%_u4r-iI>|vGKTzg>n6V<0V5)h6**hq
z0yU&Sqr2cnJ5i;E(K--X@A!^xF<FCGI~4;R)5=BTo7DAx+Y4YqH`iI_iE!L?(lkV|
zoarb5V%wQcD^RA2%48VBy0$UQ(r<bH3Qs~8uQM6|xZdeNXW=YJ*{lP|N?<U%F9^H=
z9$q^40{Y;5-T>mrr$|VDnxR-vJ$1MPHQg$=-+b4NC>TxG1B=8F<!Z<~im3N7a;Af2
zh3>i7p-t30WPj)T#2f;+mPgpG0=~{!e%Y2?#Ausfy<5GmKytoHvRz!g{cSdB5<iPt
z^WrKs?a4+y(i$7R>um%n1bjM;A$7T&g8B!$Ol(G)pez$JXdS^dh{1+R-`y|>N+;9*
z07j@hm+dHAS_1*wh9-f;321&7li{+`{8|Xl(6m=zxtN+hUkJPVEWO<8;Fe9vK5d|)
zN{^>mjF5;?2BI}mpHBcvQ9Y0mx|QO-o$p98E~tNSPw#?sx78{0M|efQjiR+(G@&2b
zU$bSGoo!#r{McsOlR7$m=CC?dDRDN<O>vKS|Jjx?&Ek)7FL&HCKB|?h{a6JXe)1$J
zD00y`Oz|T!-`;lK!%I_%aJjC}YwP5pqis^BH0sNv;E|VZvDt&mZAhBUO^ZtOIz8ts
zO&squ%UK8^S<-Lc^1-8IQ<|$AZTpO0!AxEsaRFTc`T7aq3GG0)9mEkc^dfhke^>_A
zO*g<O3&<Uy4laP5?X0xYCzm3~RzBl~6LE8Bn45ytjlh2%<|;v{!=H(xPSGtq>I=D9
zs4<q)5A&M26AQ1uUU#ti->+EjG7`l*tJm#RA|;gY^@Y;xL^e<r#v!?GW7D(eql2X^
zmr`i6Ywc<Brai-TsK*T*KuQvQp+xZr#iPgzQEMN#P+*VE@ASIXD~ehX*#mJpG{<d7
z_6+GpLBUQ%tYb;HHguno*}QG3?_>7(Prw!J5)!Qei!3uyWT9ll=S;K%trDW51R%zO
zmodBDB&k@dmJ0<gCxhNM{Yg4M%+*Jx;i4DI9CSqvA;GwYm+1tq6V>oU#m7L+d_DX(
z&+Ypw^e0ko+|9~O6CSIkb7rk}L9YQ%Wr7<4&2m7x%#bnFghByu>z7&fYRj-6b3NkI
zg74ISl4+jz4nO7s+<n8g;`j0zP6d~^NpygH({U}R^wDn@s<v;++eY=(Kbjl0bMR@N
ziuBZY=gt1p<&6DCF;AWZKLnYHLPo}|wEpAagvH5&LAStHp5p4AspCz1aDaOb?y<<c
zxiG;fg@cjU|LKWj2E2>`#ioqlsfVC%SN(RuB%ELaD%58+L*~JDHwJ9&ZnKKH*oc`i
zk>J0UYWhwCvK5B>9qKs}+*|;ciad?3o8%qxZRs@&&2BC&QUrwao4qsk*<?Iw=z~Rt
zY?hp(K}HoOe0PMy<KDn!|4J|I4*x5gbL@@Zl}cWH60P!xAa%%u^y$MJP%8347u&Lr
z;%UH1mkRYb#lLhtTJ}{hKd6T1u(G-au{-M0ju${xm(12&Y)arpO&d-W&Y^|jO(qFf
ztE_R)QD!#?!nP@zyaso5Ed$QXx9#x8TU;tTl(PS$kLD-;(51!QszcKEHNld@oFfY|
zoQm&e__bFS=+oT1EWrNK?ONA<o?4ip^iG)&r>~2QYnP1SPei{@Hv2LB(e1s0^)ih%
zjsEZvv2EA=$`hNiSa9+ygFF?J*=S^T8iL0egK;LFi6KzQfD*Y|@i#P=W4oIRVRGqQ
zp63~#rwFG3!5qB$>f=0{QX_+mZc)_B;zuxt;#}2RAf1(c$m><-lHNr}b-B3|E;bH?
zbs!LGVH@4#zP8?eD-G|q!x8WGRlVXBSKwa<)k`>6sph=`?hYP1c|*gtvhPccbrhw=
z(f7rZbJa`^LZ45v+oOkMbe-<;E1vwmROcr}Pb-M2-6s@G9tTpdbz*l%M&k;e%J&9;
z#9eKL?M%r<Y6L&#4eDU!uKxS&X1udUuL1I92C_#s<`un+Y7jlba!szP#))$h-qlvl
z6?cIy^E{8i(w96AG?z%(iu)CZ<|LTCbMVZxj7C)_P|}L}p1iGvT@`&}aEM{X>0b#k
z0MUHpn~KvJa({Ki%kGJ1VRwxh94+Q@8~8F%(LwV2MJsTUVEA+(0VC~FF1HBJsgAoS
z=tOP%bFGzDHvyZTjbxTRy2X0*(e&{w*>rWeFOPWRR3SD9;2X$-{<%}<3j7^ia1Ct{
z8asf9t3K6lEFvqs=~(%=8OZ~4#6WS#e4u38h5|OQ=!w~k58&G~dK&|Ccn2?}ON$ln
zOELU1p3DW%9zKoRJ#QNJXTQrR`*jdW!|;*Ui0Zx!u4>@LN8|<DwE|b>^M3(ANkhb$
z66;LmP2B_urq~y5UmgA+HTsnjEICRi*NxU#?3(iC@oZZJ=75ZPMgzBH7YPrBJFN92
zG<rKs8Yf6NG@r=TTQ*$0Z|GocmILl~J`cDFXG?V;bu1Ml^rZN{pyEDIcTRA{|0PJx
zl=XEMuQy<Ofw7O%q9Q&DG!sDaGud|v_D&5AwCS9YvkDtU_qf7BB+h*0eRBQJ@Z$=u
z73FC#_(na2^4TN#L9TyH{s~1R{s~3>EFE+qMC29*#XZ||@vB+QXBCe^DrzIUDZ{6W
zmf~aURyv&F56dh+r=xSJ$WdnbMgT~wCTQ|x?_r$5Y@-irzD`oW2f8N8MKsQxY*W5v
z7*FOA7p*dYND?68fDiS8smBqLn2|*1uW%&nu*@^SH3lBThLHgm9Ys7Mbsl{CL8$q1
z_lF;ZnqMpBt?QjU4P-jx<}qo<CpW{&tl*aP%x`~7$QIN0SLyUE-2kBfCiiGwW`YL*
zrghqHV?P?d@UQ$3IOFXfWc=kXeWo`;$>a~jA46OlrfhWBFVEeV#hg}6Ow8P};v48N
zdo8W0fX4E(>l0@d#)YmI=MYWxz-sUUkkdpg75{R_064A!jakAVyCcO!N;TJIvykgS
z6AXa5#TDKMLQeoB4Q?@-p@f1T;GN`un@>Cd<U6=){_S0QM_^q38*=3Tb5-a=`9AnK
zA6fh>L1^`nTmQCR<}w!i0hoUHpB?Hyy!&6++kZo!Q^cQp>6!aw{@JSk=I%r|TMwN5
zrbV9Mc(A!bPa-#Qo&UyvTZ40%-v6Qgf9B)PHnRKGyV}mnV29@ipi~&o(~e)52~-rm
zzR)m@gLsqP(H@+!CGH}rA6tx0Qntry$}&56tf;k&_>q~x-C+6Mj1f?UHrge51J8}e
zPOG3Vs7kwTtsNqmk`tqf81~*LBcNgfass2h-Lve=)WvBP3)C{XM7QEOz&swaj+Y3|
zt&AKnclLLYPkH7C#FNiH<qX066xq;mSbwwG)-GbwL)81<xNj(Ot;i%3d;IG7bq!?=
zz!{$2S=pb5p<Q-h^OjxlLUe@_rV=WS7Fq@Ohv%18vl}f6!L4S!RhD$pR3eCrJ=1Y$
z4}?J%^h|TeOCRkK*(;dwK9cSs`u^l3^!$|I7$;4p%KmhVB(uY1Mdw+`BnNUpo)Vx{
z;8ZNn`0u2f>;95%3SPiY_F%7|HGewU7l4z!R!Xa3nW0ASM978X9$b2Hz!wF}6k0xK
z;3$WDW1K<L6|)+S%y!HP*~?)UG+|Q81g3gmWG9t226D!JKe-G_jrm`$MDX{#I#ZkB
zxoA!f;Jp6znP)VKuQY~NwPdhXQ(IctR+T%5H8fC4m}jH4AMh|L6<N~c<*4*EtX}XP
zY%|OH%IzdzBVnn7+DqVWjn%6525)9ylU?8BCf2@z2IW_n$Q6?M8+5L6_lLjXC@elH
zyz>u^+5|W%B6pdi-k;=Zp1G<cE38w$^zPX{ie18c(scF56_LXBI0nl^!cu@ME$&of
z)^B<CFMaN)2i`153P@B-b)n_}JA+l!_{uNuf$uOZOX+U4{r&czyr2}lnl#@f4vB9F
zS{+QWdPwKhpF4iw%mJjg6ji*Xl-Xl5-3(;^+}B&4g++tBqclMi$|WVPW2-0F3wMv^
zxOAmv9ICDZwc*=mDq`LYg>9PwN5g}lb|{dB4&`jlwKo@fX;-peU(w#GJvgA|Q%=Yk
zaQxN*0@6D8v7tc0%>VcD&ZNlyWPcx@dV7v{<_>tY&#uZT8MvkSh;dHv5{{MCZd$W&
zgCi8*=5{`sn3J@Ju$*>&?;ovW<4cYa@!OCNDxM0rp&j_8-0=vw+6SsAXgim!dyl(t
zG|L9-Qgc%0$XC*8dC0HyDAc>dbe+APq*CozPcjjW*lyvq$N@o}w99q=9+;^{F5K9L
zN)Q)$?>Eqjf>Cn1vtV<zTs$4^p%GCze<Ihz#$arLctQyAbsdaVsmSNO>>bU?Msq&4
z2ISC3IJf11`TYd&!tqo<a^Xys?RGPr7XqP);M$RWA|r&8yB7s9^DZ2b1>haJjT4~W
zJpWpIX=gqFR~dUH_{wAXLlwY1Zr)2<FQ%i`-5Y1sT`jMsF^+n~|FmMAa*nP|^+xD#
zo@eF`;G(QP1ad^+<3I+^aW2wdS@MW7NdQoI-W6o`@k0X#cmJXTqVRLRT%7DVi2<u_
z5#5%7-Y1iw`sMbkR{R$}x&~AnW|AcH&W}z4B*mF9Enp2Ks*fRn13vLt!edd9!SY;Q
z;nf@j#C&*l{4zzceh32ASvX9G%QW$qoyRo8$niCB36nF(uxGD^=b%bh4pOE(x#WDf
z&dzPP_oGCtaNM<%j~jh**x6;OvFrU!K<_?Zc-`4j{(E+)z3>{RZEMqqmgOMi4yzXW
z@y0hf`kZVTR4ora@B6@JcS{ZZWwU#V(M{u{58fX;fc*nh0f=~DH;c%>U;&E9Sd=M1
zPl73PPlivnkw_uQWKijXQ53d?5zn=4@j$dM=D~2e>?KcSq@DE9N@eVH2Q6~|Gld66
z_T^9U6tt3;ou&U1aJ3;B6kXgO=qOx*Ma{oZI4j+ulwQw2&nYV7?*NTEPE9yAZkV&p
zB%GOM7n;@o5@T0cjS64ZPDZIBWK_sD5r?S0EhGqpR`_<VxOs7;RThyjAK2*&ZvPB;
zquCBbpb{%(0gP_5!yb6#_a<wPs9CoNEx3sRJ<L9E#%(uluj@DPrsigF4}+Yxdq>P;
z%ou{~eQifm?iPu`@|V2AA+lwb0VUs{sm34=Ojj7*IDjGoEFh^LHq(t=->aN%y>t4}
z$Wa?$xh?P5SqR+Y`#5z`uB41{)zrk)>f;qVG6lJLZEDFIHi&-B9gz9`<gO2ex20{#
zw#Cw#4J-ctRP^Z!Lx-jIBRn`@I_?4=BE<-?iwkr-O=iJ%=0D+UEf9y6mTUwE?+ffx
z#_{T`(5=NeIb}u<H@LQB+BAlFK09`~iwh#{WB?Xjy+{F%3R|!Ex{Vgj<t$4-vk-sG
z?8L!wb3c9OgcKW-&?8JdF=|{=YD}jbaTvhWqRz{~)OBh#9_*tqp-Y7);<9agStCm9
zUD1fn`@<U1O`o!Kw}#dB27YUsX4}t{*Wo7hAIs|u+VCgzwgCNu^o8o(XEG~YL+!P5
z^T$xKJVfMq>GtYFymrDC!U}_PU{9Vu*S%%FXB7|@AVA8}-HERzVjWqW+#=3`$Pa9D
zcQo)e<qb$<=a0eSu`2HP?aF;I3jK3Sl3jfRxH)Bo*!2h6Qn@J{8f>sn3>$>CpOcHH
zNNisw2n43V@Fj%tw-&3bhprun+8qP)x(euIU)sTkjU#MHMlYu}K+9x>-)E_UU754&
z83tuqd_ow%HOwxxmx#;HytjpPFG_<5r9*IdK)K&a?ZT-n${;@dfoY1=*Y;xvz?eNx
zP2gS*CK%#M_`0}XoEr8$)#c(|FV72X3R)QYT{y1srDddGsBV(E8SaA=07MB68P(e5
zf2u~w;Tv`X_6tZUce@w!^@F@+-}@^_4d`tFN<=RFWQActJ;%WHu$>!th#bQg7tYAy
z8I4214AP8w5`qZ2HJ8NQ28&ibT98i?OvXg|Dj-|@c96=DllH>mvKGR`+^D{=lKO{n
zydoC;!Y^J(VH{_as*@2>W-h8^M*>{3FQY|}zhze(C5^|KHeq)d-E`#G_0Mg$NdrX|
ze@a-Y->tsa8`o*^U6B6U!p2{)b$>N@Hu_jmf0)zYoX~Xm;X&-5*wZv`k|+D66<eoi
z{15O7i$<}@U>E{6s^EH0A}@;h-u`BU48g)K_1hL-O<i^MF}D4?g>}s!lY0X2=6?X!
zy)lVH1%3!z*GioMkt?;G5LUhAGElLyW`6=Ac2G!okJ4rp`~mKps@vzoc8P}-9C?aP
zO;ALi{bJAxveDD<8VTM+XJO!(J!*9Gs*@cNEE$eUlix1N9R%bjSFaYC{_Mf|tL8Gv
zgo=~uscwqG<DV>sE|n}!1!=;Qd!Kd2i6_FON}G)1X>F!M7wv~L{vPEg0Z@%-6tcbx
zj{mTm7}f`>uhng28Ys{t<n<l`5bGjWYNn3;Bc1yyf0JHfMc8~_V891DH#bNyAv@P!
z4R*B{FbIMK&M5l#_(Lb!tHv+U#-<W$+fcjVL9KUZQd!U(;MDD-YY&BdfoDr9lU9%O
zaDLLvp4IqtrLI<QYb`PkyIeY`8?0^|Fxa>KZIB=TI%UEiNdG|D{W|}DNmHt=w+w-(
zuLec)#Vj-9e#?qN(#{(D{JmUfwpK``#Wc~#LZJE*yt?VqZNG&&W8WbW`w#o&Y%y#H
zZh4U_ZhChNDKo9;7m<*Tt^Pu8gg$3t<`Bb~HevXUF$z+Qxj@9$E?GCE7^zcoPR6=c
zQe;|jc{#IV?97efZu89lTK|da{|seebS-KXkY<CyKQ8;oM8(oPrk!y=>sF%?$;Dae
z@i)(<@9Ro{Gn6j|Yjp>3*AmC@fRKIh_`x#&)%4Zr{_}VIwIgoIP1X90Vqd!ivV~MD
zMCuC#wlt0K1NvV@z&Y$D^V975v7{T<)oy%aA3GDOwhmBOC$HxDl>!0w(X|h5rB&Ol
zfYz!gYtU1{V>AMvV#3iD*){l-EGbjKNO1zgZ>E(TC-hXEU()@l9;7t^<IDR2!aAyU
z5zj_T!E*lzLkkic4~tuqWI8+VCxdjN9f7ZJ?MTbn|AKL0x&dUqw1H0B-Et_#zG>Ae
zk)RVMhUYn~`BklGUpS}oxA63Nz|6f1@I82$1|bBF>p+5vfGp(^@C#T||Jc2t`}1AE
zR@H?z>d)qEyO*c0uU2%ZVZ}_JhGy=aW@x%Zbs=gd{c5A&EC^^;MC|(U`QBbW%6z3g
z0*$E!a^wTM?tIorgp$c`hE-!UufLFN=tchz7gkzCF|>$d<AH4+R|stLT7k34%T4Ni
zsIq~t^IokQ0*13JeJT>%@Cba-UlzF_T)MfiEJF>+OX~Phg{nb@%Z#=_0#tD2zMxtq
zqB5gC<CP~U#VZS3O!*e-yqQQ9AU^#Ji%(16efujWw6~GjF1vpSNh$~90?v8-V1X}P
zvA}D~y*M8i@T%3E1-935=Y`%+vuw5evG24DKsPP{yFxO6wfu2le>YZ5BhGl*zj`~Z
zyt3Vb@%fTV`O7c=?rWG8=pHMMt+8t#1?)m}(M(@~8kiUn131Z6v^WTSTSatZFNd;y
zQX3bF*71!p+65xjoEGn4SJJg~9XtrWaY+5-Sfa>~zEz_f#tNk9`=SMTEs$>9O^P?R
zUumFNOU4-@I&8Z!Wh|a7VlxDDT54F~?gs$Ox&bijRF&M7&n+<@e+Ld)K;tlE%eda)
zE+dih@($2_ABw06(!<qXX10k{0YS@J@cfI}bpn~jX>wBY05y4A)PT`saCqAbut0kH
z>s5mN3<Om2eKUu0hQ7_Mel(Kb%ieXW>!l^UT=$JxoeNOp+~-d<`m6<x{Pv7KTSQ*Y
zO3*cH*eS%r=fdZx#EwlR7M(Xjg7m=L1vSS(akkl)MTa`>XouUI3<Yv*;8b}p|1Oue
z@lRZPnhWFF!qn*+fNP;cv0q3cbZ{<+zzNDn(_Wk(u6Z9Fk<4;C1h%S@)?a~gr$b@r
z5$ej)G_O~1YIbSu8)kWQ*kHO}1tRxka?JDI7xXPx|2}#q_HfUO!<y7^%nbyQe`xJ9
z%mOKMVwvZ?p`csIF(?l3TF}irpcj)@UiwRU%C+dci!dcsN*Gq60I8J{F>PD6w4b%B
zdY4R;@-B@}dGUn4zc)b6%V5fh51}OW!|rCc(O|()VkJBI1u*PCEzL5ED<0Wno#lRV
zq*!9~5Y$v>6q`yXfh<&3E>X}okO=JldjzlEo(zu3aB1L36K2yS_W^nf3SA4^Z@Mjs
zw;7m`Zw@F5#aY)pZO81_)gRDPIs)=SfPhE9S3QsszyCmMy)z^IkDQi}Ki)3ibsNK+
z^xlE%Uz-c|_8Qe2;R|z3LY5H$pK1?X3S6<&m*D4DEcIDvdK?ghr&GnAR&&NIXTfuc
z<sOBfOAe~g;m&s$-nPacsJ$G*KLi}~i8wi*tRaEcvRkwp9L1NQs4&4ofQVb20=ZWY
zR}3Vt0H`nr5=9s+Pkvlf7~Xc8ab>0&6c9LSQw@P68NbkF*0cT|dfRufj{w0A{Lf=F
zG$;?SQDh{pX(asE9O3s!TR@&)9TzKhmlRRi_8h27u{{R7oe#j0DoPDpbErAv&Cj?_
zB<ibo+2^T?h2n=BV2|(h=-r5O$wJTUKKuNV3|azGViaU6xe3Mb_FO0agT!@nz~2&A
z0c$j7A5_BLMqj}^bWX}olS9XHfIjt6z4Y<t;E=)F$hJ-b+EoE@Nam12ncL-6{IfHD
zsvB+_4WFsW&p#rz;r6{Z6E?1!D=RB~s5T{y&-@j|gk!yKzs(faNM6;pXV?6gW;Hr*
zN<%~X<1>12?NkIoMthLS$T^%*{4E+!jzF}fja8qg+v?gjh%&~uvTaSWe=gy$$ZCff
zC+^QK^?1sCX~^r1&uEP59O8|nb*EYIy(qPippA?+?221|=Af3osNvpLxv?Zf>u%_Q
zIe%v&*HD%nqemAC^GZjUuE~Fxo^>bt*D(?Lzj%{Y7h3-3^Cv8=OwRlQ{=<KLQ&#$f
zmFQ0eFLD*+^Vh8W*8lV-D@#V+omCgk5w?n+zw!4+|M;f7T(-5zZ_2MG@2*!i{PFw!
z{U&PyS4sX@%1qip53T1t-`?}~aV}-M{r&Pk+O_<Z|Ae<^Bu&@+`D3=`z4d?oc+68w
z^v@rE`>6Q0`~KxwliyDEw**(->-CapHsD;$j=r~`Y*To_C%E7I=Y@ZK$nvu&`*d9Z
zH~uQ_ex2KH$@t*=L@k}IhI_>hQo*<MFo7mcN@G+1OOuaV6L^0=cS*-P#((z4kIl)=
z<v{n=pV<p{o>tA4+i-9F_&VrDOG}nOOf>tXSGfEQFaKS_|M_*$&FSg00{FRonm1X~
z`_)MLU-Yo1)}X-J<W7`WhHK}((d|Rb=Y!nfU-_l!AsvhTiA~X3_iaWO7f=|RO}4HA
z|0w=q#}oBw(&zWw%UgJ-r`glhwE-GW-<2I<t!I{EY0?mrde+gj#QVShPQBPW-<_Xi
zPnq64m4Lg?J}1W59z3-4B|HG9H<~u$_G<%yb;e}X(D;AzjTb-48|j+u75F<rqL-xz
zd?ACn3E$3pi4EjYZyFeH^&PQC{>6~@^r{u)-SMvxCC`}P$m~TkuAg}Mq-HL{fmPj-
z!7Vo2e~RB<`)%>mW85ddfNfnlACGC@O1A5C7YPWtmUffru`pXIR!&N0S%bo9?f>>8
zY&r5qVsVd8Q%VP-bth+uqaJA$qPWcY(}zD2$5B(ra4zmU2O_IYXt%`vp3y{-^|6vI
z{qCIPQhmp8S|Q~8|J3>;XNSzV`(_jQ&v9E>8P^gQw9;e+X%eDYMl9}PY;z9{J2G}~
zBT9eZid^AY&YSszkKes6zO(9S)4%=3;GsM(ieKb~2hJPB5ACU<Z5Aht>aNn13@ATB
zE6kG+?bhd0jRQrHWPRR!W~pp-;t+;)v3|*hvk(=SHT%?mTig|yQ59ABO<tk@emv&M
zza?^q!!IbxKUK`T^Ez=nJE-FKAwk=^rKEImQD%o>g)kDT3!SUH*OJ`ga32{_-@I>M
zgB-^5J+<i0XQa32a7%#p=xi6|%&PJ0|9Z$@k>Htux#(<xc<st{e+HWMNdB##XthG3
z^kZo3sdG8_Tfp`Cfj)onI;zW;Ajuw&V3!I-=la~Ep<#NYmKfrE9(lIFzu5HV=jM?T
zHkGk`f78oVk$bm|{B!YKo{m?QqGw#IT5QXZcEYANx4Mdn{y*!wd6Mn7_Uz~D`LC_%
zFnBG0|82sSuCKMv9~6r9Z;ZU-_Id4YHIdKFm)y<e%zn$?KQUEIQ0d$1hu^JTO%|Ki
zbR55_lh*#a|NjyDe_!`MuD4GA`|<pbzu)U;o9F*KI_ubj-<6AvtM(jv4CKyw{`CBw
zpL;5Q8t2>P$Nf7q`~2+b_Me}9eg+r*H~rlI?}~N5-#-2G<=?xPA3xn~_x)z}WO|T`
zMPa32;_oXOzqj2ojH>-0TlQJv+wO~R>*Dv;_1IfQU-}qtQvdAZ{xBDfX@{4uzh8Uf
z$h)&jhxEjnpSvfN@X6mUIO^Kcq4ccz+r{+aUH`rv`yV+i6$6DXyefNq=Iwpq{(=vW
z05zNirl0y1R^K=NbUprS))l2gGs`|t?@znoeq>+7@v6^n88v^K%bnd2-#ZU@i$~AX
zhmCQif{IFuI-aJ0%9^W_uipP(b@Svq{d&XvdwVKBKRXKy6fBII$j|H7rmdDXU!1un
z`-h7NFkYtT9(kW^YxmFl+q;zC-2LCCD(_gNbg0g<;`c^w!9bgDeRk)+U5@4dwsA-8
zzwhS?q(1-rQ+Zac@U~UO`)9s3XPz0(U%%*w>wSJ`t<lAy9|O$K@kc)teBOPr|G56W
zultrM9qQigUb4I}=5|Hon-{jV3w}r3saaNerF*6t6GYzu{ly&xk0$m0xclhc5B)#+
z&puu)zqw--&>@oLXAkcdyL0T(3uf&{8|x#KGLFA^`I8G?V}&jOYWytyarX@Q+#d7C
zd4*}b3uhc(aJ|#-`R`ZXlD1f_XPXe^V)6S&d|jG(yKduKm8y2trfUk*{PTfjAp``t
zZq-+M6SjGV-*W$KqkWe9Ry&r9Om6-^-%jt*)!H|Y41c-Les<Vmv$H^;Prvy+>E0cY
zd;Y3Tm#fdb|3AI|{}(->-aH=l%`?u+Jz7`%C+_XVS3Sp5)+FDtn~~q2^!Ie$Tc|w;
z_)UTKB-`fA|0(`;`^Q`NewAIay=Nl7{(SwCgEysr0!^&E|NnWPy;2gW+}@Y>`P&|A
zNq9B9B)~-@Rk=?1yL<iK`@QXF)%liB1Ew{NXNGU~d^O!|7&q_d3~pGh-XPM&p_+5#
zefZ)G`F+dGd*7z>Enf#z-SJoL8GrMO@~HEl*XG^fQQvr@Zjs{xSV&A*^bBSI!vY_8
nAmeTpKz+vGwHRgu!<1+L@4m8Ebm%P30m*v0`njxgN@xNA$36ZX

literal 0
HcmV?d00001

diff --git a/tests/benchmark/ci/function/file_transfer.groovy b/tests/benchmark/ci/function/file_transfer.groovy
new file mode 100644
index 000000000..bebae1483
--- /dev/null
+++ b/tests/benchmark/ci/function/file_transfer.groovy
@@ -0,0 +1,10 @@
+def FileTransfer (sourceFiles, remoteDirectory, remoteIP, protocol = "ftp", makeEmptyDirs = true) {
+    if (protocol == "ftp") {
+        ftpPublisher masterNodeName: '', paramPublish: [parameterName: ''], alwaysPublishFromMaster: false, continueOnError: false, failOnError: true, publishers: [
+            [configName: "${remoteIP}", transfers: [
+                [asciiMode: false, cleanRemote: false, excludes: '', flatten: false, makeEmptyDirs: "${makeEmptyDirs}", noDefaultExcludes: false, patternSeparator: '[, ]+', remoteDirectory: "${remoteDirectory}", remoteDirectorySDF: false, removePrefix: '', sourceFiles: "${sourceFiles}"]], usePromotionTimestamp: true, useWorkspaceInPromotion: false, verbose: true
+                ]
+            ]
+    }
+}
+return this
diff --git a/tests/benchmark/ci/jenkinsfile/cleanup.groovy b/tests/benchmark/ci/jenkinsfile/cleanup.groovy
new file mode 100644
index 000000000..ec38b3461
--- /dev/null
+++ b/tests/benchmark/ci/jenkinsfile/cleanup.groovy
@@ -0,0 +1,13 @@
+try {
+    def result = sh script: "helm status -n milvus ${env.HELM_RELEASE_NAME}", returnStatus: true
+    if (!result) {
+        sh "helm uninstall -n milvus ${env.HELM_RELEASE_NAME}"
+    }
+} catch (exc) {
+    def result = sh script: "helm status -n milvus ${env.HELM_RELEASE_NAME}", returnStatus: true
+    if (!result) {
+        sh "helm uninstall -n milvus ${env.HELM_RELEASE_NAME}"
+    }
+    throw exc
+}
+
diff --git a/tests/benchmark/ci/jenkinsfile/cleanupShards.groovy b/tests/benchmark/ci/jenkinsfile/cleanupShards.groovy
new file mode 100644
index 000000000..ac0f4a433
--- /dev/null
+++ b/tests/benchmark/ci/jenkinsfile/cleanupShards.groovy
@@ -0,0 +1,13 @@
+try {
+    def result = sh script: "helm status -n milvus ${env.HELM_SHARDS_RELEASE_NAME}", returnStatus: true
+    if (!result) {
+        sh "helm uninstall -n milvus ${env.HELM_SHARDS_RELEASE_NAME}"
+    }
+} catch (exc) {
+    def result = sh script: "helm status -n milvus ${env.HELM_SHARDS_RELEASE_NAME}", returnStatus: true
+    if (!result) {
+        sh "helm uninstall -n milvus ${env.HELM_SHARDS_RELEASE_NAME}"
+    }
+    throw exc
+}
+
diff --git a/tests/benchmark/ci/jenkinsfile/deploy_shards_test.groovy b/tests/benchmark/ci/jenkinsfile/deploy_shards_test.groovy
new file mode 100644
index 000000000..2a4c88e91
--- /dev/null
+++ b/tests/benchmark/ci/jenkinsfile/deploy_shards_test.groovy
@@ -0,0 +1,21 @@
+timeout(time: 12, unit: 'HOURS') {
+    try {
+        dir ("milvus-helm") {
+            // sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
+            // sh 'helm repo update'
+            checkout([$class: 'GitSCM', branches: [[name: "${HELM_BRANCH}"]], userRemoteConfigs: [[url: "${HELM_URL}", name: 'origin', refspec: "+refs/heads/${HELM_BRANCH}:refs/remotes/origin/${HELM_BRANCH}"]]])
+        }
+        dir ("milvus_benchmark") {
+            print "Git clone url: ${TEST_URL}:${TEST_BRANCH}"
+            checkout([$class: 'GitSCM', branches: [[name: "${TEST_BRANCH}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "${TEST_URL}", name: 'origin', refspec: "+refs/heads/${TEST_BRANCH}:refs/remotes/origin/${TEST_BRANCH}"]]])
+            print "Install requirements"
+            // sh "python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com"
+            sh "python3 -m pip install -r requirements.txt"
+            sh "python3 -m pip install git+${TEST_LIB_URL}"
+            sh "python3 main.py --image-version=${params.IMAGE_VERSION} --schedule-conf=scheduler/${params.SHARDS_CONFIG_FILE} --deploy-mode=${params.DEPLOY_MODE}"
+        }
+    } catch (exc) {
+        echo 'Deploy SHARDS Test Failed !'
+        throw exc
+    }
+}
diff --git a/tests/benchmark/ci/jenkinsfile/deploy_test.groovy b/tests/benchmark/ci/jenkinsfile/deploy_test.groovy
new file mode 100644
index 000000000..6f503ea48
--- /dev/null
+++ b/tests/benchmark/ci/jenkinsfile/deploy_test.groovy
@@ -0,0 +1,19 @@
+try {
+    dir ("milvus-helm") {
+        // sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
+        // sh 'helm repo update'
+        checkout([$class: 'GitSCM', branches: [[name: "${HELM_BRANCH}"]], userRemoteConfigs: [[url: "${HELM_URL}", name: 'origin', refspec: "+refs/heads/${HELM_BRANCH}:refs/remotes/origin/${HELM_BRANCH}"]]])
+    }
+    dir ("milvus_benchmark") {
+        print "Git clone url: ${TEST_URL}:${TEST_BRANCH}"
+        checkout([$class: 'GitSCM', branches: [[name: "${TEST_BRANCH}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "${TEST_URL}", name: 'origin', refspec: "+refs/heads/${TEST_BRANCH}:refs/remotes/origin/${TEST_BRANCH}"]]])
+        print "Install requirements"
+        sh "python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com"
+        // sh "python3 -m pip install -r requirements.txt"
+        sh "python3 -m pip install git+${TEST_LIB_URL}"
+        sh "python3 main.py --image-version=${params.IMAGE_VERSION} --schedule-conf=scheduler/${params.CONFIG_FILE} --deploy-mode=${params.DEPLOY_MODE}"
+    }
+} catch (exc) {
+    echo 'Deploy Test Failed !'
+    throw exc
+}
diff --git a/tests/benchmark/ci/jenkinsfile/notify.groovy b/tests/benchmark/ci/jenkinsfile/notify.groovy
new file mode 100644
index 000000000..0a257b8cd
--- /dev/null
+++ b/tests/benchmark/ci/jenkinsfile/notify.groovy
@@ -0,0 +1,15 @@
+def notify() {
+    if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
+        // Send an email only if the build status has changed from green/unstable to red
+        emailext subject: '$DEFAULT_SUBJECT',
+        body: '$DEFAULT_CONTENT',
+        recipientProviders: [
+            [$class: 'DevelopersRecipientProvider'],
+            [$class: 'RequesterRecipientProvider']
+        ], 
+        replyTo: '$DEFAULT_REPLYTO',
+        to: '$DEFAULT_RECIPIENTS'
+    }
+}
+return this
+
diff --git a/tests/benchmark/ci/jenkinsfile/publishDailyImages.groovy b/tests/benchmark/ci/jenkinsfile/publishDailyImages.groovy
new file mode 100644
index 000000000..54ac5da9c
--- /dev/null
+++ b/tests/benchmark/ci/jenkinsfile/publishDailyImages.groovy
@@ -0,0 +1,46 @@
+timeout(time: 30, unit: 'MINUTES') {
+    def imageName = "milvus/engine:${DOCKER_VERSION}"
+    def remoteImageName = "milvusdb/daily-build:${REMOTE_DOCKER_VERSION}"
+    def localDockerRegistryImage = "${params.LOCAL_DOKCER_REGISTRY_URL}/${imageName}"
+    def remoteDockerRegistryImage = "${params.REMOTE_DOKCER_REGISTRY_URL}/${remoteImageName}"
+    try {
+        deleteImages("${localDockerRegistryImage}", true)
+
+        def pullSourceImageStatus = sh(returnStatus: true, script: "docker pull ${localDockerRegistryImage}")
+
+        if (pullSourceImageStatus == 0) {
+            def renameImageStatus = sh(returnStatus: true, script: "docker tag ${localDockerRegistryImage} ${remoteImageName} && docker rmi ${localDockerRegistryImage}")
+            def sourceImage = docker.image("${remoteImageName}")
+            docker.withRegistry("https://${params.REMOTE_DOKCER_REGISTRY_URL}", "${params.REMOTE_DOCKER_CREDENTIALS_ID}") {
+                sourceImage.push()
+                sourceImage.push("${REMOTE_DOCKER_LATEST_VERSION}")
+            }
+        } else {
+            echo "\"${localDockerRegistryImage}\" image does not exist !"
+        }
+    } catch (exc) {
+        throw exc
+    } finally {
+        deleteImages("${localDockerRegistryImage}", true)
+        deleteImages("${remoteDockerRegistryImage}", true)
+    }
+}
+
+boolean deleteImages(String imageName, boolean force) {
+    def imageNameStr = imageName.trim()
+    def isExistImage = sh(returnStatus: true, script: "docker inspect --type=image ${imageNameStr} 2>&1 > /dev/null")
+    if (isExistImage == 0) {
+        def deleteImageStatus = 0
+        if (force) {
+            def imageID = sh(returnStdout: true, script: "docker inspect --type=image --format \"{{.ID}}\" ${imageNameStr}")
+            deleteImageStatus = sh(returnStatus: true, script: "docker rmi -f ${imageID}")
+        } else {
+            deleteImageStatus = sh(returnStatus: true, script: "docker rmi ${imageNameStr}")
+        }
+
+        if (deleteImageStatus != 0) {
+            return false
+        }
+    }
+    return true
+}
diff --git a/tests/benchmark/ci/main_jenkinsfile b/tests/benchmark/ci/main_jenkinsfile
new file mode 100644
index 000000000..6084c562a
--- /dev/null
+++ b/tests/benchmark/ci/main_jenkinsfile
@@ -0,0 +1,148 @@
+pipeline {
+    agent none
+
+    options {
+        timestamps()
+    }
+
+    parameters{
+        string defaultValue: '0.11.1', description: 'server image version', name: 'IMAGE_VERSION', trim: true
+        choice choices: ['single', 'shards'], description: 'server deploy mode', name: 'DEPLOY_MODE'
+        string defaultValue: '011_data.json', description: 'test suite config yaml', name: 'CONFIG_FILE', trim: true
+        string defaultValue: 'shards.json', description: 'shards test suite config yaml', name: 'SHARDS_CONFIG_FILE', trim: true
+        string defaultValue: '09509e53-9125-4f5d-9ce8-42855987ad67', description: 'git credentials', name: 'GIT_USER', trim: true
+    }
+
+    environment {
+        HELM_URL = "https://github.com/milvus-io/milvus-helm.git"
+        HELM_BRANCH = "0.11.1"
+        TEST_URL = "git@192.168.1.105:Test/milvus_benchmark.git"
+        TEST_BRANCH = "0.11.1"
+        TEST_LIB_URL = "http://192.168.1.105:6060/Test/milvus_metrics.git"
+        HELM_RELEASE_NAME = "milvus-benchmark-test-${env.BUILD_NUMBER}"
+        HELM_SHARDS_RELEASE_NAME = "milvus-shards-benchmark-test-${env.BUILD_NUMBER}"
+    }
+
+    stages {
+        stage("Setup env") {
+            agent {
+                kubernetes {
+                    label "test-benchmark-${env.JOB_NAME}-${env.BUILD_NUMBER}"
+                    defaultContainer 'jnlp'
+                    yaml """
+                        apiVersion: v1
+                        kind: Pod
+                        metadata:
+                          labels:
+                            app: milvus
+                            componet: test
+                        spec:
+                          containers:
+                          - name: milvus-test-env
+                            image: registry.zilliz.com/milvus/milvus-test-env:v0.3
+                            command:
+                            - cat
+                            tty: true
+                            volumeMounts:
+                            - name: kubeconf
+                              mountPath: /root/.kube/
+                              readOnly: true
+                            - name: db-data-path
+                              mountPath: /test
+                              readOnly: false
+                          nodeSelector:
+                            kubernetes.io/hostname: idc-sh002
+                          tolerations:
+                          - key: worker
+                            operator: Equal
+                            value: performance
+                            effect: NoSchedule
+                          volumes:
+                          - name: kubeconf
+                            secret:
+                              secretName: test-cluster-config
+                          - name: db-data-path
+                            flexVolume:
+                              driver: "fstab/cifs"
+                              fsType: "cifs"
+                              secretRef:
+                                name: "cifs-test-secret"
+                              options:
+                                networkPath: "//172.16.70.249/test"
+                                mountOptions: "vers=1.0"
+                        """
+                }
+            }
+
+            stages {
+                stage("Publish Daily Docker images") {
+                    steps {
+                        container('milvus-test-env') {
+                            script {
+                                boolean isNightlyTest = isTimeTriggeredBuild()
+                                if (isNightlyTest) {
+                                    build job: 'milvus-publish-daily-docker', parameters: [string(name: 'LOCAL_DOKCER_REGISTRY_URL', value: 'registry.zilliz.com'), string(name: 'REMOTE_DOKCER_REGISTRY_URL', value: 'registry-1.docker.io'), string(name: 'REMOTE_DOCKER_CREDENTIALS_ID', value: 'milvus-docker-access-token'), string(name: 'BRANCH', value: String.valueOf(IMAGE_VERSION))], wait: false
+                                } else {
+                                    echo "Skip publish daily docker images ..."
+                                }
+                            }
+                        }
+                    }
+                }
+
+                stage("Deploy Test") {
+                    steps {
+                        container('milvus-test-env') {
+                            script {
+                                print "In Deploy Test Stage"
+                                if ("${params.DEPLOY_MODE}" == "single") {
+                                    load "${env.WORKSPACE}/ci/jenkinsfile/deploy_test.groovy"
+                                } else {
+                                    load "${env.WORKSPACE}/ci/jenkinsfile/deploy_shards_test.groovy"
+                                }
+                            }
+                        }
+                    }
+                }
+
+                stage ("Cleanup Env") {
+                    steps {
+                        container('milvus-test-env') {
+                            script {
+                                if ("${params.DEPLOY_MODE}" == "single") {
+                                    load "${env.WORKSPACE}/ci/jenkinsfile/cleanup.groovy"
+                                } else {
+                                    load "${env.WORKSPACE}/ci/jenkinsfile/cleanupShards.groovy"
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            post {
+                success {
+                    script {
+                        echo "Milvus benchmark test success !"
+                    }
+                }
+                aborted {
+                    script {
+                        echo "Milvus benchmark test aborted !"
+                    }
+                }
+                failure {
+                    script {
+                        echo "Milvus benchmark test failed !"
+                    }
+                }
+            }
+        }
+    }
+}
+
+boolean isTimeTriggeredBuild() {
+    if (currentBuild.getBuildCauses('hudson.triggers.TimerTrigger$TimerTriggerCause').size() != 0) {
+        return true
+    }
+    return false
+}
diff --git a/tests/benchmark/ci/pod_containers/milvus-testframework.yaml b/tests/benchmark/ci/pod_containers/milvus-testframework.yaml
new file mode 100644
index 000000000..6b1d6c7df
--- /dev/null
+++ b/tests/benchmark/ci/pod_containers/milvus-testframework.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    app: milvus
+    componet: testframework
+spec:
+  containers:
+  - name: milvus-testframework
+    image: registry.zilliz.com/milvus/milvus-test:v0.2
+    command:
+    - cat
+    tty: true
diff --git a/tests/benchmark/ci/publish_jenkinsfile b/tests/benchmark/ci/publish_jenkinsfile
new file mode 100644
index 000000000..38f8996b0
--- /dev/null
+++ b/tests/benchmark/ci/publish_jenkinsfile
@@ -0,0 +1,104 @@
+pipeline {
+    agent none
+
+    options {
+        timestamps()
+    }
+
+    parameters{
+        string defaultValue: 'registry.zilliz.com', description: 'Local Docker registry URL', name: 'LOCAL_DOKCER_REGISTRY_URL', trim: true
+        string defaultValue: 'registry-1.docker.io', description: 'Remote Docker registry URL', name: 'REMOTE_DOKCER_REGISTRY_URL', trim: true
+        string defaultValue: 'milvus-docker-access-token', description: 'Remote Docker credentials id', name: 'REMOTE_DOCKER_CREDENTIALS_ID', trim: true
+        string(defaultValue: "master", description: 'Milvus server version', name: 'BRANCH')
+    }
+
+    environment {
+        DAILY_BUILD_VERSION = VersionNumber([
+            versionNumberString : '${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
+        ]);
+    }
+
+    stages {
+        stage('Push Daily Docker Images') {
+            matrix {
+                agent none
+                axes {
+                    axis {
+                        name 'OS_NAME'
+                        values 'centos7'
+                    }
+
+                    axis {
+                        name 'CPU_ARCH'
+                        values 'amd64'
+                    }
+
+                    axis {
+                        name 'BINARY_VERSION'
+                        values 'gpu', 'cpu'
+                    }
+                }
+
+                stages {
+                    stage("Publish Docker Images") {
+                        environment {
+                            DOCKER_VERSION = "${params.BRANCH}-${BINARY_VERSION}-${OS_NAME}-release"
+                            REMOTE_DOCKER_VERSION = "${params.BRANCH}-${OS_NAME}-${BINARY_VERSION}-${DAILY_BUILD_VERSION}"
+                            REMOTE_DOCKER_LATEST_VERSION = "${params.BRANCH}-${OS_NAME}-${BINARY_VERSION}-latest"
+                        }
+
+                        agent {
+                            kubernetes {
+                                label "${OS_NAME}-${BINARY_VERSION}-publish-${env.BUILD_NUMBER}"
+                                defaultContainer 'jnlp'
+                                yaml """
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    app: publish
+    componet: docker
+spec:
+  containers:
+  - name: publish-images
+    image: registry.zilliz.com/library/docker:v1.0.0
+    securityContext:
+      privileged: true
+    command:
+    - cat
+    tty: true
+    resources:
+      limits:
+        memory: "4Gi"
+        cpu: "1.0"
+      requests:
+        memory: "2Gi"
+        cpu: "0.5"
+    volumeMounts:
+    - name: docker-sock
+      mountPath: /var/run/docker.sock
+  volumes:
+  - name: docker-sock
+    hostPath:
+      path: /var/run/docker.sock
+                                """
+                            }
+                        }
+
+                        stages {
+                            stage('Publish') {
+                                steps {
+                                    container('publish-images') {
+                                        script {
+                                            load "${env.WORKSPACE}/ci/jenkinsfile/publishDailyImages.groovy"
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/tests/benchmark/ci/scripts/yaml_processor.py b/tests/benchmark/ci/scripts/yaml_processor.py
new file mode 100755
index 000000000..0e6d7dbbf
--- /dev/null
+++ b/tests/benchmark/ci/scripts/yaml_processor.py
@@ -0,0 +1,536 @@
+#!/usr/bin/env python3
+
+import sys
+import argparse
+from argparse import Namespace
+import os, shutil
+import getopt
+from ruamel.yaml import YAML, yaml_object
+from ruamel.yaml.comments import CommentedSeq, CommentedMap
+from ruamel.yaml.tokens import CommentToken
+
+##
+yaml = YAML(typ="rt")
+## format yaml file
+yaml.indent(mapping=2, sequence=4, offset=2)
+
+
+############################################
+# Comment operation
+#
+############################################
+def _extract_comment(_comment):
+    """
+    remove '#' at start of comment
+    """
+    # if _comment is empty, do nothing
+    if not _comment:
+        return _comment
+
+    # str_ = _comment.lstrip(" ")
+    str_ = _comment.strip()
+    str_ = str_.lstrip("#")
+
+    return str_
+
+
+def _add_eol_comment(element, *args, **kwargs):
+    """
+    add_eol_comment
+    args --> (comment, key)
+    """
+    if element is None or \
+            (not isinstance(element, CommentedMap) and
+             not isinstance(element, CommentedSeq)) or \
+            args[0] is None or \
+            len(args[0]) == 0:
+        return
+
+    comment = args[0]
+    # comment is empty, do nothing
+    if not comment:
+        return
+
+    key = args[1]
+    try:
+        element.yaml_add_eol_comment(*args, **kwargs)
+    except Exception:
+        element.ca.items.pop(key, None)
+        element.yaml_add_eol_comment(*args, **kwargs)
+
+
+def _map_comment(_element, _key):
+    origin_comment = ""
+    token = _element.ca.items.get(_key, None)
+    if token is not None:
+        try:
+            origin_comment = token[2].value
+        except Exception:
+            try:
+                # comment is below element, add prefix "#\n"
+                col = _element.lc.col + 2
+                space_list = [" " for i in range(col)]
+                space_str = "".join(space_list)
+
+                origin_comment = "\n" + "".join([space_str + t.value for t in token[3]])
+            except Exception:
+                pass
+
+    return origin_comment
+
+
+def _seq_comment(_element, _index):
+    # get target comment
+    _comment = ""
+    token = _element.ca.items.get(_index, None)
+    if token is not None:
+        _comment = token[0].value
+
+    return _comment
+
+
+def _start_comment(_element):
+    _comment = ""
+    cmt = _element.ca.comment
+    try:
+        _comment = cmt[1][0].value
+    except Exception:
+        pass
+
+    return _comment
+
+
+def _comment_counter(_comment):
+    """
+
+    Count the comment markers and split them into a list.
+    """
+
+    x = lambda l: l.strip().strip("#").strip()
+
+    _counter = []
+    if _comment.startswith("\n"):
+        _counter.append("")
+        _counter.append(x(_comment[1:]))
+
+        return _counter
+    elif _comment.startswith("#\n"):
+        _counter.append("")
+        _counter.append(x(_comment[2:]))
+    else:
+        index = _comment.find("\n")
+        _counter.append(x(_comment[:index]))
+        _counter.append(x(_comment[index + 1:]))
+
+    return _counter
+
+
+def _obtain_comment(_m_comment, _t_comment):
+    if not _m_comment or not _t_comment:
+        return _m_comment or _t_comment
+
+    _m_counter = _comment_counter(_m_comment)
+    _t_counter = _comment_counter(_t_comment)
+
+    if not _m_counter[0] and not _t_counter[1]:
+        comment = _t_comment + _m_comment
+    elif not _m_counter[1] and not _t_counter[0]:
+        comment = _m_comment + _t_comment
+    elif _t_counter[0] and _t_counter[1]:
+        comment = _t_comment
+    elif not _t_counter[0] and not _t_counter[1]:
+        comment = _m_comment
+    elif not _m_counter[0] and not _m_counter[1]:
+        comment = _t_comment
+    else:
+        if _t_counter[0]:
+            comment = _m_comment.replace(_m_counter[0], _t_counter[0], 1)
+        else:
+            comment = _m_comment.replace(_m_counter[1], _t_counter[1], 1)
+
+    i = comment.find("\n\n")
+    while i >= 0:
+        comment = comment.replace("\n\n\n", "\n\n", 1)
+        i = comment.find("\n\n\n")
+
+    return comment
+
+
+############################################
+# Utils
+#
+############################################
+def _get_update_par(_args):
+    _dict = _args.__dict__
+
+    # file path
+    _in_file = _dict.get("f", None) or _dict.get("file", None)
+    # tips
+    _tips = _dict.get('tips', None) or "Input \"-h\" for more information"
+    # update
+    _u = _dict.get("u", None) or _dict.get("update", None)
+    # append
+    _a = _dict.get('a', None) or _dict.get('append', None)
+    # out stream group
+    _i = _dict.get("i", None) or _dict.get("inplace", None)
+    _o = _dict.get("o", None) or _dict.get("out_file", None)
+
+    return _in_file, _u, _a, _i, _o, _tips
+
+
+############################################
+# Element operation
+#
+############################################
+def update_map_element(element, key, value, comment, _type):
+    """
+     element:
+     key:
+     value:
+     comment:
+     _type:  value type.
+    """
+    if element is None or not isinstance(element, CommentedMap):
+        print("Only key-value update support")
+        sys.exit(1)
+
+    origin_comment = _map_comment(element, key)
+
+    sub_element = element.get(key, None)
+    if isinstance(sub_element, CommentedMap) or isinstance(sub_element, CommentedSeq):
+        print("Only support update a single value")
+
+    element.update({key: value})
+
+    comment = _obtain_comment(origin_comment, comment)
+    _add_eol_comment(element, _extract_comment(comment), key)
+
+
+def update_seq_element(element, value, comment, _type):
+    if element is None or not isinstance(element, CommentedSeq):
+        print("Param `-a` only use to append yaml list")
+        sys.exit(1)
+    element.append(str(value))
+
+    comment = _obtain_comment("", comment)
+    _add_eol_comment(element, _extract_comment(comment), len(element) - 1)
+
+
+def run_update(code, keys, value, comment, _app):
+    key_list = keys.split(".")
+
+    space_str = ":\n  "
+    key_str = "{}".format(key_list[0])
+    for key in key_list[1:]:
+        key_str = key_str + space_str + key
+        space_str = space_str + "  "
+    if not _app:
+        yaml_str = """{}: {}""".format(key_str, value)
+    else:
+        yaml_str = "{}{}- {}".format(key_str, space_str, value)
+
+    if comment:
+        yaml_str = "{} # {}".format(yaml_str, comment)
+
+    mcode = yaml.load(yaml_str)
+
+    _merge(code, mcode)
+
+
+def _update(code, _update, _app, _tips):
+    if not _update:
+        return code
+
+    _update_list = [l.strip() for l in _update.split(",")]
+    for l in _update_list:
+        try:
+            variant, comment = l.split("#")
+        except ValueError:
+            variant = l
+            comment = None
+
+        try:
+            keys, value = variant.split("=")
+            run_update(code, keys, value, comment, _app)
+        except ValueError:
+            print("Invalid format. print command \"--help\" get more info.")
+            sys.exit(1)
+
+    return code
+
+
+def _backup(in_file_p):
+    backup_p = in_file_p + ".bak"
+
+    if os.path.exists(backup_p):
+        os.remove(backup_p)
+
+    if not os.path.exists(in_file_p):
+        print("File {} not exists.".format(in_file_p))
+        sys.exit(1)
+
+    shutil.copyfile(in_file_p, backup_p)  # copy the original file to the backup path
+
+
+def _recovery(in_file_p):
+    backup_p = in_file_p + ".bak"
+
+    if not os.path.exists(in_file_p):
+        print("File {} not exists.".format(in_file_p))
+        sys.exit(1)
+    elif not os.path.exists(backup_p):
+        print("Backup file not exists")
+        sys.exit(0)
+
+    os.remove(in_file_p)
+
+    os.rename(backup_p, in_file_p)
+
+
+# master merge target
+def _merge(master, target):
+    if type(master) != type(target):
+        print("yaml format not match:\n")
+        yaml.dump(master, sys.stdout)
+        print("\n&&\n")
+        yaml.dump(target, sys.stdout)
+
+        sys.exit(1)
+
+    ## item is a sequence
+    if isinstance(target, CommentedSeq):
+        for index in range(len(target)):
+            # get target comment
+            target_comment = _seq_comment(target, index)
+
+            master_index = len(master)
+
+            target_item = target[index]
+
+            if isinstance(target_item, CommentedMap):
+                merge_flag = False
+                for idx in range(len(master)):
+                    if isinstance(master[idx], CommentedMap):
+                        if master[idx].keys() == target_item.keys():
+                            _merge(master[idx], target_item)
+                            # nonlocal merge_flag
+                            master_index = idx
+                            merge_flag = True
+                            break
+
+                if merge_flag is False:
+                    master.append(target_item)
+            elif target_item not in master:
+                master.append(target[index])
+            else:
+                # merge(master[index], target[index])
+                pass
+
+            # # remove enter signal in previous item
+            previous_comment = _seq_comment(master, master_index - 1)
+            _add_eol_comment(master, _extract_comment(previous_comment), master_index - 1)
+
+            origin_comment = _seq_comment(master, master_index)
+            comment = _obtain_comment(origin_comment, target_comment)
+            if len(comment) > 0:
+                _add_eol_comment(master, _extract_comment(comment) + "\n\n", len(master) - 1)
+
+    ## item is a map
+    elif isinstance(target, CommentedMap):
+        for item in target:
+            if item == "flag":
+                print("")
+            origin_comment = _map_comment(master, item)
+            target_comment = _map_comment(target, item)
+
+            # get origin start comment
+            origin_start_comment = _start_comment(master)
+
+            # get target start comment
+            target_start_comment = _start_comment(target)
+
+            m = master.get(item, default=None)
+            if m is None or \
+                    (not (isinstance(m, CommentedMap) or
+                          isinstance(m, CommentedSeq))):
+                master.update({item: target[item]})
+
+            else:
+                _merge(master[item], target[item])
+
+            comment = _obtain_comment(origin_comment, target_comment)
+            if len(comment) > 0:
+                _add_eol_comment(master, _extract_comment(comment), item)
+
+            start_comment = _obtain_comment(origin_start_comment, target_start_comment)
+            if len(start_comment) > 0:
+                master.yaml_set_start_comment(_extract_comment(start_comment))
+
+
+def _save(_code, _file):
+    with open(_file, 'w') as wf:
+        yaml.dump(_code, wf)
+
+
+def _load(_file):
+    with open(_file, 'r') as rf:
+        code = yaml.load(rf)
+    return code
+
+
+############################################
+# sub parser process operation
+#
+############################################
+def merge_yaml(_args):
+    _dict = _args.__dict__
+
+    _m_file = _dict.get("merge_file", None)
+    _in_file, _u, _a, _i, _o, _tips = _get_update_par(_args)
+
+    if not (_in_file and _m_file):
+        print(_tips)
+        sys.exit(1)
+
+    code = _load(_in_file)
+    mcode = _load(_m_file)
+
+    _merge(code, mcode)
+
+    _update(code, _u, _a, _tips)
+
+    if _i:
+        _backup(_in_file)
+        _save(code, _in_file)
+    elif _o:
+        _save(code, _o)
+    else:
+        print(_tips)
+        sys.exit(1)
+
+
+def update_yaml(_args):
+    _in_file, _u, _a, _i, _o, _tips = _get_update_par(_args)
+
+    if not _in_file or not _u:
+        print(_tips)
+        sys.exit(1)
+
+    code = _load(_in_file)
+
+    if _i and _o:
+        print(_tips)
+        sys.exit(1)
+
+    _update(code, _u, _a, _tips)
+
+    if _i:
+        _backup(_in_file)
+        _save(code, _in_file)
+    elif _o:
+        _save(code, _o)
+
+
+def reset(_args):
+    _dict = _args.__dict__
+    _f = _dict.get('f', None) or _dict.get('file', None)
+
+    if _f:
+        _recovery(_f)
+    else:
+        _t = _dict.get('tips', None) or "Input \"-h\" for more information"
+        print(_t)
+
+
+############################################
+# Cli operation
+#
+############################################
+def _set_merge_parser(_parsers):
+    """
+    config merge parser
+    """
+
+    merge_parser = _parsers.add_parser("merge", help="merge with another yaml file")
+
+    _set_merge_parser_arg(merge_parser)
+    _set_update_parser_arg(merge_parser)
+
+    merge_parser.set_defaults(
+        function=merge_yaml,
+        tips=merge_parser.format_help()
+    )
+
+
+def _set_merge_parser_arg(_parser):
+    """
+    config parser argument for merging
+    """
+
+    _parser.add_argument("-m", "--merge-file", help="indicate merge yaml file")
+
+
+def _set_update_parser(_parsers):
+    """
+    config update parser
+    """
+
+    update_parser = _parsers.add_parser("update", help="update with another yaml file")
+    _set_update_parser_arg(update_parser)
+
+    update_parser.set_defaults(
+        function=update_yaml,
+        tips=update_parser.format_help()
+    )
+
+
+def _set_update_parser_arg(_parser):
+    """
+    config parser argument for updating
+    """
+
+    _parser.add_argument("-f", "--file", help="source yaml file")
+    _parser.add_argument('-u', '--update', help="update with args, instance as \"a.b.c=d# d comment\"")
+    _parser.add_argument('-a', '--append', action="store_true", help="append to a seq")
+
+    group = _parser.add_mutually_exclusive_group()
+    group.add_argument("-o", "--out-file", help="indicate output yaml file")
+    group.add_argument("-i", "--inplace", action="store_true", help="indicate whether result store in origin file")
+
+
+def _set_reset_parser(_parsers):
+    """
+    config reset parser
+    """
+
+    reset_parser = _parsers.add_parser("reset", help="reset yaml file")
+
+    # indicate yaml file
+    reset_parser.add_argument('-f', '--file', help="indicate input yaml file")
+
+    reset_parser.set_defaults(
+        function=reset,
+        tips=reset_parser.format_help()
+    )
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    sub_parsers = parser.add_subparsers()
+
+    # set merge command
+    _set_merge_parser(sub_parsers)
+
+    # set update command
+    _set_update_parser(sub_parsers)
+
+    # set reset command
+    _set_reset_parser(sub_parsers)
+
+    # parse argument and run func
+    args = parser.parse_args()
+    args.function(args)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/benchmark/client.py b/tests/benchmark/client.py
new file mode 100644
index 000000000..e962de804
--- /dev/null
+++ b/tests/benchmark/client.py
@@ -0,0 +1,460 @@
+import sys
+import pdb
+import random
+import logging
+import json
+import time, datetime
+import traceback
+from multiprocessing import Process
+from milvus import Milvus, DataType
+import numpy as np
+import utils
+
# Logger shared by all benchmark client operations in this module.
logger = logging.getLogger("milvus_benchmark.client")

# Fallback server endpoint used when MilvusClient is constructed without
# an explicit host/port.
SERVER_HOST_DEFAULT = "127.0.0.1"
SERVER_PORT_DEFAULT = 19530
# Maps the lowercase index names used in benchmark configs to the
# index-type identifiers expected by the Milvus SDK.
INDEX_MAP = {
    "flat": "FLAT",
    "ivf_flat": "IVF_FLAT",
    "ivf_sq8": "IVF_SQ8",
    "nsg": "NSG",
    "ivf_sq8h": "IVF_SQ8_HYBRID",
    "ivf_pq": "IVF_PQ",
    "hnsw": "HNSW",
    "annoy": "ANNOY",
    "bin_flat": "BIN_FLAT",
    "bin_ivf_flat": "BIN_IVF_FLAT",
    "rhnsw_pq": "RHNSW_PQ",
    "rhnsw_sq": "RHNSW_SQ"
}
# Maximum top-1 distance for a search hit to count as an exact match
# (used by MilvusClient.check_result_ids).
epsilon = 0.1
+
+
def time_wrapper(func):
    """
    Decorator that logs the wall-clock execution time of the decorated call.

    The wrapped callable accepts an extra keyword flag ``log`` (default True)
    which is stripped from kwargs before calling `func`, so `func` never sees
    it. Pass ``log=False`` to suppress the timing message.
    """
    from functools import wraps

    # functools.wraps preserves func's __name__/__doc__ on the wrapper so
    # introspection and logging keep reporting the real function.
    @wraps(func)
    def wrapper(*args, **kwargs):
        # pop() both reads the flag and removes it from kwargs in one step.
        log = kwargs.pop("log", True)
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        if log:
            logger.debug("Milvus {} run in {}s".format(func.__name__, round(end - start, 2)))
        return result

    return wrapper
+
+
class MilvusClient(object):
    """
    Thin benchmark-oriented wrapper around the Milvus python SDK.

    An instance is bound to one default collection name (set in __init__);
    most methods accept an optional ``collection_name`` argument that
    overrides the default per call.
    """

    def __init__(self, collection_name=None, host=None, port=None, timeout=180):
        """
        Connect to a Milvus server, retrying until `timeout` seconds elapse.

        :param collection_name: default collection this client operates on
        :param host: server host; falls back to SERVER_HOST_DEFAULT when falsy
        :param port: server port; falls back to SERVER_PORT_DEFAULT when falsy
        :param timeout: overall connection deadline in seconds
        :raises Exception: "Server connect timeout" when the deadline passes
        """
        self._collection_name = collection_name
        start_time = time.time()
        if not host:
            host = SERVER_HOST_DEFAULT
        if not port:
            port = SERVER_PORT_DEFAULT
        logger.debug(host)
        logger.debug(port)
        # retry connect remote server
        i = 0
        while time.time() < start_time + timeout:
            try:
                self._milvus = Milvus(
                    host=host,
                    port=port,
                    try_connect=False,
                    pre_ping=False)
                break
            except Exception as e:
                logger.error(str(e))
                logger.error("Milvus connect failed: %d times" % i)
                i = i + 1
                # sleep a little longer after every failed attempt
                time.sleep(i)

        # NOTE(review): if the connection succeeds just before the deadline,
        # this still raises even though self._milvus was set — consider
        # tracking success with an explicit flag instead of re-checking time.
        if time.time() > start_time + timeout:
            raise Exception("Server connect timeout")
        # self._metric_type = None

    def __str__(self):
        # Human-readable identity used in log messages.
        return 'Milvus collection %s' % self._collection_name

    def check_status(self, status):
        """Log server state and raise when an old-style status object is not OK."""
        if not status.OK():
            logger.error(status.message)
            logger.error(self._milvus.server_status())
            logger.error(self.count())
            raise Exception("Status not ok")

    def check_result_ids(self, result):
        """
        Sanity-check a search result: the top-1 hit of every query is expected
        to be an (almost) exact match, i.e. its distance is below `epsilon`.
        """
        for index, item in enumerate(result):
            if item[0].distance >= epsilon:
                logger.error(index)
                logger.error(item[0].distance)
                raise Exception("Distance wrong")

    # only support the given field name
    def create_collection(self, dimension, data_type=DataType.FLOAT_VECTOR, auto_id=False,
                          collection_name=None, other_fields=None):
        """
        Create a collection with one vector field plus optional scalar fields.

        :param dimension: vector dimensionality
        :param data_type: vector field type (FLOAT_VECTOR / BINARY_VECTOR)
        :param auto_id: let the server assign entity ids when True
        :param collection_name: overrides the default collection name
        :param other_fields: comma-separated extras; "int" adds an INT64
                             field, "float" adds a FLOAT field
        :raises: re-raises any SDK error after logging it
        """
        self._dimension = dimension
        if not collection_name:
            collection_name = self._collection_name
        vec_field_name = utils.get_default_field_name(data_type)
        fields = [{"name": vec_field_name, "type": data_type, "params": {"dim": dimension}}]
        if other_fields:
            other_fields = other_fields.split(",")
            if "int" in other_fields:
                fields.append({"name": utils.DEFAULT_INT_FIELD_NAME, "type": DataType.INT64})
            if "float" in other_fields:
                fields.append({"name": utils.DEFAULT_FLOAT_FIELD_NAME, "type": DataType.FLOAT})
        create_param = {
            "fields": fields,
            "auto_id": auto_id}
        try:
            self._milvus.create_collection(collection_name, create_param)
            logger.info("Create collection: <%s> successfully" % collection_name)
        except Exception as e:
            logger.error(str(e))
            raise

    def create_partition(self, tag, collection_name=None):
        """Create partition `tag` in the given (or default) collection."""
        if not collection_name:
            collection_name = self._collection_name
        self._milvus.create_partition(collection_name, tag)

    def generate_values(self, data_type, vectors, ids):
        """
        Produce the value column for one field: the ids for integer fields,
        ids cast to float for float fields, the raw vectors for vector fields.
        Returns None for any other field type.
        """
        values = None
        if data_type in [DataType.INT32, DataType.INT64]:
            values = ids
        elif data_type in [DataType.FLOAT, DataType.DOUBLE]:
            values = [(i + 0.0) for i in ids]
        elif data_type in [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR]:
            values = vectors
        return values

    def generate_entities(self, vectors, ids=None, collection_name=None):
        """Build an insert payload matching the collection's schema fields."""
        entities = []
        if collection_name is None:
            collection_name = self._collection_name
        info = self.get_info(collection_name)
        for field in info["fields"]:
            field_type = field["type"]
            entities.append(
                {"name": field["name"], "type": field_type, "values": self.generate_values(field_type, vectors, ids)})
        return entities

    @time_wrapper
    def insert(self, entities, ids=None, collection_name=None):
        """
        Insert entities and return the resulting ids.

        NOTE(review): SDK errors are logged and swallowed, making the method
        return None — callers must handle a None result.
        """
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        try:
            insert_ids = self._milvus.insert(tmp_collection_name, entities, ids=ids)
            return insert_ids
        except Exception as e:
            logger.error(str(e))

    def get_dimension(self):
        """Return the dim param of the first vector field, or None if absent."""
        info = self.get_info()
        for field in info["fields"]:
            if field["type"] in [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR]:
                return field["params"]["dim"]

    def get_rand_ids(self, length):
        """
        Sample up to `length` entity ids from one randomly chosen segment.

        Loops until a non-empty segment is found; returns fewer ids (with a
        debug log) when the chosen segment holds less than `length` entries.
        """
        segment_ids = []
        while True:
            stats = self.get_stats()
            segments = stats["partitions"][0]["segments"]
            # random choice one segment
            segment = random.choice(segments)
            try:
                segment_ids = self._milvus.list_id_in_segment(self._collection_name, segment["id"])
            except Exception as e:
                logger.error(str(e))
            if not len(segment_ids):
                continue
            elif len(segment_ids) > length:
                return random.sample(segment_ids, length)
            else:
                logger.debug("Reset length: %d" % len(segment_ids))
                return segment_ids

    # def get_rand_ids_each_segment(self, length):
    #     res = []
    #     status, stats = self._milvus.get_collection_stats(self._collection_name)
    #     self.check_status(status)
    #     segments = stats["partitions"][0]["segments"]
    #     segments_num = len(segments)
    #     # random choice from each segment
    #     for segment in segments:
    #         status, segment_ids = self._milvus.list_id_in_segment(self._collection_name, segment["name"])
    #         self.check_status(status)
    #         res.extend(segment_ids[:length])
    #     return segments_num, res

    # def get_rand_entities(self, length):
    #     ids = self.get_rand_ids(length)
    #     status, get_res = self._milvus.get_entity_by_id(self._collection_name, ids)
    #     self.check_status(status)
    #     return ids, get_res

    def get(self):
        """Fetch one random entity id (read-path smoke test; result ignored)."""
        get_ids = random.randint(1, 1000000)
        self._milvus.get_entity_by_id(self._collection_name, [get_ids])

    @time_wrapper
    def get_entities(self, get_ids):
        """Fetch entities by id from the default collection."""
        get_res = self._milvus.get_entity_by_id(self._collection_name, get_ids)
        return get_res

    @time_wrapper
    def delete(self, ids, collection_name=None):
        """Delete entities by id from the given (or default) collection."""
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        self._milvus.delete_entity_by_id(tmp_collection_name, ids)

    def delete_rand(self):
        """Delete a random batch of ids, flush, and assert they are gone."""
        delete_id_length = random.randint(1, 100)
        count_before = self.count()
        logger.debug("%s: length to delete: %d" % (self._collection_name, delete_id_length))
        delete_ids = self.get_rand_ids(delete_id_length)
        self.delete(delete_ids)
        self.flush()
        logger.info("%s: count after delete: %d" % (self._collection_name, self.count()))
        get_res = self._milvus.get_entity_by_id(self._collection_name, delete_ids)
        for item in get_res:
            assert not item
        # if count_before - len(delete_ids) < self.count():
        #     logger.error(delete_ids)
        #     raise Exception("Error occured")

    @time_wrapper
    def flush(self,_async=False, collection_name=None):
        """Flush the collection; with _async=True the call returns early."""
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        self._milvus.flush([tmp_collection_name], _async=_async)

    @time_wrapper
    def compact(self, collection_name=None):
        """Compact the collection and validate the returned status."""
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        status = self._milvus.compact(tmp_collection_name)
        self.check_status(status)

    @time_wrapper
    def create_index(self, field_name, index_type, metric_type, _async=False, index_param=None):
        """
        Build an index on `field_name` of the default collection.

        :param index_type: short name, translated through INDEX_MAP
        :param metric_type: short metric name, translated by utils.metric_type_trans
        :param _async: forward the SDK's asynchronous build flag
        :param index_param: index-specific params dict (may be None)
        :raises KeyError: when `index_type` is not in INDEX_MAP
        """
        index_type = INDEX_MAP[index_type]
        metric_type = utils.metric_type_trans(metric_type)
        logger.info("Building index start, collection_name: %s, index_type: %s, metric_type: %s" % (
            self._collection_name, index_type, metric_type))
        if index_param:
            logger.info(index_param)
        index_params = {
            "index_type": index_type,
            "metric_type": metric_type,
            "params": index_param
        }
        self._milvus.create_index(self._collection_name, field_name, index_params, _async=_async)

    # TODO: need to check
    def describe_index(self, field_name):
        """
        Return {"index_type": <short name>, "index_param": ...} for the first
        recognized index on any field; defaults to flat/None when none found.
        """
        # stats = self.get_stats()
        info = self._milvus.describe_index(self._collection_name, field_name)
        index_info = {"index_type": "flat", "index_param": None}
        for field in info["fields"]:
            for index in field['indexes']:
                if not index or "index_type" not in index:
                    continue
                else:
                    # translate the SDK's index-type string back to the short name
                    for k, v in INDEX_MAP.items():
                        if index['index_type'] == v:
                            index_info['index_type'] = k
                            index_info['index_param'] = index['params']
                            return index_info
        return index_info

    def drop_index(self, field_name):
        """Drop the index on `field_name` of the default collection."""
        logger.info("Drop index: %s" % self._collection_name)
        return self._milvus.drop_index(self._collection_name, field_name)

    @time_wrapper
    def query(self, vector_query, filter_query=None, collection_name=None):
        """Run a search combining the vector query with optional filter clauses."""
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        must_params = [vector_query]
        if filter_query:
            must_params.extend(filter_query)
        query = {
            "bool": {"must": must_params}
        }
        result = self._milvus.search(tmp_collection_name, query)
        return result

    @time_wrapper
    def load_and_query(self, vector_query, filter_query=None, collection_name=None):
        """Like query(), but loads the collection into memory first."""
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        must_params = [vector_query]
        if filter_query:
            must_params.extend(filter_query)
        query = {
            "bool": {"must": must_params}
        }
        self.load_collection(tmp_collection_name)
        result = self._milvus.search(tmp_collection_name, query)
        return result

    def get_ids(self, result):
        """
        Regroup a search result's flat id list into per-query chunks.

        top_k is inferred as len(all ids) // number of queries, so this
        assumes every query returned the same number of hits.
        """
        idss = result._entities.ids
        ids = []
        len_idss = len(idss)
        len_r = len(result)
        top_k = len_idss // len_r
        for offset in range(0, len_idss, top_k):
            ids.append(idss[offset: min(offset + top_k, len_idss)])
        return ids

    def query_rand(self, nq_max=100):
        """Issue one search with randomized nq/top_k/nprobe (ivf-style load test)."""
        # for ivf search
        dimension = 128
        top_k = random.randint(1, 100)
        nq = random.randint(1, nq_max)
        nprobe = random.randint(1, 100)
        search_param = {"nprobe": nprobe}
        query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
        metric_type = random.choice(["l2", "ip"])
        logger.info("%s, Search nq: %d, top_k: %d, nprobe: %d" % (self._collection_name, nq, top_k, nprobe))
        # NOTE(review): called without a data type here, unlike
        # create_collection — verify utils.get_default_field_name has a default.
        vec_field_name = utils.get_default_field_name()
        vector_query = {"vector": {vec_field_name: {
            "topk": top_k,
            "query": query_vectors,
            "metric_type": utils.metric_type_trans(metric_type),
            "params": search_param}
        }}
        self.query(vector_query)

    def load_query_rand(self, nq_max=100):
        """Same as query_rand(), but loads the collection before searching."""
        # for ivf search
        dimension = 128
        top_k = random.randint(1, 100)
        nq = random.randint(1, nq_max)
        nprobe = random.randint(1, 100)
        search_param = {"nprobe": nprobe}
        query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
        metric_type = random.choice(["l2", "ip"])
        logger.info("%s, Search nq: %d, top_k: %d, nprobe: %d" % (self._collection_name, nq, top_k, nprobe))
        # NOTE(review): same no-argument call as in query_rand — confirm.
        vec_field_name = utils.get_default_field_name()
        vector_query = {"vector": {vec_field_name: {
            "topk": top_k,
            "query": query_vectors,
            "metric_type": utils.metric_type_trans(metric_type),
            "params": search_param}
        }}
        self.load_and_query(vector_query)

    # TODO: need to check
    def count(self, collection_name=None):
        """Return the row count reported by the collection stats."""
        if collection_name is None:
            collection_name = self._collection_name
        row_count = self._milvus.get_collection_stats(collection_name)["row_count"]
        logger.debug("Row count: %d in collection: <%s>" % (row_count, collection_name))
        return row_count

    def drop(self, timeout=120, collection_name=None):
        """
        Drop a collection and poll (1s interval) until its row count reaches
        zero or `timeout` seconds elapse; a count() failure is treated as
        "already dropped" and ends the wait.
        """
        timeout = int(timeout)
        if collection_name is None:
            collection_name = self._collection_name
        logger.info("Start delete collection: %s" % collection_name)
        self._milvus.drop_collection(collection_name)
        i = 0
        while i < timeout:
            try:
                row_count = self.count(collection_name=collection_name)
                if row_count:
                    time.sleep(1)
                    i = i + 1
                    continue
                else:
                    break
            except Exception as e:
                logger.debug(str(e))
                break
        if i >= timeout:
            logger.error("Delete collection timeout")

    def get_stats(self):
        """Collection statistics (partitions/segments) for the default collection."""
        return self._milvus.get_collection_stats(self._collection_name)

    def get_info(self, collection_name=None):
        """Collection schema info (fields and their params)."""
        # pdb.set_trace()
        if collection_name is None:
            collection_name = self._collection_name
        return self._milvus.get_collection_info(collection_name)

    def show_collections(self):
        """List all collection names known to the server."""
        return self._milvus.list_collections()

    def exists_collection(self, collection_name=None):
        """Return whether the given (or default) collection exists."""
        if collection_name is None:
            collection_name = self._collection_name
        res = self._milvus.has_collection(collection_name)
        return res

    def clean_db(self):
        """Drop every collection currently on the server."""
        collection_names = self.show_collections()
        for name in collection_names:
            self.drop(collection_name=name)

    @time_wrapper
    def load_collection(self, collection_name=None):
        """Load the collection into memory (long 3000s timeout)."""
        if collection_name is None:
            collection_name = self._collection_name
        return self._milvus.load_collection(collection_name, timeout=3000)

    @time_wrapper
    def release_collection(self, collection_name=None):
        """Release the collection from memory."""
        if collection_name is None:
            collection_name = self._collection_name
        return self._milvus.release_collection(collection_name, timeout=3000)

    @time_wrapper
    def load_partitions(self, tag_names, collection_name=None):
        """Load the given partitions of the collection into memory."""
        if collection_name is None:
            collection_name = self._collection_name
        return self._milvus.load_partitions(collection_name, tag_names, timeout=3000)

    @time_wrapper
    def release_partitions(self, tag_names, collection_name=None):
        """Release the given partitions of the collection from memory."""
        if collection_name is None:
            collection_name = self._collection_name
        return self._milvus.release_partitions(collection_name, tag_names, timeout=3000)

    # TODO: remove
    # def get_server_version(self):
    #     return self._milvus.server_version()

    # def get_server_mode(self):
    #     return self.cmd("mode")

    # def get_server_commit(self):
    #     return self.cmd("build_commit_id")

    # def get_server_config(self):
    #     return json.loads(self.cmd("get_milvus_config"))

    # def get_mem_info(self):
    #     result = json.loads(self.cmd("get_system_info"))
    #     result_human = {
    #         # unit: Gb
    #         "memory_used": round(int(result["memory_used"]) / (1024 * 1024 * 1024), 2)
    #     }
    #     return result_human

    # def cmd(self, command):
    #     res = self._milvus._cmd(command)
    #     logger.info("Server command: %s, result: %s" % (command, res))
    #     return res

    # @time_wrapper
    # def set_config(self, parent_key, child_key, value):
    #     self._milvus.set_config(parent_key, child_key, value)

    # def get_config(self, key):
    #     return self._milvus.get_config(key)
diff --git a/tests/benchmark/docker_runner.py b/tests/benchmark/docker_runner.py
new file mode 100644
index 000000000..406f3524d
--- /dev/null
+++ b/tests/benchmark/docker_runner.py
@@ -0,0 +1,366 @@
+import os
+import logging
+import pdb
+import time
+import random
+from multiprocessing import Process
+import numpy as np
+from client import MilvusClient
+import utils
+import parser
+from runner import Runner
+
# Logger for the docker-mode benchmark runner.
logger = logging.getLogger("milvus_benchmark.docker")
+
+
+class DockerRunner(Runner):
+    """run docker mode"""
+    def __init__(self, image):
+        super(DockerRunner, self).__init__()
+        self.image = image
+        
+    def run(self, definition, run_type=None):
+        if run_type == "performance":
+            for op_type, op_value in definition.items():
+                # run docker mode
+                run_count = op_value["run_count"]
+                run_params = op_value["params"]
+                container = None
+                
+                if op_type == "insert":
+                    if not run_params:
+                        logger.debug("No run params")
+                        continue
+                    for index, param in enumerate(run_params):
+                        logger.info("Definition param: %s" % str(param))
+                        collection_name = param["collection_name"]
+                        volume_name = param["db_path_prefix"]
+                        print(collection_name)
+                        (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
+                        for k, v in param.items():
+                            if k.startswith("server."):
+                                # Update server config
+                                utils.modify_config(k, v, type="server", db_slave=None)
+                        container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
+                        time.sleep(2)
+                        milvus = MilvusClient(collection_name)
+                        # Check has collection or not
+                        if milvus.exists_collection():
+                            milvus.delete()
+                            time.sleep(10)
+                        milvus.create_collection(collection_name, dimension, index_file_size, metric_type)
+                        # debug
+                        # milvus.create_index("ivf_sq8", 16384)
+                        res = self.do_insert(milvus, collection_name, data_type, dimension, collection_size, param["ni_per"])
+                        logger.info(res)
+                        # wait for file merge
+                        time.sleep(collection_size * dimension / 5000000)
+                        # Clear up
+                        utils.remove_container(container)
+
+                elif op_type == "query":
+                    for index, param in enumerate(run_params):
+                        logger.info("Definition param: %s" % str(param))
+                        collection_name = param["dataset"]
+                        volume_name = param["db_path_prefix"]
+                        (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
+                        for k, v in param.items():
+                            if k.startswith("server."):                   
+                                utils.modify_config(k, v, type="server")
+                        container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
+                        time.sleep(2)
+                        milvus = MilvusClient(collection_name)
+                        logger.debug(milvus.show_collections())
+                        # Check has collection or not
+                        if not milvus.exists_collection():
+                            logger.warning("Table %s not existed, continue exec next params ..." % collection_name)
+                            continue
+                        # parse index info
+                        index_types = param["index.index_types"]
+                        nlists = param["index.nlists"]
+                        # parse top-k, nq, nprobe
+                        top_ks, nqs, nprobes = parser.search_params_parser(param)
+                        for index_type in index_types:
+                            for nlist in nlists:
+                                result = milvus.describe_index()
+                                logger.info(result)
+                                # milvus.drop_index()
+                                # milvus.create_index(index_type, nlist)
+                                result = milvus.describe_index()
+                                logger.info(result)
+                                logger.info(milvus.count())
+                                # preload index
+                                milvus.preload_collection()
+                                logger.info("Start warm up query")
+                                res = self.do_query(milvus, collection_name, [1], [1], 1, 1)
+                                logger.info("End warm up query")
+                                # Run query test
+                                for nprobe in nprobes:
+                                    logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
+                                    res = self.do_query(milvus, collection_name, top_ks, nqs, nprobe, run_count)
+                                    headers = ["Nq/Top-k"]
+                                    headers.extend([str(top_k) for top_k in top_ks])
+                                    utils.print_collection(headers, nqs, res)
+                        utils.remove_container(container)
+
+        elif run_type == "insert_performance":
+            for op_type, op_value in definition.items():
+                # run docker mode
+                run_count = op_value["run_count"]
+                run_params = op_value["params"]
+                container = None
+                if not run_params:
+                    logger.debug("No run params")
+                    continue
+                for index, param in enumerate(run_params):
+                    logger.info("Definition param: %s" % str(param))
+                    collection_name = param["collection_name"]
+                    volume_name = param["db_path_prefix"]
+                    print(collection_name)
+                    (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
+                    for k, v in param.items():
+                        if k.startswith("server."):
+                            # Update server config
+                            utils.modify_config(k, v, type="server", db_slave=None)
+                    container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
+                    time.sleep(2)
+                    milvus = MilvusClient(collection_name)
+                    # Check has collection or not
+                    if milvus.exists_collection():
+                        milvus.delete()
+                        time.sleep(10)
+                    milvus.create_collection(collection_name, dimension, index_file_size, metric_type)
+                    # debug
+                    # milvus.create_index("ivf_sq8", 16384)
+                    res = self.do_insert(milvus, collection_name, data_type, dimension, collection_size, param["ni_per"])
+                    logger.info(res)
+                    # wait for file merge
+                    time.sleep(collection_size * dimension / 5000000)
+                    # Clear up
+                    utils.remove_container(container)
+
+        elif run_type == "search_performance":
+            for op_type, op_value in definition.items():
+                # run docker mode
+                run_count = op_value["run_count"]
+                run_params = op_value["params"]
+                container = None
+                for index, param in enumerate(run_params):
+                    logger.info("Definition param: %s" % str(param))
+                    collection_name = param["dataset"]
+                    volume_name = param["db_path_prefix"]
+                    (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
+                    for k, v in param.items():
+                        if k.startswith("server."):                   
+                            utils.modify_config(k, v, type="server")
+                    container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
+                    time.sleep(2)
+                    milvus = MilvusClient(collection_name)
+                    logger.debug(milvus.show_collections())
+                    # Check has collection or not
+                    if not milvus.exists_collection():
+                        logger.warning("Table %s not existed, continue exec next params ..." % collection_name)
+                        continue
+                    # parse index info
+                    index_types = param["index.index_types"]
+                    nlists = param["index.nlists"]
+                    # parse top-k, nq, nprobe
+                    top_ks, nqs, nprobes = parser.search_params_parser(param)
+                    for index_type in index_types:
+                        for nlist in nlists:
+                            result = milvus.describe_index()
+                            logger.info(result)
+                            # milvus.drop_index()
+                            # milvus.create_index(index_type, nlist)
+                            result = milvus.describe_index()
+                            logger.info(result)
+                            logger.info(milvus.count())
+                            # preload index
+                            milvus.preload_collection()
+                            logger.info("Start warm up query")
+                            res = self.do_query(milvus, collection_name, [1], [1], 1, 1)
+                            logger.info("End warm up query")
+                            # Run query test
+                            for nprobe in nprobes:
+                                logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
+                                res = self.do_query(milvus, collection_name, top_ks, nqs, nprobe, run_count)
+                                headers = ["Nq/Top-k"]
+                                headers.extend([str(top_k) for top_k in top_ks])
+                                utils.print_collection(headers, nqs, res)
+                    utils.remove_container(container)
+
+        elif run_type == "accuracy":
+            """
+            {
+                "dataset": "random_50m_1024_512", 
+                "index.index_types": ["flat", ivf_flat", "ivf_sq8"],
+                "index.nlists": [16384],
+                "nprobes": [1, 32, 128], 
+                "nqs": [100],
+                "top_ks": [1, 64], 
+                "server.use_blas_threshold": 1100, 
+                "server.cpu_cache_capacity": 256
+            }
+            """
+            for op_type, op_value in definition.items():
+                if op_type != "query":
+                    logger.warning("invalid operation: %s in accuracy test, only support query operation" % op_type)
+                    break
+                run_count = op_value["run_count"]
+                run_params = op_value["params"]
+                container = None
+
+                for index, param in enumerate(run_params):
+                    logger.info("Definition param: %s" % str(param))
+                    collection_name = param["dataset"]
+                    sift_acc = False
+                    if "sift_acc" in param:
+                        sift_acc = param["sift_acc"]
+                    (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
+                    for k, v in param.items():
+                        if k.startswith("server."):                   
+                            utils.modify_config(k, v, type="server")
+                    volume_name = param["db_path_prefix"]
+                    container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
+                    time.sleep(2)
+                    milvus = MilvusClient(collection_name)
+                    # Check has collection or not
+                    if not milvus.exists_collection():
+                        logger.warning("Table %s not existed, continue exec next params ..." % collection_name)
+                        continue
+
+                    # parse index info
+                    index_types = param["index.index_types"]
+                    nlists = param["index.nlists"]
+                    # parse top-k, nq, nprobe
+                    top_ks, nqs, nprobes = parser.search_params_parser(param)
+                    if sift_acc is True:
+                        # preload groundtruth data
+                        true_ids_all = self.get_groundtruth_ids(collection_size)
+                    acc_dict = {}
+                    for index_type in index_types:
+                        for nlist in nlists:
+                            result = milvus.describe_index()
+                            logger.info(result)
+                            milvus.create_index(index_type, nlist)
+                            # preload index
+                            milvus.preload_collection()
+                            # Run query test
+                            for nprobe in nprobes:
+                                logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
+                                for top_k in top_ks:
+                                    for nq in nqs:
+                                        result_ids = []
+                                        id_prefix = "%s_index_%s_nlist_%s_metric_type_%s_nprobe_%s_top_k_%s_nq_%s" % \
+                                                    (collection_name, index_type, nlist, metric_type, nprobe, top_k, nq)
+                                        if sift_acc is False:
+                                            self.do_query_acc(milvus, collection_name, top_k, nq, nprobe, id_prefix)
+                                            if index_type != "flat":
+                                                # Compute accuracy
+                                                base_name = "%s_index_flat_nlist_%s_metric_type_%s_nprobe_%s_top_k_%s_nq_%s" % \
+                                                    (collection_name, nlist, metric_type, nprobe, top_k, nq)
+                                                avg_acc = self.compute_accuracy(base_name, id_prefix)
+                                                logger.info("Query: <%s> accuracy: %s" % (id_prefix, avg_acc))
+                                        else:
+                                            result_ids, result_distances = self.do_query_ids(milvus, collection_name, top_k, nq, nprobe)
+                                            debug_file_ids = "0.5.3_result_ids"
+                                            debug_file_distances = "0.5.3_result_distances"
+                                            with open(debug_file_ids, "w+") as fd:
+                                                total = 0
+                                                for index, item in enumerate(result_ids):
+                                                    true_item = true_ids_all[:nq, :top_k].tolist()[index]
+                                                    tmp = set(item).intersection(set(true_item))
+                                                    total = total + len(tmp)
+                                                    fd.write("query: N-%d, intersection: %d, total: %d\n" % (index, len(tmp), total))
+                                                    fd.write("%s\n" % str(item))
+                                                    fd.write("%s\n" % str(true_item))
+                                            acc_value = self.get_recall_value(true_ids_all[:nq, :top_k].tolist(), result_ids)
+                                            logger.info("Query: <%s> accuracy: %s" % (id_prefix, acc_value))
+                    # # print accuracy collection
+                    # headers = [collection_name]
+                    # headers.extend([str(top_k) for top_k in top_ks])
+                    # utils.print_collection(headers, nqs, res)
+
+                    # remove container, and run next definition
+                    logger.info("remove container, and run next definition")
+                    utils.remove_container(container)
+
+        elif run_type == "stability":
+            for op_type, op_value in definition.items():
+                if op_type != "query":
+                    logger.warning("invalid operation: %s in accuracy test, only support query operation" % op_type)
+                    break
+                run_count = op_value["run_count"]
+                run_params = op_value["params"]
+                container = None
+                for index, param in enumerate(run_params):
+                    logger.info("Definition param: %s" % str(param))
+                    collection_name = param["dataset"]
+                    index_type = param["index_type"]
+                    volume_name = param["db_path_prefix"]
+                    (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
+                    
+                    # set default test time
+                    if "during_time" not in param:
+                        during_time = 100 # seconds
+                    else:
+                        during_time = int(param["during_time"]) * 60
+                    # set default query process num
+                    if "query_process_num" not in param:
+                        query_process_num = 10
+                    else:
+                        query_process_num = int(param["query_process_num"])
+
+                    for k, v in param.items():
+                        if k.startswith("server."):                   
+                            utils.modify_config(k, v, type="server")
+
+                    container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
+                    time.sleep(2)
+                    milvus = MilvusClient(collection_name)
+                    # Check has collection or not
+                    if not milvus.exists_collection():
+                        logger.warning("Table %s not existed, continue exec next params ..." % collection_name)
+                        continue
+
+                    start_time = time.time()
+                    insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(10000)]
+                    i = 0
+                    while time.time() < start_time + during_time:
+                        i = i + 1
+                        processes = []
+                        # do query
+                        # for i in range(query_process_num):
+                        #     milvus_instance = MilvusClient(collection_name)
+                        #     top_k = random.choice([x for x in range(1, 100)])
+                        #     nq = random.choice([x for x in range(1, 100)])
+                        #     nprobe = random.choice([x for x in range(1, 1000)])
+                        #     # logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
+                        #     p = Process(target=self.do_query, args=(milvus_instance, collection_name, [top_k], [nq], [nprobe], run_count, ))
+                        #     processes.append(p)
+                        #     p.start()
+                        #     time.sleep(0.1)
+                        # for p in processes:
+                        #     p.join()
+                        milvus_instance = MilvusClient(collection_name)
+                        top_ks = random.sample([x for x in range(1, 100)], 3)
+                        nqs = random.sample([x for x in range(1, 1000)], 3)
+                        nprobe = random.choice([x for x in range(1, 500)])
+                        res = self.do_query(milvus, collection_name, top_ks, nqs, nprobe, run_count)
+                        if i % 10 == 0:
+                            status, res = milvus_instance.insert(insert_vectors, ids=[x for x in range(len(insert_vectors))])
+                            if not status.OK():
+                                logger.error(status)
+                            # status = milvus_instance.drop_index()
+                            # if not status.OK():
+                            #     logger.error(status)
+                            # index_type = random.choice(["flat", "ivf_flat", "ivf_sq8"])
+                            milvus_instance.create_index(index_type, 16384)
+                            result = milvus.describe_index()
+                            logger.info(result)
+                            # milvus_instance.create_index("ivf_sq8", 16384)
+                    utils.remove_container(container)
+
+        else:
+            logger.warning("Run type: %s not supported" % run_type)
+
diff --git a/tests/benchmark/docker_utils.py b/tests/benchmark/docker_utils.py
new file mode 100644
index 000000000..504a7f51c
--- /dev/null
+++ b/tests/benchmark/docker_utils.py
@@ -0,0 +1,126 @@
+# def pull_image(image):
+#     registry = image.split(":")[0]
+#     image_tag = image.split(":")[1]
+#     client = docker.APIClient(base_url='unix://var/run/docker.sock')
+#     logger.info("Start pulling image: %s" % image)
+#     return client.pull(registry, image_tag)
+
+
+# def run_server(image, mem_limit=None, timeout=30, test_type="local", volume_name=None, db_slave=None):
+#     import colors
+
+#     client = docker.from_env()
+#     # if mem_limit is None:
+#     #     mem_limit = psutil.virtual_memory().available
+#     # logger.info('Memory limit:', mem_limit)
+#     # cpu_limit = "0-%d" % (multiprocessing.cpu_count() - 1)
+#     # logger.info('Running on CPUs:', cpu_limit)
+#     for dir_item in ['logs', 'db']:
+#         try:
+#             os.mkdir(os.path.abspath(dir_item))
+#         except Exception as e:
+#             pass
+
+#     if test_type == "local":
+#         volumes = {
+#             os.path.abspath('conf'):
+#                 {'bind': '/opt/milvus/conf', 'mode': 'ro'},
+#             os.path.abspath('logs'):
+#                 {'bind': '/opt/milvus/logs', 'mode': 'rw'},
+#             os.path.abspath('db'):
+#                 {'bind': '/opt/milvus/db', 'mode': 'rw'},
+#         }
+#     elif test_type == "remote":
+#         if volume_name is None:
+#             raise Exception("No volume name")
+#         remote_log_dir = volume_name+'/logs'
+#         remote_db_dir = volume_name+'/db'
+
+#         for dir_item in [remote_log_dir, remote_db_dir]:
+#             if not os.path.isdir(dir_item):
+#                 os.makedirs(dir_item, exist_ok=True)
+#         volumes = {
+#             os.path.abspath('conf'):
+#                 {'bind': '/opt/milvus/conf', 'mode': 'ro'},
+#             remote_log_dir:
+#                 {'bind': '/opt/milvus/logs', 'mode': 'rw'},
+#             remote_db_dir:
+#                 {'bind': '/opt/milvus/db', 'mode': 'rw'}
+#         }
+#         # add volumes
+#         if db_slave and isinstance(db_slave, int):
+#             for i in range(2, db_slave+1):
+#                 remote_db_dir = volume_name+'/data'+str(i)
+#                 if not os.path.isdir(remote_db_dir):
+#                     os.makedirs(remote_db_dir, exist_ok=True)
+#                 volumes[remote_db_dir] = {'bind': '/opt/milvus/data'+str(i), 'mode': 'rw'}
+
+#     container = client.containers.run(
+#         image,
+#         volumes=volumes,
+#         runtime="nvidia",
+#         ports={'19530/tcp': 19530, '8080/tcp': 8080},
+#         # environment=["OMP_NUM_THREADS=48"],
+#         # cpuset_cpus=cpu_limit,
+#         # mem_limit=mem_limit,
+#         # environment=[""],
+#         detach=True)
+
+#     def stream_logs():
+#         for line in container.logs(stream=True):
+#             logger.info(colors.color(line.decode().rstrip(), fg='blue'))
+
+#     if sys.version_info >= (3, 0):
+#         t = threading.Thread(target=stream_logs, daemon=True)
+#     else:
+#         t = threading.Thread(target=stream_logs)
+#         t.daemon = True
+#     t.start()
+
+#     logger.info('Container: %s started' % container)
+#     return container
+#     # exit_code = container.wait(timeout=timeout)
+#     # # Exit if exit code
+#     # if exit_code == 0:
+#     #     return container
+#     # elif exit_code is not None:
+#     #     print(colors.color(container.logs().decode(), fg='red'))
+
+# def restart_server(container):
+#     client = docker.APIClient(base_url='unix://var/run/docker.sock')
+
+#     client.restart(container.name)
+#     logger.info('Container: %s restarted' % container.name)
+#     return container
+
+
+# def remove_container(container):
+#     container.remove(force=True)
+#     logger.info('Container: %s removed' % container)
+
+
+# def remove_all_containers(image):
+#     client = docker.from_env()
+#     try:
+#         for container in client.containers.list():
+#             if image in container.image.tags:
+#                 container.stop(timeout=30)
+#                 container.remove(force=True)
+#     except Exception as e:
+#         logger.error("Containers removed failed")
+
+
+# def container_exists(image):
+#     '''
+#     Check if container existed with the given image name
+#     @params: image name
+#     @return: container if exists
+#     '''
+#     res = False
+#     client = docker.from_env()
+#     for container in client.containers.list():
+#         if image in container.image.tags:
+#             # True
+#             res = container
+#     return res
+
diff --git a/tests/benchmark/executors/__init__.py b/tests/benchmark/executors/__init__.py
new file mode 100644
index 000000000..dc96d174c
--- /dev/null
+++ b/tests/benchmark/executors/__init__.py
@@ -0,0 +1,3 @@
+
+class BaseExecutor(object):
+    """Base class for benchmark executors; concrete executors (e.g. the shell
+    executor) subclass this. Currently an empty marker type."""
+    pass
\ No newline at end of file
diff --git a/tests/benchmark/executors/shell.py b/tests/benchmark/executors/shell.py
new file mode 100644
index 000000000..cb425b9b5
--- /dev/null
+++ b/tests/benchmark/executors/shell.py
@@ -0,0 +1,4 @@
+from . import BaseExecutor
+
+class ShellExecutor(BaseExecutor):
+    """Executor placeholder intended to run benchmark steps via the shell;
+    no behavior implemented yet beyond BaseExecutor."""
+    pass
\ No newline at end of file
diff --git a/tests/benchmark/handlers/__init__.py b/tests/benchmark/handlers/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/benchmark/helm_utils.py b/tests/benchmark/helm_utils.py
new file mode 100644
index 000000000..c69801a74
--- /dev/null
+++ b/tests/benchmark/helm_utils.py
@@ -0,0 +1,370 @@
+import os
+import pdb
+import time
+import logging
+import hashlib
+from yaml import full_load, dump
+import utils
+
+logger = logging.getLogger("milvus_benchmark.utils")
+REGISTRY_URL = "registry.zilliz.com/milvus/engine"
+IDC_NAS_URL = "//172.16.70.249/test"
+NAS_URL = "//192.168.1.126/test"
+
+
+def get_host_cpus(hostname):
+    """Return the allocatable CPU count of a Kubernetes node.
+
+    The value is read from the node's ``status.allocatable["cpu"]`` field;
+    presumably a numeric string like "64" (callers apply int()) — confirm
+    against the cluster's node spec.
+
+    :param hostname: kubernetes node name to query
+    """
+    # imported lazily so the module can be loaded without the kubernetes client
+    from kubernetes import client, config
+    config.load_kube_config()
+    # silence the kubernetes REST client's verbose request logging
+    client.rest.logger.setLevel(logging.WARNING)
+    v1 = client.CoreV1Api()
+    cpus = v1.read_node(hostname).status.allocatable.get("cpu")
+    return cpus
+
+
+# update values.yaml
+def update_values(file_path, deploy_mode, hostname, milvus_config, server_config=None):
+    """Rewrite the helm chart's values.yaml at `file_path` from a milvus test config.
+
+    A one-time backup (<file>.bak) is kept next to the file: on the first call
+    the pristine file is copied to the .bak; on later calls the .bak is copied
+    back first, so every invocation edits the original chart values.
+
+    :param file_path: path to the chart's values.yaml (must exist)
+    :param deploy_mode: "local" selects the local external-mysql IP; "cluster"
+        (with external mysql enabled) applies a second mysql override below
+    :param hostname: if truthy, pin the deployment to this node, switch to the
+        IDC NAS share and set CPU resource limits/requests
+    :param milvus_config: flat milvus server options (primary_path, cache
+        sizes, gpu settings, wal flags, ...) mapped onto nested chart keys
+    :param server_config: assumed to carry "cpus" when hostname is set —
+        TODO(review) confirm callers always provide it in that case
+    :raises Exception: when `file_path` does not exist
+    """
+    if not os.path.isfile(file_path):
+        raise Exception('File: %s not found' % file_path)
+    # restore from (or create) the one-time backup of values.yaml
+    file_name = os.path.basename(file_path)
+    bak_file_name = file_name + ".bak"
+    file_parent_path = os.path.dirname(file_path)
+    bak_file_path = file_parent_path + '/' + bak_file_name
+    if os.path.exists(bak_file_path):
+        os.system("cp %s %s" % (bak_file_path, file_path))
+    else:
+        os.system("cp %s %s" % (file_path, bak_file_path))
+    with open(file_path) as f:
+        values_dict = full_load(f)
+        f.close()
+    cluster = False
+    if "cluster" in milvus_config and milvus_config["cluster"]:
+        cluster = True
+    # map flat milvus config keys onto nested chart values; in cluster mode
+    # most settings are mirrored into the readonly replica's section
+    for k, v in milvus_config.items():
+        if k.find("primary_path") != -1:
+            suffix_path = milvus_config["suffix_path"] if "suffix_path" in milvus_config else None
+            path_value = v
+            if suffix_path:
+                # make the data path unique per run via a timestamp suffix
+                path_value = v + "_" + str(int(time.time()))
+            values_dict["primaryPath"] = path_value
+            values_dict['wal']['path'] = path_value + "/wal"
+            values_dict['logs']['path'] = path_value + "/logs"
+        # elif k.find("use_blas_threshold") != -1:
+        #     values_dict['useBLASThreshold'] = int(v)
+        elif k.find("gpu_search_threshold") != -1:
+            values_dict['gpu']['gpuSearchThreshold'] = int(v)
+            if cluster:
+                values_dict['readonly']['gpu']['gpuSearchThreshold'] = int(v)
+        elif k.find("cpu_cache_capacity") != -1:
+            values_dict['cache']['cacheSize'] = v
+            if cluster:
+                values_dict['readonly']['cache']['cacheSize'] = v
+        # elif k.find("cache_insert_data") != -1:
+        #     values_dict['cache']['cacheInsertData'] = v
+        elif k.find("insert_buffer_size") != -1:
+            values_dict['cache']['insertBufferSize'] = v
+            if cluster:
+                values_dict['readonly']['cache']['insertBufferSize'] = v
+        elif k.find("gpu_resource_config.enable") != -1:
+            values_dict['gpu']['enabled'] = v
+            if cluster:
+                values_dict['readonly']['gpu']['enabled'] = v
+        elif k.find("gpu_resource_config.cache_capacity") != -1:
+            values_dict['gpu']['cacheSize'] = v
+            if cluster:
+                values_dict['readonly']['gpu']['cacheSize'] = v
+        elif k.find("build_index_resources") != -1:
+            values_dict['gpu']['buildIndexDevices'] = v
+            if cluster:
+                values_dict['readonly']['gpu']['buildIndexDevices'] = v
+        elif k.find("search_resources") != -1:
+            values_dict['gpu']['searchDevices'] = v
+            if cluster:
+                values_dict['readonly']['gpu']['searchDevices'] = v
+        # wal
+        elif k.find("auto_flush_interval") != -1:
+            values_dict['storage']['autoFlushInterval'] = v
+            if cluster:
+                values_dict['readonly']['storage']['autoFlushInterval'] = v
+        elif k.find("wal_enable") != -1:
+            values_dict['wal']['enabled'] = v
+
+    # if values_dict['nodeSelector']:
+    #     logger.warning("nodeSelector has been set: %s" % str(values_dict['engine']['nodeSelector']))
+    #     return
+    values_dict["wal"]["recoveryErrorIgnore"] = True
+    # enable the metrics/monitoring endpoint (fixed test collector address)
+    values_dict["metrics"]["enabled"] = True
+    values_dict["metrics"]["address"] = "192.168.1.237"
+    values_dict["metrics"]["port"] = 9091
+    # benchmark only against the avx2 SIMD path
+    values_dict["extraConfiguration"].update({"engine": {"simd_type": "avx2"}})
+    # stat_optimizer_enable
+    values_dict["extraConfiguration"]["engine"].update({"stat_optimizer_enable": False})
+
+    # enable read-write (cluster) mode
+    if cluster:
+        values_dict["cluster"]["enabled"] = True
+        # give the readonly replica its own log subdirectory
+        values_dict["readonly"]['logs']['path'] = values_dict['logs']['path'] + "/readonly"
+        if "readonly" in milvus_config:
+            if "replicas" in milvus_config["readonly"]:
+                values_dict["readonly"]["replicas"] = milvus_config["readonly"]["replicas"]
+
+    use_external_mysql = False
+    if "external_mysql" in milvus_config and milvus_config["external_mysql"]:
+        use_external_mysql = True
+    # metadata store: either external mysql or none (chart mysql stays disabled
+    # in both branches here)
+    if use_external_mysql:
+        values_dict["mysql"]["enabled"] = False
+        # values_dict["mysql"]["persistence"]["enabled"] = True
+        # values_dict["mysql"]["persistence"]["existingClaim"] = hashlib.md5(path_value.encode(encoding='UTF-8')).hexdigest()
+        values_dict['externalMysql']['enabled'] = True
+        if deploy_mode == "local":
+            values_dict['externalMysql']["ip"] = "192.168.1.238"
+        else:
+            values_dict['externalMysql']["ip"] = "milvus-mysql.test"
+        values_dict['externalMysql']["port"] = 3306
+        values_dict['externalMysql']["user"] = "root"
+        values_dict['externalMysql']["password"] = "milvus"
+        values_dict['externalMysql']["database"] = "db"
+    else:
+        values_dict["mysql"]["enabled"] = False
+    # pin the deployment to the given host and size its CPU resources
+    nas_url = NAS_URL
+    if hostname:
+        nas_url = IDC_NAS_URL
+        values_dict['nodeSelector'] = {'kubernetes.io/hostname': hostname}
+        # NOTE(review): raises TypeError/KeyError if server_config is None or
+        # lacks "cpus" — callers are expected to have populated it (see init_env)
+        cpus = server_config["cpus"]
+
+        # set limit/request cpus in resources
+        values_dict["image"]['resources'] = {
+            "limits": {
+                # "cpu": str(int(cpus)) + ".0"
+                "cpu": str(int(cpus)) + ".0"
+            },
+            "requests": {
+                # "cpu": str(int(cpus) // 2) + ".0"
+                "cpu": "4.0"
+            }
+        }
+        # update readonly resources limits/requests
+        values_dict["readonly"]['resources'] = {
+            "limits": {
+                # "cpu": str(int(cpus)) + ".0"
+                "cpu": str(int(cpus)) + ".0"
+            },
+            "requests": {
+                # "cpu": str(int(cpus) // 2) + ".0"
+                "cpu": "4.0"
+            }
+        }
+        # only schedule onto nodes tainted worker=performance
+        values_dict['tolerations'] = [{
+            "key": "worker",
+            "operator": "Equal",
+            "value": "performance",
+            "effect": "NoSchedule"
+        }]
+    # mount the NAS test share into the pod via the cifs flexVolume driver
+    values_dict['extraVolumes'] = [{
+        'name': 'test',
+        'flexVolume': {
+            'driver': "fstab/cifs",
+            'fsType': "cifs",
+            'secretRef': {
+                'name': "cifs-test-secret"
+            },
+            'options': {
+                'networkPath': nas_url,
+                'mountOptions': "vers=1.0"
+            }
+        }
+    }]
+    values_dict['extraVolumeMounts'] = [{
+        'name': 'test',
+        'mountPath': '/test'
+    }]
+
+    # add extra volumes for mysql
+    # values_dict['mysql']['persistence']['enabled'] = True
+    # values_dict['mysql']['configurationFilesPath'] = "/etc/mysql/mysql.conf.d/"
+    # values_dict['mysql']['imageTag'] = '5.6'
+    # values_dict['mysql']['securityContext'] = {
+    #         'enabled': True}
+    # mysql_db_path = "/test"
+    if deploy_mode == "cluster" and use_external_mysql:
+        # mount_path = values_dict["primaryPath"]+'/data'
+        # long_str = '- name: test-mysql\n  flexVolume:\n    driver: fstab/cifs\n    fsType: cifs\n    secretRef:\n      name: cifs-test-secret\n    options:\n      networkPath: //192.168.1.126/test\n      mountOptions: vers=1.0'
+        # values_dict['mysql']['extraVolumes'] = literal_str(long_str)
+        # long_str_2 = "- name: test-mysql\n  mountPath: %s" % mysql_db_path
+        # values_dict['mysql']['extraVolumeMounts'] = literal_str(long_str_2)
+        # mysql_cnf_str = '[mysqld]\npid-file=%s/mysql.pid\ndatadir=%s' % (mount_path, mount_path)
+        # values_dict['mysql']['configurationFiles'] = {}
+        # values_dict['mysql']['configurationFiles']['mysqld.cnf'] = literal_str(mysql_cnf_str)
+
+        # cluster mode overrides the external mysql endpoint set above
+        values_dict['mysql']['enabled'] = False
+        values_dict['externalMysql']['enabled'] = True
+        values_dict['externalMysql']["ip"] = "192.168.1.197"
+        values_dict['externalMysql']["port"] = 3306
+        values_dict['externalMysql']["user"] = "root"
+        values_dict['externalMysql']["password"] = "Fantast1c"
+        values_dict['externalMysql']["database"] = "db"
+
+    # logger.debug(values_dict)
+    #  print(dump(values_dict))
+    with open(file_path, 'w') as f:
+        dump(values_dict, f, default_flow_style=False)
+    f.close()
+    # DEBUG: re-read the written file; the loop has no effect beyond exercising
+    # the read — leftover debugging aid
+    with open(file_path) as f:
+        for line in f.readlines():
+            line = line.strip("\n")
+
+
+# deploy server
+def helm_install_server(helm_path, deploy_mode, image_tag, image_type, name, namespace):
+    timeout = 300
+    logger.debug("Server deploy mode: %s" % deploy_mode)
+    host = "%s.%s.svc.cluster.local" % (name, namespace)
+    if deploy_mode == "single":
+        install_cmd = "helm install \
+                --set image.repository=%s \
+                --set image.tag=%s \
+                --set image.pullPolicy=Always \
+                --set service.type=ClusterIP \
+                -f ci/filebeat/values.yaml \
+                --namespace %s \
+                %s ." % (REGISTRY_URL, image_tag, namespace, name)
+    elif deploy_mode == "cluster":
+        install_cmd = "helm install \
+                --set cluster.enabled=true \
+                --set persistence.enabled=true \
+                --set mishards.image.tag=test \
+                --set mishards.image.pullPolicy=Always \
+                --set image.repository=%s \
+                --set image.tag=%s \
+                --set image.pullPolicy=Always \
+                --set service.type=ClusterIP \
+                -f ci/filebeat/values.yaml \
+                --namespace %s \
+                %s ." % (REGISTRY_URL, image_tag, namespace, name)
+    logger.debug(install_cmd)
+    logger.debug(host)
+    if os.system("cd %s && %s" % (helm_path, install_cmd)):
+        logger.error("Helm install failed: %s" % name)
+        return None
+    time.sleep(30)
+    # config.load_kube_config()
+    # v1 = client.CoreV1Api()
+    # pod_name = None
+    # pod_id = None
+    # pods = v1.list_namespaced_pod(namespace)
+    # for i in pods.items:
+    #     if i.metadata.name.find(name) != -1:
+    #         pod_name = i.metadata.name
+    #         pod_ip = i.status.pod_ip
+    # logger.debug(pod_name)
+    # logger.debug(pod_ip)
+    # return pod_name, pod_ip
+    return host
+
+
+# delete server
+@utils.retry(3)
+def helm_del_server(name, namespace):
+    # logger.debug("Sleep 600s before uninstall server")
+    # time.sleep(600)
+    del_cmd = "helm uninstall -n milvus %s" % name
+    logger.info(del_cmd)
+    if os.system(del_cmd):
+        logger.error("Helm delete name:%s failed" % name)
+        return False
+    return True
+
+
+def restart_server(helm_release_name, namespace):
+    res = True
+    timeout = 120000
+    # service_name = "%s.%s.svc.cluster.local" % (helm_release_name, namespace)
+    config.load_kube_config()
+    v1 = client.CoreV1Api()
+    pod_name = None
+    # config_map_names = v1.list_namespaced_config_map(namespace, pretty='true')
+    # body = {"replicas": 0}
+    pods = v1.list_namespaced_pod(namespace)
+    for i in pods.items:
+        if i.metadata.name.find(helm_release_name) != -1 and i.metadata.name.find("mysql") == -1:
+            pod_name = i.metadata.name
+            break
+            # v1.patch_namespaced_config_map(config_map_name, namespace, body, pretty='true')
+    # status_res = v1.read_namespaced_service_status(helm_release_name, namespace, pretty='true')
+    logger.debug("Pod name: %s" % pod_name)
+    if pod_name is not None:
+        try:
+            v1.delete_namespaced_pod(pod_name, namespace)
+        except Exception as e:
+            logger.error(str(e))
+            logger.error("Exception when calling CoreV1Api->delete_namespaced_pod")
+            res = False
+            return res
+        logger.error("Sleep 10s after pod deleted")
+        time.sleep(10)
+        # check if restart successfully
+        pods = v1.list_namespaced_pod(namespace)
+        for i in pods.items:
+            pod_name_tmp = i.metadata.name
+            logger.error(pod_name_tmp)
+            if pod_name_tmp == pod_name:
+                continue
+            elif pod_name_tmp.find(helm_release_name) == -1 or pod_name_tmp.find("mysql") != -1:
+                continue
+            else:
+                status_res = v1.read_namespaced_pod_status(pod_name_tmp, namespace, pretty='true')
+                logger.error(status_res.status.phase)
+                start_time = time.time()
+                ready_break = False
+                while time.time() - start_time <= timeout:
+                    logger.error(time.time())
+                    status_res = v1.read_namespaced_pod_status(pod_name_tmp, namespace, pretty='true')
+                    if status_res.status.phase == "Running":
+                        logger.error("Already running")
+                        ready_break = True
+                        break
+                    else:
+                        time.sleep(5)
+                if time.time() - start_time > timeout:
+                    logger.error("Restart pod: %s timeout" % pod_name_tmp)
+                    res = False
+                    return res
+                if ready_break:
+                    break
+    else:
+        raise Exception("Pod: %s not found" % pod_name)
+    follow = True
+    pretty = True
+    previous = True  # bool | Return previous terminated container logs. Defaults to false. (optional)
+    since_seconds = 56  # int | A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified. (optional)
+    timestamps = True  # bool | If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false. (optional)
+    container = "milvus"
+    # start_time = time.time()
+    # while time.time() - start_time <= timeout:
+    #     try:
+    #         api_response = v1.read_namespaced_pod_log(pod_name_tmp, namespace, container=container, follow=follow,
+    #                                                 pretty=pretty, previous=previous, since_seconds=since_seconds,
+    #                                                 timestamps=timestamps)
+    #         logging.error(api_response)
+    #         return res
+    #     except Exception as e:
+    #         logging.error("Exception when calling CoreV1Api->read_namespaced_pod_log: %s\n" % e)
+    #         # waiting for server start
+    #         time.sleep(2)
+    #         # res = False
+    #         # return res
+    # if time.time() - start_time > timeout:
+    #     logging.error("Restart pod: %s timeout" % pod_name_tmp)
+    #     res = False
+    return res
+
+
+if __name__ == '__main__':
+    # ad-hoc manual check: print the type of the allocatable-CPU value for a
+    # known node (requires a reachable cluster and local kube config)
+    print(type(get_host_cpus("idc-sh002")))
diff --git a/tests/benchmark/k8s_runner.py b/tests/benchmark/k8s_runner.py
new file mode 100644
index 000000000..5e2828a7b
--- /dev/null
+++ b/tests/benchmark/k8s_runner.py
@@ -0,0 +1,927 @@
+import os
+import logging
+import pdb
+import time
+import re
+import random
+import traceback
+import json
+import csv
+import threading
+from multiprocessing import Process
+import numpy as np
+from milvus import DataType
+from yaml import full_load, dump
+import concurrent.futures
+
+import locust_user
+from client import MilvusClient
+import parser
+from runner import Runner
+from milvus_metrics.api import report
+from milvus_metrics.models import Env, Hardware, Server, Metric
+import helm_utils
+import utils
+
# Module-level logger scoped under the benchmark package hierarchy.
logger = logging.getLogger("milvus_benchmark.k8s_runner")
# Kubernetes namespace every helm release is installed into.
namespace = "milvus"
# Default milvus service port used when connecting to the deployed server.
default_port = 19530
# Seconds to sleep after dropping a collection before re-creating it.
DELETE_INTERVAL_TIME = 5
# INSERT_INTERVAL = 100000
# Row-count batch size used when inserting large datasets.
INSERT_INTERVAL = 50000
# NOTE(review): the two flush intervals below are only referenced from
# commented-out set_config calls in run(); kept for when those are re-enabled.
BIG_FLUSH_INTERVAL = 3600
DEFAULT_FLUSH_INTERVAL = 1
# Run id shared by every metric reported from this process (fixed at import time).
timestamp = int(time.time())
default_path = "/var/lib/milvus"
+
+
+class K8sRunner(Runner):
+    """run docker mode"""
+
+    def __init__(self):
+        super(K8sRunner, self).__init__()
+        self.service_name = utils.get_unique_name()
+        self.host = None
+        self.port = default_port
+        self.hostname = None
+        self.env_value = None
+        self.hardware = None
+        self.deploy_mode = None 
+
+    def init_env(self, milvus_config, server_config, server_host, deploy_mode, image_type, image_tag):
+        logger.debug("Tests run on server host:")
+        logger.debug(server_host)
+        self.hostname = server_host
+        self.deploy_mode = deploy_mode
+        if self.hostname:
+            try:
+                cpus = helm_utils.get_host_cpus(self.hostname)
+            except Exception as e:
+                logger.error(str(e))
+                cpus = 64
+            logger.debug(type(cpus))
+            if server_config:
+                if "cpus" in server_config.keys():
+                    cpus = min(server_config["cpus"], int(cpus))
+                else:
+                    server_config.update({"cpus": cpus})
+            else:
+                server_config = {"cpus": cpus}
+            self.hardware = Hardware(name=self.hostname, cpus=cpus)
+        # update values
+        helm_path = os.path.join(os.getcwd(), "../milvus-helm/charts/milvus")
+        values_file_path = helm_path + "/values.yaml"
+        if not os.path.exists(values_file_path):
+            raise Exception("File %s not existed" % values_file_path)
+        if milvus_config:
+            helm_utils.update_values(values_file_path, deploy_mode, server_host, milvus_config, server_config)
+        try:
+            logger.debug("Start install server")
+            self.host = helm_utils.helm_install_server(helm_path, deploy_mode, image_tag, image_type, self.service_name,
+                                                       namespace)
+        except Exception as e:
+            logger.error("Helm install server failed: %s" % (str(e)))
+            logger.error(traceback.format_exc())
+            logger.error(self.hostname)
+            self.clean_up()
+            return False
+        logger.debug(server_config)
+        # for debugging
+        if not self.host:
+            logger.error("Helm install server failed")
+            self.clean_up()
+            return False
+        return True
+
+    def clean_up(self):
+        logger.debug("Start clean up: %s" % self.service_name)
+        helm_utils.helm_del_server(self.service_name, namespace)
+
+    def report_wrapper(self, milvus_instance, env_value, hostname, collection_info, index_info, search_params,
+                       run_params=None, server_config=None):
+        metric = Metric()
+        metric.set_run_id(timestamp)
+        metric.env = Env(env_value)
+        metric.env.OMP_NUM_THREADS = 0
+        metric.hardware = self.hardware
+        # TODO: removed
+        # server_version = milvus_instance.get_server_version()
+        # server_mode = milvus_instance.get_server_mode()
+        # commit = milvus_instance.get_server_commit()
+        server_version = "0.12.0"
+        server_mode = self.deploy_mode
+        metric.server = Server(version=server_version, mode=server_mode, build_commit=None)
+        metric.collection = collection_info
+        metric.index = index_info
+        metric.search = search_params
+        metric.run_params = run_params
+        return metric
+
+    def run(self, run_type, collection):
+        logger.debug(run_type)
+        logger.debug(collection)
+        collection_name = collection["collection_name"] if "collection_name" in collection else None
+        milvus_instance = MilvusClient(collection_name=collection_name, host=self.host)
+
+        # TODO: removed
+        # self.env_value = milvus_instance.get_server_config()
+        # ugly implemention
+        # self.env_value = utils.convert_nested(self.env_value)
+        # self.env_value.pop("logs")
+        # self.env_value.pop("network")
+        self.env_value = collection
+
+        if run_type == "insert_performance":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(
+                collection_name)
+            ni_per = collection["ni_per"]
+            build_index = collection["build_index"]
+            if milvus_instance.exists_collection():
+                milvus_instance.drop()
+                time.sleep(10)
+            index_info = {}
+            search_params = {}
+            vector_type = self.get_vector_type(data_type)
+            other_fields = collection["other_fields"] if "other_fields" in collection else None
+            milvus_instance.create_collection(dimension, data_type=vector_type,
+                                              other_fields=other_fields)
+            if build_index is True:
+                index_type = collection["index_type"]
+                index_param = collection["index_param"]
+                index_info = {
+                    "index_type": index_type,
+                    "index_param": index_param
+                }
+                index_field_name = utils.get_default_field_name(vector_type)
+                milvus_instance.create_index(index_field_name, index_type, metric_type, index_param=index_param)
+                logger.debug(milvus_instance.describe_index())
+            res = self.do_insert(milvus_instance, collection_name, data_type, dimension, collection_size, ni_per)
+            flush_time = 0.0
+            if "flush" in collection and collection["flush"] == "no":
+                logger.debug("No manual flush")
+            else:
+                start_time = time.time()
+                milvus_instance.flush()
+                flush_time = time.time() - start_time
+                logger.debug(milvus_instance.count())
+            collection_info = {
+                "dimension": dimension,
+                "metric_type": metric_type,
+                "dataset_name": collection_name,
+                "other_fields": other_fields,
+                "ni_per": ni_per
+            }
+            metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info,
+                                         search_params)
+            total_time = res["total_time"]
+            build_time = 0
+            if build_index is True:
+                logger.debug("Start build index for last file")
+                start_time = time.time()
+                milvus_instance.create_index(index_field_name, index_type, metric_type, index_param=index_param)
+                build_time = time.time() - start_time
+                total_time = total_time + build_time
+            metric.metrics = {
+                "type": run_type,
+                "value": {
+                    "total_time": total_time,
+                    "qps": res["qps"],
+                    "ni_time": res["ni_time"],
+                    "flush_time": flush_time,
+                    "build_time": build_time
+                }
+            }
+            report(metric)
+
+        elif run_type == "build_performance":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(
+                collection_name)
+            index_type = collection["index_type"]
+            index_param = collection["index_param"]
+            collection_info = {
+                "dimension": dimension,
+                "metric_type": metric_type,
+                "dataset_name": collection_name
+            }
+            index_info = {
+                "index_type": index_type,
+                "index_param": index_param
+            }
+            if not milvus_instance.exists_collection():
+                logger.error("Table name: %s not existed" % collection_name)
+                return
+            search_params = {}
+            vector_type = self.get_vector_type(data_type)
+            index_field_name = utils.get_default_field_name(vector_type)
+            start_time = time.time()
+            # drop index
+            logger.debug("Drop index")
+            milvus_instance.drop_index(index_field_name)
+            # start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            # TODO: need to check
+            milvus_instance.create_index(index_field_name, index_type, metric_type, index_param=index_param)
+            logger.debug(milvus_instance.describe_index())
+            logger.debug(milvus_instance.count())
+            end_time = time.time()
+            # end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info,
+                                         search_params)
+            metric.metrics = {
+                "type": "build_performance",
+                "value": {
+                    "build_time": round(end_time - start_time, 1),
+                }
+            }
+            report(metric)
+
+        elif run_type == "delete_performance":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(
+                collection_name)
+            ni_per = collection["ni_per"]
+            auto_flush = collection["auto_flush"] if "auto_flush" in collection else True
+            search_params = {}
+            collection_info = {
+                "dimension": dimension,
+                "metric_type": metric_type,
+                "dataset_name": collection_name
+            }
+            if not milvus_instance.exists_collection():
+                logger.error(milvus_instance.show_collections())
+                logger.error("Table name: %s not existed" % collection_name)
+                return
+            length = milvus_instance.count()
+            logger.info(length)
+            index_info = milvus_instance.describe_index()
+            logger.info(index_info)
+            ids = [i for i in range(length)]
+            loops = int(length / ni_per)
+            milvus_instance.load_collection()
+            # TODO: remove
+            # start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            start_time = time.time()
+            # if auto_flush is False:
+            #     milvus_instance.set_config("storage", "auto_flush_interval", BIG_FLUSH_INTERVAL)
+            for i in range(loops):
+                delete_ids = ids[i * ni_per: i * ni_per + ni_per]
+                logger.debug("Delete %d - %d" % (delete_ids[0], delete_ids[-1]))
+                milvus_instance.delete(delete_ids)
+                logger.debug("Table row counts: %d" % milvus_instance.count())
+            logger.debug("Table row counts: %d" % milvus_instance.count())
+            start_flush_time = time.time()
+            milvus_instance.flush()
+            end_flush_time = time.time()
+            end_time = time.time()
+            # end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            logger.debug("Table row counts: %d" % milvus_instance.count())
+            # milvus_instance.set_config("storage", "auto_flush_interval", DEFAULT_FLUSH_INTERVAL)
+            metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info,
+                                         search_params)
+            delete_time = round(end_time - start_time, 1)
+            metric.metrics = {
+                "type": "delete_performance",
+                "value": {
+                    "delete_time": delete_time,
+                    "qps": round(collection_size / delete_time, 1)
+                }
+            }
+            if auto_flush is False:
+                flush_time = round(end_flush_time - start_flush_time, 1)
+                metric.metrics["value"].update({"flush_time": flush_time})
+            report(metric)
+
+        elif run_type == "get_ids_performance":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(
+                collection_name)
+            ids_length_per_segment = collection["ids_length_per_segment"]
+            if not milvus_instance.exists_collection():
+                logger.error("Table name: %s not existed" % collection_name)
+                return
+            collection_info = {
+                "dimension": dimension,
+                "metric_type": metric_type,
+                "dataset_name": collection_name
+            }
+            search_params = {}
+            logger.info(milvus_instance.count())
+            index_info = milvus_instance.describe_index()
+            logger.info(index_info)
+            for ids_num in ids_length_per_segment:
+                segment_num, get_ids = milvus_instance.get_rand_ids_each_segment(ids_num)
+                start_time = time.time()
+                get_res = milvus_instance.get_entities(get_ids)
+                total_time = time.time() - start_time
+                avg_time = total_time / segment_num
+                run_params = {"ids_num": ids_num}
+                logger.info(
+                    "Segment num: %d, ids num per segment: %d, run_time: %f" % (segment_num, ids_num, total_time))
+                metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info,
+                                             index_info, search_params, run_params=run_params)
+                metric.metrics = {
+                    "type": run_type,
+                    "value": {
+                        "total_time": round(total_time, 1),
+                        "avg_time": round(avg_time, 1)
+                    }
+                }
+                report(metric)
+
+        elif run_type == "search_performance":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(
+                collection_name)
+            run_count = collection["run_count"]
+            top_ks = collection["top_ks"]
+            nqs = collection["nqs"]
+            # filter_query = collection["filter"] if "filter" in collection else None
+            filters = collection["filters"] if "filters" in collection else []
+            filter_query = []
+            search_params = collection["search_params"]
+            fields = self.get_fields(milvus_instance, collection_name)
+            collection_info = {
+                "dimension": dimension,
+                "metric_type": metric_type,
+                "dataset_name": collection_name
+                "fields": fields
+            }
+            if not milvus_instance.exists_collection():
+                logger.error("Table name: %s not existed" % collection_name)
+                return
+
+            vector_type = self.get_vector_type(data_type)
+            vec_field_name = utils.get_default_field_name(vector_type)
+            logger.info(milvus_instance.count())
+            index_info = milvus_instance.describe_index()
+            logger.info(index_info)
+            milvus_instance.load_collection()
+            logger.info("Start warm up query")
+            res = self.do_query(milvus_instance, collection_name, vec_field_name, [1], [1], 2,
+                                search_param=search_params[0], filter_query=filter_query)
+            logger.info("End warm up query")
+            for search_param in search_params:
+                logger.info("Search param: %s" % json.dumps(search_param))
+                if not filters:
+                    filters.append(None)
+                for filter in filters:
+                    filter_param = []
+                    if isinstance(filter, dict) and "range" in filter:
+                        filter_query.append(eval(filter["range"]))
+                        filter_param.append(filter["range"])
+                    if isinstance(filter, dict) and "term" in filter:
+                        filter_query.append(eval(filter["term"]))
+                        filter_param.append(filter["term"])
+                    logger.info("filter param: %s" % json.dumps(filter_param))
+                    res = self.do_query(milvus_instance, collection_name, vec_field_name, top_ks, nqs, run_count,
+                                        search_param, filter_query=filter_query)
+                    headers = ["Nq/Top-k"]
+                    headers.extend([str(top_k) for top_k in top_ks])
+                    logger.info("Search param: %s" % json.dumps(search_param))
+                    utils.print_table(headers, nqs, res)
+                    for index_nq, nq in enumerate(nqs):
+                        for index_top_k, top_k in enumerate(top_ks):
+                            search_param_group = {
+                                "nq": nq,
+                                "topk": top_k,
+                                "search_param": search_param,
+                                "filter": filter_param
+                            }
+                            search_time = res[index_nq][index_top_k]
+                            metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname,
+                                                         collection_info, index_info, search_param_group)
+                            metric.metrics = {
+                                "type": "search_performance",
+                                "value": {
+                                    "search_time": search_time
+                                }
+                            }
+                            report(metric)
+
+        elif run_type == "locust_insert_stress":
+            pass
+
+        elif run_type in ["locust_search_performance", "locust_insert_performance", "locust_mix_performance"]:
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(
+                collection_name)
+            ni_per = collection["ni_per"]
+            build_index = collection["build_index"]
+            if milvus_instance.exists_collection():
+                milvus_instance.drop()
+                time.sleep(10)
+            index_info = {}
+            search_params = {}
+            vector_type = self.get_vector_type(data_type)
+            index_field_name = utils.get_default_field_name(vector_type)
+            milvus_instance.create_collection(dimension, data_type=vector_type, other_fields=None)
+            vector_type = self.get_vector_type(data_type)
+            vec_field_name = utils.get_default_field_name(vector_type)
+            if build_index is True:
+                index_type = collection["index_type"]
+                index_param = collection["index_param"]
+                index_info = {
+                    "index_type": index_type,
+                    "index_param": index_param
+                }
+                milvus_instance.create_index(index_field_name, index_type, metric_type, index_param=index_param)
+                logger.debug(milvus_instance.describe_index())
+            if run_type in ["locust_search_performance", "locust_mix_performance"]:
+                res = self.do_insert(milvus_instance, collection_name, data_type, dimension, collection_size, ni_per)
+                if "flush" in collection and collection["flush"] == "no":
+                    logger.debug("No manual flush")
+                else:
+                    milvus_instance.flush()
+                if build_index is True:
+                    logger.debug("Start build index for last file")
+                    milvus_instance.create_index(index_field_name, index_type, metric_type, _async=True,
+                                                 index_param=index_param)
+                    logger.debug(milvus_instance.describe_index())
+                logger.debug("Table row counts: %d" % milvus_instance.count())
+                milvus_instance.load_collection()
+                logger.info("Start warm up query")
+                for i in range(2):
+                    res = self.do_query(milvus_instance, collection_name, vec_field_name, [1], [1], 2,
+                                        search_param={"nprobe": 16})
+                logger.info("End warm up query")
+            real_metric_type = utils.metric_type_trans(metric_type)
+            ### spawn locust requests
+            task = collection["task"]
+            connection_type = "single"
+            connection_num = task["connection_num"]
+            if connection_num > 1:
+                connection_type = "multi"
+            clients_num = task["clients_num"]
+            hatch_rate = task["hatch_rate"]
+            during_time = utils.timestr_to_int(task["during_time"])
+            task_types = task["types"]
+            run_params = {"tasks": {}, "clients_num": clients_num, "spawn_rate": hatch_rate, "during_time": during_time}
+            for task_type in task_types:
+                run_params["tasks"].update({task_type["type"]: task_type["weight"] if "weight" in task_type else 1})
+
+            # . collect stats
+            locust_stats = locust_user.locust_executor(self.host, self.port, collection_name,
+                                                       connection_type=connection_type, run_params=run_params)
+            logger.info(locust_stats)
+            collection_info = {
+                "dimension": dimension,
+                "metric_type": metric_type,
+                "dataset_name": collection_name
+            }
+            metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info,
+                                         search_params)
+            metric.metrics = {
+                "type": run_type,
+                "value": locust_stats}
+            report(metric)
+
+        elif run_type == "search_ids_stability":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(
+                collection_name)
+            search_params = collection["search_params"]
+            during_time = collection["during_time"]
+            ids_length = collection["ids_length"]
+            ids = collection["ids"]
+            collection_info = {
+                "dimension": dimension,
+                "metric_type": metric_type,
+                "dataset_name": collection_name
+            }
+            if not milvus_instance.exists_collection():
+                logger.error("Table name: %s not existed" % collection_name)
+                return
+            logger.info(milvus_instance.count())
+            index_info = milvus_instance.describe_index()
+            logger.info(index_info)
+            g_top_k = int(collection["top_ks"].split("-")[1])
+            l_top_k = int(collection["top_ks"].split("-")[0])
+            g_id = int(ids.split("-")[1])
+            l_id = int(ids.split("-")[0])
+            g_id_length = int(ids_length.split("-")[1])
+            l_id_length = int(ids_length.split("-")[0])
+
+            milvus_instance.load_collection()
+            # start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            # logger.debug(start_mem_usage)
+            start_time = time.time()
+            while time.time() < start_time + during_time * 60:
+                search_param = {}
+                top_k = random.randint(l_top_k, g_top_k)
+                ids_num = random.randint(l_id_length, g_id_length)
+                ids_param = [random.randint(l_id_length, g_id_length) for _ in range(ids_num)]
+                for k, v in search_params.items():
+                    search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
+                logger.debug("Query top-k: %d, ids_num: %d, param: %s" % (top_k, ids_num, json.dumps(search_param)))
+                result = milvus_instance.query_ids(top_k, ids_param, search_param=search_param)
+            # end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info,
+                                         {})
+            metric.metrics = {
+                "type": "search_ids_stability",
+                "value": {
+                    "during_time": during_time,
+                }
+            }
+            report(metric)
+
+        # for sift/deep datasets
+        # TODO: enable
+        elif run_type == "accuracy":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(
+                collection_name)
+            search_params = collection["search_params"]
+            # mapping to search param list
+            search_params = self.generate_combinations(search_params)
+
+            top_ks = collection["top_ks"]
+            nqs = collection["nqs"]
+            collection_info = {
+                "dimension": dimension,
+                "metric_type": metric_type,
+                "dataset_name": collection_name
+            }
+            if not milvus_instance.exists_collection():
+                logger.error("Table name: %s not existed" % collection_name)
+                return
+            logger.info(milvus_instance.count())
+            index_info = milvus_instance.describe_index()
+            logger.info(index_info)
+            milvus_instance.load_collection()
+            true_ids_all = self.get_groundtruth_ids(collection_size)
+            vector_type = self.get_vector_type(data_type)
+            vec_field_name = utils.get_default_field_name(vector_type)
+            for search_param in search_params:
+                headers = ["Nq/Top-k"]
+                res = []
+                for nq in nqs:
+                    for top_k in top_ks:
+                        tmp_res = []
+                        search_param_group = {
+                            "nq": nq,
+                            "topk": top_k,
+                            "search_param": search_param,
+                            "metric_type": metric_type
+                        }
+                        logger.info("Query params: %s" % json.dumps(search_param_group))
+                        result_ids = self.do_query_ids(milvus_instance, collection_name, vec_field_name, top_k, nq,
+                                                       search_param=search_param)
+                        # mem_used = milvus_instance.get_mem_info()["memory_used"]
+                        acc_value = self.get_recall_value(true_ids_all[:nq, :top_k].tolist(), result_ids)
+                        logger.info("Query accuracy: %s" % acc_value)
+                        tmp_res.append(acc_value)
+                        # logger.info("Memory usage: %s" % mem_used)
+                        metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info,
+                                                     index_info, search_param_group)
+                        metric.metrics = {
+                            "type": "accuracy",
+                            "value": {
+                                "acc": acc_value
+                            }
+                        }
+                        report(metric)
+                        # logger.info("Memory usage: %s" % mem_used)
+                    res.append(tmp_res)
+                headers.extend([str(top_k) for top_k in top_ks])
+                logger.info("Search param: %s" % json.dumps(search_param))
+                utils.print_table(headers, nqs, res)
+
+        elif run_type == "ann_accuracy":
+            hdf5_source_file = collection["source_file"]
+            collection_name = collection["collection_name"]
+            index_types = collection["index_types"]
+            index_params = collection["index_params"]
+            top_ks = collection["top_ks"]
+            nqs = collection["nqs"]
+            search_params = collection["search_params"]
+            # mapping to search param list
+            search_params = self.generate_combinations(search_params)
+            # mapping to index param list
+            index_params = self.generate_combinations(index_params)
+
+            data_type, dimension, metric_type = parser.parse_ann_collection_name(collection_name)
+            collection_info = {
+                "dimension": dimension,
+                "metric_type": metric_type,
+                "dataset_name": collection_name
+            }
+            dataset = utils.get_dataset(hdf5_source_file)
+            if milvus_instance.exists_collection(collection_name):
+                logger.info("Re-create collection: %s" % collection_name)
+                milvus_instance.drop()
+                time.sleep(DELETE_INTERVAL_TIME)
+            true_ids = np.array(dataset["neighbors"])
+            vector_type = self.get_vector_type_from_metric(metric_type)
+            vec_field_name = utils.get_default_field_name(vector_type)
+            real_metric_type = utils.metric_type_trans(metric_type)
+
+            # re-create collection
+            if milvus_instance.exists_collection(collection_name):
+                milvus_instance.drop()
+                time.sleep(DELETE_INTERVAL_TIME)
+            milvus_instance.create_collection(dimension, data_type=vector_type)
+            insert_vectors = self.normalize(metric_type, np.array(dataset["train"]))
+            if len(insert_vectors) != dataset["train"].shape[0]:
+                raise Exception("Row count of insert vectors: %d is not equal to dataset size: %d" % (
+                len(insert_vectors), dataset["train"].shape[0]))
+            logger.debug("The row count of entities to be inserted: %d" % len(insert_vectors))
+            # Insert batch once
+            # milvus_instance.insert(insert_vectors)
+            loops = len(insert_vectors) // INSERT_INTERVAL + 1
+            for i in range(loops):
+                start = i * INSERT_INTERVAL
+                end = min((i + 1) * INSERT_INTERVAL, len(insert_vectors))
+                if start < end:
+                    tmp_vectors = insert_vectors[start:end]
+                    ids = [i for i in range(start, end)]
+                    if not isinstance(tmp_vectors, list):
+                        entities = milvus_instance.generate_entities(tmp_vectors.tolist(), ids)
+                        res_ids = milvus_instance.insert(entities, ids=ids)
+                    else:
+                        entities = milvus_instance.generate_entities(tmp_vectors, ids)
+                        res_ids = milvus_instance.insert(entities, ids=ids)
+                    assert res_ids == ids
+            milvus_instance.flush()
+            res_count = milvus_instance.count()
+            logger.info("Table: %s, row count: %d" % (collection_name, res_count))
+            if res_count != len(insert_vectors):
+                raise Exception("Table row count is not equal to insert vectors")
+            for index_type in index_types:
+                for index_param in index_params:
+                    logger.debug("Building index with param: %s" % json.dumps(index_param))
+                    if milvus_instance.get_config("cluster.enable") == "true":
+                        milvus_instance.create_index(vec_field_name, index_type, metric_type, _async=True,
+                                                     index_param=index_param)
+                    else:
+                        milvus_instance.create_index(vec_field_name, index_type, metric_type,
+                                                     index_param=index_param)
+                    logger.info(milvus_instance.describe_index())
+                    logger.info("Start load collection: %s" % collection_name)
+                    milvus_instance.load_collection()
+                    logger.info("End load collection: %s" % collection_name)
+                    index_info = {
+                        "index_type": index_type,
+                        "index_param": index_param
+                    }
+                    logger.debug(index_info)
+                    warm_up = True
+                    for search_param in search_params:
+                        for nq in nqs:
+                            query_vectors = self.normalize(metric_type, np.array(dataset["test"][:nq]))
+                            if not isinstance(query_vectors, list):
+                                query_vectors = query_vectors.tolist()
+                            for top_k in top_ks:
+                                search_param_group = {
+                                    "nq": len(query_vectors),
+                                    "topk": top_k,
+                                    "search_param": search_param,
+                                    "metric_type": metric_type
+                                }
+                                logger.debug(search_param_group)
+                                vector_query = {"vector": {vec_field_name: {
+                                    "topk": top_k,
+                                    "query": query_vectors,
+                                    "metric_type": real_metric_type,
+                                    "params": search_param}
+                                }}
+                                for i in range(2):
+                                    result = milvus_instance.query(vector_query)
+                                warm_up = False
+                                logger.info("End warm up")
+                                result = milvus_instance.query(vector_query)
+                                result_ids = milvus_instance.get_ids(result)
+                                acc_value = self.get_recall_value(true_ids[:nq, :top_k].tolist(), result_ids)
+                                logger.info("Query ann_accuracy: %s" % acc_value)
+                                metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname,
+                                                             collection_info, index_info, search_param_group)
+                                metric.metrics = {
+                                    "type": "ann_accuracy",
+                                    "value": {
+                                        "acc": acc_value
+                                    }
+                                }
+                                report(metric)
+
+        elif run_type == "search_stability":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(
+                collection_name)
+            search_params = collection["search_params"]
+            during_time = collection["during_time"]
+            collection_info = {
+                "dimension": dimension,
+                "metric_type": metric_type,
+                "dataset_name": collection_name
+            }
+            if not milvus_instance.exists_collection():
+                logger.error("Table name: %s not existed" % collection_name)
+                return
+            logger.info(milvus_instance.count())
+            index_info = milvus_instance.describe_index()
+            logger.info(index_info)
+            g_top_k = int(collection["top_ks"].split("-")[1])
+            g_nq = int(collection["nqs"].split("-")[1])
+            l_top_k = int(collection["top_ks"].split("-")[0])
+            l_nq = int(collection["nqs"].split("-")[0])
+            milvus_instance.load_collection()
+            # start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            # logger.debug(start_mem_usage)
+            start_row_count = milvus_instance.count()
+            logger.debug(milvus_instance.describe_index())
+            logger.info(start_row_count)
+            vector_type = self.get_vector_type(data_type)
+            vec_field_name = utils.get_default_field_name(vector_type)
+            real_metric_type = utils.metric_type_trans(metric_type)
+            start_time = time.time()
+            while time.time() < start_time + during_time * 60:
+                search_param = {}
+                top_k = random.randint(l_top_k, g_top_k)
+                nq = random.randint(l_nq, g_nq)
+                for k, v in search_params.items():
+                    search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
+                query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
+                logger.debug("Query nq: %d, top-k: %d, param: %s" % (nq, top_k, json.dumps(search_param)))
+                vector_query = {"vector": {vec_field_name: {
+                    "topk": top_k,
+                    "query": query_vectors[:nq],
+                    "metric_type": real_metric_type,
+                    "params": search_param}
+                }}
+                milvus_instance.query(vector_query)
+            # end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info,
+                                         {})
+            metric.metrics = {
+                "type": "search_stability",
+                "value": {
+                    "during_time": during_time,
+                }
+            }
+            report(metric)
+
+        elif run_type == "loop_stability":
+            # init data
+            milvus_instance.clean_db()
+            pull_interval = collection["pull_interval"]
+            collection_num = collection["collection_num"]
+            concurrent = collection["concurrent"] if "concurrent" in collection else False
+            concurrent_num = collection_num
+            dimension = collection["dimension"] if "dimension" in collection else 128
+            insert_xb = collection["insert_xb"] if "insert_xb" in collection else 100000
+            index_types = collection["index_types"] if "index_types" in collection else ['ivf_sq8']
+            index_param = {"nlist": 256}
+            collection_names = []
+            milvus_instances_map = {}
+            insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(insert_xb)]
+            ids = [i for i in range(insert_xb)]
+            # initialize and prepare
+            for i in range(collection_num):
+                name = utils.get_unique_name(prefix="collection_%d_" % i)
+                collection_names.append(name)
+                metric_type = random.choice(["l2", "ip"])
+                # default float_vector
+                milvus_instance = MilvusClient(collection_name=name, host=self.host)
+                milvus_instance.create_collection(dimension, other_fields=None)
+                index_type = random.choice(index_types)
+                field_name = utils.get_default_field_name()
+                milvus_instance.create_index(field_name, index_type, metric_type, index_param=index_param)
+                logger.info(milvus_instance.describe_index())
+                insert_vectors = utils.normalize(metric_type, insert_vectors)
+                entities = milvus_instance.generate_entities(insert_vectors, ids)
+                res_ids = milvus_instance.insert(entities, ids=ids)
+                milvus_instance.flush()
+                milvus_instances_map.update({name: milvus_instance})
+                logger.info(milvus_instance.describe_index())
+
+                # loop time unit: min -> s
+            pull_interval_seconds = pull_interval * 60
+            tasks = ["insert_rand", "query_rand", "flush"]
+            i = 1
+            while True:
+                logger.info("Loop time: %d" % i)
+                start_time = time.time()
+                while time.time() - start_time < pull_interval_seconds:
+                    if concurrent:
+                        threads = []
+                        for name in collection_names:
+                            task_name = random.choice(tasks)
+                            task_run = getattr(milvus_instances_map[name], task_name)
+                            t = threading.Thread(target=task_run, args=())
+                            threads.append(t)
+                            t.start()
+                        for t in threads:
+                            t.join()
+                        # with concurrent.futures.ThreadPoolExecutor(max_workers=concurrent_num) as executor:
+                        #     future_results = {executor.submit(getattr(milvus_instances_map[mp[j][0]], mp[j][1])): j for j in range(concurrent_num)}
+                        #     for future in concurrent.futures.as_completed(future_results):
+                        #         future.result()
+                    else:
+                        tmp_collection_name = random.choice(collection_names)
+                        task_name = random.choice(tasks)
+                        logger.info(tmp_collection_name)
+                        logger.info(task_name)
+                        task_run = getattr(milvus_instances_map[tmp_collection_name], task_name)
+                        task_run()
+
+                logger.debug("Restart server")
+                helm_utils.restart_server(self.service_name, namespace)
+                # new connection
+                # for name in collection_names:
+                #     milvus_instance = MilvusClient(collection_name=name, host=self.host)
+                #     milvus_instances_map.update({name: milvus_instance})
+                time.sleep(30)
+                i = i + 1
+
+        elif run_type == "stability":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(
+                collection_name)
+            during_time = collection["during_time"]
+            operations = collection["operations"]
+            collection_info = {
+                "dimension": dimension,
+                "metric_type": metric_type,
+                "dataset_name": collection_name
+            }
+            if not milvus_instance.exists_collection():
+                logger.error(milvus_instance.show_collections())
+                raise Exception("Table name: %s not existed" % collection_name)
+            logger.info(milvus_instance.count())
+            index_info = milvus_instance.describe_index()
+            logger.info(index_info)
+            # start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            start_row_count = milvus_instance.count()
+            logger.info(start_row_count)
+            vector_type = self.get_vector_type(data_type)
+            vec_field_name = utils.get_default_field_name(vector_type)
+            real_metric_type = utils.metric_type_trans(metric_type)
+            query_vectors = [[random.random() for _ in range(dimension)] for _ in range(10000)]
+            if "insert" in operations:
+                insert_xb = operations["insert"]["xb"]
+            if "delete" in operations:
+                delete_xb = operations["delete"]["xb"]
+            if "query" in operations:
+                g_top_k = int(operations["query"]["top_ks"].split("-")[1])
+                l_top_k = int(operations["query"]["top_ks"].split("-")[0])
+                g_nq = int(operations["query"]["nqs"].split("-")[1])
+                l_nq = int(operations["query"]["nqs"].split("-")[0])
+                search_params = operations["query"]["search_params"]
+            i = 0
+            start_time = time.time()
+            while time.time() < start_time + during_time * 60:
+                i = i + 1
+                q = self.gen_executors(operations)
+                for name in q:
+                    try:
+                        if name == "insert":
+                            insert_ids = random.sample(list(range(collection_size)), insert_xb)
+                            insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(insert_xb)]
+                            entities = milvus_instance.generate_entities(insert_vectors, insert_ids)
+                            milvus_instance.insert(entities, ids=insert_ids)
+                        elif name == "delete":
+                            delete_ids = random.sample(list(range(collection_size)), delete_xb)
+                            milvus_instance.delete(delete_ids)
+                        elif name == "query":
+                            top_k = random.randint(l_top_k, g_top_k)
+                            nq = random.randint(l_nq, g_nq)
+                            search_param = {}
+                            for k, v in search_params.items():
+                                search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
+                            logger.debug("Query nq: %d, top-k: %d, param: %s" % (nq, top_k, json.dumps(search_param)))
+                            vector_query = {"vector": {vec_field_name: {
+                                "topk": top_k,
+                                "query": query_vectors[:nq],
+                                "metric_type": real_metric_type,
+                                "params": search_param}
+                            }}
+                            result = milvus_instance.query(vector_query)
+                        elif name in ["flush", "compact"]:
+                            func = getattr(milvus_instance, name)
+                            func()
+                        logger.debug(milvus_instance.count())
+                    except Exception as e:
+                        logger.error(name)
+                        logger.error(str(e))
+                        raise
+                logger.debug("Loop time: %d" % i)
+            # end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            end_row_count = milvus_instance.count()
+            metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info,
+                                         {})
+            metric.metrics = {
+                "type": "stability",
+                "value": {
+                    "during_time": during_time,
+                    "row_count_increments": end_row_count - start_row_count
+                }
+            }
+            report(metric)
+
+        elif run_type == "debug":
+            time.sleep(7200)
+            default_insert_vectors = [[random.random() for _ in range(128)] for _ in range(500000)]
+            interval = 50000
+            for loop in range(1, 7):
+                insert_xb = loop * interval
+                insert_vectors = default_insert_vectors[:insert_xb]
+                insert_ids = [i for i in range(insert_xb)]
+                entities = milvus_instance.generate_entities(insert_vectors, insert_ids)
+                for j in range(5):
+                    milvus_instance.insert(entities, ids=insert_ids)
+                    time.sleep(10)
+
+        else:
+            raise Exception("Run type not defined")
+        logger.debug("All test finished")
diff --git a/tests/benchmark/local_runner.py b/tests/benchmark/local_runner.py
new file mode 100644
index 000000000..408fc95f5
--- /dev/null
+++ b/tests/benchmark/local_runner.py
@@ -0,0 +1,732 @@
+import os
+import logging
+import pdb
+import string
+import time
+import random
+import json
+import csv
+from multiprocessing import Process
+import numpy as np
+import concurrent.futures
+from queue import Queue
+
+import locust_user
+from milvus import DataType
+from client import MilvusClient
+from runner import Runner
+import utils
+import parser
+
+
+DELETE_INTERVAL_TIME = 5
+INSERT_INTERVAL = 50000
+logger = logging.getLogger("milvus_benchmark.local_runner")
+
+
+class LocalRunner(Runner):
+    """run local mode"""
+    def __init__(self, host, port):  # Store the Milvus endpoint used by every MilvusClient created in run()
+        super(LocalRunner, self).__init__()
+        self.host = host  # Milvus server address passed through to MilvusClient
+        self.port = port  # Milvus server port passed through to MilvusClient
+
+    def run(self, run_type, collection):
+        logger.debug(run_type)
+        logger.debug(collection)
+        collection_name = collection["collection_name"] if "collection_name" in collection else None
+        milvus_instance = MilvusClient(collection_name=collection_name, host=self.host, port=self.port)
+        logger.info(milvus_instance.show_collections())
+        # TODO:
+        # self.env_value = milvus_instance.get_server_config()
+        # ugly implemention
+        # self.env_value = utils.convert_nested(self.env_value)
+        # self.env_value.pop("logs")
+        # self.env_value.pop("network")
+        # logger.info(self.env_value)
+
+        if run_type in ["insert_performance", "insert_flush_performance"]:
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name)
+            ni_per = collection["ni_per"]
+            build_index = collection["build_index"]
+            if milvus_instance.exists_collection():
+                milvus_instance.drop()
+                time.sleep(10)
+            vector_type = self.get_vector_type(data_type)
+            other_fields = collection["other_fields"] if "other_fields" in collection else None
+            milvus_instance.create_collection(dimension, data_type=vector_type, other_fields=other_fields)
+            if build_index is True:
+                index_type = collection["index_type"]
+                index_param = collection["index_param"]
+                index_field_name = utils.get_default_field_name(vector_type)
+                milvus_instance.create_index(index_field_name, index_type, metric_type, index_param=index_param)
+            res = self.do_insert(milvus_instance, collection_name, data_type, dimension, collection_size, ni_per)
+            milvus_instance.flush()
+            logger.debug("Table row counts: %d" % milvus_instance.count())
+            if build_index is True:
+                logger.debug("Start build index for last file")
+                milvus_instance.create_index(index_field_name, index_type, metric_type, index_param=index_param)
+
+        elif run_type == "delete_performance":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name)
+            ni_per = collection["ni_per"]
+            auto_flush = collection["auto_flush"] if "auto_flush" in collection else True
+            if not milvus_instance.exists_collection():
+                logger.error(milvus_instance.show_collections())
+                logger.error("Table: %s not found" % collection_name)
+                return
+            length = milvus_instance.count() 
+            ids = [i for i in range(length)] 
+            loops = int(length / ni_per)
+            if auto_flush is False:
+                milvus_instance.set_config("storage", "auto_flush_interval", BIG_FLUSH_INTERVAL)
+            for i in range(loops):
+                delete_ids = ids[i*ni_per: i*ni_per+ni_per]
+                logger.debug("Delete %d - %d" % (delete_ids[0], delete_ids[-1]))
+                milvus_instance.delete(delete_ids)
+                logger.debug("Table row counts: %d" % milvus_instance.count())
+            logger.debug("Table row counts: %d" % milvus_instance.count())
+            milvus_instance.flush()
+            logger.debug("Table row counts: %d" % milvus_instance.count())
+
+        elif run_type == "build_performance":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name)
+            index_type = collection["index_type"]
+            index_param = collection["index_param"]
+            if not milvus_instance.exists_collection():
+                logger.error("Table name: %s not existed" % collection_name)
+                return
+            vector_type = self.get_vector_type(data_type)
+            index_field_name = utils.get_default_field_name(vector_type)
+            # drop index
+            logger.debug("Drop index")
+            milvus_instance.drop_index(index_field_name)
+            start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            start_time = time.time()
+            milvus_instance.create_index(index_field_name, index_type, metric_type, index_param=index_param)
+            end_time = time.time()
+            logger.debug("Table row counts: %d" % milvus_instance.count())
+            end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            logger.debug("Diff memory: %s, current memory usage: %s, build time: %s" % ((end_mem_usage - start_mem_usage), end_mem_usage, round(end_time - start_time, 1)))
+
+        elif run_type == "search_performance":
+            (data_type, collection_size,  dimension, metric_type) = parser.collection_parser(collection_name)
+            run_count = collection["run_count"]
+            top_ks = collection["top_ks"]
+            nqs = collection["nqs"]
+            search_params = collection["search_params"]
+            filter_query = []
+            filters = collection["filters"] if "filters" in collection else []
+            # pdb.set_trace()
+            # ranges = collection["range"] if "range" in collection else None
+            # terms = collection["term"] if "term" in collection else None
+            # if ranges:
+            #     filter_query.append(eval(ranges))
+            # if terms:
+            #     filter_query.append(eval(terms))
+            vector_type = self.get_vector_type(data_type)
+            vec_field_name = utils.get_default_field_name(vector_type)
+            # for debugging
+            # time.sleep(3600)
+            if not milvus_instance.exists_collection():
+                logger.error("Table name: %s not existed" % collection_name)
+                return
+            vector_type = self.get_vector_type(data_type)
+            vec_field_name = utils.get_default_field_name(vector_type)
+            logger.info(milvus_instance.count())
+            result = milvus_instance.describe_index()
+            logger.info(result)
+            milvus_instance.preload_collection()
+            mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            logger.info(mem_usage)
+            for search_param in search_params:
+                logger.info("Search param: %s" % json.dumps(search_param))
+                filter_param = []
+                if not filters:
+                    filters.append(None)
+                for filter in filters:
+                    if isinstance(filter, dict) and "range" in filter:
+                        filter_query.append(eval(filter["range"]))
+                        filter_param.append(filter["range"])
+                    if isinstance(filter, dict) and "term" in filter:
+                        filter_query.append(eval(filter["term"]))
+                        filter_param.append(filter["term"])
+                    logger.info("filter param: %s" % json.dumps(filter_param))
+                    res = self.do_query(milvus_instance, collection_name, vec_field_name, top_ks, nqs, run_count, search_param, filter_query)
+                    headers = ["Nq/Top-k"]
+                    headers.extend([str(top_k) for top_k in top_ks])
+                    logger.info("Search param: %s" % json.dumps(search_param))
+                    utils.print_table(headers, nqs, res)
+                    mem_usage = milvus_instance.get_mem_info()["memory_used"]
+                    logger.info(mem_usage)
+
+        elif run_type == "locust_search_performance":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name)
+            ni_per = collection["ni_per"]
+            build_index = collection["build_index"]
+            vector_type = self.get_vector_type(data_type)
+            index_field_name = utils.get_default_field_name(vector_type)
+            # if build_index is True:
+            #     index_type = collection["index_type"]
+            #     index_param = collection["index_param"]
+            # # TODO: debug
+            # if milvus_instance.exists_collection():
+            #     milvus_instance.drop()
+            #     time.sleep(10)
+            # other_fields = collection["other_fields"] if "other_fields" in collection else None
+            # milvus_instance.create_collection(dimension, data_type=vector_type, other_fields=other_fields)
+            # milvus_instance.create_index(index_field_name, index_type, metric_type, index_param=index_param)
+            # res = self.do_insert(milvus_instance, collection_name, data_type, dimension, collection_size, ni_per)
+            # milvus_instance.flush()
+            # logger.debug("Table row counts: %d" % milvus_instance.count())
+            # if build_index is True:
+            #     logger.debug("Start build index for last file")
+            #     milvus_instance.create_index(index_field_name, index_type, metric_type, index_param=index_param)
+            real_metric_type = utils.metric_type_trans(metric_type)
+            ### spawn locust requests
+            task = collection["task"]
+            connection_type = "single"
+            connection_num = task["connection_num"]
+            if connection_num > 1:
+                connection_type = "multi"
+            clients_num = task["clients_num"]
+            hatch_rate = task["hatch_rate"]
+            during_time = utils.timestr_to_int(task["during_time"])
+            task_types = task["types"]
+            # """
+            # task: 
+            #     connection_num: 1
+            #     clients_num: 100
+            #     hatch_rate: 2
+            #     during_time: 5m
+            #     types:
+            #     -
+            #         type: query
+            #         weight: 1
+            #         params:
+            #         top_k: 10
+            #         nq: 1
+            #         # filters:
+            #         #   -
+            #         #     range:
+            #         #       int64:
+            #         #         LT: 0
+            #         #         GT: 1000000
+            #         search_param:
+            #             nprobe: 16
+            # """
+            run_params = {"tasks": {}, "clients_num": clients_num, "spawn_rate": hatch_rate, "during_time": during_time}
+            for task_type in task_types:
+                run_params["tasks"].update({task_type["type"]: task_type["weight"] if "weight" in task_type else 1})
+
+            #. collect stats
+            locust_stats = locust_user.locust_executor(self.host, self.port, collection_name, connection_type=connection_type, run_params=run_params)
+            logger.info(locust_stats)
+
+        elif run_type == "search_ids_stability":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name)
+            search_params = collection["search_params"]
+            during_time = collection["during_time"]
+            ids_length = collection["ids_length"]
+            ids = collection["ids"]
+            logger.info(milvus_instance.count())
+            index_info = milvus_instance.describe_index()
+            logger.info(index_info)
+            g_top_k = int(collection["top_ks"].split("-")[1])
+            l_top_k = int(collection["top_ks"].split("-")[0])
+            g_id = int(ids.split("-")[1])
+            l_id = int(ids.split("-")[0])
+            g_id_length = int(ids_length.split("-")[1])
+            l_id_length = int(ids_length.split("-")[0])
+
+            milvus_instance.preload_collection()
+            start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            logger.debug(start_mem_usage)
+            start_time = time.time()
+            while time.time() < start_time + during_time * 60:
+                search_param = {}
+                top_k = random.randint(l_top_k, g_top_k)
+                ids_num = random.randint(l_id_length, g_id_length)
+                l_ids = random.randint(l_id, g_id-ids_num)
+                # ids_param = [random.randint(l_id_length, g_id_length) for _ in range(ids_num)]
+                ids_param = [id for id in range(l_ids, l_ids+ids_num)]
+                for k, v in search_params.items():
+                    search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
+                logger.debug("Query top-k: %d, ids_num: %d, param: %s" % (top_k, ids_num, json.dumps(search_param)))
+                result = milvus_instance.query_ids(top_k, ids_param, search_param=search_param)
+            end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            metrics = {
+                "during_time": during_time,
+                "start_mem_usage": start_mem_usage,
+                "end_mem_usage": end_mem_usage,
+                "diff_mem": end_mem_usage - start_mem_usage,
+            }
+            logger.info(metrics)
+
+        elif run_type == "search_performance_concurrents":
+            data_type, dimension, metric_type = parser.parse_ann_collection_name(collection_name)
+            hdf5_source_file = collection["source_file"]
+            use_single_connection = collection["use_single_connection"]
+            concurrents = collection["concurrents"]
+            top_ks = collection["top_ks"]
+            nqs = collection["nqs"]
+            search_params = self.generate_combinations(collection["search_params"])
+            if not milvus_instance.exists_collection():
+                logger.error("Table name: %s not existed" % collection_name)
+                return
+            logger.info(milvus_instance.count())
+            result = milvus_instance.describe_index()
+            logger.info(result)
+            milvus_instance.preload_collection()
+            dataset = utils.get_dataset(hdf5_source_file)
+            for concurrent_num in concurrents:
+                top_k = top_ks[0] 
+                for nq in nqs:
+                    mem_usage = milvus_instance.get_mem_info()["memory_used"]
+                    logger.info(mem_usage)
+                    query_vectors = self.normalize(metric_type, np.array(dataset["test"][:nq])) 
+                    logger.debug(search_params)
+                    for search_param in search_params:
+                        logger.info("Search param: %s" % json.dumps(search_param))
+                        total_time = 0.0
+                        if use_single_connection is True:
+                            connections = [MilvusClient(collection_name=collection_name, host=self.host, port=self.port)]
+                            with concurrent.futures.ThreadPoolExecutor(max_workers=concurrent_num) as executor:
+                                future_results = {executor.submit(
+                                    self.do_query_qps, connections[0], query_vectors, top_k, search_param=search_param) : index for index in range(concurrent_num)}
+                        else:
+                            connections = [MilvusClient(collection_name=collection_name, host=self.hos, port=self.port) for i in range(concurrent_num)]
+                            with concurrent.futures.ThreadPoolExecutor(max_workers=concurrent_num) as executor:
+                                future_results = {executor.submit(
+                                    self.do_query_qps, connections[index], query_vectors, top_k, search_param=search_param) : index for index in range(concurrent_num)}
+                        for future in concurrent.futures.as_completed(future_results):
+                            total_time = total_time + future.result()
+                        qps_value = total_time / concurrent_num 
+                        logger.debug("QPS value: %f, total_time: %f, request_nums: %f" % (qps_value, total_time, concurrent_num))
+                    mem_usage = milvus_instance.get_mem_info()["memory_used"]
+                    logger.info(mem_usage)
+
+        elif run_type == "ann_accuracy":
+            hdf5_source_file = collection["source_file"]
+            collection_name = collection["collection_name"]
+            index_types = collection["index_types"]
+            index_params = collection["index_params"]
+            top_ks = collection["top_ks"]
+            nqs = collection["nqs"]
+            search_params = collection["search_params"]
+            # mapping to search param list
+            search_params = self.generate_combinations(search_params)
+            # mapping to index param list
+            index_params = self.generate_combinations(index_params)
+            data_type, dimension, metric_type = parser.parse_ann_collection_name(collection_name)
+            dataset = utils.get_dataset(hdf5_source_file)
+            true_ids = np.array(dataset["neighbors"])
+            vector_type = self.get_vector_type_from_metric(metric_type)
+            vec_field_name = utils.get_default_field_name(vector_type)
+            real_metric_type = utils.metric_type_trans(metric_type)
+
+            # re-create collection
+            if milvus_instance.exists_collection(collection_name):
+                milvus_instance.drop()
+                time.sleep(DELETE_INTERVAL_TIME)
+            milvus_instance.create_collection(dimension, data_type=vector_type)
+            insert_vectors = self.normalize(metric_type, np.array(dataset["train"]))
+            if len(insert_vectors) != dataset["train"].shape[0]:
+                raise Exception("Row count of insert vectors: %d is not equal to dataset size: %d" % (len(insert_vectors), dataset["train"].shape[0]))
+            logger.debug("The row count of entities to be inserted: %d" % len(insert_vectors))
+            # insert batch once
+            # milvus_instance.insert(insert_vectors)
+            loops = len(insert_vectors) // INSERT_INTERVAL + 1
+            for i in range(loops):
+                start = i*INSERT_INTERVAL
+                end = min((i+1)*INSERT_INTERVAL, len(insert_vectors))
+                if start < end:
+                    tmp_vectors = insert_vectors[start:end]
+                    ids = [i for i in range(start, end)]
+                    if not isinstance(tmp_vectors, list):
+                        entities = milvus_instance.generate_entities(tmp_vectors.tolist(), ids)
+                        res_ids = milvus_instance.insert(entities, ids=ids)
+                    else:
+                        entities = milvus_instance.generate_entities(tmp_vectors, ids)
+                        res_ids = milvus_instance.insert(entities, ids=ids)
+                    assert res_ids == ids
+            milvus_instance.flush()
+            res_count = milvus_instance.count()
+            logger.info("Table: %s, row count: %d" % (collection_name, res_count))
+            if res_count != len(insert_vectors):
+                raise Exception("Table row count is not equal to insert vectors")
+            for index_type in index_types:
+                for index_param in index_params:
+                    logger.debug("Building index with param: %s, metric_type: %s" % (json.dumps(index_param), metric_type))
+                    milvus_instance.create_index(vec_field_name, index_type, metric_type, index_param=index_param)
+                    logger.info("Start preload collection: %s" % collection_name)
+                    milvus_instance.preload_collection()
+                    for search_param in search_params:
+                        for nq in nqs:
+                            query_vectors = self.normalize(metric_type, np.array(dataset["test"][:nq]))
+                            if not isinstance(query_vectors, list):
+                                query_vectors = query_vectors.tolist()
+                            for top_k in top_ks:
+                                logger.debug("Search nq: %d, top-k: %d, search_param: %s, metric_type: %s" % (nq, top_k, json.dumps(search_param), metric_type))
+                                vector_query = {"vector": {vec_field_name: {
+                                    "topk": top_k,
+                                    "query": query_vectors,
+                                    "metric_type": real_metric_type,
+                                    "params": search_param}
+                                }}
+                                result = milvus_instance.query(vector_query)
+                                result_ids = milvus_instance.get_ids(result)
+                                # pdb.set_trace()
+                                acc_value = self.get_recall_value(true_ids[:nq, :top_k].tolist(), result_ids)
+                                logger.info("Query ann_accuracy: %s" % acc_value)
+
+        elif run_type == "accuracy":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name)
+            search_params = collection["search_params"]
+            # mapping to search param list
+            search_params = self.generate_combinations(search_params)
+
+            top_ks = collection["top_ks"]
+            nqs = collection["nqs"]
+            collection_info = {
+                "dimension": dimension,
+                "metric_type": metric_type,
+                "dataset_name": collection_name
+            }
+            if not milvus_instance.exists_collection():
+                logger.error("Table name: %s not existed" % collection_name)
+                return
+            logger.info(milvus_instance.count())
+            index_info = milvus_instance.describe_index()
+            logger.info(index_info)
+            milvus_instance.preload_collection()
+            true_ids_all = self.get_groundtruth_ids(collection_size)
+            vector_type = self.get_vector_type(data_type)
+            vec_field_name = utils.get_default_field_name(vector_type)
+            for search_param in search_params:
+                headers = ["Nq/Top-k"]
+                res = []
+                for nq in nqs:
+                    tmp_res = []
+                    for top_k in top_ks:
+                        search_param_group = {
+                            "nq": nq,
+                            "topk": top_k,
+                            "search_param": search_param,
+                            "metric_type": metric_type
+                        }
+                        logger.info("Query params: %s" % json.dumps(search_param_group))
+                        result_ids = self.do_query_ids(milvus_instance, collection_name, vec_field_name, top_k, nq, search_param=search_param)
+                        mem_used = milvus_instance.get_mem_info()["memory_used"]
+                        acc_value = self.get_recall_value(true_ids_all[:nq, :top_k].tolist(), result_ids)
+                        logger.info("Query accuracy: %s" % acc_value)
+                        tmp_res.append(acc_value)
+                        logger.info("Memory usage: %s" % mem_used)
+                    res.append(tmp_res)
+                headers.extend([str(top_k) for top_k in top_ks])
+                logger.info("Search param: %s" % json.dumps(search_param))
+                utils.print_table(headers, nqs, res)
+
+        elif run_type == "stability":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name)
+            during_time = collection["during_time"]
+            operations = collection["operations"]
+            if not milvus_instance.exists_collection():
+                logger.error(milvus_instance.show_collections())
+                raise Exception("Table name: %s not existed" % collection_name)
+            milvus_instance.preload_collection()
+            start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            start_row_count = milvus_instance.count()
+            logger.info(start_row_count)
+            vector_type = self.get_vector_type(data_type)
+            vec_field_name = utils.get_default_field_name(vector_type)
+            real_metric_type = utils.metric_type_trans(metric_type)
+            query_vectors = [[random.random() for _ in range(dimension)] for _ in range(10000)]
+            if "insert" in operations:
+                insert_xb = operations["insert"]["xb"]
+            if "delete" in operations:
+                delete_xb = operations["delete"]["xb"]
+            if "query" in operations:
+                g_top_k = int(operations["query"]["top_ks"].split("-")[1])
+                l_top_k = int(operations["query"]["top_ks"].split("-")[0])
+                g_nq = int(operations["query"]["nqs"].split("-")[1])
+                l_nq = int(operations["query"]["nqs"].split("-")[0])
+                search_params = operations["query"]["search_params"]
+            i = 0
+            start_time = time.time()
+            while time.time() < start_time + during_time * 60:
+                i = i + 1
+                q = self.gen_executors(operations)
+                for name in q:
+                    try:
+                        if name == "insert":
+                            insert_ids = random.sample(list(range(collection_size)), insert_xb)
+                            insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(insert_xb)]
+                            entities = milvus_instance.generate_entities(insert_vectors, insert_ids)
+                            milvus_instance.insert(entities, ids=insert_ids)
+                        elif name == "delete":
+                            delete_ids = random.sample(list(range(collection_size)), delete_xb)
+                            milvus_instance.delete(delete_ids)
+                        elif name == "query":
+                            top_k = random.randint(l_top_k, g_top_k)
+                            nq = random.randint(l_nq, g_nq)
+                            search_param = {}
+                            for k, v in search_params.items():
+                                search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
+                            logger.debug("Query nq: %d, top-k: %d, param: %s" % (nq, top_k, json.dumps(search_param)))
+                            vector_query = {"vector": {vec_field_name: {
+                                "topk": top_k,
+                                "query": query_vectors[:nq],
+                                "metric_type": real_metric_type,
+                                "params": search_param}
+                            }}
+                            result = milvus_instance.query(vector_query)
+                        elif name in ["flush", "compact"]:
+                            func = getattr(milvus_instance, name)
+                            func()
+                        logger.debug(milvus_instance.count())
+                    except Exception as e:
+                        logger.error(name)
+                        logger.error(str(e))
+                        raise
+                logger.debug("Loop time: %d" % i)
+            end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
+            end_row_count = milvus_instance.count()
+            metrics = {
+                "during_time": during_time,
+                "start_mem_usage": start_mem_usage,
+                "end_mem_usage": end_mem_usage,
+                "diff_mem": end_mem_usage - start_mem_usage,
+                "row_count_increments": end_row_count - start_row_count
+            }
+            logger.info(metrics)
+
+        elif run_type == "loop_stability":
+            # init data
+            milvus_instance.clean_db()
+            pull_interval = collection["pull_interval"]
+            collection_num = collection["collection_num"]
+            concurrent = collection["concurrent"] if "concurrent" in collection else False
+            concurrent_num = collection_num
+            dimension = collection["dimension"] if "dimension" in collection else 128
+            insert_xb = collection["insert_xb"] if "insert_xb" in collection else 100000
+            index_types = collection["index_types"] if "index_types" in collection else ['ivf_sq8']
+            index_param = {"nlist": 256}
+            collection_names = []
+            milvus_instances_map = {}
+            insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(insert_xb)]
+            ids = [i for i in range(insert_xb)]
+            # initialize and prepare
+            for i in range(collection_num):
+                name = utils.get_unique_name(prefix="collection_%d_" % i)
+                collection_names.append(name)
+                metric_type = random.choice(["l2", "ip"])
+                # default float_vector
+                milvus_instance = MilvusClient(collection_name=name, host=self.host)
+                milvus_instance.create_collection(dimension, other_fields=None)
+                index_type = random.choice(index_types)
+                field_name = utils.get_default_field_name()
+                milvus_instance.create_index(field_name, index_type, metric_type, index_param=index_param)
+                logger.info(milvus_instance.describe_index())
+                insert_vectors = utils.normalize(metric_type, insert_vectors)
+                entities = milvus_instance.generate_entities(insert_vectors, ids)
+                res_ids = milvus_instance.insert(entities, ids=ids)
+                milvus_instance.flush()
+                milvus_instances_map.update({name: milvus_instance})
+                logger.info(milvus_instance.describe_index())
+
+            # loop time unit: min -> s
+            pull_interval_seconds = pull_interval * 60
+            tasks = ["insert_rand", "delete_rand", "query_rand", "flush", "compact"]
+            i = 1
+            while True:
+                logger.info("Loop time: %d" % i)
+                start_time = time.time()
+                while time.time() - start_time < pull_interval_seconds:
+                    if concurrent:
+                        threads = []
+                        for name in collection_names:
+                            task_name = random.choice(tasks)
+                            task_run = getattr(milvus_instances_map[name], task_name)
+                            t = threading.Thread(target=task_run, args=())
+                            threads.append(t)
+                            t.start()
+                        for t in threads:
+                            t.join()
+                        # with concurrent.futures.ThreadPoolExecutor(max_workers=concurrent_num) as executor:
+                        #     future_results = {executor.submit(getattr(milvus_instances_map[mp[j][0]], mp[j][1])): j for j in range(concurrent_num)}
+                        #     for future in concurrent.futures.as_completed(future_results):
+                        #         future.result()
+                    else:
+                        tmp_collection_name = random.choice(collection_names)
+                        task_name = random.choice(tasks)
+                        logger.info(tmp_collection_name)
+                        logger.info(task_name)
+                        task_run = getattr(milvus_instances_map[tmp_collection_name], task_name)
+                        task_run()
+                # new connection
+                # for name in collection_names:
+                #     milvus_instance = MilvusClient(collection_name=name, host=self.host)
+                #     milvus_instances_map.update({name: milvus_instance})
+                i = i + 1
+
+        elif run_type == "locust_mix_performance":
+            (data_type, collection_size, dimension, metric_type) = parser.collection_parser(
+                collection_name)
+            ni_per = collection["ni_per"]
+            build_index = collection["build_index"]
+            vector_type = self.get_vector_type(data_type)
+            index_field_name = utils.get_default_field_name(vector_type)
+            # drop exists collection
+            if milvus_instance.exists_collection():
+                milvus_instance.drop()
+                time.sleep(10)
+            # create collection
+            other_fields = collection["other_fields"] if "other_fields" in collection else None
+            milvus_instance.create_collection(dimension, data_type=DataType.FLOAT_VECTOR, collection_name=collection_name, other_fields=other_fields)
+            logger.info(milvus_instance.get_info())
+            # insert entities
+            insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(ni_per)]
+            insert_ids = random.sample(list(range(collection_size)), ni_per)
+            insert_vectors = utils.normalize(metric_type, insert_vectors)
+            entities = milvus_instance.generate_entities(insert_vectors, insert_ids, collection_name)
+            milvus_instance.insert(entities, ids=insert_ids)
+            # flush
+            milvus_instance.flush()
+            logger.info(milvus_instance.get_stats())
+            logger.debug("Table row counts: %d" % milvus_instance.count())
+            # create index
+            if build_index is True:
+                index_type = collection["index_type"]
+                index_param = collection["index_param"]
+                logger.debug("Start build index for last file")
+                milvus_instance.create_index(index_field_name, index_type, metric_type, index_param)
+                logger.debug(milvus_instance.describe_index())
+            # locust
+            task = collection["tasks"]
+            task_file = utils.get_unique_name()
+            task_file_script = task_file + '.py'
+            task_file_csv = task_file + '_stats.csv'
+            task_types = task["types"]
+            connection_type = "single"
+            connection_num = task["connection_num"]
+            if connection_num > 1:
+                connection_type = "multi"
+            clients_num = task["clients_num"]
+            hatch_rate = task["hatch_rate"]
+            during_time = task["during_time"]
+            def_strs = ""
+            # define def str
+            for task_type in task_types:
+                type = task_type["type"]
+                weight = task_type["weight"]
+                if type == "flush":
+                    def_str = """
+    @task(%d)
+    def flush(self):
+        client = get_client(collection_name)
+        client.flush(collection_name=collection_name)
+                        """ % weight
+                if type == "compact":
+                    def_str = """
+    @task(%d)
+    def compact(self):
+        client = get_client(collection_name)
+        client.compact(collection_name)
+                        """ % weight
+                if type == "query":
+                    def_str = """
+    @task(%d)
+    def query(self):
+        client = get_client(collection_name)
+        params = %s
+        X = [[random.random() for i in range(dim)] for i in range(params["nq"])]
+        vector_query = {"vector": {"%s": {
+        "topk": params["top_k"], 
+        "query": X, 
+        "metric_type": "%s", 
+        "params": params["search_param"]}}}
+        client.query(vector_query, filter_query=params["filters"], collection_name=collection_name)
+                        """ % (weight, task_type["params"], index_field_name, utils.metric_type_trans(metric_type))
+                if type == "insert":
+                    def_str = """
+    @task(%d)
+    def insert(self):
+        client = get_client(collection_name)
+        params = %s
+        insert_ids = random.sample(list(range(100000)), params["nb"])
+        insert_vectors = [[random.random() for _ in range(dim)] for _ in range(params["nb"])]
+        insert_vectors = utils.normalize("l2", insert_vectors)
+        entities = generate_entities(insert_vectors, insert_ids)
+        client.insert(entities,ids=insert_ids, collection_name=collection_name)
+                    """ % (weight, task_type["params"])
+                if type == "delete":
+                    def_str = """
+    @task(%d)
+    def delete(self):
+        client = get_client(collection_name)
+        ids = [random.randint(1, 1000000) for i in range(1)]
+        client.delete(ids, collection_name)
+                        """ % weight
+                def_strs += def_str
+                print(def_strs)
+                # define locust code str
+                code_str = """
+import random
+import json
+from locust import User, task, between
+from locust_task import MilvusTask
+from client import MilvusClient
+import utils
+
+host = '%s'
+port = %s
+collection_name = '%s'
+dim = %s
+connection_type = '%s'
+m = MilvusClient(host=host, port=port)
+
+
+def get_client(collection_name):
+    if connection_type == 'single':
+        return MilvusTask(m=m)
+    elif connection_type == 'multi':
+        return MilvusTask(connection_type='multi', host=host, port=port, collection_name=collection_name)
+  
+        
+def generate_entities(vectors, ids):
+    return m.generate_entities(vectors, ids, collection_name)
+
+
+class MixTask(User):
+    wait_time = between(0.001, 0.002)
+    %s
+        """ % (self.host, self.port, collection_name, dimension, connection_type, def_strs)
+            with open(task_file_script, "w+") as fd:
+                fd.write(code_str)
+            locust_cmd = "locust -f %s --headless --csv=%s -u %d -r %d -t %s" % (
+                task_file_script,
+                task_file,
+                clients_num,
+                hatch_rate,
+                during_time)
+            logger.info(locust_cmd)
+            try:
+                res = os.system(locust_cmd)
+            except Exception as e:
+                logger.error(str(e))
+                return
+
+            # . retrieve and collect test statistics
+            metric = None
+            with open(task_file_csv, newline='') as fd:
+                dr = csv.DictReader(fd)
+                for row in dr:
+                    if row["Name"] != "Aggregated":
+                        continue
+                    metric = row
+            logger.info(metric)
+
+        else:
+            raise Exception("Run type not defined")
+        logger.debug("All test finished")
diff --git a/tests/benchmark/locust_file.py b/tests/benchmark/locust_file.py
new file mode 100644
index 000000000..97f1f65d4
--- /dev/null
+++ b/tests/benchmark/locust_file.py
@@ -0,0 +1,30 @@
+
+import random
+from locust import HttpUser, task, between
+
+
# Target collection and the REST endpoint used for the HTTP search benchmark.
collection_name = "random_1m_2048_512_ip_sq8"
headers = {"Content-Type": "application/json"}
url = f"/collections/{collection_name}/vectors"

# Search shape: a single random 512-dim query vector, top-2 results.
top_k = 2
nq = 1
dim = 512
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]

# JSON payload sent with every PUT request.
data = {"search": {"topk": top_k, "vectors": vectors, "params": {"nprobe": 1}}}
+
class MyUser(HttpUser):
    """Locust HTTP user that repeatedly issues REST vector-search requests."""

    wait_time = between(0, 0.1)
    host = "http://192.168.1.112:19122"

    @task
    def search(self):
        """PUT one search payload and print the raw HTTP response."""
        resp = self.client.put(url=url, json=data, headers=headers, timeout=2)
        print(resp)
diff --git a/tests/benchmark/locust_flush_task.py b/tests/benchmark/locust_flush_task.py
new file mode 100644
index 000000000..32de83ae7
--- /dev/null
+++ b/tests/benchmark/locust_flush_task.py
@@ -0,0 +1,33 @@
+import random
+from locust import User, task, between
+from locust_task import MilvusTask
+from client import MilvusClient
+from milvus import DataType
+
# Benchmark configuration: one shared client connection.
connection_type = "single"
host = "192.168.1.6"
port = 19530
collection_name = "create_collection_CZkkwJgo"
dim = 128
nb = 50000

# Reset the server, recreate the collection, and pre-build one batch of
# nb auto-id entities that every scheduled flush task re-inserts.
m = MilvusClient(host=host, port=port, collection_name=collection_name)
m.clean_db()
m.create_collection(dim, data_type=DataType.FLOAT_VECTOR, auto_id=True, other_fields=None)
vectors = [[random.random() for _ in range(dim)] for _ in range(nb)]
entities = m.generate_entities(vectors)
+
+
class FlushTask(User):
    """Locust user that inserts the prepared batch and flushes it each cycle."""

    wait_time = between(0.001, 0.002)
    # NOTE(review): no connection_type kwarg is passed to MilvusTask(m=m);
    # confirm the proxy actually binds a client in this configuration.
    if connection_type == "single":
        client = MilvusTask(m=m)
    else:
        client = MilvusTask(host=host, port=port, collection_name=collection_name)

    @task(1)
    def flush(self):
        """Insert the module-level entities, then flush the collection."""
        self.client.insert(entities)
        self.client.flush(collection_name)
diff --git a/tests/benchmark/locust_get_entity_task.py b/tests/benchmark/locust_get_entity_task.py
new file mode 100644
index 000000000..28df3daa7
--- /dev/null
+++ b/tests/benchmark/locust_get_entity_task.py
@@ -0,0 +1,36 @@
+import logging
+import random
+from locust import User, task, between
+from locust_task import MilvusTask
+from client import MilvusClient
+from milvus import DataType
+
# Connection settings; the target collection is assumed to exist already
# (the create/clean calls from the sibling scripts are intentionally absent).
connection_type = "single"
host = "192.168.1.6"
port = 19530
collection_name = "sift_10m_100000_128_l2"
dim = 128
nb = 6000

m = MilvusClient(host=host, port=port, collection_name=collection_name)
# Candidate id pool [0, nb); tasks currently fetch only id 0.
ids = list(range(nb))
+
class GetEntityTask(User):
    """Locust user that fetches a single fixed entity by id."""

    wait_time = between(0.001, 0.002)
    # NOTE(review): no connection_type kwarg is passed to MilvusTask(m=m);
    # confirm the proxy actually binds a client in this configuration.
    if connection_type == "single":
        client = MilvusTask(m=m)
    else:
        client = MilvusTask(host=host, port=port, collection_name=collection_name)

    @task(1)
    def get_entity_by_id(self):
        """Fetch entity id 0 on every scheduled call."""
        self.client.get_entities([0])
diff --git a/tests/benchmark/locust_insert_task.py b/tests/benchmark/locust_insert_task.py
new file mode 100644
index 000000000..fc2951a98
--- /dev/null
+++ b/tests/benchmark/locust_insert_task.py
@@ -0,0 +1,33 @@
+import random
+from locust import User, task, between
+from locust_task import MilvusTask
+from client import MilvusClient
+from milvus import DataType
+
# Connection and workload configuration for the insert benchmark.
connection_type = "single"
host = "192.168.1.6"
port = 19530
collection_name = "create_collection_hello"
dim = 128
nb = 50000

# Create the target collection and pre-build one batch of nb auto-id
# entities that every scheduled insert task re-sends.
m = MilvusClient(host=host, port=port, collection_name=collection_name)
m.create_collection(dim, data_type=DataType.FLOAT_VECTOR, auto_id=True, other_fields=None)
vectors = [[random.random() for _ in range(dim)] for _ in range(nb)]
entities = m.generate_entities(vectors)
+
+
# NOTE(review): the class name looks like a copy/paste leftover from
# locust_flush_task.py — this user only inserts. Kept as-is to avoid an
# interface change; consider renaming to InsertTask.
class FlushTask(User):
    """Locust user that repeatedly inserts the prepared entity batch."""

    wait_time = between(0.001, 0.002)
    if connection_type == "single":
        client = MilvusTask(m=m)
    else:
        client = MilvusTask(host=host, port=port, collection_name=collection_name)

    @task(1)
    def insert(self):
        """Re-insert the module-level entities on every scheduled call."""
        self.client.insert(entities)
diff --git a/tests/benchmark/locust_search_task.py b/tests/benchmark/locust_search_task.py
new file mode 100644
index 000000000..f58cb0d6e
--- /dev/null
+++ b/tests/benchmark/locust_search_task.py
@@ -0,0 +1,46 @@
+import random
+from client import MilvusClient
+from locust_task import MilvusTask
+from locust import User, task, between
+
# Connection settings for the search benchmark; the collection must already
# be populated (no create/insert happens in this script).
connection_type = "single"
host = "172.16.50.9"
port = 19530
collection_name = "sift_1m_2000000_128_l2_2"
m = MilvusClient(host=host, port=port, collection_name=collection_name)
# Module-level query template.
# NOTE(review): QueryTask.query() below rebuilds all of these values locally,
# so this template is effectively unused — confirm before relying on it.
dim = 128
top_k = 5
nq = 1
search_params = {"nprobe": 16}
X = [[random.random() for _ in range(dim)] for _ in range(nq)]
vector_query = {
    "vector": {
        "float_vector": {
            "topk": top_k,
            "query": X,
            "params": search_params,
            "metric_type": "L2",
        }
    }
}
+# m.clean_db()
+
+
class QueryTask(User):
    """Locust user that issues one top-10 vector search per scheduled call."""

    wait_time = between(0.001, 0.002)

    def preload(self):
        # Not decorated with @task, so locust never schedules it directly.
        self.client.preload_collection()

    @task(10)
    def query(self):
        # NOTE(review): a fresh MilvusTask is built on every call — confirm
        # this is intended rather than creating one client at class level.
        # Also, MilvusTask.__init__ reads kwargs only, so the positional
        # arguments in the "multi" branch are silently ignored — verify.
        if connection_type == "single":
            client = MilvusTask(m=m, connection_type=connection_type)
        elif connection_type == "multi":
            client = MilvusTask(host, port, collection_name, connection_type=connection_type)
        search_spec = {
            "topk": 10,
            "query": [[random.random() for _ in range(dim)]],
            "metric_type": "L2",
            "params": {"nprobe": 16},
        }
        client.query({"vector": {"float_vector": search_spec}},
                     filter_query=None, collection_name=collection_name)
\ No newline at end of file
diff --git a/tests/benchmark/locust_task.py b/tests/benchmark/locust_task.py
new file mode 100644
index 000000000..0330b3664
--- /dev/null
+++ b/tests/benchmark/locust_task.py
@@ -0,0 +1,37 @@
+import time
+import pdb
+import random
+import logging
+from locust import User, events
+from client import MilvusClient
+
+
class MilvusTask(object):
    """Proxy around MilvusClient that reports every call to locust's event hooks.

    Attribute access is forwarded to the wrapped client; the returned callable
    times the call and fires a locust ``request_success`` / ``request_failure``
    event with the elapsed time in milliseconds. Failures are recorded, not
    re-raised, so locust can aggregate them.
    """

    def __init__(self, *args, **kwargs):
        self.request_type = "grpc"
        connection_type = kwargs.get("connection_type")
        # Fix: several call sites (e.g. locust_flush_task.py) construct
        # MilvusTask(m=...) without a connection_type, which previously left
        # self.m unset and caused infinite recursion inside __getattr__.
        # Infer the mode from the supplied keyword arguments instead.
        if connection_type is None:
            connection_type = "single" if "m" in kwargs else "multi"
        if connection_type == "single":
            # Reuse an existing, already-connected client.
            self.m = kwargs.get("m")
        elif connection_type == "multi":
            # Open a dedicated connection for this proxy instance.
            host = kwargs.get("host")
            port = kwargs.get("port")
            collection_name = kwargs.get("collection_name")
            self.m = MilvusClient(host=host, port=port, collection_name=collection_name)

    def __getattr__(self, name):
        # Only invoked for attributes not found on the proxy itself;
        # forwards to the wrapped client and instruments the call.
        func = getattr(self.m, name)

        def wrapper(*args, **kwargs):
            start_time = time.time()
            try:
                result = func(*args, **kwargs)
                total_time = int((time.time() - start_time) * 1000)
                events.request_success.fire(request_type=self.request_type, name=name, response_time=total_time,
                                            response_length=0)
                # Fix: propagate the client's return value (it was dropped,
                # so callers like get_entities always received None).
                return result
            except Exception as e:
                total_time = int((time.time() - start_time) * 1000)
                events.request_failure.fire(request_type=self.request_type, name=name, response_time=total_time,
                                            exception=e, response_length=0)

        return wrapper
diff --git a/tests/benchmark/locust_tasks.py b/tests/benchmark/locust_tasks.py
new file mode 100644
index 000000000..277b9ebee
--- /dev/null
+++ b/tests/benchmark/locust_tasks.py
@@ -0,0 +1,45 @@
+import random
+import time
+import logging
+from locust import TaskSet, task
+
# Dimensionality of the benchmark vectors and one pre-built random query
# vector (used by the undecorated insert() helper below).
dim = 128
X = [[random.random() for _ in range(dim)]]
+
+
class Tasks(TaskSet):
    """Milvus operations scheduled by locust; `self.client` is attached by the
    enclosing User class (see locust_user.py)."""

    @task
    def query(self):
        """Run one top-10 search with a fresh random vector."""
        search_spec = {
            "topk": 10,
            "query": [[random.random() for _ in range(dim)]],
            "metric_type": "L2",
            "params": {"nprobe": 16},
        }
        self.client.query({"vector": {"float_vector": search_spec}},
                          filter_query=None, log=False)

    @task
    def flush(self):
        self.client.flush(log=False)

    @task
    def get(self):
        self.client.get()

    @task
    def delete(self):
        self.client.delete([random.randint(1, 1000000)], log=False)

    def insert(self):
        # NOTE: not decorated with @task, so it is never scheduled directly.
        ids = [random.randint(1, 10000000)]
        entities = self.client.generate_entities(X, ids)
        self.client.insert(entities, ids, log=False)

    @task
    def insert_rand(self):
        self.client.insert_rand(log=False)
diff --git a/tests/benchmark/locust_test.py b/tests/benchmark/locust_test.py
new file mode 100644
index 000000000..95192a70e
--- /dev/null
+++ b/tests/benchmark/locust_test.py
@@ -0,0 +1,18 @@
+from locust_user import locust_executor
+from client import MilvusClient
+from milvus import DataType
+
+
if __name__ == "__main__":
    # Single-connection benchmark against a fixed test collection.
    connection_type = "single"
    host = "192.168.1.239"
    # host = "172.16.50.15"
    port = 19530
    collection_name = "sift_1m_2000000_128_l2_2"
    # Task weights, concurrent users, user spawn rate, total run time (seconds).
    run_params = {"tasks": {"insert_rand": 5, "query": 10, "flush": 2}, "clients_num": 10, "spawn_rate": 2, "during_time": 3600}
    dim = 128
    m = MilvusClient(host=host, port=port, collection_name=collection_name)
    m.clean_db()
    m.create_collection(dim, data_type=DataType.FLOAT_VECTOR, auto_id=False, other_fields=None)

    # Bug fix: connection_type was defined above but never forwarded, so the
    # executor always fell back to its own default.
    locust_executor(host, port, collection_name, connection_type=connection_type, run_params=run_params)
diff --git a/tests/benchmark/locust_user.py b/tests/benchmark/locust_user.py
new file mode 100644
index 000000000..5053c4a59
--- /dev/null
+++ b/tests/benchmark/locust_user.py
@@ -0,0 +1,70 @@
+import logging
+import random
+import pdb
+import gevent
+import gevent.monkey
+gevent.monkey.patch_all()
+
+from locust import User, between, events, stats
+from locust.env import Environment
+import locust.stats
+from locust.stats import stats_printer, print_stats
+
+locust.stats.CONSOLE_STATS_INTERVAL_SEC = 30
+from locust.log import setup_logging, greenlet_exception_logger
+
+from locust_tasks import Tasks
+from client import MilvusClient
+from locust_task import MilvusTask
+
+logger = logging.getLogger("__locust__")
+
class MyUser(User):
    # Locust user shell; `tasks` and `client` are attached dynamically in
    # locust_executor before the runner starts.
    # task_set = None
    wait_time = between(0.001, 0.002)
+
+
def locust_executor(host, port, collection_name, connection_type="single", run_params=None):
    """Run a locust load test against the given Milvus collection.

    run_params keys: "tasks" (op name -> weight), "clients_num",
    "spawn_rate", "during_time" (seconds).

    Returns a summary dict with rps, fail_ratio, max/min response times.
    """
    m = MilvusClient(host=host, port=port, collection_name=collection_name)
    # Map configured op names onto Tasks methods; getattr replaces the
    # previous eval("Tasks."+op), which executed arbitrary config strings.
    MyUser.tasks = {getattr(Tasks, op): weight for op, weight in run_params["tasks"].items()}
    logger.error(MyUser.tasks)
    # MyUser.tasks = {Tasks.query: 1, Tasks.flush: 1}
    MyUser.client = MilvusTask(host=host, port=port, collection_name=collection_name,
                               connection_type=connection_type, m=m)
    env = Environment(events=events, user_classes=[MyUser])
    runner = env.create_local_runner()
    # setup logging
    setup_logging("WARNING", "/dev/null")
    greenlet_exception_logger(logger=logger)
    gevent.spawn(stats_printer(env.stats))
    # env.create_web_ui("127.0.0.1", 8089)
    # events.init.fire(environment=env, runner=runner)
    clients_num = run_params["clients_num"]
    spawn_rate = run_params["spawn_rate"]
    during_time = run_params["during_time"]
    runner.start(clients_num, spawn_rate=spawn_rate)
    # Stop the run after the configured duration.
    gevent.spawn_later(during_time, lambda: runner.quit())
    runner.greenlet.join()
    print_stats(env.stats)
    result = {
        "rps": round(env.stats.total.current_rps, 1),
        "fail_ratio": env.stats.total.fail_ratio,
        "max_response_time": round(env.stats.total.max_response_time, 1),
        # Bug fix: this key previously reported avg_response_time.
        "min_response_time": round(env.stats.total.min_response_time, 1),
    }
    runner.stop()
    return result
+
+
if __name__ == '__main__':
    # Ad-hoc smoke run: 1 client, query/flush evenly weighted, 3 seconds.
    connection_type = "single"
    host = "192.168.1.112"
    port = 19530
    collection_name = "sift_1m_2000000_128_l2_2"
    run_params = {"tasks": {"query": 1, "flush": 1}, "clients_num": 1, "spawn_rate": 1, "during_time": 3}
    # NOTE(review): connection_type is never forwarded to locust_executor — confirm intended.
    locust_executor(host, port, collection_name, run_params=run_params)
diff --git a/tests/benchmark/main.py b/tests/benchmark/main.py
new file mode 100644
index 000000000..1b2d8c9d1
--- /dev/null
+++ b/tests/benchmark/main.py
@@ -0,0 +1,199 @@
+import os
+import sys
+import time
+from datetime import datetime
+import pdb
+import argparse
+import logging
+import traceback
+from multiprocessing import Process
+from queue import Queue
+from logging import handlers
+from yaml import full_load, dump
+from local_runner import LocalRunner
+from docker_runner import DockerRunner
+import parser
+
# Benchmark-wide constants. LOG_PATH must exist on the host or the
# FileHandler below raises at import time.
DEFAULT_IMAGE = "milvusdb/milvus:latest"
LOG_FOLDER = "logs"
NAMESPACE = "milvus"
LOG_PATH = "/test/milvus/benchmark/logs/"
BRANCH = "0.11.1"

# File handler gets DEBUG and the console only INFO, both with timestamps.
logger = logging.getLogger('milvus_benchmark')
logger.setLevel(logging.INFO)
# create file handler which logs even debug messages
fh = logging.FileHandler(LOG_PATH+'benchmark-{}-{:%Y-%m-%d}.log'.format(BRANCH, datetime.now()))
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
+
def positive_int(s):
    """argparse ``type=`` callable: parse *s* as a strictly positive integer.

    Raises argparse.ArgumentTypeError when *s* is not an integer or is < 1.
    """
    try:
        i = int(s)
    except ValueError:
        i = None
    # i is None on parse failure; 0 and negatives are rejected as well.
    if i is None or i < 1:
        raise argparse.ArgumentTypeError("%r is not a positive integer" % s)
    return i
+
+
def get_image_tag(image_version, image_type):
    """Compose the docker image tag used by the helm deployments."""
    return "{}-{}-centos7-release".format(image_version, image_type)
+
+
def queue_worker(queue):
    """Drain *queue* of benchmark jobs and run each against a K8s deployment.

    Each item carries the suite file plus deployment parameters; a fresh
    K8sRunner is created (and cleaned up) per collection.
    """
    from k8s_runner import K8sRunner
    server_host = None  # bug fix: previously unbound when the queue started empty
    while not queue.empty():
        q = queue.get()
        suite = q["suite"]
        server_host = q["server_host"]
        deploy_mode = q["deploy_mode"]
        image_type = q["image_type"]
        image_tag = q["image_tag"]

        with open(suite) as f:
            suite_dict = full_load(f)
        logger.debug(suite_dict)

        run_type, run_params = parser.operations_parser(suite_dict)
        collections = run_params["collections"]
        for collection in collections:
            # Per-collection overrides for the milvus/server configuration.
            milvus_config = collection["milvus"] if "milvus" in collection else None
            server_config = collection["server"] if "server" in collection else None
            logger.debug(milvus_config)
            logger.debug(server_config)
            runner = K8sRunner()
            if runner.init_env(milvus_config, server_config, server_host, deploy_mode, image_type, image_tag):
                logger.debug("Start run tests")
                try:
                    runner.run(run_type, collection)
                except Exception as e:
                    logger.error(str(e))
                    logger.error(traceback.format_exc())
                finally:
                    # Give the cluster time to settle before tearing down.
                    time.sleep(60)
                    runner.clean_up()
            else:
                logger.error("Runner init failed")
    if server_host:
        logger.debug("All task finished in queue: %s" % server_host)
+
+
def main():
    """CLI entry point: helm/K8s scheduler mode (--schedule-conf) or local mode (--local)."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # helm mode with scheduler
    arg_parser.add_argument(
        "--image-version",
        default="",
        help="image version")
    arg_parser.add_argument(
        "--schedule-conf",
        metavar='FILE',
        default='',
        help="load test schedule from FILE")
    arg_parser.add_argument(
        "--deploy-mode",
        default='',
        help="single node or multi nodes")

    # local mode
    arg_parser.add_argument(
        '--local',
        action='store_true',
        help='use local milvus server')
    arg_parser.add_argument(
        '--host',
        help='server host ip param for local mode',
        default='127.0.0.1')
    arg_parser.add_argument(
        '--port',
        help='server port param for local mode',
        default='19530')
    arg_parser.add_argument(
        '--suite',
        metavar='FILE',
        help='load test suite from FILE',
        default='')

    args = arg_parser.parse_args()

    if args.schedule_conf:
        if args.local:
            raise Exception("Helm mode with scheduler and other mode are incompatible")
        if not args.image_version:
            raise Exception("Image version not given")
        image_version = args.image_version
        deploy_mode = args.deploy_mode
        with open(args.schedule_conf) as f:
            schedule_config = full_load(f)
            f.close()
        queues = []
        # server_names = set()
        server_names = []
        # Build one queue of jobs per target server; each server's queue is
        # drained by its own worker process below.
        for item in schedule_config:
            server_host = item["server"] if "server" in item else ""
            suite_params = item["suite_params"]
            server_names.append(server_host)
            q = Queue()
            for suite_param in suite_params:
                suite = "suites/"+suite_param["suite"]
                image_type = suite_param["image_type"]
                image_tag = get_image_tag(image_version, image_type)    
                q.put({
                    "suite": suite,
                    "server_host": server_host,
                    "deploy_mode": deploy_mode,
                    "image_tag": image_tag,
                    "image_type": image_type
                })
            queues.append(q)
        logger.error(queues)
        thread_num = len(server_names)
        processes = []

        # NOTE(review): queue.Queue is per-process; sharing work with
        # multiprocessing.Process usually needs multiprocessing.Queue — confirm
        # this relies on fork-time copies intentionally.
        for i in range(thread_num):
            x = Process(target=queue_worker, args=(queues[i], ))
            processes.append(x)
            x.start()
            # Stagger worker start-up.
            time.sleep(10)
        for x in processes:
            x.join()

        # queue_worker(queues[0])

    elif args.local:
        # for local mode
        host = args.host
        port = args.port
        suite = args.suite
        with open(suite) as f:
            suite_dict = full_load(f)
            f.close()
        logger.debug(suite_dict)
        run_type, run_params = parser.operations_parser(suite_dict)
        collections = run_params["collections"]
        if len(collections) > 1:
            raise Exception("Multi collections not supported in Local Mode")
        collection = collections[0]
        runner = LocalRunner(host, port)
        logger.info("Start run local mode test, test type: %s" % run_type)
        runner.run(run_type, collection)
+
+
# Script entry point.
if __name__ == "__main__":
    main()
diff --git a/tests/benchmark/mix_task.py b/tests/benchmark/mix_task.py
new file mode 100644
index 000000000..912989f85
--- /dev/null
+++ b/tests/benchmark/mix_task.py
@@ -0,0 +1,42 @@
+import random
+from locust import User, task, between
+from locust_task import MilvusTask
+from client import MilvusClient
+
# Benchmark target configuration; the MilvusClient below is created at import
# time, so loading this module requires a reachable server.
connection_type = "single"
host = "192.168.1.29"
port = 19530
collection_name = "sift_128_euclidean"
dim = 128
m = MilvusClient(host=host, port=port, collection_name=collection_name)
+
+
class MixTask(User):
    """Mixed workload: search-heavy with occasional inserts and flushes."""
    wait_time = between(0.001, 0.002)
    print("in query task")
    # "single" reuses the module-level client; otherwise open a new connection.
    if connection_type == "single":
        client = MilvusTask(m=m)
    else:
        client = MilvusTask(host=host, port=port, collection_name=collection_name)

    @task(30)
    def query(self):
        search_vectors = [[random.random() for _ in range(dim)]]
        self.client.query(search_vectors, 10, {"nprobe": 16})

    @task(10)
    def insert(self):
        new_id = random.randint(10000000, 10000000000)
        row = [[random.random() for _ in range(dim)]]
        self.client.insert(row, ids=[new_id])

    @task(1)
    def flush(self):
        self.client.flush()

    # @task(5)
    # def delete(self):
    #     self.client.delete([random.randint(1, 1000000)])
+
diff --git a/tests/benchmark/operation.py b/tests/benchmark/operation.py
new file mode 100644
index 000000000..348fa47f4
--- /dev/null
+++ b/tests/benchmark/operation.py
@@ -0,0 +1,10 @@
+from __future__ import absolute_import
+import pdb
+import time
+
class Base(object):
    """Placeholder base class for benchmark operations."""
    pass
+
+
class Insert(Base):
    """Placeholder insert operation; no behavior implemented yet."""
    pass
\ No newline at end of file
diff --git a/tests/benchmark/parser.py b/tests/benchmark/parser.py
new file mode 100644
index 000000000..428f1db0d
--- /dev/null
+++ b/tests/benchmark/parser.py
@@ -0,0 +1,85 @@
+import pdb
+import logging
+
+logger = logging.getLogger("milvus_benchmark.parser")
+
+
def operations_parser(operations):
    """Return the first (run_type, run_params) pair from the suite mapping.

    Raises Exception when *operations* is empty or falsy.
    """
    if not operations:
        raise Exception("No operations in suite defined")
    run_type, run_params = next(iter(operations.items()))
    logger.debug(run_type)
    return (run_type, run_params)
+
+
def collection_parser(collection_name):
    """Split ``<type>_<size><m|b>_<dim>_<metric>...`` into typed fields.

    Returns (data_type, collection_size, dimension, metric_type); the size
    stays a string when its unit suffix is neither 'm' nor 'b'.
    """
    data_type, size_token, dim_token, metric_token = collection_name.split("_")[:4]
    unit = size_token[-1]
    count = size_token[0:-1]
    multiplier = {"m": 1000000, "b": 1000000000}
    if unit in multiplier:
        collection_size = int(count) * multiplier[unit]
    else:
        collection_size = count
    return (data_type, collection_size, int(dim_token), str(metric_token))
+
+
def parse_ann_collection_name(collection_name):
    """Parse an ann-benchmark collection name like ``sift_128_euclidean``.

    Returns (data_type, dimension, metric_type).

    Bug fix: an unknown metric previously fell through every branch and
    raised NameError on the unbound metric_type; now raises explicitly.
    """
    data_type, dim_token, metric = collection_name.split("_")[:3]
    metric_map = {
        "euclidean": "l2",
        "angular": "ip",
        "jaccard": "jaccard",
        "hamming": "hamming",
    }
    if metric not in metric_map:
        raise Exception("Unsupported metric type: %s" % metric)
    return (data_type, int(dim_token), metric_map[metric])
+
+
def search_params_parser(param):
    """Normalize search params into ``(top_ks, nqs, nprobes)`` lists.

    Missing keys fall back to defaults ([10], [10], [1]); scalar ints are
    wrapped in a list; non-int/non-list values are warned about and passed
    through unchanged (original behavior).
    """
    def _as_list(value, label):
        # Shared normalization previously triplicated for each parameter.
        if isinstance(value, int):
            return [value]
        if isinstance(value, list):
            return list(value)
        logger.warning("Invalid format %s: %s" % (label, str(value)))
        return value

    top_ks = _as_list(param.get("top_ks", [10]), "top-ks")
    nqs = _as_list(param.get("nqs", [10]), "nqs")
    nprobes = _as_list(param.get("nprobes", [1]), "nprobes")
    return top_ks, nqs, nprobes
diff --git a/tests/benchmark/requirements.txt b/tests/benchmark/requirements.txt
new file mode 100644
index 000000000..96d101ed5
--- /dev/null
+++ b/tests/benchmark/requirements.txt
@@ -0,0 +1,12 @@
+pymilvus-test>=0.5.0,<0.6.0
+scipy>=1.3.1
+scikit-learn>=0.19.1
+h5py>=2.7.1
+# influxdb==5.2.2
+pyyaml>=5.1
+tableprint==0.8.0
+ansicolors==1.1.8
+kubernetes==10.0.1
+# rq==1.2.0
+locust>=1.3.2
+pymongo==3.10.0
diff --git a/tests/benchmark/results/__init__.py b/tests/benchmark/results/__init__.py
new file mode 100644
index 000000000..81b55da1c
--- /dev/null
+++ b/tests/benchmark/results/__init__.py
@@ -0,0 +1,11 @@
+
class Reporter(object):
    """Base class for result reporters; report() is a no-op hook."""
    def __init__(self):
        pass

    def report(self, result):
        # Subclasses override to persist or publish the result.
        pass
+    
+
class BaseResult(object):
    """Placeholder for benchmark result types."""
    pass
\ No newline at end of file
diff --git a/tests/benchmark/results/reporter.py b/tests/benchmark/results/reporter.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/benchmark/runner.py b/tests/benchmark/runner.py
new file mode 100644
index 000000000..0a93cf9fa
--- /dev/null
+++ b/tests/benchmark/runner.py
@@ -0,0 +1,369 @@
+import os
+import threading
+import logging
+import pdb
+import time
+import random
+import grpc
+from multiprocessing import Process
+from itertools import product
+import numpy as np
+import sklearn.preprocessing
+from milvus import DataType
+from client import MilvusClient
+import utils
+import parser
+
+logger = logging.getLogger("milvus_benchmark.runner")
+
# Rows per raw .npy source file, by dataset family.
VECTORS_PER_FILE = 1000000
SIFT_VECTORS_PER_FILE = 100000
BINARY_VECTORS_PER_FILE = 2000000

# Hard upper bound on nq when loading query vectors.
MAX_NQ = 10001
FILE_PREFIX = "binary_"

# FOLDER_NAME = 'ann_1000m/source_data'
# Raw-dataset directories on the benchmark host.
SRC_BINARY_DATA_DIR = '/test/milvus/raw_data/random/'
SIFT_SRC_DATA_DIR = '/test/milvus/raw_data/sift1b/'
DEEP_SRC_DATA_DIR = '/test/milvus/raw_data/deep1b/'
BINARY_SRC_DATA_DIR = '/test/milvus/raw_data/binary/'
SIFT_SRC_GROUNDTRUTH_DATA_DIR = SIFT_SRC_DATA_DIR + 'gnd'

WARM_TOP_K = 1
WARM_NQ = 1
DEFAULT_DIM = 512


# sift1b ground-truth index file per collection size.
GROUNDTRUTH_MAP = {
    "1000000": "idx_1M.ivecs",
    "2000000": "idx_2M.ivecs",
    "5000000": "idx_5M.ivecs",
    "10000000": "idx_10M.ivecs",
    "20000000": "idx_20M.ivecs",
    "50000000": "idx_50M.ivecs",
    "100000000": "idx_100M.ivecs",
    "200000000": "idx_200M.ivecs",
    "500000000": "idx_500M.ivecs",
    "1000000000": "idx_1000M.ivecs",
}
+
+
def gen_file_name(idx, dimension, data_type):
    """Build the path of the idx-th raw ``.npy`` source file for *data_type*."""
    base_name = "%s%sd_%05d.npy" % (FILE_PREFIX, dimension, idx)
    data_dirs = {
        "random": SRC_BINARY_DATA_DIR,
        "sift": SIFT_SRC_DATA_DIR,
        "deep": DEEP_SRC_DATA_DIR,
        "binary": BINARY_SRC_DATA_DIR,
    }
    # Unknown data types fall back to a bare file name (original behavior).
    return data_dirs.get(data_type, "") + base_name
+
+
def get_vectors_from_binary(nq, dimension, data_type):
    """Load the first *nq* query vectors for *data_type* from the raw .npy files."""
    # use the first file, nq should be less than VECTORS_PER_FILE
    if nq > MAX_NQ:
        raise Exception("Over size nq")
    if data_type == "random":
        file_name = SRC_BINARY_DATA_DIR+'query_%d.npy' % dimension
    elif data_type == "sift":
        file_name = SIFT_SRC_DATA_DIR+'query.npy'
    elif data_type == "deep":
        file_name = DEEP_SRC_DATA_DIR+'query.npy'
    elif data_type == "binary":
        file_name = BINARY_SRC_DATA_DIR+'query.npy'
    # NOTE(review): an unrecognized data_type leaves file_name unbound and
    # raises NameError on the next line — confirm whether an explicit error
    # is wanted instead.
    data = np.load(file_name)
    vectors = data[0:nq].tolist()
    return vectors
+
+
class Runner(object):
    """Shared benchmark helpers: data loading, insert/query drivers, recall."""
    def __init__(self):
        pass
+
+    def gen_executors(self, operations):
+        l = []
+        for name, operation in operations.items():
+            weight = operation["weight"] if "weight" in operation else 1
+            l.extend([name] * weight)
+        random.shuffle(l)
+        return l
+        
+    def get_vector_type(self, data_type):
+        vector_type = ''
+        if data_type in ["random", "sift", "deep", "glove"]:
+            vector_type = DataType.FLOAT_VECTOR
+        elif data_type in ["binary"]:
+            vector_type = DataType.BINARY_VECTOR
+        else:
+            raise Exception("Data type: %s not defined" % data_type)
+        return vector_type
+
+    def get_vector_type_from_metric(self, metric_type):
+        vector_type = ''
+        if metric_type in ["hamming", "jaccard"]:
+            vector_type = DataType.BINARY_VECTOR
+        else:
+            vector_type = DataType.FLOAT_VECTOR
+        return vector_type
+
+    def normalize(self, metric_type, X):
+        if metric_type == "ip":
+            logger.info("Set normalize for metric_type: %s" % metric_type)
+            X = sklearn.preprocessing.normalize(X, axis=1, norm='l2')
+            X = X.astype(np.float32)
+        elif metric_type == "l2":
+            X = X.astype(np.float32)
+        elif metric_type in ["jaccard", "hamming", "sub", "super"]:
+            tmp = []
+            for item in X:
+                new_vector = bytes(np.packbits(item, axis=-1).tolist())
+                tmp.append(new_vector)
+            X = tmp
+        return X
+
+    def generate_combinations(self, args):
+        if isinstance(args, list):
+            args = [el if isinstance(el, list) else [el] for el in args]
+            return [list(x) for x in product(*args)]
+        elif isinstance(args, dict):
+            flat = []
+            for k, v in args.items():
+                if isinstance(v, list):
+                    flat.append([(k, el) for el in v])
+                else:
+                    flat.append([(k, v)])
+            return [dict(x) for x in product(*flat)]
+        else:
+            raise TypeError("No args handling exists for %s" % type(args).__name__)
+
    def do_insert(self, milvus, collection_name, data_type, dimension, size, ni):
        '''
        @params:
            milvus: server connect instance
            dimension: collection dimension
            # index_file_size: size trigger file merge
            size: row count of vectors to be insert
            ni: row count of vectors to be insert each time
            # store_id: if store the ids returned by call add_vectors or not
        @return:
            total_time: total time for all insert operation
            qps: vectors added per second
            ni_time: average insert operation time
        '''
        bi_res = {}
        total_time = 0.0
        qps = 0.0
        ni_time = 0.0
        # Rows per source file depend on the dataset family (and, for
        # "random", on the dimension).
        if data_type == "random":
            if dimension == 512:
                vectors_per_file = VECTORS_PER_FILE
            elif dimension == 4096:
                vectors_per_file = 100000
            elif dimension == 16384:
                vectors_per_file = 10000
        elif data_type == "sift":
            vectors_per_file = SIFT_VECTORS_PER_FILE
        elif data_type in ["binary"]:
            vectors_per_file = BINARY_VECTORS_PER_FILE
        else:
            raise Exception("data_type: %s not supported" % data_type)
        # size must be an exact multiple of both the file size and the batch size.
        if size % vectors_per_file or size % ni:
            raise Exception("Not invalid collection size or ni")
        i = 0
        while i < (size // vectors_per_file):
            vectors = []
            if vectors_per_file >= ni:
                # Batch fits inside one file: slice each file into ni-sized chunks.
                file_name = gen_file_name(i, dimension, data_type)
                # logger.info("Load npy file: %s start" % file_name)
                data = np.load(file_name)
                # logger.info("Load npy file: %s end" % file_name)
                for j in range(vectors_per_file // ni):
                    vectors = data[j*ni:(j+1)*ni].tolist()
                    if vectors:
                        # start insert vectors
                        start_id = i * vectors_per_file + j * ni
                        end_id = start_id + len(vectors)
                        logger.debug("Start id: %s, end id: %s" % (start_id, end_id))
                        ids = [k for k in range(start_id, end_id)]
                        entities = milvus.generate_entities(vectors, ids)
                        ni_start_time = time.time()
                        try:
                            res_ids = milvus.insert(entities, ids=ids)
                        except grpc.RpcError as e:
                            if e.code() == grpc.StatusCode.UNAVAILABLE:
                                logger.debug("Retry insert")
                                def retry():
                                    res_ids = milvus.insert(entities, ids=ids)

                                t0 = threading.Thread(target=retry)
                                t0.start()
                                t0.join()
                                logger.debug("Retry successfully")
                            # NOTE(review): re-raises even after a successful
                            # retry, and retry()'s res_ids never escapes the
                            # closure — confirm the intended recovery behavior.
                            raise e
                        assert ids == res_ids
                        # milvus.flush()
                        logger.debug(milvus.count())
                        ni_end_time = time.time()
                        total_time = total_time + ni_end_time - ni_start_time
                i += 1
            else:
                # Batch spans several files: concatenate `loops` files into one batch.
                vectors.clear()
                loops = ni // vectors_per_file
                for j in range(loops):
                    file_name = gen_file_name(loops*i+j, dimension, data_type)
                    data = np.load(file_name)
                    vectors.extend(data.tolist())
                if vectors:
                    start_id = i * vectors_per_file
                    end_id = start_id + len(vectors)
                    logger.info("Start id: %s, end id: %s" % (start_id, end_id))
                    ids = [k for k in range(start_id, end_id)]
                    entities = milvus.generate_entities(vectors, ids)
                    ni_start_time = time.time()
                    try:
                        res_ids = milvus.insert(entities, ids=ids)
                    except grpc.RpcError as e:
                        if e.code() == grpc.StatusCode.UNAVAILABLE:
                            logger.debug("Retry insert")
                            def retry():
                                res_ids = milvus.insert(entities, ids=ids)

                            t0 = threading.Thread(target=retry)
                            t0.start()
                            t0.join()
                            logger.debug("Retry successfully")
                        # NOTE(review): same retry-then-re-raise pattern as above.
                        raise e

                    assert ids == res_ids
                    # milvus.flush()
                    logger.debug(milvus.count())
                    ni_end_time = time.time()
                    total_time = total_time + ni_end_time - ni_start_time
                i += loops
        qps = round(size / total_time, 2)
        ni_time = round(total_time / (size / ni), 2)
        bi_res["total_time"] = round(total_time, 2)
        bi_res["qps"] = qps
        bi_res["ni_time"] = ni_time
        return bi_res
+
+    def do_query(self, milvus, collection_name, vec_field_name, top_ks, nqs, run_count=1, search_param=None, filter_query=None):
+        bi_res = []
+        (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
+        base_query_vectors = get_vectors_from_binary(MAX_NQ, dimension, data_type)
+        for nq in nqs:
+            tmp_res = []
+            query_vectors = base_query_vectors[0:nq]
+            for top_k in top_ks:
+                avg_query_time = 0.0
+                min_query_time = 0.0
+                logger.info("Start query, query params: top-k: {}, nq: {}, actually length of vectors: {}".format(top_k, nq, len(query_vectors)))
+                for i in range(run_count):
+                    logger.debug("Start run query, run %d of %s" % (i+1, run_count))
+                    start_time = time.time()
+                    vector_query = {"vector": {vec_field_name: {
+                        "topk": top_k, 
+                        "query": query_vectors, 
+                        "metric_type": utils.metric_type_trans(metric_type), 
+                        "params": search_param}
+                    }}
+                    query_res = milvus.query(vector_query, filter_query=filter_query)
+                    interval_time = time.time() - start_time
+                    if (i == 0) or (min_query_time > interval_time):
+                        min_query_time = interval_time
+                logger.info("Min query time: %.2f" % min_query_time)
+                tmp_res.append(round(min_query_time, 2))
+            bi_res.append(tmp_res)
+        return bi_res
+
+    def do_query_qps(self, milvus, query_vectors, top_k, search_param):
+        start_time = time.time()
+        result = milvus.query(query_vectors, top_k, search_param) 
+        end_time = time.time()
+        return end_time - start_time
+
+    def do_query_ids(self, milvus, collection_name, vec_field_name, top_k, nq, search_param=None, filter_query=None):
+        (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
+        base_query_vectors = get_vectors_from_binary(MAX_NQ, dimension, data_type)
+        query_vectors = base_query_vectors[0:nq]
+        logger.info("Start query, query params: top-k: {}, nq: {}, actually length of vectors: {}".format(top_k, nq, len(query_vectors)))
+        vector_query = {"vector": {vec_field_name: {
+            "topk": top_k, 
+            "query": query_vectors, 
+            "metric_type": utils.metric_type_trans(metric_type), 
+            "params": search_param}
+        }}
+        query_res = milvus.query(vector_query, filter_query=filter_query)
+        result_ids = milvus.get_ids(query_res)
+        return result_ids
+
+    def do_query_acc(self, milvus, collection_name, top_k, nq, id_store_name, search_param=None):
+        (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
+        base_query_vectors = get_vectors_from_binary(MAX_NQ, dimension, data_type)
+        vectors = base_query_vectors[0:nq]
+        logger.info("Start query, query params: top-k: {}, nq: {}, actually length of vectors: {}".format(top_k, nq, len(vectors)))
+        query_res = milvus.query(vectors, top_k, search_param=None)
+        # if file existed, cover it
+        if os.path.isfile(id_store_name):
+            os.remove(id_store_name)
+        with open(id_store_name, 'a+') as fd:
+            for nq_item in query_res:
+                for item in nq_item:
+                    fd.write(str(item.id)+'\t')
+                fd.write('\n')
+
+    # compute and print accuracy
+    def compute_accuracy(self, flat_file_name, index_file_name):
+        flat_id_list = []; index_id_list = []
+        logger.info("Loading flat id file: %s" % flat_file_name)
+        with open(flat_file_name, 'r') as flat_id_fd:
+            for line in flat_id_fd:
+                tmp_list = line.strip("\n").strip().split("\t")
+                flat_id_list.append(tmp_list)
+        logger.info("Loading index id file: %s" % index_file_name)
+        with open(index_file_name) as index_id_fd:
+            for line in index_id_fd:
+                tmp_list = line.strip("\n").strip().split("\t")
+                index_id_list.append(tmp_list)
+        if len(flat_id_list) != len(index_id_list):
+            raise Exception("Flat index result length: <flat: %s, index: %s> not match, Acc compute exiting ..." % (len(flat_id_list), len(index_id_list)))
+        # get the accuracy
+        return self.get_recall_value(flat_id_list, index_id_list)
+
+    def get_recall_value(self, true_ids, result_ids):
+        """
+        Use the intersection length
+        """
+        sum_radio = 0.0
+        for index, item in enumerate(result_ids):
+            # tmp = set(item).intersection(set(flat_id_list[index]))
+            tmp = set(true_ids[index]).intersection(set(item))
+            sum_radio = sum_radio + len(tmp) / len(item)
+            # logger.debug(sum_radio)
+        return round(sum_radio / len(result_ids), 3)
+
+    """
+    Implementation based on:
+        https://github.com/facebookresearch/faiss/blob/master/benchs/datasets.py
+    """
+    def get_groundtruth_ids(self, collection_size):
+        fname = GROUNDTRUTH_MAP[str(collection_size)]
+        fname = SIFT_SRC_GROUNDTRUTH_DATA_DIR + "/" + fname
+        a = np.fromfile(fname, dtype='int32')
+        d = a[0]
+        true_ids = a.reshape(-1, d + 1)[:, 1:].copy()
+        return true_ids
+
+    def get_fields(self, milvus, collection_name):
+        fields = []
+        info = milvus.get_info(collection_name)
+        for item in info["fields"]:
+            fields.append(item["name"])
+        return fields
+
+    # def get_filter_query(self, filter_query):
+        # for filter in filter_query:
diff --git a/tests/benchmark/runners/__init__.py b/tests/benchmark/runners/__init__.py
new file mode 100644
index 000000000..4b082462d
--- /dev/null
+++ b/tests/benchmark/runners/__init__.py
@@ -0,0 +1,11 @@
+
+
class BaseRunner(object):
    """Minimal runner interface: subclasses override set_up/tear_down."""
    def __init__(self):
        pass

    def set_up(self):
        pass

    def tear_down(self):
        pass
diff --git a/tests/benchmark/runners/locust_runner.py b/tests/benchmark/runners/locust_runner.py
new file mode 100644
index 000000000..afe2b1e04
--- /dev/null
+++ b/tests/benchmark/runners/locust_runner.py
@@ -0,0 +1,75 @@
+import time
+import random
+from locust import Locust, TaskSet, events, task, between
+from client import MilvusClient
+from . import BasicRunner
+
+
+dim = 128
+top_k = 10
+X = [[random.random() for i in range(dim)] for i in range(1)]
+search_param = {"nprobe": 16}
+
+
+class MilvusTask(object):
+    def __init__(self, type="single", args):
+        self.type = type
+        self.m = None
+        if type == "single":
+            self.m = MilvusClient(host=args["host"], port=args["port"], collection_name=args["collection_name"])
+        elif type == "multi":
+            self.m = MilvusClient(host=args["m"])
+
+    def query(self, *args, **kwargs):
+        name = "milvus_search"
+        request_type = "grpc"
+        start_time = time.time()
+        try:
+            # result = self.m.getattr(*args, **kwargs)
+            status, result = self.m.query(*args, **kwargs)
+        except Exception as e:
+            total_time = int((time.time() - start_time) * 1000)
+            events.request_failure.fire(request_type=request_type, name=name, response_time=total_time, exception=e, response_length=0)
+        else:
+            if not status.OK:
+                total_time = int((time.time() - start_time) * 1000)
+                events.request_failure.fire(request_type=request_type, name=name, response_time=total_time, exception=e, response_length=0)
+            else:
+                total_time = int((time.time() - start_time) * 1000)
+                events.request_success.fire(request_type=request_type, name=name, response_time=total_time, response_length=0)
+                # In this example, I've hardcoded response_length=0. If we would want the response length to be
+                # reported correctly in the statistics, we would probably need to hook in at a lower level
+
+
+class MilvusLocust(Locust):
+    def __init__(self, *args, **kwargs):
+        super(MilvusLocust, self).__init__(*args, **kwargs)
+        self.client = MilvusTask(self.host, self.port, self.collection_name)
+
+
+class Query(MilvusLocust):
+    # Connection attributes read by MilvusLocust.__init__ through ``self``.
+    host = "192.168.1.183"
+    port = 19530
+    collection_name = "sift_128_euclidean"
+    # m = MilvusClient(host=host, port=port, collection_name=collection_name)
+    # 1-2 ms think time between consecutive tasks of each simulated user.
+    wait_time = between(0.001, 0.002)
+
+    class task_set(TaskSet):
+        @task
+        def query(self):
+            # ``self.client`` delegates to the owning Locust's MilvusTask;
+            # X / top_k / search_param are the module-level workload constants.
+            self.client.query(X, top_k, search_param)
+
+
+class LocustRunner(BasicRunner):
+    """Only one client, not support M/S mode"""
+    def __init__(self, args):
+        # Start client with params including client number && last time && hatch rate ...
+        pass
+
+    def set_up(self):
+        # helm install locust client
+        pass
+
+    def tear_down(self):
+        # helm uninstall
+        pass
diff --git a/tests/benchmark/scheduler/010_data.json b/tests/benchmark/scheduler/010_data.json
new file mode 100644
index 000000000..d7074b63f
--- /dev/null
+++ b/tests/benchmark/scheduler/010_data.json
@@ -0,0 +1,65 @@
+[
+    {
+        "server": "athena",
+        "suite_params": [
+            {
+                "suite": "080_gpu_accuracy.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "080_search_stability.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "gpu_accuracy_ann.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    },
+    {
+        "server": "poseidon",
+        "suite_params": [
+            {
+                "suite": "080_gpu_search.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "080_cpu_search.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "080_gpu_build.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "080_cpu_accuracy.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "locust_search.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    },
+    {
+        "server": "apollo",
+        "suite_params": [
+            {
+                "suite": "cpu_accuracy_ann.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "080_cpu_build.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "080_insert_performance.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "add_flush_performance.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/011_data.json b/tests/benchmark/scheduler/011_data.json
new file mode 100644
index 000000000..7b40300c5
--- /dev/null
+++ b/tests/benchmark/scheduler/011_data.json
@@ -0,0 +1,62 @@
+[
+    {
+        "server": "idc-sh002",
+        "suite_params": [
+            {
+                "suite": "011_cpu_accuracy_ann.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "011_gpu_accuracy_ann.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    },
+    {
+        "server": "idc-sh003",
+        "suite_params": [
+            {
+                "suite": "locust_mix.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    },
+    {
+        "server": "idc-sh004",
+        "suite_params": [
+            {
+                "suite": "011_insert_performance.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "011_gpu_accuracy.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "011_gpu_build.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    },
+    {
+        "server": "idc-sh005",
+        "suite_params": [
+            {
+                "suite": "011_gpu_search.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "011_cpu_search.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "011_cpu_accuracy.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "011_locust_search.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/011_data_acc_debug.json b/tests/benchmark/scheduler/011_data_acc_debug.json
new file mode 100644
index 000000000..3bb0df225
--- /dev/null
+++ b/tests/benchmark/scheduler/011_data_acc_debug.json
@@ -0,0 +1,11 @@
+[
+  {
+        "server": "apollo",
+        "suite_params": [
+            {
+                "suite": "011_cpu_accuracy_ann.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
\ No newline at end of file
diff --git a/tests/benchmark/scheduler/011_data_gpu_build.json b/tests/benchmark/scheduler/011_data_gpu_build.json
new file mode 100644
index 000000000..ed9642fa9
--- /dev/null
+++ b/tests/benchmark/scheduler/011_data_gpu_build.json
@@ -0,0 +1,11 @@
+[
+  {
+        "server": "eros",
+        "suite_params": [
+            {
+                "suite": "011_gpu_build_sift10m.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    }
+]
\ No newline at end of file
diff --git a/tests/benchmark/scheduler/011_data_insert.json b/tests/benchmark/scheduler/011_data_insert.json
new file mode 100644
index 000000000..c8bb875ac
--- /dev/null
+++ b/tests/benchmark/scheduler/011_data_insert.json
@@ -0,0 +1,11 @@
+[
+    {
+        "server": "eros",
+        "suite_params": [
+            {
+                "suite": "011_insert_data.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
\ No newline at end of file
diff --git a/tests/benchmark/scheduler/011_data_search_debug.json b/tests/benchmark/scheduler/011_data_search_debug.json
new file mode 100644
index 000000000..dec44ac09
--- /dev/null
+++ b/tests/benchmark/scheduler/011_data_search_debug.json
@@ -0,0 +1,11 @@
+[
+  {
+        "server": "athena",
+        "suite_params": [
+          {
+            "suite": "011_gpu_search_debug.yaml",
+            "image_type": "gpu"
+          }
+        ]
+    }
+]
\ No newline at end of file
diff --git a/tests/benchmark/scheduler/011_delete.json b/tests/benchmark/scheduler/011_delete.json
new file mode 100644
index 000000000..cc8000499
--- /dev/null
+++ b/tests/benchmark/scheduler/011_delete.json
@@ -0,0 +1,15 @@
+[
+  {
+        "server": "apollo",
+        "suite_params": [
+            {
+                "suite": "011_insert_performance.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "011_delete_performance.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
\ No newline at end of file
diff --git a/tests/benchmark/scheduler/080_data.json b/tests/benchmark/scheduler/080_data.json
new file mode 100644
index 000000000..d7074b63f
--- /dev/null
+++ b/tests/benchmark/scheduler/080_data.json
@@ -0,0 +1,65 @@
+[
+    {
+        "server": "athena",
+        "suite_params": [
+            {
+                "suite": "080_gpu_accuracy.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "080_search_stability.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "gpu_accuracy_ann.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    },
+    {
+        "server": "poseidon",
+        "suite_params": [
+            {
+                "suite": "080_gpu_search.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "080_cpu_search.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "080_gpu_build.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "080_cpu_accuracy.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "locust_search.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    },
+    {
+        "server": "apollo",
+        "suite_params": [
+            {
+                "suite": "cpu_accuracy_ann.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "080_cpu_build.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "080_insert_performance.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "add_flush_performance.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/acc.json b/tests/benchmark/scheduler/acc.json
new file mode 100644
index 000000000..e4d47bd94
--- /dev/null
+++ b/tests/benchmark/scheduler/acc.json
@@ -0,0 +1,15 @@
+[
+    {
+        "server": "poseidon",
+        "suite_params": [
+            {
+                "suite": "crud_add.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "gpu_accuracy_sift1m.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/build.json b/tests/benchmark/scheduler/build.json
new file mode 100644
index 000000000..f269669e1
--- /dev/null
+++ b/tests/benchmark/scheduler/build.json
@@ -0,0 +1,11 @@
+[
+    {
+        "server": "eros",
+        "suite_params": [
+            {
+                "suite": "011_gpu_build_sift1b.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/clean.json b/tests/benchmark/scheduler/clean.json
new file mode 100644
index 000000000..be9cbbe23
--- /dev/null
+++ b/tests/benchmark/scheduler/clean.json
@@ -0,0 +1,11 @@
+[
+    {
+        "server": "poseidon",
+        "suite_params": [
+            {
+                "suite": "clean.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/debug.json b/tests/benchmark/scheduler/debug.json
new file mode 100644
index 000000000..5028b5304
--- /dev/null
+++ b/tests/benchmark/scheduler/debug.json
@@ -0,0 +1,11 @@
+[
+    {
+        "server": "idc-sh002",
+        "suite_params": [
+            {
+                "suite": "011_cpu_search_sift10m.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/default_config.json b/tests/benchmark/scheduler/default_config.json
new file mode 100644
index 000000000..6b76ddc4c
--- /dev/null
+++ b/tests/benchmark/scheduler/default_config.json
@@ -0,0 +1,53 @@
+[
+    {
+        "server": "apollo",
+        "suite_params": [
+            {
+                "suite": "cpu_accuracy_ann.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    },
+    {
+        "server": "poseidon",
+        "suite_params": [
+            {
+                "suite": "gpu_search_performance.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "cpu_search_performance.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "insert_performance.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "gpu_accuracy.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    },
+    {
+        "server": "eros",
+        "suite_params": [
+            {
+                "suite": "gpu_accuracy_ann.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "gpu_search_stability.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "gpu_build_performance.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "cpu_build_performance.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
\ No newline at end of file
diff --git a/tests/benchmark/scheduler/file_size.json b/tests/benchmark/scheduler/file_size.json
new file mode 100644
index 000000000..f8da372e4
--- /dev/null
+++ b/tests/benchmark/scheduler/file_size.json
@@ -0,0 +1,11 @@
+[
+    {
+        "server": "athena",
+        "suite_params": [
+            {
+                "suite": "file_size.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/filter.json b/tests/benchmark/scheduler/filter.json
new file mode 100644
index 000000000..2a0baed66
--- /dev/null
+++ b/tests/benchmark/scheduler/filter.json
@@ -0,0 +1,11 @@
+[
+    {
+        "server": "poseidon",
+        "suite_params": [
+            {
+                "suite": "011_search_dsl.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/idc.json b/tests/benchmark/scheduler/idc.json
new file mode 100644
index 000000000..598be6da1
--- /dev/null
+++ b/tests/benchmark/scheduler/idc.json
@@ -0,0 +1,11 @@
+[
+    {
+        "server": "idc-sh004",
+        "suite_params": [
+            {
+                "suite": "011_cpu_search_debug.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/insert.json b/tests/benchmark/scheduler/insert.json
new file mode 100644
index 000000000..4df97657a
--- /dev/null
+++ b/tests/benchmark/scheduler/insert.json
@@ -0,0 +1,11 @@
+[
+    {
+        "server": "idc-sh002",
+        "suite_params": [
+            {
+                "suite": "011_insert_data.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/jaccard.json b/tests/benchmark/scheduler/jaccard.json
new file mode 100644
index 000000000..5d5ebd6d1
--- /dev/null
+++ b/tests/benchmark/scheduler/jaccard.json
@@ -0,0 +1,11 @@
+[
+    {
+        "server": "athena",
+        "suite_params": [
+            {
+                "suite": "011_cpu_search_binary.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/locust.json b/tests/benchmark/scheduler/locust.json
new file mode 100644
index 000000000..b82ccf65b
--- /dev/null
+++ b/tests/benchmark/scheduler/locust.json
@@ -0,0 +1,11 @@
+[
+    {
+        "server": "idc-sh002",
+        "suite_params": [
+            {
+                "suite": "locust_cluster_search.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/locust_mix_debug.json b/tests/benchmark/scheduler/locust_mix_debug.json
new file mode 100644
index 000000000..343d42933
--- /dev/null
+++ b/tests/benchmark/scheduler/locust_mix_debug.json
@@ -0,0 +1,10 @@
+[
+    {
+        "suite_params": [
+            {
+                "suite": "locust_mix.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/loop.json b/tests/benchmark/scheduler/loop.json
new file mode 100644
index 000000000..02fc1e261
--- /dev/null
+++ b/tests/benchmark/scheduler/loop.json
@@ -0,0 +1,10 @@
+[
+    {
+        "suite_params": [
+            {
+                "suite": "loop_stability.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/search.json b/tests/benchmark/scheduler/search.json
new file mode 100644
index 000000000..68dc7776a
--- /dev/null
+++ b/tests/benchmark/scheduler/search.json
@@ -0,0 +1,11 @@
+[
+    {
+        "server": "athena",
+        "suite_params": [
+            {
+                "suite": "011_cpu_search_sift1b.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/shards.json b/tests/benchmark/scheduler/shards.json
new file mode 100644
index 000000000..2a9bb4352
--- /dev/null
+++ b/tests/benchmark/scheduler/shards.json
@@ -0,0 +1,18 @@
+[
+    {
+        "suite_params": [
+            {
+                "suite": "shards_insert_performance.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "shards_ann_debug.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "shards_loop_stability.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/shards_ann.json b/tests/benchmark/scheduler/shards_ann.json
new file mode 100644
index 000000000..6282c234c
--- /dev/null
+++ b/tests/benchmark/scheduler/shards_ann.json
@@ -0,0 +1,10 @@
+[
+    {
+        "suite_params": [
+            {
+                "suite": "shards_ann_debug.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/shards_debug.json b/tests/benchmark/scheduler/shards_debug.json
new file mode 100644
index 000000000..9e79ee775
--- /dev/null
+++ b/tests/benchmark/scheduler/shards_debug.json
@@ -0,0 +1,15 @@
+[
+    {
+        "server": "apollo",
+        "suite_params": [
+            {
+                "suite": "shards_insert_performance_sift1m.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "shards_search_performance_sift1m.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/shards_stability.json b/tests/benchmark/scheduler/shards_stability.json
new file mode 100644
index 000000000..c2b71d947
--- /dev/null
+++ b/tests/benchmark/scheduler/shards_stability.json
@@ -0,0 +1,10 @@
+[
+    {
+        "suite_params": [
+            {
+                "suite": "shards_loop_stability.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/scheduler/stability.json b/tests/benchmark/scheduler/stability.json
new file mode 100644
index 000000000..497a16120
--- /dev/null
+++ b/tests/benchmark/scheduler/stability.json
@@ -0,0 +1,11 @@
+[
+    {
+        "server": "eros",
+        "suite_params": [
+            {
+                "suite": "gpu_search_stability.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/search_task.py b/tests/benchmark/search_task.py
new file mode 100644
index 000000000..fc40907c7
--- /dev/null
+++ b/tests/benchmark/search_task.py
@@ -0,0 +1,50 @@
+import random
+import logging
+from locust import User, task, between
+from locust_task import MilvusTask
+from client import MilvusClient
+from milvus import DataType
+import utils
+
+connection_type = "single"
+host = "172.16.50.9"
+port = 19530
+collection_name = "sift_5m_2000000_128_l2_2"
+dim = 128
+m = MilvusClient(host=host, port=port, collection_name=collection_name)
+# m.clean_db()
+# m.create_collection(dim, data_type=DataType.FLOAT_VECTOR, auto_id=True, other_fields=None)
+vectors = [[random.random() for _ in range(dim)] for _ in range(1000)]
+entities = m.generate_entities(vectors)
+ids = [i for i in range(10000000)]
+
+
+class QueryTask(User):
+    # 1-2 ms think time between tasks per simulated user.
+    wait_time = between(0.001, 0.002)
+    # if connection_type == "single":
+    #     client = MilvusTask(m=m)
+    # else:
+    #     client = MilvusTask(host=host, port=port, collection_name=collection_name)
+    # Shared client built from the module-level connection settings.
+    # NOTE(review): this assumes locust_task.MilvusTask accepts
+    # (host, port, collection_name, connection_type=...) — confirm against that module.
+    client = MilvusTask(host, port, collection_name, connection_type=connection_type)
+
+    # @task
+    # def query(self):
+    #     top_k = 5
+    #     X = [[random.random() for i in range(dim)] for i in range(1)]
+    #     search_param = {"nprobe": 16}
+    #     self.client.query(X, top_k, search_param)
+
+    @task(1)
+    def insert(self):
+        # Insert the pre-generated module-level batch on every task invocation.
+        self.client.insert(entities)
+
+    # @task(1)
+    # def create(self):
+    #     collection_name = utils.get_unique_name(prefix="locust")
+    #     self.client.create_collection(dim, data_type=DataType.FLOAT_VECTOR, auto_id=True, collection_name=collection_name, other_fields=None)
+
+    # @task(1)
+    # def delete(self):
+    #     delete_ids = random.sample(ids, 100)
+    #     logging.error(delete_ids)
+    #     self.client.delete(delete_ids)
diff --git a/tests/benchmark/suites/011_add_flush_performance.yaml b/tests/benchmark/suites/011_add_flush_performance.yaml
new file mode 100644
index 000000000..b8a7090d9
--- /dev/null
+++ b/tests/benchmark/suites/011_add_flush_performance.yaml
@@ -0,0 +1,20 @@
+insert_flush_performance:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/sift_2m_128_128_l2_flush
+        cache_config.cpu_cache_capacity: 8
+        cache_config.insert_buffer_size: 2
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false 
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        db_config.auto_flush_interval: 300
+      collection_name: sift_2m_128_l2
+      ni_per: 100000
diff --git a/tests/benchmark/suites/011_cluster_cpu_accuracy_ann.yaml b/tests/benchmark/suites/011_cluster_cpu_accuracy_ann.yaml
new file mode 100644
index 000000000..63f26c130
--- /dev/null
+++ b/tests/benchmark/suites/011_cluster_cpu_accuracy_ann.yaml
@@ -0,0 +1,336 @@
+ann_accuracy:
+  collections:
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/sift_128_euclidean
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        cluster: true
+        readonly:
+          replicas: 2
+        external_mysql: true
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['flat']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 512, 16384]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/sift_128_euclidean
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        cluster: true
+        readonly:
+          replicas: 2
+        external_mysql: true
+
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['ivf_flat', 'ivf_sq8']
+      index_params:
+        nlist: [1024, 16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/sift_128_euclidean
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        cluster: true
+        readonly:
+          replicas: 2
+        external_mysql: true
+
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['ivf_pq']
+      index_params:
+        nlist: [16384]
+        m: [32]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/sift_128_euclidean
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        cluster: true
+        readonly:
+          replicas: 2
+        external_mysql: true
+
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['annoy']
+      index_params:
+        n_trees: [8, 32]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        search_k: [50, 100, 500, 1000]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/sift_128_euclidean
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        cluster: true
+        readonly:
+          replicas: 2
+        external_mysql: true
+
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['hnsw']
+      index_params:
+        M: [16]
+        efConstruction: [500]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        ef: [16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/glove_200_angular
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        cluster: true
+        readonly:
+          replicas: 2
+        external_mysql: true
+
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['flat']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 512, 16384]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/glove_200_angular
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        cluster: true
+        readonly:
+          replicas: 2
+        external_mysql: true
+
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['ivf_flat', 'ivf_sq8']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/glove_200_angular
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        cluster: true
+        readonly:
+          replicas: 2
+        external_mysql: true
+
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['ivf_pq']
+      index_params:
+        nlist: [16384]
+        m: [20]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    - milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/glove_200_angular
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        cluster: true
+        readonly:
+          replicas: 2
+        external_mysql: true
+
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['annoy']
+      index_params:
+        n_trees: [8, 32]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        search_k: [50, 100, 500, 1000]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/glove_200_angular
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        cluster: true
+        readonly:
+          replicas: 2
+        external_mysql: true
+
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['hnsw']
+      index_params:
+        M: [36]
+        efConstruction: [500]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        ef: [10, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/glove_200_angular
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        cluster: true
+        readonly:
+          replicas: 2
+        external_mysql: true
+
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['nsg']
+      index_params:
+        search_length: 45
+        out_degree: 50
+        candidate_pool_size: 300
+        knng: 100
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        search_length: [50]
diff --git a/tests/benchmark/suites/011_cpu_accuracy.yaml b/tests/benchmark/suites/011_cpu_accuracy.yaml
new file mode 100644
index 000000000..b25484970
--- /dev/null
+++ b/tests/benchmark/suites/011_cpu_accuracy.yaml
@@ -0,0 +1,55 @@
+accuracy:
+  collections:
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_sq8
+        cache_config.cpu_cache_capacity: 8GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: sift_10m_128_l2
+      top_ks: [64]
+      nqs: [1000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_sq8
+        cache_config.cpu_cache_capacity: 8GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: sift_10m_128_l2
+      top_ks: [64]
+      nqs: [1000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_hnsw
+        cache_config.cpu_cache_capacity: 8GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: sift_10m_128_l2
+      top_ks: [64]
+      nqs: [1000]
+      search_params:
+        ef: [64, 100, 200, 500, 700]
diff --git a/tests/benchmark/suites/011_cpu_accuracy_ann.yaml b/tests/benchmark/suites/011_cpu_accuracy_ann.yaml
new file mode 100644
index 000000000..d7b736dd1
--- /dev/null
+++ b/tests/benchmark/suites/011_cpu_accuracy_ann.yaml
@@ -0,0 +1,260 @@
+ann_accuracy:
+  collections:
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['flat']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 512, 16384]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['ivf_flat', 'ivf_sq8']
+      index_params:
+        nlist: [1024, 16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['ivf_pq']
+      index_params:
+        nlist: [16384]
+        m: [32]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['annoy']
+      index_params:
+        n_trees: [8, 32]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        search_k: [50, 100, 500, 1000]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['hnsw']
+      index_params:
+        M: [16]
+        efConstruction: [500]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        ef: [16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['flat']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 512, 16384]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['ivf_flat', 'ivf_sq8']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['ivf_pq']
+      index_params:
+        nlist: [16384]
+        m: [20]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['annoy']
+      index_params:
+        n_trees: [8, 32]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        search_k: [50, 100, 500, 1000]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['hnsw']
+      index_params:
+        M: [36]
+        efConstruction: [500]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        ef: [10, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['nsg']
+      index_params:
+        search_length: 45
+        out_degree: 50
+        candidate_pool_size: 300
+        knng: 100
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        search_length: [50]
diff --git a/tests/benchmark/suites/011_cpu_build.yaml b/tests/benchmark/suites/011_cpu_build.yaml
new file mode 100644
index 000000000..17376451d
--- /dev/null
+++ b/tests/benchmark/suites/011_cpu_build.yaml
@@ -0,0 +1,40 @@
+build_performance:
+  collections:
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/random_1m_1024_512_l2_ivf
+        cache_config.cpu_cache_capacity: 32
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: random_1m_1024_512_l2
+      index_type: ivf_flat
+      index_param:
+        nlist: 16384
+
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/sift_1m_128_128_l2_pq
+        cache_config.cpu_cache_capacity: 32
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_1m_128_128_l2
+      index_type: ivf_pq
+      index_param:
+        nlist: 8092
+        m: 32
diff --git a/tests/benchmark/suites/011_cpu_build_binary.yaml b/tests/benchmark/suites/011_cpu_build_binary.yaml
new file mode 100644
index 000000000..d3beb3bd5
--- /dev/null
+++ b/tests/benchmark/suites/011_cpu_build_binary.yaml
@@ -0,0 +1,11 @@
+build_performance:
+  collections:
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/binary_50m_512_jaccard_ivf
+        cache_config.cpu_cache_capacity: 8GB
+        gpu_resource_config.enable: false
+      collection_name: binary_50m_512_jaccard
+      index_type: bin_ivf_flat
+      index_param:
+        nlist: 2048
diff --git a/tests/benchmark/suites/011_cpu_search.yaml b/tests/benchmark/suites/011_cpu_search.yaml
new file mode 100644
index 000000000..e576b913e
--- /dev/null
+++ b/tests/benchmark/suites/011_cpu_search.yaml
@@ -0,0 +1,255 @@
+search_performance:
+  collections:
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8
+        cache_config.cpu_cache_capacity: 150GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_1b_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2
+        cache_config.cpu_cache_capacity: 64GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_10m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+          nprobe: 8
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2
+        cache_config.cpu_cache_capacity: 64GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+          nprobe: 8
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_ivf_flat
+        cache_config.cpu_cache_capacity: 64GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_pq
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+      collection_name: sift_10m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_ivf_flat
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+      collection_name: sift_10m_128_l2
+      run_count: 2
+      top_ks: [1, 1000]
+      nqs: [1, 100, 1000]
+      search_params:
+        -
+          nprobe: 8
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_nsg
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+      collection_name: sift_10m_100000_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+         search_length: 50
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_annoy
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+      collection_name: sift_10m_100000_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+         search_k: 100
+        -
+         search_k: 500
+        -
+         search_k: 1000
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/binary_50m_512_jaccard
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: binary_50m_512_jaccard
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/binary_50m_512_jaccard_ivf
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: binary_50m_512_jaccard
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
diff --git a/tests/benchmark/suites/011_cpu_search_binary.yaml b/tests/benchmark/suites/011_cpu_search_binary.yaml
new file mode 100644
index 000000000..6539999b4
--- /dev/null
+++ b/tests/benchmark/suites/011_cpu_search_binary.yaml
@@ -0,0 +1,49 @@
+search_performance:
+  collections:
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/binary_50m_512_jaccard
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: binary_50m_512_jaccard
+      run_count: 2
+      top_ks: [10, 1, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/binary_50m_512_jaccard_ivf
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: binary_50m_512_jaccard
+      run_count: 2
+      top_ks: [10, 1, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
diff --git a/tests/benchmark/suites/011_cpu_search_debug.yaml b/tests/benchmark/suites/011_cpu_search_debug.yaml
new file mode 100644
index 000000000..eca76ee3e
--- /dev/null
+++ b/tests/benchmark/suites/011_cpu_search_debug.yaml
@@ -0,0 +1,26 @@
+search_performance:
+  collections:
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8
+        cache_config.cpu_cache_capacity: 150GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_1b_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
diff --git a/tests/benchmark/suites/011_cpu_search_sift10m_filter.yaml b/tests/benchmark/suites/011_cpu_search_sift10m_filter.yaml
new file mode 100644
index 000000000..284aff9fd
--- /dev/null
+++ b/tests/benchmark/suites/011_cpu_search_sift10m_filter.yaml
@@ -0,0 +1,97 @@
+search_performance:
+  collections:
+     -
+       server:
+         db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2/
+         cache_config.cpu_cache_capacity: 32GB
+         engine_config.use_blas_threshold: 0
+         engine_config.gpu_search_threshold: 100
+         gpu_resource_config.enable: true
+         gpu_resource_config.cache_capacity: 6GB
+         gpu_resource_config.search_resources:
+           - gpu0
+           - gpu1
+         gpu_resource_config.build_index_resources:
+           - gpu0
+           - gpu1
+         wal_enable: true
+       collection_name: sift_10m_128_l2
+       run_count: 2
+       top_ks: [1, 1000]
+       nqs: [1, 100, 1200]
+       search_params:
+         -
+           nprobe: 8
+     -
+       server:
+         db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_ivf_flat
+         cache_config.cpu_cache_capacity: 32GB
+         engine_config.use_blas_threshold: 0
+         engine_config.gpu_search_threshold: 100
+         gpu_resource_config.enable: true
+         gpu_resource_config.cache_capacity: 6GB
+         gpu_resource_config.search_resources:
+           - gpu0
+           - gpu1
+         gpu_resource_config.build_index_resources:
+           - gpu0
+           - gpu1
+         wal_enable: true
+       collection_name: sift_10m_128_l2
+       run_count: 2
+       top_ks: [1, 10, 100, 1000]
+       nqs: [1, 10, 100, 1000, 1200]
+       search_params:
+         -
+           nprobe: 8
+         -
+           nprobe: 32
+
+     -
+       server:
+         db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_sq8
+         cache_config.cpu_cache_capacity: 32GB
+         engine_config.use_blas_threshold: 0
+         engine_config.gpu_search_threshold: 100
+         gpu_resource_config.enable: true
+         gpu_resource_config.cache_capacity: 6GB
+         gpu_resource_config.search_resources:
+           - gpu0
+           - gpu1
+         gpu_resource_config.build_index_resources:
+           - gpu0
+           - gpu1
+         wal_enable: true
+       collection_name: sift_10m_128_l2
+       run_count: 2
+       top_ks: [1, 10, 100, 1000]
+       nqs: [1, 10, 100, 1000, 1200]
+       search_params:
+         -
+           nprobe: 8
+         -
+           nprobe: 32
+     -
+       server:
+         db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_pq
+         cache_config.cpu_cache_capacity: 32GB
+         engine_config.use_blas_threshold: 0
+         engine_config.gpu_search_threshold: 100
+         gpu_resource_config.enable: true
+         gpu_resource_config.cache_capacity: 6GB
+         gpu_resource_config.search_resources:
+           - gpu0
+           - gpu1
+         gpu_resource_config.build_index_resources:
+           - gpu0
+           - gpu1
+         wal_enable: true
+       collection_name: sift_10m_128_l2
+       run_count: 2
+       top_ks: [1, 10, 100, 1000]
+       nqs: [1, 10, 100, 1000, 1200]
+       search_params:
+         -
+           nprobe: 8
+         -
+           nprobe: 32
\ No newline at end of file
diff --git a/tests/benchmark/suites/011_cpu_search_sift50m.yaml b/tests/benchmark/suites/011_cpu_search_sift50m.yaml
new file mode 100644
index 000000000..6a96e0e72
--- /dev/null
+++ b/tests/benchmark/suites/011_cpu_search_sift50m.yaml
@@ -0,0 +1,98 @@
+search_performance:
+  collections:
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_128_l2
+      run_count: 2
+      top_ks: [1, 1000]
+      nqs: [1, 100, 1200]
+      search_params:
+        -
+          nprobe: 8
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_ivf_flat_16384
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8_16384
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_pq_16384
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
\ No newline at end of file
diff --git a/tests/benchmark/suites/011_gpu_accuracy.yaml b/tests/benchmark/suites/011_gpu_accuracy.yaml
new file mode 100644
index 000000000..abd4d30ca
--- /dev/null
+++ b/tests/benchmark/suites/011_gpu_accuracy.yaml
@@ -0,0 +1,61 @@
+accuracy:
+  collections:
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_sq8
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_10m_128_l2
+      top_ks: [64]
+      nqs: [1000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_10m_128_l2
+      top_ks: [64]
+      nqs: [1000]
+      search_params:
+        nprobe: [1]
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_ip
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_10m_128_ip
+      top_ks: [64]
+      nqs: [1000]
+      search_params:
+        nprobe: [1]
diff --git a/tests/benchmark/suites/011_gpu_accuracy_ann.yaml b/tests/benchmark/suites/011_gpu_accuracy_ann.yaml
new file mode 100644
index 000000000..fb2fad25a
--- /dev/null
+++ b/tests/benchmark/suites/011_gpu_accuracy_ann.yaml
@@ -0,0 +1,165 @@
+ann_accuracy:
+  collections:
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_sq8h']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['ivf_pq']
+      index_params:
+        nlist: [16384]
+        m: [32]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['hnsw']
+      index_params:
+        M: [16]
+        efConstruction: [500]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        ef: [16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_sq8h']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['hnsw']
+      index_params:
+        M: [36]
+        efConstruction: [500]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        ef: [10, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/kosarak-27983-jaccard.hdf5
+      collection_name: kosarak_27984_jaccard  # NOTE(review): source_file says 27983 — confirm which the harness expects
+      index_types: ['bin_flat', 'bin_ivf_flat']
+      index_params:
+        nlist: [2048]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-256-hamming.hdf5
+      collection_name: sift_256_hamming
+      index_types: ['bin_flat', 'bin_ivf_flat']
+      index_params:
+        nlist: [2048]
+      top_ks: [100]
+      nqs: [1000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
diff --git a/tests/benchmark/suites/011_gpu_build.yaml b/tests/benchmark/suites/011_gpu_build.yaml
new file mode 100644
index 000000000..488dc16b6
--- /dev/null
+++ b/tests/benchmark/suites/011_gpu_build.yaml
@@ -0,0 +1,21 @@
+build_performance:
+  collections:
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_sq8_4096
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_10m_100000_128_l2
+      index_type: ivf_sq8
+      index_param:
+        nlist: 4096
+
diff --git a/tests/benchmark/suites/011_gpu_search.yaml b/tests/benchmark/suites/011_gpu_search.yaml
new file mode 100644
index 000000000..e717c6e32
--- /dev/null
+++ b/tests/benchmark/suites/011_gpu_search.yaml
@@ -0,0 +1,251 @@
+search_performance:
+  collections:
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_ivf_flat
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8h
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_pq
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_hnsw
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_10m_128_l2
+      run_count: 2
+      top_ks: [100]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+          ef: 100
+        -
+          ef: 200
+        -
+          ef: 500
+        -
+          ef: 1000
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_annoy
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_10m_100000_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+         search_k: 100
+        -
+         search_k: 500
+        -
+         search_k: 1000
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_nsg
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_10m_100000_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+         search_length: 50
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8h
+        cache_config.cpu_cache_capacity: 150GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_1b_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8
+        cache_config.cpu_cache_capacity: 150GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_1b_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
diff --git a/tests/benchmark/suites/011_gpu_search_sift10m_filter.yaml b/tests/benchmark/suites/011_gpu_search_sift10m_filter.yaml
new file mode 100644
index 000000000..012153d98
--- /dev/null
+++ b/tests/benchmark/suites/011_gpu_search_sift10m_filter.yaml
@@ -0,0 +1,122 @@
+search_performance:
+  collections:
+#    -
+#      server:
+#        db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2/
+#        cache_config.cpu_cache_capacity: 32GB
+#        engine_config.use_blas_threshold: 0
+#        engine_config.gpu_search_threshold: 100
+#        gpu_resource_config.enable: true
+#        gpu_resource_config.cache_capacity: 6GB
+#        gpu_resource_config.search_resources:
+#          - gpu0
+#          - gpu1
+#        gpu_resource_config.build_index_resources:
+#          - gpu0
+#          - gpu1
+#        wal_enable: true
+#      collection_name: sift_10m_128_l2
+#      run_count: 2
+#      top_ks: [1, 1000]
+#      nqs: [1, 100, 1200]
+#      search_params:
+#        -
+#          nprobe: 8
+#    -
+#      server:
+#        db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_ivf_flat
+#        cache_config.cpu_cache_capacity: 32GB
+#        engine_config.use_blas_threshold: 0
+#        engine_config.gpu_search_threshold: 100
+#        gpu_resource_config.enable: true
+#        gpu_resource_config.cache_capacity: 6GB
+#        gpu_resource_config.search_resources:
+#          - gpu0
+#          - gpu1
+#        gpu_resource_config.build_index_resources:
+#          - gpu0
+#          - gpu1
+#        wal_enable: true
+#      collection_name: sift_10m_128_l2
+#      run_count: 2
+#      top_ks: [1, 10, 100, 1000]
+#      nqs: [1, 10, 100, 1000, 1200]
+#      search_params:
+#        -
+#          nprobe: 8
+#        -
+#          nprobe: 32
+
+#    -
+#      server:
+#        db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_sq8
+#        cache_config.cpu_cache_capacity: 32GB
+#        engine_config.use_blas_threshold: 0
+#        engine_config.gpu_search_threshold: 100
+#        gpu_resource_config.enable: true
+#        gpu_resource_config.cache_capacity: 6GB
+#        gpu_resource_config.search_resources:
+#          - gpu0
+#          - gpu1
+#        gpu_resource_config.build_index_resources:
+#          - gpu0
+#          - gpu1
+#        wal_enable: true
+#      collection_name: sift_10m_128_l2
+#      run_count: 2
+#      top_ks: [1, 10, 100, 1000]
+#      nqs: [1, 10, 100, 1000, 1200]
+#      search_params:
+#        -
+#          nprobe: 8
+#        -
+#          nprobe: 32
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_sq8h
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_10m_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      filters:
+       -
+         term: "{'term': {'float': {'values': [float(i) for i in range(collection_size // 1000)]}}}"
+      search_params:
+        -
+          nprobe: 32
+#    -
+#      server:
+#        db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_pq
+#        cache_config.cpu_cache_capacity: 32GB
+#        engine_config.use_blas_threshold: 0
+#        engine_config.gpu_search_threshold: 100
+#        gpu_resource_config.enable: true
+#        gpu_resource_config.cache_capacity: 6GB
+#        gpu_resource_config.search_resources:
+#          - gpu0
+#          - gpu1
+#        gpu_resource_config.build_index_resources:
+#          - gpu0
+#          - gpu1
+#        wal_enable: true
+#      collection_name: sift_10m_128_l2
+#      run_count: 2
+#      top_ks: [1, 10, 100, 1000]
+#      nqs: [1, 10, 100, 1000, 1200]
+#      search_params:
+#        -
+#          nprobe: 8
+#        -
+#          nprobe: 32
\ No newline at end of file
diff --git a/tests/benchmark/suites/011_gpu_search_sift50m.yaml b/tests/benchmark/suites/011_gpu_search_sift50m.yaml
new file mode 100644
index 000000000..7aae432ca
--- /dev/null
+++ b/tests/benchmark/suites/011_gpu_search_sift50m.yaml
@@ -0,0 +1,121 @@
+search_performance:
+  collections:
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_100000_128_l2
+      run_count: 2
+      top_ks: [1, 1000]
+      nqs: [1, 100, 1200]
+      search_params:
+        -
+          nprobe: 8
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2_ivf_flat
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_100000_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2_sq8
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_100000_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2_sq8h
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_100000_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2_pq
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_50m_100000_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 1000, 1200]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
\ No newline at end of file
diff --git a/tests/benchmark/suites/011_gpu_stability.yaml b/tests/benchmark/suites/011_gpu_stability.yaml
new file mode 100644
index 000000000..9e7ec4f00
--- /dev/null
+++ b/tests/benchmark/suites/011_gpu_stability.yaml
@@ -0,0 +1,39 @@
+stability:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_011/sift_10m_1024_128_l2_sq8_stability
+        cache_config.cpu_cache_capacity: 64GB
+        cache_config.cache_insert_data: true
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 50
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_1m_50000_128_l2_2  # NOTE(review): primary_path names sift_10m_* — confirm collection is intended
+      during_time: 5
+      operations:
+        insert:
+          weight: 4
+          xb: 100
+        delete:
+          weight: 4
+          xb: 100
+        flush:
+          weight: 1
+          # async: true
+        compact:
+          weight: 1
+        #   # async: true
+        query:
+          weight: 2
+          # async: true
+          top_ks: 1-100
+          nqs: 1-100
+          search_params:
+            nprobe: 1-100
diff --git a/tests/benchmark/suites/011_insert_data.yaml b/tests/benchmark/suites/011_insert_data.yaml
new file mode 100644
index 000000000..f36fe0a87
--- /dev/null
+++ b/tests/benchmark/suites/011_insert_data.yaml
@@ -0,0 +1,57 @@
+insert_performance:
+  collections:
+     -
+       milvus:
+         db_config.primary_path: /test/milvus/db_data_011/cluster/sift_10m_128_l2
+         cache_config.cpu_cache_capacity: 4GB
+         engine_config.use_blas_threshold: 1100
+         engine_config.gpu_search_threshold: 1
+         gpu_resource_config.enable: true
+         gpu_resource_config.cache_capacity: 4GB
+         gpu_resource_config.search_resources:
+           - gpu0
+           - gpu1
+         gpu_resource_config.build_index_resources:
+           - gpu0
+           - gpu1
+         wal_enable: true
+#         cluster: true
+#         external_mysql: true
+       collection_name: sift_10m_128_l2_011
+#       other_fields: int,float
+       ni_per: 50000
+       build_index: false
+       index_type: ivf_sq8
+       index_param:
+         nlist: 1024
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2
+    #     cache_config.cpu_cache_capacity: 4GB
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 1
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4GB
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #     wal_enable: true
+    #   collection_name: sift_50m_100000_128_l2
+    #   ni_per: 50000
+    #   build_index: false
+    #   index_type: ivf_sq8
+    #   index_param:
+    #     nlist: 1024
+#    -
+#      server:
+#        db_config.primary_path: /test/milvus/db_data_011/sift_1b_524288_128_l2_debug
+#      collection_name: sift_1b_524288_128_l2
+#      ni_per: 100000
+#      # flush: no
+#      build_index: false
+#      index_type: ivf_sq8
+#      index_param:
+#        nlist: 4096
diff --git a/tests/benchmark/suites/011_insert_performance.yaml b/tests/benchmark/suites/011_insert_performance.yaml
new file mode 100644
index 000000000..0bd364340
--- /dev/null
+++ b/tests/benchmark/suites/011_insert_performance.yaml
@@ -0,0 +1,113 @@
+insert_performance:
+  collections:
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 8GB
+        engine_config.use_blas_threshold: 1100
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+      collection_name: sift_5m_128_l2
+      ni_per: 50000
+      build_index: false
+      index_type: ivf_sq8
+      index_param:
+        nlist: 1024
+
+    -
+      milvus:
+        cache_config.cpu_cache_capacity: 8GB
+        engine_config.use_blas_threshold: 1100
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+      collection_name: sift_5m_128_l2
+      ni_per: 50000
+      build_index: true
+      index_type: ivf_sq8
+      index_param:
+        nlist: 1024
+
+    # -
+    #   server:
+    #     cache_config.cpu_cache_capacity: 8GB
+    #     engine_config.use_blas_threshold: 1100
+    #     gpu_resource_config.enable: false
+    #     gpu_resource_config.cache_capacity: 4GB
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #     wal_enable: true
+    #   collection_name: sift_5m_100000_128_l2
+    #   ni_per: 50000
+    #   flush: no
+    #   build_index: false
+    #   index_type: ivf_sq8
+    #   index_param:
+    #     nlist: 1024
+
+    # -
+    #   server:
+    #     cache_config.cpu_cache_capacity: 8GB
+    #     engine_config.use_blas_threshold: 1100
+    #     gpu_resource_config.enable: false
+    #     gpu_resource_config.cache_capacity: 4GB
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #     wal_enable: true
+    #   collection_name: sift_5m_100000_128_l2
+    #   ni_per: 100000
+    #   flush: no
+    #   build_index: false
+    #   index_type: ivf_sq8
+    #   index_param:
+    #     nlist: 1024
+
+    # -
+    #   server:
+    #     cache_config.cpu_cache_capacity: 8GB
+    #     engine_config.use_blas_threshold: 1100
+    #     gpu_resource_config.enable: false
+    #     gpu_resource_config.cache_capacity: 4GB
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #     wal_enable: true
+    #   collection_name: sift_5m_100000_128_l2
+    #   ni_per: 200000
+    #   flush: no
+    #   build_index: false
+    #   index_type: ivf_sq8
+    #   index_param:
+    #     nlist: 1024
+
+    # -
+    #   server:
+    #     cache_config.cpu_cache_capacity: 8GB
+    #     engine_config.use_blas_threshold: 1100
+    #     gpu_resource_config.enable: false
+    #     gpu_resource_config.cache_capacity: 4GB
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #     wal_enable: true
+    #   collection_name: sift_5m_100000_128_l2
+    #   ni_per: 500000
+    #   flush: no
+    #   build_index: false
+    #   index_type: ivf_sq8
+    #   index_param:
+    #     nlist: 1024
diff --git a/tests/benchmark/suites/011_locust_insert.yaml b/tests/benchmark/suites/011_locust_insert.yaml
new file mode 100644
index 000000000..1d4b79997
--- /dev/null
+++ b/tests/benchmark/suites/011_locust_insert.yaml
@@ -0,0 +1,33 @@
+locust_insert_performance:
+  collections:
+    - 
+      milvus:
+        mysql: true
+        db_config.primary_path: /test/milvus/db_data_011/insert_sift_1m_128_l2_2
+        cache_config.cpu_cache_capacity: 8GB
+        cache_config.insert_buffer_size: 2GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+      collection_name: sift_1m_128_l2_011
+      ni_per: 50000
+      build_index: false
+      index_type: ivf_sq8
+      index_param:
+        nlist: 1024
+      task: 
+        connection_num: 1
+        clients_num: 10
+        hatch_rate: 2
+        during_time: 30
+        types:
+          -
+            type: insert
+            weight: 1
+            params: None
diff --git a/tests/benchmark/suites/011_locust_search.yaml b/tests/benchmark/suites/011_locust_search.yaml
new file mode 100644
index 000000000..fa8455997
--- /dev/null
+++ b/tests/benchmark/suites/011_locust_search.yaml
@@ -0,0 +1,43 @@
+locust_search_performance:
+  collections:
+    - 
+      milvus:
+        cache_config.cpu_cache_capacity: 8GB
+        cache_config.insert_buffer_size: 2GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_1m_128_l2_2
+      ni_per: 50000
+      build_index: true
+      index_type: ivf_sq8
+      index_param:
+        nlist: 16384
+      task: 
+        connection_num: 1
+        clients_num: 100
+        hatch_rate: 2
+        during_time: 300
+        types:
+          -
+            type: query
+            weight: 1
+            params:
+              top_k: 10
+              nq: 1
+              # filters:
+              #   -
+              #     range:
+              #       int64:
+              #         LT: 0
+              #         GT: 1000000
+              search_param:
+                nprobe: 16
diff --git a/tests/benchmark/suites/011_search_stability.yaml b/tests/benchmark/suites/011_search_stability.yaml
new file mode 100644
index 000000000..840b9378e
--- /dev/null
+++ b/tests/benchmark/suites/011_search_stability.yaml
@@ -0,0 +1,20 @@
+search_stability:
+  collections:
+    - 
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8_16384_stability
+        cache_config.cpu_cache_capacity: 32GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: sift_50m_128_l2
+      during_time: 180
+      top_ks: 1-200
+      nqs: 1-200
+      search_params:
+        nprobe: 1-100
diff --git a/tests/benchmark/suites/cluster_locust_mix.yaml b/tests/benchmark/suites/cluster_locust_mix.yaml
new file mode 100644
index 000000000..cb1b45764
--- /dev/null
+++ b/tests/benchmark/suites/cluster_locust_mix.yaml
@@ -0,0 +1,47 @@
+locust_mix_performance:
+  collections:
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/locust_mix
+        suffix_path: true
+        cache_config.insert_buffer_size: 2GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+        external_mysql: true
+        cluster: true
+        readonly:
+          replicas: 2 
+      collection_name: sift_1m_500000_128_l2
+      ni_per: 50000
+      build_index: true
+      index_type: ivf_sq8
+      index_param:
+        nlist: 1024
+      task:
+        types:
+          - type: flush
+            weight: 1
+          -
+            type: query
+            weight: 30
+            params:
+              top_k: 10
+              nq: 100
+              search_param:
+                nprobe: 16
+          -
+            type: insert
+            weight: 10
+            params:
+              nb: 1
+        connection_num: 1
+        clients_num: 10
+        hatch_rate: 2
+        during_time: 600 
diff --git a/tests/benchmark/suites/cpu_accuracy.yaml b/tests/benchmark/suites/cpu_accuracy.yaml
new file mode 100644
index 000000000..d78017808
--- /dev/null
+++ b/tests/benchmark/suites/cpu_accuracy.yaml
@@ -0,0 +1,61 @@
+accuracy:
+  collections:
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_7/sift_50m_2048_128_ip_sq8_wal
+        cache_config.cpu_cache_capacity: 30
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6 
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_50m_2048_128_ip
+      top_ks: [64]
+      nqs: [1000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_7/sift_50m_2048_128_l2_hnsw_wal
+        cache_config.cpu_cache_capacity: 64
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_50m_2048_128_l2
+      top_ks: [64]
+      nqs: [1000]
+      search_params:
+        ef: [64, 100, 200, 500, 700]
+
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_7/sift_1b_2048_128_l2_sq8_wal
+        cache_config.cpu_cache_capacity: 150
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false 
+        gpu_resource_config.cache_capacity: 6 
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_1b_2048_128_l2
+      top_ks: [64]
+      nqs: [1000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
diff --git a/tests/benchmark/suites/cpu_accuracy_ann.yaml b/tests/benchmark/suites/cpu_accuracy_ann.yaml
new file mode 100644
index 000000000..b980186ae
--- /dev/null
+++ b/tests/benchmark/suites/cpu_accuracy_ann.yaml
@@ -0,0 +1,212 @@
+ann_accuracy:
+  collections:
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['flat']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 512, 16384]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['ivf_flat', 'ivf_sq8']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['ivf_pq']
+      index_params:
+        nlist: [16384]
+        m: [32]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['annoy']
+      index_params:
+        n_trees: [8, 32]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        search_k: [50, 100, 500, 1000]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['hnsw']
+      index_params:
+        M: [16]
+        efConstruction: [500]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        ef: [16, 32, 64, 128, 256, 512]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['flat']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 512, 16384]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['ivf_flat', 'ivf_sq8']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['ivf_pq']
+      index_params:
+        nlist: [16384]
+        m: [20]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_types: ['hnsw']
+      index_params:
+        M: [36]
+        efConstruction: [500]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        ef: [10, 16, 32, 64, 128, 256, 512]
diff --git a/tests/benchmark/suites/cpu_build_performance.yaml b/tests/benchmark/suites/cpu_build_performance.yaml
new file mode 100644
index 000000000..660aff14f
--- /dev/null
+++ b/tests/benchmark/suites/cpu_build_performance.yaml
@@ -0,0 +1,19 @@
+build_performance:
+  collections:
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_070/sift_50m_1024_128_l2_sq8h_wal
+        cache_config.cpu_cache_capacity: 32
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true 
+        gpu_resource_config.cache_capacity: 6
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_50m_1024_128_l2
+      index_type: ivf_sq8h
+      nlist: 16384
diff --git a/tests/benchmark/suites/cpu_search_binary.yaml b/tests/benchmark/suites/cpu_search_binary.yaml
new file mode 100644
index 000000000..37aa2d0a3
--- /dev/null
+++ b/tests/benchmark/suites/cpu_search_binary.yaml
@@ -0,0 +1,67 @@
+search_performance:
+  collections:
+    # - 
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_7/sub_50m_512_512_sub_wal
+    #     cache_config.cpu_cache_capacity: 32
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 1
+    #     gpu_resource_config.enable: false
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #   collection_name: sub_50m_512_512_sub
+    #   run_count: 2
+    #   top_ks: [1, 10, 100, 1000]
+    #   nqs: [1, 10, 100, 200, 500, 1000]
+    #   search_params:
+    #     -
+    #       nprobe: 8
+    #     -
+    #       nprobe: 32
+
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_7/super_50m_512_512_super_wal
+    #     cache_config.cpu_cache_capacity: 32
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 1
+    #     gpu_resource_config.enable: false
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #   collection_name: super_50m_512_512_super
+    #   run_count: 2
+    #   top_ks: [1, 10, 100, 1000]
+    #   nqs: [1, 10, 100, 200, 500, 1000]
+    #   search_params:
+    #     -
+    #       nprobe: 8
+    #     -
+    #       nprobe: 32
+
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_7/jaccard_50m_512_512_jaccard_wal
+        cache_config.cpu_cache_capacity: 32
+        engine_config.use_blas_threshold: 0
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: jaccard_50m_512_512_jaccard
+      run_count: 2
+      top_ks: [1, 10, 100, 1000]
+      nqs: [1, 10, 100, 200, 500, 1000]
+      search_params:
+        -
+          nprobe: 8
+        -
+          nprobe: 32
diff --git a/tests/benchmark/suites/cpu_search_performance_jaccard.yaml b/tests/benchmark/suites/cpu_search_performance_jaccard.yaml
new file mode 100644
index 000000000..0266cabab
--- /dev/null
+++ b/tests/benchmark/suites/cpu_search_performance_jaccard.yaml
@@ -0,0 +1,20 @@
+search_performance:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_070/jaccard_50m_512_512_jaccard_wal
+        cache_config.cpu_cache_capacity: 32
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: jaccard_50m_512_512_jaccard
+      run_count: 2
+      search_params:
+        nprobes: [8, 32]
+        top_ks: [1, 10, 100, 1000]
+        nqs: [1, 10, 100, 200, 500, 1000]
diff --git a/tests/benchmark/suites/cpu_search_performance_sift50m.yaml b/tests/benchmark/suites/cpu_search_performance_sift50m.yaml
new file mode 100644
index 000000000..b74bb9e56
--- /dev/null
+++ b/tests/benchmark/suites/cpu_search_performance_sift50m.yaml
@@ -0,0 +1,20 @@
+search_performance:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8
+        cache_config.cpu_cache_capacity: 32
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: sift_50m_1024_128_l2
+      run_count: 2
+      search_params:
+        nprobes: [8, 32]
+        top_ks: [1, 10, 100, 1000]
+        nqs: [1, 10, 100, 200, 500, 1000]
diff --git a/tests/benchmark/suites/gpu_accuracy.yaml b/tests/benchmark/suites/gpu_accuracy.yaml
new file mode 100644
index 000000000..2f5a08621
--- /dev/null
+++ b/tests/benchmark/suites/gpu_accuracy.yaml
@@ -0,0 +1,41 @@
+accuracy:
+  collections:
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_7/sift_50m_2048_128_l2_sq8_wal
+        cache_config.cpu_cache_capacity: 30
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6 
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_50m_2048_128_l2
+      top_ks: [64]
+      nqs: [1000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_7/sift_1b_2048_128_l2_sq8h_wal
+        cache_config.cpu_cache_capacity: 150
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 6
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_1b_2048_128_l2
+      top_ks: [64]
+      nqs: [1000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
diff --git a/tests/benchmark/suites/gpu_accuracy_ann.yaml b/tests/benchmark/suites/gpu_accuracy_ann.yaml
new file mode 100644
index 000000000..900abf73e
--- /dev/null
+++ b/tests/benchmark/suites/gpu_accuracy_ann.yaml
@@ -0,0 +1,172 @@
+ann_accuracy:
+  collections:
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_file_sizes: [1024]
+      index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_sq8h']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 16384]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_file_sizes: [1024]
+      index_types: ['ivf_pq']
+      index_params:
+        nlist: [16384]
+        m: [32]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_file_sizes: [256]
+      index_types: ['hnsw']
+      index_params:
+        M: [16]
+        efConstruction: [500]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        ef: [16, 32, 64, 128, 256, 512]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true 
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_file_sizes: [1024]
+      index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_sq8h']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 16384]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_file_sizes: [256]
+      index_types: ['hnsw']
+      index_params:
+        M: [36]
+        efConstruction: [500]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        ef: [10, 16, 32, 64, 128, 256, 512]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/kosarak-27983-jaccard.hdf5
+      collection_name: kosarak_27984_jaccard
+      index_file_sizes: [1024]
+      index_types: ['flat', 'ivf_flat']
+      index_params:
+        nlist: [2048]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-256-hamming.hdf5
+      collection_name: sift_256_hamming
+      index_file_sizes: [1024]
+      index_types: ['flat', 'ivf_flat']
+      index_params:
+        nlist: [2048]
+      top_ks: [100]
+      nqs: [1000]
+      search_params:
+        nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
diff --git a/tests/benchmark/suites/gpu_accuracy_ann_debug.yaml b/tests/benchmark/suites/gpu_accuracy_ann_debug.yaml
new file mode 100644
index 000000000..4748845ed
--- /dev/null
+++ b/tests/benchmark/suites/gpu_accuracy_ann_debug.yaml
@@ -0,0 +1,24 @@
+ann_accuracy:
+  collections:
+    -
+      server:
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_types: ['flat']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 512, 16384]
diff --git a/tests/benchmark/suites/gpu_accuracy_sift1b.yaml b/tests/benchmark/suites/gpu_accuracy_sift1b.yaml
new file mode 100644
index 000000000..dbfe2abe8
--- /dev/null
+++ b/tests/benchmark/suites/gpu_accuracy_sift1b.yaml
@@ -0,0 +1,59 @@
+accuracy:
+  collections:
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8
+        cache_config.cpu_cache_capacity: 150
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_1b_2048_128_l2
+      search_params:
+        nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+        top_ks: [64]
+        nqs: [1000]
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8h
+        cache_config.cpu_cache_capacity: 150
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_1b_2048_128_l2
+      search_params:
+        nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+        top_ks: [64]
+        nqs: [1000]
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_pq
+        cache_config.cpu_cache_capacity: 150
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_1b_2048_128_l2
+      search_params:
+        nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
+        top_ks: [64]
+        nqs: [1000]
\ No newline at end of file
diff --git a/tests/benchmark/suites/gpu_build_performance_jaccard50m.yaml b/tests/benchmark/suites/gpu_build_performance_jaccard50m.yaml
new file mode 100644
index 000000000..fdf9cccd2
--- /dev/null
+++ b/tests/benchmark/suites/gpu_build_performance_jaccard50m.yaml
@@ -0,0 +1,20 @@
+build_performance:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_7/jaccard_50m_512_512_jaccard_ivf_wal_debug
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: jaccard_50m_512_512_jaccard
+      index_type: ivf_flat
+      index_param:
+        nlist: 2048
diff --git a/tests/benchmark/suites/gpu_search_performance.yaml b/tests/benchmark/suites/gpu_search_performance.yaml
new file mode 100644
index 000000000..78f6334a7
--- /dev/null
+++ b/tests/benchmark/suites/gpu_search_performance.yaml
@@ -0,0 +1,247 @@
+search_performance:
+  collections:
+    # sift_50m
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_50m_1024_128_l2_ivf
+        cache_config.cpu_cache_capacity: 32
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_50m_1024_128_l2
+      run_count: 2
+      search_params:
+        nprobes: [8, 32]
+        top_ks: [1, 10, 100, 1000]
+        nqs: [1, 10, 100, 200, 500, 1000]
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_50m_1024_128_l2_sq8
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_50m_1024_128_l2
+      run_count: 2
+      search_params:
+        nprobes: [8, 32]
+        top_ks: [1, 10, 100, 1000]
+        nqs: [1, 10, 100, 200, 500, 1000]
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_50m_1024_128_l2_sq8h
+        cache_config.cpu_cache_capacity: 16
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_50m_1024_128_l2
+      run_count: 2
+      search_params:
+        nprobes: [8, 32]
+        top_ks: [1, 10, 100, 1000]
+        nqs: [1, 10, 100, 200, 500, 1000]
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_pq
+    #     cache_config.cpu_cache_capacity: 32
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: sift_50m_1024_128_l2
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8, 32]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_50m_1024_128_l2_nsg
+    #     cache_config.cpu_cache_capacity: 50
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: sift_50m_1024_128_l2
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+
+    # random_50m
+    # - 
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu_crud/random_50m_1024_512_ip_ivf
+    #     cache_config.cpu_cache_capacity: 110
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: random_50m_1024_512_ip
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8, 32]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8
+    #     cache_config.cpu_cache_capacity: 30
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: random_50m_1024_512_ip
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8, 32]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8h
+    #     cache_config.cpu_cache_capacity: 30
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: random_50m_1024_512_ip
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8, 32]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_nsg
+    #     cache_config.cpu_cache_capacity: 200
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 6
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: random_50m_1024_512_ip
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+
+    # sift_1b
+    # - 
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_1b_1024_128_l2_sq8
+    #     cache_config.cpu_cache_capacity: 150
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: sift_1b_1024_128_l2
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8, 32]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_1b_2048_128_l2_sq8h
+    #     cache_config.cpu_cache_capacity: 150
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: sift_1b_2048_128_l2
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8, 32]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_pq
+    #     cache_config.cpu_cache_capacity: 150
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: sift_1b_2048_128_l2
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8, 32]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
diff --git a/tests/benchmark/suites/gpu_search_performance_jaccard50m.yaml b/tests/benchmark/suites/gpu_search_performance_jaccard50m.yaml
new file mode 100644
index 000000000..c9a1ed99b
--- /dev/null
+++ b/tests/benchmark/suites/gpu_search_performance_jaccard50m.yaml
@@ -0,0 +1,22 @@
+search_performance:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu/jaccard_50m_128_512_jaccard_ivf
+        cache_config.cpu_cache_capacity: 32
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: jaccard_50m_128_512_jaccard
+      run_count: 1
+      search_params:
+        nprobes: [8, 32]
+        top_ks: [1, 16, 64, 128, 256, 512, 1000]
+        nqs: [1, 10, 100, 200, 500, 1000]
\ No newline at end of file
diff --git a/tests/benchmark/suites/gpu_search_performance_sift50m.yaml b/tests/benchmark/suites/gpu_search_performance_sift50m.yaml
new file mode 100644
index 000000000..4f49bf493
--- /dev/null
+++ b/tests/benchmark/suites/gpu_search_performance_sift50m.yaml
@@ -0,0 +1,146 @@
+search_performance:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_ivf
+        cache_config.cpu_cache_capacity: 32
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 200
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_50m_1024_128_l2
+      run_count: 2
+      search_params:
+        nprobes: [8, 32]
+        top_ks: [1, 10, 100, 1000]
+        nqs: [1, 10, 100, 200, 500, 1000]
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8
+    #     cache_config.cpu_cache_capacity: 16
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: sift_50m_1024_128_l2
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8, 32]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8h
+    #     cache_config.cpu_cache_capacity: 16
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: sift_50m_1024_128_l2
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8, 32]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+
+    # git issue num: #626
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_pq
+    #     cache_config.cpu_cache_capacity: 32
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: sift_50m_1024_128_l2
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8, 32]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+    
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_nsg
+    #     cache_config.cpu_cache_capacity: 50
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: sift_50m_1024_128_l2
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_8192
+    #     cache_config.cpu_cache_capacity: 16
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: sift_50m_1024_128_l2
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8, 32]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
+    # -
+    #   server:
+    #     db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_4096
+    #     cache_config.cpu_cache_capacity: 16
+    #     engine_config.use_blas_threshold: 1100
+    #     engine_config.gpu_search_threshold: 200
+    #     gpu_resource_config.enable: true
+    #     gpu_resource_config.cache_capacity: 4
+    #     gpu_resource_config.search_resources:
+    #       - gpu0
+    #       - gpu1
+    #     gpu_resource_config.build_index_resources:
+    #       - gpu0
+    #       - gpu1
+    #   collection_name: sift_50m_1024_128_l2
+    #   run_count: 2
+    #   search_params:
+    #     nprobes: [8, 32]
+    #     top_ks: [1, 10, 100, 1000]
+    #     nqs: [1, 10, 100, 200, 500, 1000]
\ No newline at end of file
diff --git a/tests/benchmark/suites/gpu_search_stability.yaml b/tests/benchmark/suites/gpu_search_stability.yaml
new file mode 100644
index 000000000..a41ea817c
--- /dev/null
+++ b/tests/benchmark/suites/gpu_search_stability.yaml
@@ -0,0 +1,23 @@
+search_stability:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_50m_1024_128_l2_sq8
+        cache_config.cpu_cache_capacity: 50
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+          - gpu2
+          - gpu3
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: sift_50m_1024_128_l2
+      during_time: 240
+      search_params:
+        nprobes: 1-200
+        top_ks: 1-200
+        nqs: 1-200
diff --git a/tests/benchmark/suites/gpu_stability_sift50m.yaml b/tests/benchmark/suites/gpu_stability_sift50m.yaml
new file mode 100644
index 000000000..56faf7b20
--- /dev/null
+++ b/tests/benchmark/suites/gpu_stability_sift50m.yaml
@@ -0,0 +1,27 @@
+stability:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8h_stability
+        cache_config.cpu_cache_capacity: 64
+        cache_config.cache_insert_data: true
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 100
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+      collection_name: sift_50m_1024_128_l2
+      during_time: 480
+      search_params:
+        nprobes: 1-200
+        top_ks: 1-200
+        nqs: 1-200
+      # length of insert vectors
+      insert_xb: 100000
+      # insert after search 4 times
+      insert_interval: 4
\ No newline at end of file
diff --git a/tests/benchmark/suites/insert_binary.yaml b/tests/benchmark/suites/insert_binary.yaml
new file mode 100644
index 000000000..79fa2b356
--- /dev/null
+++ b/tests/benchmark/suites/insert_binary.yaml
@@ -0,0 +1,39 @@
+insert_performance:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_7/sub_50m_512_512_sub_wal
+        cache_config.cpu_cache_capacity: 8
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: sub_50m_512_512_sub
+      ni_per: 100000
+      build_index: false
+      index_type: flat
+      index_param:
+        nlist: 2048
+
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_7/super_50m_512_512_super_wal
+        cache_config.cpu_cache_capacity: 8
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: super_50m_512_512_super
+      ni_per: 100000
+      build_index: false
+      index_type: flat
+      index_param:
+        nlist: 2048
diff --git a/tests/benchmark/suites/insert_performance_deep1b.yaml b/tests/benchmark/suites/insert_performance_deep1b.yaml
new file mode 100644
index 000000000..27dc83c63
--- /dev/null
+++ b/tests/benchmark/suites/insert_performance_deep1b.yaml
@@ -0,0 +1,87 @@
+insert_performance:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_ivf
+        cache_config.cpu_cache_capacity: 8
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: deep_1b_1024_96_ip
+      ni_per: 100000
+      build_index: false
+      # index_type: ivf_flat
+      # nlist: 16384
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_sq8
+        cache_config.cpu_cache_capacity: 8
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: deep_1b_1024_96_ip
+      ni_per: 100000
+      build_index: false
+      # index_type: ivf_sq8
+      # nlist: 16384
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_sq8h
+        cache_config.cpu_cache_capacity: 8
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: deep_1b_1024_96_ip
+      ni_per: 100000
+      build_index: false
+      # index_type: ivf_sq8h
+      # nlist: 16384
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_pq
+        cache_config.cpu_cache_capacity: 8
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: deep_1b_1024_96_ip
+      ni_per: 100000
+      build_index: false
+      # index_type: ivf_pq
+      # nlist: 16384
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_nsg
+        cache_config.cpu_cache_capacity: 8
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+      collection_name: deep_1b_1024_96_ip
+      ni_per: 100000
+      build_index: false
+      # index_type: nsg
+      # nlist: 16384
\ No newline at end of file
diff --git a/tests/benchmark/suites/locust_cluster_search.yaml b/tests/benchmark/suites/locust_cluster_search.yaml
new file mode 100644
index 000000000..d7641819a
--- /dev/null
+++ b/tests/benchmark/suites/locust_cluster_search.yaml
@@ -0,0 +1,45 @@
+locust_search_performance:
+  collections:
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/sift_1m_128_l2_2
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 8GB
+        cache_config.insert_buffer_size: 2GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      collection_name: sift_1m_128_l2_2
+      ni_per: 50000
+      build_index: true
+      index_type: ivf_sq8
+      index_param:
+        nlist: 16384
+      task:
+        connection_num: 1
+        clients_num: 100
+        hatch_rate: 10
+        during_time: 10
+        types:
+          -
+            type: query
+            weight: 1
+            params:
+              top_k: 10
+              nq: 1
+              # filters:
+              #   -
+              #     range:
+              #       int64:
+              #         LT: 0
+              #         GT: 1000000
+              search_param:
+                nprobe: 16
diff --git a/tests/benchmark/suites/locust_insert.yaml b/tests/benchmark/suites/locust_insert.yaml
new file mode 100644
index 000000000..cdbdeaca2
--- /dev/null
+++ b/tests/benchmark/suites/locust_insert.yaml
@@ -0,0 +1,23 @@
+locust_insert_performance:
+  collections:
+    - 
+      server:
+        cache_config.cpu_cache_capacity: 8GB
+        cache_config.insert_buffer_size: 2GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+      task: 
+        type: insert
+        connection_num: 1
+        clients_num: 10
+        hatch_rate: 5
+        during_time: 2m
diff --git a/tests/benchmark/suites/locust_mix.yaml b/tests/benchmark/suites/locust_mix.yaml
new file mode 100644
index 000000000..71be3afd6
--- /dev/null
+++ b/tests/benchmark/suites/locust_mix.yaml
@@ -0,0 +1,47 @@
+locust_mix_performance:
+  collections:
+    -
+      milvus:
+        cache_config.insert_buffer_size: 2GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: true
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+      collection_name: sift_1m_500000_128_l2
+      other_fields: int
+      ni_per: 50000
+      build_index: true
+      index_type: ivf_sq8
+      index_param:
+        nlist: 1024
+      task:
+        types:
+          - type: flush
+            weight: 1
+          -
+            type: query
+            weight: 20
+            params:
+              top_k: 10
+              nq: 100
+              filters:
+                - range:
+                    int64:
+                      LT: 0
+                      GT: 1000000
+              search_param:
+                nprobe: 16
+          -
+            type: insert
+            weight: 10
+            params:
+              nb: 1
+        connection_num: 1
+        clients_num: 10
+        hatch_rate: 2
+        during_time: 600 
diff --git a/tests/benchmark/suites/locust_search.yaml b/tests/benchmark/suites/locust_search.yaml
new file mode 100644
index 000000000..eac80389c
--- /dev/null
+++ b/tests/benchmark/suites/locust_search.yaml
@@ -0,0 +1,49 @@
+locust_search_performance:
+  collections:
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_011/cluster/sift_1m_128_l2_2
+        suffix_path: true
+        cache_config.cpu_cache_capacity: 8GB
+        cache_config.insert_buffer_size: 2GB
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.cache_capacity: 4GB
+        gpu_resource_config.search_resources:
+          - gpu0
+          - gpu1
+        gpu_resource_config.build_index_resources:
+          - gpu0
+          - gpu1
+        wal_enable: true
+        cluster: true
+        external_mysql: true
+        readonly:
+            replicas: 1
+      collection_name: sift_1m_128_l2_2
+      ni_per: 50000
+      build_index: true
+      index_type: ivf_sq8
+      index_param:
+        nlist: 16384
+      task:
+        connection_num: 2
+        clients_num: 100
+        hatch_rate: 10
+        during_time: 3600
+        types:
+          -
+            type: query
+            weight: 1
+            params:
+              top_k: 10
+              nq: 1
+              # filters:
+              #   -
+              #     range:
+              #       int64:
+              #         LT: 0
+              #         GT: 1000000
+              search_param:
+                nprobe: 16
diff --git a/tests/benchmark/suites/loop_stability.yaml b/tests/benchmark/suites/loop_stability.yaml
new file mode 100644
index 000000000..a304695fe
--- /dev/null
+++ b/tests/benchmark/suites/loop_stability.yaml
@@ -0,0 +1,17 @@
+loop_stability:
+  collections:
+    - 
+      server:
+        suffix_path: true 
+        db_config.primary_path: /test/milvus/db_data_11/loop_stability
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 10
+        gpu_resource_config.enable: true 
+        gpu_resource_config.cache_capacity: 2GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+      pull_interval: 20
+      collection_num: 4
diff --git a/tests/benchmark/suites/shards_ann_debug.yaml b/tests/benchmark/suites/shards_ann_debug.yaml
new file mode 100644
index 000000000..ba5db54eb
--- /dev/null
+++ b/tests/benchmark/suites/shards_ann_debug.yaml
@@ -0,0 +1,25 @@
+ann_accuracy:
+  collections:
+    -
+      source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
+      collection_name: sift_128_euclidean
+      index_file_sizes: [1024]
+      index_types: ['flat', 'ivf_sq8']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 32, 512]
+
+    -
+      source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
+      collection_name: glove_200_angular
+      index_file_sizes: [1024]
+      index_types: ['flat', 'ivf_sq8']
+      index_params:
+        nlist: [16384]
+      top_ks: [10]
+      nqs: [10000]
+      search_params:
+        nprobe: [1, 32, 512]
diff --git a/tests/benchmark/suites/shards_insert_performance.yaml b/tests/benchmark/suites/shards_insert_performance.yaml
new file mode 100644
index 000000000..eada67a3b
--- /dev/null
+++ b/tests/benchmark/suites/shards_insert_performance.yaml
@@ -0,0 +1,17 @@
+insert_performance:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_8/shards_sift_1m_128_128_l2_insert
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false 
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+      collection_name: sift_1m_128_128_l2
+      ni_per: 10000
+      build_index: false
+      index_type: flat
diff --git a/tests/benchmark/suites/shards_insert_performance_sift1m.yaml b/tests/benchmark/suites/shards_insert_performance_sift1m.yaml
new file mode 100644
index 000000000..dbc2929fd
--- /dev/null
+++ b/tests/benchmark/suites/shards_insert_performance_sift1m.yaml
@@ -0,0 +1,19 @@
+insert_performance:
+  collections:
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_010/shards_sift_1m_128_128_l2_insert
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 1
+        gpu_resource_config.enable: false
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+      collection_name: sift_1m_1024_128_l2
+      ni_per: 10000
+      build_index: true
+      index_type: ivf_sq8
+      index_param:
+        nlist: 16384
diff --git a/tests/benchmark/suites/shards_loop_stability.yaml b/tests/benchmark/suites/shards_loop_stability.yaml
new file mode 100644
index 000000000..4494b8f4b
--- /dev/null
+++ b/tests/benchmark/suites/shards_loop_stability.yaml
@@ -0,0 +1,16 @@
+loop_stability:
+  collections:
+    - 
+      server:
+        db_config.primary_path: /test/milvus/db_data_8/shards_loop_stability
+        engine_config.use_blas_threshold: 1100
+        engine_config.gpu_search_threshold: 10
+        gpu_resource_config.enable: true 
+        gpu_resource_config.cache_capacity: 2GB
+        gpu_resource_config.search_resources:
+          - gpu0
+        gpu_resource_config.build_index_resources:
+          - gpu0
+        wal_enable: true
+      pull_interval: 2 
+      collection_num: 2
diff --git a/tests/benchmark/suites/shards_search_performance_sift1m.yaml b/tests/benchmark/suites/shards_search_performance_sift1m.yaml
new file mode 100644
index 000000000..71ed51ce7
--- /dev/null
+++ b/tests/benchmark/suites/shards_search_performance_sift1m.yaml
@@ -0,0 +1,12 @@
+search_performance:
+  collections:
+    -
+      server:
+        db_config.primary_path: /test/milvus/db_data_010/shards_sift_1m_128_128_l2_insert
+        wal_enable: true
+      collection_name: sift_1m_1024_128_l2
+      run_count: 2
+      top_ks: [1, 10, 100]
+      nqs: [1, 10, 100]
+      search_params:
+        - nprobe: 8
diff --git a/tests/benchmark/task/task.py b/tests/benchmark/task/task.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/benchmark/test_loop.py b/tests/benchmark/test_loop.py
new file mode 100644
index 000000000..224c796a0
--- /dev/null
+++ b/tests/benchmark/test_loop.py
@@ -0,0 +1,52 @@
+import time
+import random
+import logging
+from client import MilvusClient
+import utils
+
+
if __name__ == "__main__":
    # Loop-stability smoke test: create a few collections, then hammer them
    # with random tasks forever, restarting the task mix every interval.
    milvus_instance = MilvusClient()
    milvus_instance.clean_db()
    # NOTE: `collection_num` and `pull_interval_seconds` were previously
    # undefined (NameError at runtime); defaults added here.
    collection_num = 2
    pull_interval_seconds = 600
    dimension = 128
    insert_xb = 100000
    index_types = ['flat']
    index_param = {"nlist": 2048}
    collection_names = []
    milvus_instances_map = {}
    # One shared pool of random vectors reused for every collection.
    insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(insert_xb)]

    for i in range(collection_num):
        name = utils.get_unique_name(prefix="collection_")
        print(name)
        collection_names.append(name)
        metric_type = "ip"
        index_file_size = random.randint(10, 100)
        milvus_instance.create_collection(name, dimension, index_file_size, metric_type)
        milvus_instance = MilvusClient(collection_name=name)
        index_type = random.choice(index_types)
        milvus_instance.create_index(index_type, index_param=index_param)
        insert_vectors = utils.normalize(metric_type, insert_vectors)
        milvus_instance.insert(insert_vectors)
        milvus_instance.flush()
        milvus_instances_map.update({name: milvus_instance})
        print(milvus_instance.describe_index(), milvus_instance.describe(), milvus_instance.count())

    tasks = ["insert_rand", "query_rand", "flush"]
    i = 1
    while True:
        print("Loop time: %d" % i)
        start_time = time.time()
        while time.time() - start_time < pull_interval_seconds:
            # Pick a random collection and a random task, then run it.
            tmp_collection_name = random.choice(collection_names)
            task_name = random.choice(tasks)
            func = getattr(milvus_instances_map[tmp_collection_name], task_name)
            func()
        print("Restart")
        i = i + 1
diff --git a/tests/benchmark/utils.py b/tests/benchmark/utils.py
new file mode 100644
index 000000000..3ee7f5e74
--- /dev/null
+++ b/tests/benchmark/utils.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+import os
+import sys
+import pdb
+import time
+import json
+import datetime
+import argparse
+import threading
+import logging
+import string
+import random
+# import multiprocessing
+import numpy as np
+# import psutil
+import sklearn.preprocessing
+import h5py
+# import docker
+from yaml import full_load, dump
+import yaml
+import tableprint as tp
+from pprint import pprint
+from milvus import DataType
+
+logger = logging.getLogger("milvus_benchmark.utils")
+
+DEFAULT_F_FIELD_NAME = 'float_vector'
+DEFAULT_B_FIELD_NAME = 'binary_vector'
+DEFAULT_INT_FIELD_NAME = 'int64'
+DEFAULT_FLOAT_FIELD_NAME = 'float'
+
# Short metric aliases used in benchmark suites -> Milvus metric constants.
METRIC_MAP = {
    "l2": "L2",
    "ip": "IP",
    "jaccard": "JACCARD",
    "hamming": "HAMMING",
    "sub": "SUBSTRUCTURE",
    "super": "SUPERSTRUCTURE"
}


def metric_type_trans(metric_type):
    """Translate a short metric alias (e.g. "l2") to the Milvus metric name.

    Raises Exception when the alias is not in METRIC_MAP.
    """
    try:
        return METRIC_MAP[metric_type]
    except KeyError:
        raise Exception("metric_type: %s not in METRIC_MAP" % metric_type)
+
+
def timestr_to_int(time_str):
    """Convert a duration like 30, "30", "30s", "2m" or "1h" to seconds.

    Raises Exception for any other suffix.
    """
    if isinstance(time_str, int) or time_str.isdigit():
        return int(time_str)
    for suffix, factor in (("s", 1), ("m", 60), ("h", 60 * 60)):
        if time_str.endswith(suffix):
            return int(time_str[:-1]) * factor
    raise Exception("%s not support" % time_str)
+
+
# Marker subclass of str: values wrapped in literal_str are dumped by PyYAML
# as literal block scalars ('|') via the representer registered below.
class literal_str(str): pass


def change_style(style, representer):
    # Wrap a PyYAML representer so the emitted scalar node uses the given
    # style character (e.g. '|' for literal block style).
    def new_representer(dumper, data):
        scalar = representer(dumper, data)
        scalar.style = style
        return scalar

    return new_representer


from yaml.representer import SafeRepresenter

# represent_str does handle some corner cases, so use that
# instead of calling represent_scalar directly
represent_literal_str = change_style('|', SafeRepresenter.represent_str)

yaml.add_representer(literal_str, represent_literal_str)
+
+
def retry(times):
    """Decorator: retry the wrapped function until it returns a truthy value.

    The call is attempted up to `times` times; a falsy result or an
    exception counts as a failure and triggers a 3 second back-off.
    The first truthy result is returned directly.  If every attempt
    fails, the last failure is re-raised.

    NOTE(review): the previous implementation fell through to
    `return func(*args, **kwargs)` after a successful attempt, executing
    the function a second time (duplicate side effects); fixed here.
    """
    def wrapper(func):
        def newfn(*args, **kwargs):
            last_error = None
            for attempt in range(times):
                try:
                    print("retry {} times".format(attempt + 1))
                    result = func(*args, **kwargs)
                    if result:
                        # Success: return immediately, do NOT call again.
                        return result
                    logger.error("Retry failed")
                    raise Exception("Result false")
                except Exception as e:
                    last_error = e
                    logger.info(str(e))
                    time.sleep(3)
            if last_error is None:
                # times <= 0: preserve historical behaviour of one
                # final unguarded call.
                return func(*args, **kwargs)
            raise last_error
        return newfn
    return wrapper
+
+
# NOTE(review): a second, byte-identical definition of timestr_to_int
# previously lived here; removed as an accidental duplicate of the
# definition above (the later def silently shadowed the earlier one).
+
+
def get_default_field_name(data_type=DataType.FLOAT_VECTOR):
    """Return the default schema field name for the given milvus DataType.

    Raises Exception for unsupported data types.
    """
    if data_type == DataType.FLOAT_VECTOR:
        return DEFAULT_F_FIELD_NAME
    if data_type == DataType.BINARY_VECTOR:
        return DEFAULT_B_FIELD_NAME
    if data_type == DataType.INT64:
        return DEFAULT_INT_FIELD_NAME
    if data_type == DataType.FLOAT:
        return DEFAULT_FLOAT_FIELD_NAME
    logger.error(data_type)
    raise Exception("Not supported data type")
+
+
def normalize(metric_type, X):
    """Preprocess vectors for the given metric type.

    "ip": L2-normalize each row (returns lists of floats).
    Binary metrics ("jaccard", "hamming", "sub", "super"): pack each
    0/1 vector into a bytes object.  Any other metric returns X as-is.
    """
    if metric_type == "ip":
        logger.info("Set normalize for metric_type: %s" % metric_type)
        return sklearn.preprocessing.normalize(X, axis=1, norm='l2').tolist()
    if metric_type in ["jaccard", "hamming", "sub", "super"]:
        return [bytes(np.packbits(row, axis=-1).tolist()) for row in X]
    return X
+
+
def convert_nested(dct):
    """Expand dotted keys into nested dicts.

    {"a.b.c": 1} -> {"a": {"b": {"c": 1}}}
    """
    result = dict()
    for flat_key, value in dct.items():
        node = result
        parts = flat_key.split(".")
        # Walk/create intermediate dicts, then set the leaf value.
        for part in parts[:-1]:
            node = node.setdefault(part, dict())
        node[parts[-1]] = value
    return result
+
+
def get_unique_name(prefix=None):
    """Return `prefix` plus 8 random lowercase alphanumeric characters."""
    if prefix is None:
        prefix = "milvus-benchmark-test-"
    suffix = "".join(random.choices(string.ascii_letters + string.digits, k=8))
    return prefix + suffix.lower()
+
+
def get_current_time():
    """Return the local time formatted as 'YYYY-mm-dd HH:MM:SS'."""
    now = time.localtime()
    return time.strftime('%Y-%m-%d %H:%M:%S', now)
+
+
def print_table(headers, columns, data):
    """Render a table via tableprint: one body row per entry in `columns`,
    each prefixed with its column label and followed by its `data` row."""
    rows = [[label] + list(data[idx]) for idx, label in enumerate(columns)]
    tp.table(rows, headers)
+
+
def get_dataset(hdf5_file_path):
    """Open and return the HDF5 dataset at `hdf5_file_path`.

    Raises Exception when the path does not exist.
    """
    if not os.path.exists(hdf5_file_path):
        raise Exception("%s not existed" % hdf5_file_path)
    return h5py.File(hdf5_file_path)
+
+
def modify_config(k, v, type=None, file_path="conf/server_config.yaml", db_slave=None):
    """Set one server-config value and rewrite the YAML file.

    The key `k` is substring-matched against known option names and routed
    into the matching section of the config document.
    NOTE(review): `type` and `db_slave` are currently unused — confirm
    whether any caller still relies on them.
    """
    if not os.path.isfile(file_path):
        raise Exception('File: %s not found' % file_path)
    with open(file_path) as f:
        config_dict = full_load(f)
        # redundant: the `with` block already closes the file
        f.close()
    if config_dict:
        # Substring matching: e.g. any key containing "use_blas_threshold".
        if k.find("use_blas_threshold") != -1:
            config_dict['engine_config']['use_blas_threshold'] = int(v)
        elif k.find("use_gpu_threshold") != -1:
            config_dict['engine_config']['gpu_search_threshold'] = int(v)
        elif k.find("cpu_cache_capacity") != -1:
            config_dict['cache_config']['cpu_cache_capacity'] = int(v)
        elif k.find("enable_gpu") != -1:
            config_dict['gpu_resource_config']['enable'] = v
        elif k.find("gpu_cache_capacity") != -1:
            config_dict['gpu_resource_config']['cache_capacity'] = int(v)
        elif k.find("index_build_device") != -1:
            config_dict['gpu_resource_config']['build_index_resources'] = v
        elif k.find("search_resources") != -1:
            config_dict['resource_config']['resources'] = v

        # if db_slave:
        #     config_dict['db_config']['db_slave_path'] = MULTI_DB_SLAVE_PATH
        with open(file_path, 'w') as f:
            dump(config_dict, f, default_flow_style=False)
        # redundant, see above
        f.close()
    else:
        raise Exception('Load file:%s error' % file_path)
+
+
# update server_config.yaml
def update_server_config(file_path, server_config):
    """Apply `server_config` overrides to the server YAML at `file_path`.

    Keys are matched by substring against known config names and routed
    into the appropriate section of the YAML document; the file is
    rewritten once after all overrides are applied (previously it was
    re-dumped inside the loop, once per key).

    NOTE(review): matching is by substring (`k.find(...)`), so a suite key
    such as "wal_enable" also matches the "enable" branch and lands in
    gpu_resource_config['enable'] — confirm whether that is intended.
    """
    if not os.path.isfile(file_path):
        raise Exception('File: %s not found' % file_path)
    with open(file_path) as f:
        values_dict = full_load(f)
    for k, v in server_config.items():
        if k.find("primary_path") != -1:
            values_dict["db_config"]["primary_path"] = v
        elif k.find("use_blas_threshold") != -1:
            values_dict['engine_config']['use_blas_threshold'] = int(v)
        elif k.find("gpu_search_threshold") != -1:
            values_dict['engine_config']['gpu_search_threshold'] = int(v)
        elif k.find("cpu_cache_capacity") != -1:
            values_dict['cache_config']['cpu_cache_capacity'] = int(v)
        elif k.find("cache_insert_data") != -1:
            values_dict['cache_config']['cache_insert_data'] = v
        elif k.find("enable") != -1:
            values_dict['gpu_resource_config']['enable'] = v
        elif k.find("gpu_cache_capacity") != -1:
            values_dict['gpu_resource_config']['cache_capacity'] = int(v)
        elif k.find("build_index_resources") != -1:
            values_dict['gpu_resource_config']['build_index_resources'] = v
        elif k.find("search_resources") != -1:
            values_dict['gpu_resource_config']['search_resources'] = v
    # Write once, after all overrides have been applied.
    with open(file_path, 'w') as f:
        dump(values_dict, f, default_flow_style=False)
diff --git a/tests/python_test/.dockerignore b/tests/python_test/.dockerignore
new file mode 100644
index 000000000..c97d9d043
--- /dev/null
+++ b/tests/python_test/.dockerignore
@@ -0,0 +1,14 @@
+node_modules
+npm-debug.log
+Dockerfile*
+docker-compose*
+.dockerignore
+.git
+.gitignore
+.env
+*/bin
+*/obj
+README.md
+LICENSE
+.vscode
+__pycache__
\ No newline at end of file
diff --git a/tests/python_test/.gitignore b/tests/python_test/.gitignore
new file mode 100644
index 000000000..9bd7345e5
--- /dev/null
+++ b/tests/python_test/.gitignore
@@ -0,0 +1,13 @@
+.python-version
+.pytest_cache
+__pycache__
+.vscode
+.idea
+
+test_out/
+*.pyc
+
+db/
+logs/
+
+.coverage
diff --git a/tests/python_test/Dockerfile b/tests/python_test/Dockerfile
new file mode 100644
index 000000000..dbc3f14ce
--- /dev/null
+++ b/tests/python_test/Dockerfile
@@ -0,0 +1,15 @@
+FROM python:3.6.8-jessie
+
+LABEL Name=megasearch_engine_test Version=0.0.1
+
+WORKDIR /app
+COPY . /app
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    libc-dev build-essential && \
+    python3 -m pip install -r requirements.txt && \
+    apt-get remove --purge -y && \
+    rm -rf /var/lib/apt/lists/*
+
+ENTRYPOINT [ "/app/docker-entrypoint.sh" ]
+CMD [ "start" ]
\ No newline at end of file
diff --git a/tests/python_test/README.md b/tests/python_test/README.md
new file mode 100644
index 000000000..3e8b45a18
--- /dev/null
+++ b/tests/python_test/README.md
@@ -0,0 +1,62 @@
+## Requirements
+* python 3.6.8+
+* pip install -r requirements.txt
+
+## How to Build Test Env
+```shell
+sudo docker pull registry.zilliz.com/milvus/milvus-test:v0.2
+sudo docker run -it -v /home/zilliz:/home/zilliz -d registry.zilliz.com/milvus/milvus-test:v0.2
+```
+
+## How to Create Test Env docker in k8s
+```shell
+# 1. start milvus k8s pod
+cd milvus-helm/charts/milvus
+helm install --wait --timeout 300s \
+  --set image.repository=registry.zilliz.com/milvus/engine \
+  --set persistence.enabled=true \
+  --set image.tag=PR-3818-gpu-centos7-release \
+  --set image.pullPolicy=Always \
+  --set service.type=LoadBalancer \
+  -f ci/db_backend/mysql_gpu_values.yaml \
+  -f ci/filebeat/values.yaml \
+  -f test.yaml \
+  --namespace milvus \
+  milvus-ci-pr-3818-1-single-centos7-gpu .
+
+# 2. remove milvus k8s pod
+helm uninstall -n milvus milvus-test
+
+# 3. check k8s pod status
+kubectl get svc -n milvus -w milvus-test
+
+# 4. login to pod
+kubectl get pods --namespace milvus
+kubectl exec -it milvus-test-writable-6cc49cfcd4-rbrns -n milvus bash
+```
+
+## How to Run Test cases
+```shell
+# Test level-1 cases
+pytest . --level=1 --ip=127.0.0.1 --port=19530
+
+# Test level-1 cases in 'test_connect.py' only
+pytest test_connect.py --level=1
+```
+
+## How to list test cases
+```shell
+# List all cases
+pytest --dry-run -qq
+
+# Collect all cases with docstring
+pytest --collect-only -qq
+
+# Create test report with allure
+pytest --alluredir=test_out . -q -v
+allure serve test_out
+```
+
+## Contribution getting started
+* Follow PEP-8 for naming and black for formatting.
+
diff --git a/tests/python_test/collection/test_collection_count.py b/tests/python_test/collection/test_collection_count.py
new file mode 100644
index 000000000..7e2bece23
--- /dev/null
+++ b/tests/python_test/collection/test_collection_count.py
@@ -0,0 +1,575 @@
+import pdb
+import copy
+import logging
+import itertools
+from time import sleep
+import threading
+from multiprocessing import Process
+import sklearn.preprocessing
+
+import pytest
+from utils import *
+from constants import *
+
uid = "collection_count"  # NOTE(review): presumably the name prefix for generated collections — confirm against conftest
tag = "collection_count_tag"  # partition tag shared by the partition tests below
+
class TestCollectionCount:
    """
    Row-count tests for a float-vector collection (L2 metric).

    params means different nb, the nb value may trigger merge, or not
    """
    @pytest.fixture(
        scope="function",
        params=[
            1,
            1000,
            2001
        ],
    )
    def insert_count(self, request):
        # Number of entities inserted per test; the spread of values is
        # meant to exercise the merge / no-merge paths mentioned above.
        yield request.param

    """
    generate valid create_index params
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        # Skip GPU-only index types when the server reports CPU mode.
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] in index_cpu_not_support():
                pytest.skip("sq8h not support in cpu mode")
        request.param.update({"metric_type": "L2"})
        return request.param

    def test_collection_count(self, connect, collection, insert_count):
        '''
        target: test collection rows_count is correct or not
        method: create collection and add vectors in it,
            assert that row_count from get_collection_stats equals the
            number of inserted vectors
        expected: the count is equal to the length of vectors
        '''
        entities = gen_entities(insert_count)
        res = connect.insert(collection, entities)
        connect.flush([collection])
        # res = connect.count_entities(collection)
        # assert res == insert_count
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == insert_count

    def test_collection_count_partition(self, connect, collection, insert_count):
        '''
        target: test collection rows_count is correct or not
        method: create collection, create partition and add vectors in it,
            assert that row_count from get_collection_stats equals the
            number of inserted vectors
        expected: the count is equal to the length of vectors
        '''
        entities = gen_entities(insert_count)
        connect.create_partition(collection, tag)
        res_ids = connect.insert(collection, entities, partition_tag=tag)
        connect.flush([collection])
        # res = connect.count_entities(collection)
        # assert res == insert_count
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == insert_count

    # def test_collection_count_multi_partitions_A(self, connect, collection, insert_count):
    #     '''
    #     target: test collection rows_count is correct or not
    #     method: create collection, create partitions and add entities in it,
    #         assert the value returned by count_entities method is equal to length of entities
    #     expected: the count is equal to the length of entities
    #     '''
    #     new_tag = "new_tag"
    #     entities = gen_entities(insert_count)
    #     connect.create_partition(collection, tag)
    #     connect.create_partition(collection, new_tag)
    #     res_ids = connect.insert(collection, entities)
    #     connect.flush([collection])
    #     # res = connect.count_entities(collection)
    #     # assert res == insert_count
    #     stats = connect.get_collection_stats(collection)
    #     assert stats["row_count"] == insert_count

    # def test_collection_count_multi_partitions_B(self, connect, collection, insert_count):
    #     '''
    #     target: test collection rows_count is correct or not
    #     method: create collection, create partitions and add entities in one of the partitions,
    #         assert the value returned by count_entities method is equal to length of entities
    #     expected: the count is equal to the length of entities
    #     '''
    #     new_tag = "new_tag"
    #     entities = gen_entities(insert_count)
    #     connect.create_partition(collection, tag)
    #     connect.create_partition(collection, new_tag)
    #     res_ids = connect.insert(collection, entities, partition_tag=tag)
    #     connect.flush([collection])
    #     # res = connect.count_entities(collection)
    #     # assert res == insert_count
    #     stats = connect.get_collection_stats(collection)
    #     assert stats["row_count"] == insert_count

    # def test_collection_count_multi_partitions_C(self, connect, collection, insert_count):
    #     '''
    #     target: test collection rows_count is correct or not
    #     method: create collection, create partitions and add entities in one of the partitions,
    #         assert the value returned by count_entities method is equal to length of entities
    #     expected: the count is equal to the length of vectors
    #     '''
    #     new_tag = "new_tag"
    #     entities = gen_entities(insert_count)
    #     connect.create_partition(collection, tag)
    #     connect.create_partition(collection, new_tag)
    #     res_ids = connect.insert(collection, entities)
    #     res_ids_2 = connect.insert(collection, entities, partition_tag=tag)
    #     connect.flush([collection])
    #     # res = connect.count_entities(collection)
    #     # assert res == insert_count * 2
    #     stats = connect.get_collection_stats(collection)
    #     assert stats["row_count"] == insert_count * 2

    # def test_collection_count_multi_partitions_D(self, connect, collection, insert_count):
    #     '''
    #     target: test collection rows_count is correct or not
    #     method: create collection, create partitions and add entities in one of the partitions,
    #         assert the value returned by count_entities method is equal to length of entities
    #     expected: the collection count is equal to the length of entities
    #     '''
    #     new_tag = "new_tag"
    #     entities = gen_entities(insert_count)
    #     connect.create_partition(collection, tag)
    #     connect.create_partition(collection, new_tag)
    #     res_ids = connect.insert(collection, entities, partition_tag=tag)
    #     res_ids2 = connect.insert(collection, entities, partition_tag=new_tag)
    #     connect.flush([collection])
    #     # res = connect.count_entities(collection)
    #     # assert res == insert_count * 2
    #     stats = connect.get_collection_stats(collection)
    #     assert stats["row_count"] == insert_count * 2

    def test_collection_count_after_index_created(self, connect, collection, get_simple_index, insert_count):
        '''
        target: test row count after an index has been created
        method: insert entities, flush, create an index, then read
            row_count from get_collection_stats
        expected: the count equals insert_count (index creation must not
            change the row count)
        '''
        entities = gen_entities(insert_count)
        res = connect.insert(collection, entities)
        connect.flush([collection])
        connect.create_index(collection, default_float_vec_field_name, get_simple_index)
        # res = connect.count_entities(collection)
        # assert res == insert_count
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == insert_count

    def test_count_without_connection(self, collection, dis_connect):
        '''
        target: test count_entities, without connection
        method: calling count_entities with correct params, with a disconnected instance
        expected: count_entities raise exception
        '''
        with pytest.raises(Exception) as e:
            dis_connect.count_entities(collection)

    def test_collection_count_no_vectors(self, connect, collection):
        '''
        target: test collection rows_count is correct or not, if collection is empty
        method: create collection with no vectors in it,
            assert that row_count from get_collection_stats is 0
        expected: the count is equal to 0
        '''
        # res = connect.count_entities(collection)
        # assert res == 0
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == 0
+
+
class TestCollectionCountIP:
    """
    Row-count tests mirroring TestCollectionCount, but with IP-metric indexes.

    params means different nb, the nb value may trigger merge, or not
    """
    @pytest.fixture(
        scope="function",
        params=[
            1,
            1000,
            2001
        ],
    )
    def insert_count(self, request):
        # Number of entities inserted per test; the spread of values is
        # meant to exercise the merge / no-merge paths mentioned above.
        yield request.param

    """
    generate valid create_index params
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        # Skip GPU-only index types when the server reports CPU mode.
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] in index_cpu_not_support():
                pytest.skip("sq8h not support in cpu mode")
        request.param.update({"metric_type": "IP"})
        return request.param

    def test_collection_count_after_index_created(self, connect, collection, get_simple_index, insert_count):
        '''
        target: test row count after an IP-metric index has been created
        method: insert entities, flush, create an index, then read
            row_count from get_collection_stats
        expected: the count equals insert_count (index creation must not
            change the row count)
        '''
        entities = gen_entities(insert_count)
        res = connect.insert(collection, entities)
        connect.flush([collection])
        # NOTE(review): `field_name` comes in via `from constants import *`;
        # the L2 variant above uses default_float_vec_field_name — confirm
        # both refer to the same float-vector field.
        connect.create_index(collection, field_name, get_simple_index)
        # res = connect.count_entities(collection)
        # assert res == insert_count
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == insert_count
+
+class TestCollectionCountBinary:
+    """
+    params means different nb, the nb value may trigger merge, or not
+    """
+    @pytest.fixture(
+        scope="function",
+        params=[
+            1,
+            1000,
+            2001
+        ],
+    )
+    def insert_count(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_jaccard_index(self, request, connect):
+        if request.param["index_type"] in binary_support():
+            request.param["metric_type"] = "JACCARD"
+            return request.param
+        else:
+            pytest.skip("Skip index")
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_hamming_index(self, request, connect):
+        if request.param["index_type"] in binary_support():
+            request.param["metric_type"] = "HAMMING"
+            return request.param
+        else:
+            pytest.skip("Skip index")
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_substructure_index(self, request, connect):
+        if request.param["index_type"] == "FLAT":
+            request.param["metric_type"] = "SUBSTRUCTURE"
+            return request.param
+        else:
+            pytest.skip("Skip index")
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_superstructure_index(self, request, connect):
+        if request.param["index_type"] == "FLAT":
+            request.param["metric_type"] = "SUPERSTRUCTURE"
+            return request.param
+        else:
+            pytest.skip("Skip index")
+
+    def test_collection_count(self, connect, binary_collection, insert_count):
+        '''
+        target: test collection rows_count is correct or not
+        method: create collection and add entities in it,
+            assert the value returned by count_entities method is equal to length of entities
+        expected: the count is equal to the length of entities
+        '''
+        raw_vectors, entities = gen_binary_entities(insert_count)
+        res = connect.insert(binary_collection, entities)
+        logging.getLogger().info(len(res))
+        connect.flush([binary_collection])
+        # res = connect.count_entities(binary_collection)
+        # assert res == insert_count
+        stats = connect.get_collection_stats(binary_collection)
+        assert stats["row_count"] == insert_count
+
+    def test_collection_count_partition(self, connect, binary_collection, insert_count):
+        '''
+        target: test collection rows_count is correct or not
+        method: create collection, create partition and add entities in it,
+            assert the value returned by count_entities method is equal to length of entities
+        expected: the count is equal to the length of entities
+        '''
+        raw_vectors, entities = gen_binary_entities(insert_count)
+        connect.create_partition(binary_collection, tag)
+        res_ids = connect.insert(binary_collection, entities, partition_tag=tag)
+        connect.flush([binary_collection])
+        # res = connect.count_entities(binary_collection)
+        # assert res == insert_count
+        stats = connect.get_collection_stats(binary_collection)
+        assert stats["row_count"] == insert_count
+
+    # @pytest.mark.level(2)
+    # def test_collection_count_multi_partitions_A(self, connect, binary_collection, insert_count):
+    #     '''
+    #     target: test collection rows_count is correct or not
+    #     method: create collection, create partitions and add entities in it,
+    #         assert the value returned by count_entities method is equal to length of entities
+    #     expected: the count is equal to the length of entities
+    #     '''
+    #     new_tag = "new_tag"
+    #     raw_vectors, entities = gen_binary_entities(insert_count)
+    #     connect.create_partition(binary_collection, tag)
+    #     connect.create_partition(binary_collection, new_tag)
+    #     res_ids = connect.insert(binary_collection, entities)
+    #     connect.flush([binary_collection])
+    #     # res = connect.count_entities(binary_collection)
+    #     # assert res == insert_count
+    #     stats = connect.get_collection_stats(binary_collection)
+    #     assert stats["row_count"] == insert_count
+
+    # @pytest.mark.level(2)
+    # def test_collection_count_multi_partitions_B(self, connect, binary_collection, insert_count):
+    #     '''
+    #     target: test collection rows_count is correct or not
+    #     method: create collection, create partitions and add entities in one of the partitions,
+    #         assert the value returned by count_entities method is equal to length of entities
+    #     expected: the count is equal to the length of entities
+    #     '''
+    #     new_tag = "new_tag"
+    #     raw_vectors, entities = gen_binary_entities(insert_count)
+    #     connect.create_partition(binary_collection, tag)
+    #     connect.create_partition(binary_collection, new_tag)
+    #     res_ids = connect.insert(binary_collection, entities, partition_tag=tag)
+    #     connect.flush([binary_collection])
+    #     # res = connect.count_entities(binary_collection)
+    #     # assert res == insert_count
+    #     stats = connect.get_collection_stats(binary_collection)
+    #     assert stats["row_count"] == insert_count
+
+    # def test_collection_count_multi_partitions_C(self, connect, binary_collection, insert_count):
+    #     '''
+    #     target: test collection rows_count is correct or not
+    #     method: create collection, create partitions and add entities in one of the partitions,
+    #         assert the value returned by count_entities method is equal to length of entities
+    #     expected: the count is equal to the length of entities
+    #     '''
+    #     new_tag = "new_tag"
+    #     raw_vectors, entities = gen_binary_entities(insert_count)
+    #     connect.create_partition(binary_collection, tag)
+    #     connect.create_partition(binary_collection, new_tag)
+    #     res_ids = connect.insert(binary_collection, entities)
+    #     res_ids_2 = connect.insert(binary_collection, entities, partition_tag=tag)
+    #     connect.flush([binary_collection])
+    #     # res = connect.count_entities(binary_collection)
+    #     # assert res == insert_count * 2
+    #     stats = connect.get_collection_stats(binary_collection)
+    #     assert stats["row_count"] == insert_count * 2
+
+    # @pytest.mark.level(2)
+    # def test_collection_count_multi_partitions_D(self, connect, binary_collection, insert_count):
+    #     '''
+    #     target: test collection rows_count is correct or not
+    #     method: create collection, create partitions and add entities in one of the partitions,
+    #         assert the value returned by count_entities method is equal to length of entities
+    #     expected: the collection count is equal to the length of entities
+    #     '''
+    #     new_tag = "new_tag"
+    #     raw_vectors, entities = gen_binary_entities(insert_count)
+    #     connect.create_partition(binary_collection, tag)
+    #     connect.create_partition(binary_collection, new_tag)
+    #     res_ids = connect.insert(binary_collection, entities, partition_tag=tag)
+    #     res_ids2 = connect.insert(binary_collection, entities, partition_tag=new_tag)
+    #     connect.flush([binary_collection])
+    #     # res = connect.count_entities(binary_collection)
+    #     # assert res == insert_count * 2
+    #     stats = connect.get_collection_stats(binary_collection)
+    #     assert stats["row_count"] == insert_count * 2
+
+    # TODO: need to update and enable
+    def _test_collection_count_after_index_created(self, connect, binary_collection, get_jaccard_index, insert_count):
+        '''
+        target: test count_entities, after index have been created
+        method: add vectors in db, and create index, then calling count_entities with correct params 
+        expected: count_entities raise exception
+        '''
+        raw_vectors, entities = gen_binary_entities(insert_count)
+        res = connect.insert(binary_collection, entities)
+        connect.flush([binary_collection])
+        connect.create_index(binary_collection, field_name, get_jaccard_index)
+        # res = connect.count_entities(binary_collection)
+        # assert res == insert_count
+        stats = connect.get_collection_stats(binary_collection)
+        assert stats["row_count"] == insert_count
+
+    # TODO: need to update and enable
+    def _test_collection_count_after_index_created(self, connect, binary_collection, get_hamming_index, insert_count):
+        '''
+        target: test count_entities, after index have been created
+        method: add vectors in db, and create index, then calling count_entities with correct params 
+        expected: count_entities raise exception
+        '''
+        raw_vectors, entities = gen_binary_entities(insert_count)
+        res = connect.insert(binary_collection, entities)
+        connect.flush([binary_collection])
+        connect.create_index(binary_collection, field_name, get_hamming_index)
+        # res = connect.count_entities(binary_collection)
+        # assert res == insert_count
+        stats = connect.get_collection_stats(binary_collection)
+        assert stats["row_count"] == insert_count
+
+    def test_collection_count_no_entities(self, connect, binary_collection):
+        '''
+        target: test collection rows_count is correct or not, if collection is empty
+        method: create collection and no vectors in it,
+            assert the value returned by count_entities method is equal to 0
+        expected: the count is equal to 0
+        '''    
+        # res = connect.count_entities(binary_collection)
+        # assert res == 0
+        stats = connect.get_collection_stats(binary_collection)
+        assert stats["row_count"] == 0
+
+
class TestCollectionMultiCollections:
    """
    params means different nb, the nb value may trigger merge, or not
    """
    # Entity counts per parametrized run: a single row, a mid-size batch, and
    # one just past 2000 (per the class note, larger nb may trigger a merge).
    @pytest.fixture(
        scope="function",
        params=[
            1,
            1000,
            2001
        ],
    )
    def insert_count(self, request):
        yield request.param

    # Simple index params restricted to binary-capable index types with the
    # JACCARD metric; other index types are skipped.
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_jaccard_index(self, request, connect):
        if request.param["index_type"] in binary_support():
            request.param["metric_type"] = "JACCARD"
            return request.param
        else:
            pytest.skip("Skip index")

    # Same as get_jaccard_index but with the HAMMING metric.
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_hamming_index(self, request, connect):
        if request.param["index_type"] in binary_support():
            request.param["metric_type"] = "HAMMING"
            return request.param
        else:
            pytest.skip("Skip index")

    # SUBSTRUCTURE is only exercised with the FLAT index type.
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_substructure_index(self, request, connect):
        if request.param["index_type"] == "FLAT":
            request.param["metric_type"] = "SUBSTRUCTURE"
            return request.param
        else:
            pytest.skip("Skip index")

    # SUPERSTRUCTURE is only exercised with the FLAT index type.
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_superstructure_index(self, request, connect):
        if request.param["index_type"] == "FLAT":
            request.param["metric_type"] = "SUPERSTRUCTURE"
            return request.param
        else:
            pytest.skip("Skip index")

    def test_collection_count_multi_collections_l2(self, connect, insert_count):
        '''
        target: test collection rows_count is correct or not with multiple collections of L2
        method: create collection and add entities in it,
            assert the value returned by count_entities method is equal to length of entities
        expected: the count is equal to the length of entities
        '''
        entities = gen_entities(insert_count)
        collection_list = []
        collection_num = 20
        # Insert the same batch into every collection, flush all of them at
        # once, then verify each collection reports the full count.
        for i in range(collection_num):
            collection_name = gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            res = connect.insert(collection_name, entities)
        connect.flush(collection_list)
        for i in range(collection_num):
            # res = connect.count_entities(collection_list[i])
            # assert res == insert_count
            stats = connect.get_collection_stats(collection_list[i])
            assert stats["row_count"] == insert_count

    @pytest.mark.level(2)
    def test_collection_count_multi_collections_binary(self, connect, binary_collection, insert_count):
        '''
        target: test collection rows_count is correct or not with multiple collections of JACCARD
        method: create collection and add entities in it,
            assert the value returned by count_entities method is equal to length of entities
        expected: the count is equal to the length of entities
        '''
        raw_vectors, entities = gen_binary_entities(insert_count)
        # NOTE(review): this first insert targets the fixture collection, which
        # is never checked below — possibly leftover setup; confirm intent.
        res = connect.insert(binary_collection, entities)
        collection_list = []
        collection_num = 20
        for i in range(collection_num):
            collection_name = gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_binary_fields)
            res = connect.insert(collection_name, entities)
        connect.flush(collection_list)
        for i in range(collection_num):
            # res = connect.count_entities(collection_list[i])
            # assert res == insert_count
            stats = connect.get_collection_stats(collection_list[i])
            assert stats["row_count"] == insert_count

    @pytest.mark.level(2)
    def test_collection_count_multi_collections_mix(self, connect):
        '''
        target: test collection rows_count is correct or not with multiple collections of JACCARD
        method: create collection and add entities in it,
            assert the value returned by count_entities method is equal to length of entities
        expected: the count is equal to the length of entities
        '''
        collection_list = []
        collection_num = 20
        # First half: float-vector collections; second half: binary-vector
        # collections. Both halves receive default_nb entities each.
        for i in range(0, int(collection_num / 2)):
            collection_name = gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            res = connect.insert(collection_name, default_entities)
        for i in range(int(collection_num / 2), collection_num):
            collection_name = gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_binary_fields)
            res = connect.insert(collection_name, default_binary_entities)
        connect.flush(collection_list)
        for i in range(collection_num):
            # res = connect.count_entities(collection_list[i])
            # assert res == default_nb
            stats = connect.get_collection_stats(collection_list[i])
            assert stats["row_count"] == default_nb
diff --git a/tests/python_test/collection/test_collection_logic.py b/tests/python_test/collection/test_collection_logic.py
new file mode 100644
index 000000000..e0503e430
--- /dev/null
+++ b/tests/python_test/collection/test_collection_logic.py
@@ -0,0 +1,138 @@
+import pdb
+import pytest
+import logging
+import itertools
+from time import sleep
+from multiprocessing import Process
+from utils import *
+
+uid = "collection_logic"
+
def create_collection(connect, **params):
    # Create a collection with the default field schema; name comes from params.
    connect.create_collection(params["collection_name"], const.default_fields)
+
def search_collection(connect, **params):
    # Run a top-k vector search and report only the call status.
    # NOTE(review): assumes the (status, result) return style of the old SDK
    # search API — confirm against the client version in use.
    status, result = connect.search(
        params["collection_name"], 
        params["top_k"], 
        params["query_vectors"],
        params={"nprobe": params["nprobe"]})
    return status
+
def load_collection(connect, **params):
    # Load the named collection into memory; nothing is returned to the caller.
    connect.load_collection(params["collection_name"])
+
def has(connect, **params):
    """Return the status of a has_collection existence check."""
    status, _ = connect.has_collection(params["collection_name"])
    return status
+
def show(connect, **params):
    """Return the status of listing all collections (params are ignored)."""
    status, _ = connect.list_collections()
    return status
+
def delete(connect, **params):
    """Drop the named collection and return the call status."""
    return connect.drop_collection(params["collection_name"])
+
def describe(connect, **params):
    """Return the status of fetching the collection's schema/info."""
    status, _ = connect.get_collection_info(params["collection_name"])
    return status
+
def rowcount(connect, **params):
    """Return the status of a row-count query on the collection."""
    status, _ = connect.count_entities(params["collection_name"])
    return status
+
def create_index(connect, **params):
    # Build an index on the collection; index type and params come from the caller.
    status = connect.create_index(params["collection_name"], params["index_type"], params["index_param"])
    return status
+
# Dispatch table keyed by operation code. Per is_right() below, codes above 10
# require the collection to exist: 10 creates it and 30 drops it, so any
# code > 10 before 10 or after 30 makes a sequence illegal.
func_map = { 
    # 0:has, 
    1:show,
    10:create_collection, 
    11:describe,
    12:rowcount,
    13:search_collection,
    14:load_collection,
    15:create_index,
    30:delete
}
+
def gen_sequence():
    """Yield every ordering (permutation) of the op codes in func_map."""
    yield from itertools.permutations(func_map.keys())
+
+
class TestCollectionLogic(object):
    """Run randomized orderings of collection operations.

    Legal orderings (create before use, nothing after delete) must succeed
    end-to-end; illegal orderings must fail at some step.
    """

    @pytest.mark.parametrize("logic_seq", gen_sequence())
    @pytest.mark.level(2)
    def _test_logic(self, connect, logic_seq, args):
        # Disabled (leading underscore): iterates all permutations of func_map.
        if args["handler"] == "HTTP":
            pytest.skip("Skip in http mode")
        if self.is_right(logic_seq):
            self.execute(logic_seq, connect)
        else:
            self.execute_with_error(logic_seq, connect)
        self.tear_down(connect)

    def is_right(self, seq):
        """Return True if the op-code sequence is a legal call order.

        Codes above 10 require the collection to exist: they are illegal
        before create (10) or after delete (30).
        """
        # BUG FIX: the original wrote `sorted(seq) == seq`, comparing a list
        # against the tuple produced by itertools.permutations — always False.
        # Compare list to list so the ascending-order fast path actually fires
        # (the loop below happens to accept sorted sequences too, so the bug
        # was latent rather than visible).
        if sorted(seq) == list(seq):
            return True

        not_created = True
        has_deleted = False
        for code in seq:
            if code > 10 and not_created:
                return False
            elif code > 10 and has_deleted:
                return False
            elif code == 10:
                not_created = False
            elif code == 30:
                has_deleted = True

        return True

    def execute(self, logic_seq, connect):
        """Run every op in order; each one must report success."""
        basic_params = self.gen_params()
        for code in logic_seq:
            op = func_map[code]
            status = op(connect, **basic_params)
            assert status.OK()

    def execute_with_error(self, logic_seq, connect):
        """Run ops in order; at least one must fail before the end."""
        basic_params = self.gen_params()

        error_flag = False
        for code in logic_seq:
            op = func_map[code]
            status = op(connect, **basic_params)
            if not status.OK():
                error_flag = True
                break
        assert error_flag == True

    def tear_down(self, connect):
        # Drop every collection left behind by the executed sequence.
        names = connect.list_collections()[1]
        for name in names:
            connect.drop_collection(name)

    def gen_params(self):
        # Shared kwargs consumed (selectively) by every op in func_map.
        collection_name = gen_unique_str(uid)
        top_k = 1
        vectors = gen_vectors(2, dim)
        param = {'collection_name': collection_name,
                 'dimension': dim,
                 'metric_type': "L2",
                 'nprobe': 1,
                 'top_k': top_k,
                 'index_type': "IVF_SQ8",
                 'index_param': {
                        'nlist': 16384
                 },
                 'query_vectors': vectors}
        return param
diff --git a/tests/python_test/collection/test_collection_stats.py b/tests/python_test/collection/test_collection_stats.py
new file mode 100644
index 000000000..569833d46
--- /dev/null
+++ b/tests/python_test/collection/test_collection_stats.py
@@ -0,0 +1,356 @@
+import time
+import pdb
+import threading
+import logging
+from multiprocessing import Pool, Process
+
+import pytest
+from utils import *
+from constants import *
+
+uid = "get_collection_stats"
+
class TestGetCollectionStats:
    """
    ******************************************************************
      The following cases are used to test `collection_stats` function
    ******************************************************************
    """

    # Invalid collection names used by the negative-path tests.
    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    # Simple index params; IVF_SQ8H is skipped when the server runs CPU-only.
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        if str(connect._cmd("mode")) == "CPU":
            if request.param["index_type"] in index_cpu_not_support():
                pytest.skip("CPU not support index_type: ivf_sq8h")
        return request.param

    # Binary index params restricted to JACCARD-capable index types.
    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_jaccard_index(self, request, connect):
        logging.getLogger().info(request.param)
        if request.param["index_type"] in binary_support():
            request.param["metric_type"] = "JACCARD"
            return request.param
        else:
            pytest.skip("Skip index Temporary")

    def test_get_collection_stats_name_not_existed(self, connect, collection):
        '''
        target: get collection stats where collection name does not exist
        method: call collection_stats with a random collection_name, which is not in db
        expected: status not ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.get_collection_stats(collection_name)
        connect.drop_collection(collection_name)
        # Stats on a dropped collection must raise.
        with pytest.raises(Exception) as e:
            connect.get_collection_stats(collection_name)

    @pytest.mark.level(2)
    def test_get_collection_stats_name_invalid(self, connect, get_collection_name):
        '''
        target: get collection stats where collection name is invalid
        method: call collection_stats with invalid collection_name
        expected: status not ok
        '''
        collection_name = get_collection_name
        with pytest.raises(Exception) as e:
            stats = connect.get_collection_stats(collection_name)

    def test_get_collection_stats_empty(self, connect, collection):
        '''
        target: get collection stats where no entity in collection
        method: call collection_stats in empty collection
        expected: segment = []
        '''
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == 0
        # assert len(stats["partitions"]) == 1
        # assert stats["partitions"][0]["tag"] == default_partition_name
        # assert stats["partitions"][0]["row_count"] == 0

    def test_get_collection_stats_batch(self, connect, collection):
        '''
        target: get row count with collection_stats
        method: add entities, check count in collection info
        expected: count as expected
        '''
        ids = connect.insert(collection, default_entities)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == default_nb
        # assert len(stats["partitions"]) == 1
        # assert stats["partitions"][0]["tag"] == default_partition_name
        # assert stats["partitions"][0]["row_count"] == default_nb

    def test_get_collection_stats_single(self, connect, collection):
        '''
        target: get row count with collection_stats
        method: add entity one by one, check count in collection info
        expected: count as expected
        '''
        nb = 10
        # Flush after every single-row insert so each row lands in storage.
        for i in range(nb):
            ids = connect.insert(collection, default_entity)
            connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == nb
        # assert len(stats["partitions"]) == 1
        # assert stats["partitions"][0]["tag"] == default_partition_name
        # assert stats["partitions"][0]["row_count"] == nb

    @pytest.mark.skip("delete_by_id not support yet")
    def test_get_collection_stats_after_delete(self, connect, collection):
        '''
        target: get row count with collection_stats
        method: add and delete entities, check count in collection info
        expected: status ok, count as expected
        '''
        ids = connect.insert(collection, default_entities)
        status = connect.flush([collection])
        delete_ids = [ids[0], ids[-1]]
        connect.delete_entity_by_id(collection, delete_ids)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == default_nb - 2
        assert stats["partitions"][0]["row_count"] == default_nb - 2
        assert stats["partitions"][0]["segments"][0]["data_size"] > 0

    # TODO: enable
    @pytest.mark.level(2)
    @pytest.mark.skip("no compact")
    def test_get_collection_stats_after_compact_parts(self, connect, collection):
        '''
        target: get row count with collection_stats
        method: add and delete entities, and compact collection, check count in collection info
        expected: status ok, count as expected
        '''
        delete_length = 1000
        ids = connect.insert(collection, default_entities)
        status = connect.flush([collection])
        delete_ids = ids[:delete_length]
        connect.delete_entity_by_id(collection, delete_ids)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        logging.getLogger().info(stats)
        assert stats["row_count"] == default_nb - delete_length
        compact_before = stats["partitions"][0]["segments"][0]["data_size"]
        connect.compact(collection)
        stats = connect.get_collection_stats(collection)
        logging.getLogger().info(stats)
        compact_after = stats["partitions"][0]["segments"][0]["data_size"]
        assert compact_before == compact_after

    @pytest.mark.skip("no compact")
    def test_get_collection_stats_after_compact_delete_one(self, connect, collection):
        '''
        target: get row count with collection_stats
        method: add and delete one entity, and compact collection, check count in collection info
        expected: status ok, count as expected
        '''
        ids = connect.insert(collection, default_entities)
        status = connect.flush([collection])
        delete_ids = ids[:1]
        connect.delete_entity_by_id(collection, delete_ids)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        logging.getLogger().info(stats)
        compact_before = stats["partitions"][0]["row_count"]
        connect.compact(collection)
        stats = connect.get_collection_stats(collection)
        logging.getLogger().info(stats)
        compact_after = stats["partitions"][0]["row_count"]
        assert compact_before == compact_after

    def test_get_collection_stats_partition(self, connect, collection):
        '''
        target: get partition info in a collection
        method: call collection_stats after partition created and check partition_stats
        expected: status ok, vectors added to partition
        '''
        connect.create_partition(collection, default_tag)
        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == default_nb
        # assert len(stats["partitions"]) == 2
        # assert stats["partitions"][1]["tag"] == default_tag
        # assert stats["partitions"][1]["row_count"] == default_nb

    def test_get_collection_stats_partitions(self, connect, collection):
        '''
        target: get partition info in a collection
        method: create two partitions, add vectors in one of the partitions, call collection_stats and check
        expected: status ok, vectors added to one partition but not the other
        '''
        new_tag = "new_tag"
        connect.create_partition(collection, default_tag)
        connect.create_partition(collection, new_tag)
        # Insert into default_tag, then new_tag, then the default partition,
        # checking the cumulative row count after each flush.
        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == default_nb
        # for partition in stats["partitions"]:
        #     if partition["tag"] == default_tag:
        #         assert partition["row_count"] == default_nb
        #     else:
        #         assert partition["row_count"] == 0
        ids = connect.insert(collection, default_entities, partition_tag=new_tag)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == default_nb * 2
        # for partition in stats["partitions"]:
        #     if partition["tag"] in [default_tag, new_tag]:
        #         assert partition["row_count"] == default_nb
        ids = connect.insert(collection, default_entities)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == default_nb * 3

    # TODO: assert metric type in stats response
    def test_get_collection_stats_after_index_created(self, connect, collection, get_simple_index):
        '''
        target: test collection info after index created
        method: create collection, add vectors, create index and call collection_stats
        expected: status ok, index created and shown in segments
        '''
        ids = connect.insert(collection, default_entities)
        connect.flush([collection])
        connect.create_index(collection, default_float_vec_field_name, get_simple_index)
        stats = connect.get_collection_stats(collection)
        logging.getLogger().info(stats)
        assert stats["row_count"] == default_nb
        # for file in stats["partitions"][0]["segments"][0]["files"]:
        #     if file["name"] == default_float_vec_field_name and "index_type" in file:
        #         assert file["data_size"] > 0
        #         assert file["index_type"] == get_simple_index["index_type"]
        #         break

    # TODO: assert metric type in stats response
    def test_get_collection_stats_after_index_created_ip(self, connect, collection, get_simple_index):
        '''
        target: test collection info after index created
        method: create collection, add vectors, create index and call collection_stats
        expected: status ok, index created and shown in segments
        '''
        # FIX: metric_type was previously set to "IP" twice (an assignment
        # followed by a redundant dict.update); a single assignment suffices.
        get_simple_index["metric_type"] = "IP"
        ids = connect.insert(collection, default_entities)
        connect.flush([collection])
        connect.create_index(collection, default_float_vec_field_name, get_simple_index)
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == default_nb
        # for file in stats["partitions"][0]["segments"][0]["files"]:
        #     if file["name"] == default_float_vec_field_name and "index_type" in file:
        #         assert file["data_size"] > 0
        #         assert file["index_type"] == get_simple_index["index_type"]
        #         break

    # TODO: assert metric type in stats response
    def test_get_collection_stats_after_index_created_jac(self, connect, binary_collection, get_jaccard_index):
        '''
        target: test collection info after index created
        method: create collection, add binary entities, create index and call collection_stats
        expected: status ok, index created and shown in segments
        '''
        ids = connect.insert(binary_collection, default_binary_entities)
        connect.flush([binary_collection])
        connect.create_index(binary_collection, "binary_vector", get_jaccard_index)
        stats = connect.get_collection_stats(binary_collection)
        assert stats["row_count"] == default_nb
        # for file in stats["partitions"][0]["segments"][0]["files"]:
        #     if file["name"] == default_float_vec_field_name and "index_type" in file:
        #         assert file["data_size"] > 0
        #         assert file["index_type"] == get_simple_index["index_type"]
        #         break

    def test_get_collection_stats_after_create_different_index(self, connect, collection):
        '''
        target: test collection info after index created repeatedly
        method: create collection, add vectors, create index and call collection_stats multiple times
        expected: status ok, index info shown in segments
        '''
        ids = connect.insert(collection, default_entities)
        connect.flush([collection])
        # Re-index the same data with two different index types; the row count
        # must be unaffected by re-indexing.
        for index_type in ["IVF_FLAT", "IVF_SQ8"]:
            connect.create_index(collection, default_float_vec_field_name,
                                 {"index_type": index_type, "params":{"nlist": 1024}, "metric_type": "L2"})
            stats = connect.get_collection_stats(collection)
            assert stats["row_count"] == default_nb
            # for file in stats["partitions"][0]["segments"][0]["files"]:
            #     if file["name"] == default_float_vec_field_name and "index_type" in file:
            #         assert file["data_size"] > 0
            #         assert file["index_type"] == index_type
            #         break

    def test_collection_count_multi_collections(self, connect):
        '''
        target: test collection rows_count is correct or not with multiple collections of L2
        method: create collection and add entities in it,
            assert the value returned by count_entities method is equal to length of entities
        expected: row count in segments
        '''
        collection_list = []
        collection_num = 10
        for i in range(collection_num):
            collection_name = gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            res = connect.insert(collection_name, default_entities)
        connect.flush(collection_list)
        for i in range(collection_num):
            stats = connect.get_collection_stats(collection_list[i])
            # assert stats["partitions"][0]["row_count"] == default_nb
            assert stats["row_count"] == default_nb
            connect.drop_collection(collection_list[i])

    @pytest.mark.level(2)
    def test_collection_count_multi_collections_indexed(self, connect):
        '''
        target: test collection rows_count is correct or not with multiple collections of L2
        method: create collection and add entities in it,
            assert the value returned by count_entities method is equal to length of entities
        expected: row count in segments
        '''
        collection_list = []
        collection_num = 10
        for i in range(collection_num):
            collection_name = gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            res = connect.insert(collection_name, default_entities)
            # NOTE(review): flushes the whole (growing) list every iteration;
            # flushing just [collection_name] would likely suffice — confirm.
            connect.flush(collection_list)
            if i % 2:
                connect.create_index(collection_name, default_float_vec_field_name,
                                     {"index_type": "IVF_SQ8", "params":{"nlist": 1024}, "metric_type": "L2"})
            else:
                connect.create_index(collection_name, default_float_vec_field_name,
                                     {"index_type": "IVF_FLAT","params":{"nlist": 1024}, "metric_type": "L2"})
        for i in range(collection_num):
            stats = connect.get_collection_stats(collection_list[i])
            assert stats["row_count"] == default_nb
            # if i % 2:
            #     for file in stats["partitions"][0]["segments"][0]["files"]:
            #         if file["name"] == default_float_vec_field_name and "index_type" in file:
            #             assert file["index_type"] == "IVF_SQ8"
            #             break
            # else:
            #     for file in stats["partitions"][0]["segments"][0]["files"]:
            #         if file["name"] == default_float_vec_field_name and "index_type" in file:
            #             assert file["index_type"] == "IVF_FLAT"
            #             break
            connect.drop_collection(collection_list[i])
diff --git a/tests/python_test/collection/test_create_collection.py b/tests/python_test/collection/test_create_collection.py
new file mode 100644
index 000000000..22c8ae80e
--- /dev/null
+++ b/tests/python_test/collection/test_create_collection.py
@@ -0,0 +1,290 @@
+import pdb
+import copy
+import logging
+import itertools
+import time
+import threading
+from multiprocessing import Process
+import sklearn.preprocessing
+
+import pytest
+from utils import *
+from constants import *
+
+uid = "create_collection"
+
+class TestCreateCollection:
+    """
+    ******************************************************************
+      The following cases are used to test `create_collection` function
+    ******************************************************************
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_single_filter_fields()
+    )
+    def get_filter_field(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_single_vector_fields()
+    )
+    def get_vector_field(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_segment_row_limits()
+    )
+    def get_segment_row_limit(self, request):
+        yield request.param
+
+    def test_create_collection_fields(self, connect, get_filter_field, get_vector_field):
+        '''
+        target: test create normal collection with different fields
+        method: create collection with diff fields: metric/field_type/...
+        expected: no exception raised
+        '''
+        filter_field = get_filter_field
+        logging.getLogger().info(filter_field)
+        vector_field = get_vector_field
+        collection_name = gen_unique_str(uid)
+        fields = {
+                "fields": [filter_field, vector_field],
+                # "segment_row_limit": default_segment_row_limit
+        }
+        logging.getLogger().info(fields)
+        connect.create_collection(collection_name, fields)
+        assert connect.has_collection(collection_name)
+
+    @pytest.mark.skip("no segment_row_limit")
+    def test_create_collection_segment_row_limit(self, connect, get_segment_row_limit):
+        '''
+        target: test create normal collection with different fields
+        method: create collection with diff segment_row_limit
+        expected: no exception raised
+        '''
+        collection_name = gen_unique_str(uid)
+        fields = copy.deepcopy(default_fields)
+        # fields["segment_row_limit"] = get_segment_row_limit
+        connect.create_collection(collection_name, fields)
+        assert connect.has_collection(collection_name)
+
+    def test_create_collection_after_insert(self, connect, collection):
+        '''
+        target: test insert vector, then create collection again
+        method: insert vector and create collection
+        expected: error raised
+        '''
+        # pdb.set_trace()
+        connect.insert(collection, default_entity)
+
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection, default_fields)
+
+    def test_create_collection_after_insert_flush(self, connect, collection):
+        '''
+        target: test insert vector, then create collection again
+        method: insert vector and create collection
+        expected: error raised
+        '''
+        connect.insert(collection, default_entity)
+        connect.flush([collection])
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection, default_fields)
+
+    # TODO: assert exception
+    def test_create_collection_without_connection(self, dis_connect):
+        '''
+        target: test create collection, without connection
+        method: create collection with correct params, with a disconnected instance
+        expected: error raised
+        '''
+        collection_name = gen_unique_str(uid)
+        with pytest.raises(Exception) as e:
+            dis_connect.create_collection(collection_name, default_fields)
+
+    def test_create_collection_existed(self, connect):
+        '''
+        target: test create collection but the collection name has already existed
+        method: create collection with the same collection_name
+        expected: error raised
+        '''
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, default_fields)
+
+    def test_create_after_drop_collection(self, connect, collection):
+        '''
+        target: create with the same collection name after collection dropped 
+        method: delete, then create
+        expected: create success
+        '''
+        connect.drop_collection(collection)
+        time.sleep(2)
+        connect.create_collection(collection, default_fields)
+
+    @pytest.mark.level(2)
+    def test_create_collection_multithread(self, connect):
+        '''
+        target: test create collection with multithread
+        method: create collection using multithread, 
+        expected: collections are created
+        '''
+        threads_num = 8 
+        threads = []
+        collection_names = []
+
+        def create():
+            collection_name = gen_unique_str(uid)
+            collection_names.append(collection_name)
+            connect.create_collection(collection_name, default_fields)
+        for i in range(threads_num):
+            t = TestThread(target=create, args=())
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+        
+        for item in collection_names:
+            assert item in connect.list_collections()
+            connect.drop_collection(item)
+
+
+class TestCreateCollectionInvalid(object):
+    """
+    Test creating collections with invalid params
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_metric_types()
+    )
+    def get_metric_type(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_ints()
+    )
+    def get_segment_row_limit(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_ints()
+    )
+    def get_dim(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_invalid_string(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_field_types()
+    )
+    def get_field_type(self, request):
+        yield request.param
+
+    @pytest.mark.level(2)
+    @pytest.mark.skip("no segment_row_limit")
+    def test_create_collection_with_invalid_segment_row_limit(self, connect, get_segment_row_limit):
+        collection_name = gen_unique_str()
+        fields = copy.deepcopy(default_fields)
+        fields["segment_row_limit"] = get_segment_row_limit
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, fields)
+
+    @pytest.mark.level(2)
+    def test_create_collection_with_invalid_dimension(self, connect, get_dim):
+        dimension = get_dim
+        collection_name = gen_unique_str()
+        fields = copy.deepcopy(default_fields)
+        fields["fields"][-1]["params"]["dim"] = dimension
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, fields)
+
+    @pytest.mark.level(2)
+    def test_create_collection_with_invalid_collection_name(self, connect, get_invalid_string):
+        collection_name = get_invalid_string
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, default_fields)
+
+    @pytest.mark.level(2)
+    def test_create_collection_with_empty_collection_name(self, connect):
+        collection_name = ''
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, default_fields)
+
+    @pytest.mark.level(2)
+    def test_create_collection_with_none_collection_name(self, connect):
+        collection_name = None
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, default_fields)
+
+    def test_create_collection_no_dimension(self, connect):
+        '''
+        target: test create collection with no dimension params
+        method: create collection with correct params
+        expected: create status return ok
+        '''
+        collection_name = gen_unique_str(uid)
+        fields = copy.deepcopy(default_fields)
+        fields["fields"][-1]["params"].pop("dim")
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, fields)
+
+    @pytest.mark.skip("no segment_row_limit")
+    def test_create_collection_no_segment_row_limit(self, connect):
+        '''
+        target: test create collection with no segment_row_limit params
+        method: create collection with correct params
+        expected: use default default_segment_row_limit
+        '''
+        collection_name = gen_unique_str(uid)
+        fields = copy.deepcopy(default_fields)
+        fields.pop("segment_row_limit")
+        connect.create_collection(collection_name, fields)
+        res = connect.get_collection_info(collection_name)
+        logging.getLogger().info(res)
+        assert res["segment_row_limit"] == default_server_segment_row_limit
+
+    # TODO: assert exception
+    def test_create_collection_limit_fields(self, connect):
+        collection_name = gen_unique_str(uid)
+        limit_num = 64
+        fields = copy.deepcopy(default_fields)
+        for i in range(limit_num):
+            field_name = gen_unique_str("field_name")
+            field = {"name": field_name, "type": DataType.INT64}
+            fields["fields"].append(field)
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, fields)
+
+    # TODO: assert exception
+    @pytest.mark.level(2)
+    def test_create_collection_invalid_field_name(self, connect, get_invalid_string):
+        collection_name = gen_unique_str(uid)
+        fields = copy.deepcopy(default_fields)
+        field_name = get_invalid_string
+        field = {"name": field_name, "type": DataType.INT64}
+        fields["fields"].append(field)
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, fields)
+
+    # TODO: assert exception
+    def test_create_collection_invalid_field_type(self, connect, get_field_type):
+        collection_name = gen_unique_str(uid)
+        fields = copy.deepcopy(default_fields)
+        field_type = get_field_type
+        field = {"name": "test_field", "type": field_type}
+        fields["fields"].append(field)
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, fields)
diff --git a/tests/python_test/collection/test_describe_collection.py b/tests/python_test/collection/test_describe_collection.py
new file mode 100644
index 000000000..a62a65221
--- /dev/null
+++ b/tests/python_test/collection/test_describe_collection.py
@@ -0,0 +1,187 @@
+import pytest
+import logging
+import time
+from utils import *
+from constants import *
+
+uid = "describe_collection"
+
+
+class TestDescribeCollection:
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_single_filter_fields()
+    )
+    def get_filter_field(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_single_vector_fields()
+    )
+    def get_vector_field(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        logging.getLogger().info(request.param)
+        if str(connect._cmd("mode")) == "CPU":
+            if request.param["index_type"] in index_cpu_not_support():
+                pytest.skip("sq8h not support in CPU mode")
+        return request.param
+
+    """
+    ******************************************************************
+      The following cases are used to test `describe_collection` function, no data in collection
+    ******************************************************************
+    """
+
+    def test_collection_fields(self, connect, get_filter_field, get_vector_field):
+        '''
+        target: test create normal collection with different fields, check info returned
+        method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
+        expected: no exception raised, and value returned correct
+        '''
+        filter_field = get_filter_field
+        vector_field = get_vector_field
+        collection_name = gen_unique_str(uid)
+        fields = {
+            "fields": [filter_field, vector_field],
+            # "segment_row_limit": default_segment_row_limit
+        }
+        connect.create_collection(collection_name, fields)
+        res = connect.describe_collection(collection_name)
+        assert res['auto_id'] == True
+        # assert res['segment_row_limit'] == default_segment_row_limit
+        assert len(res["fields"]) == 2
+        for field in res["fields"]:
+            if field["type"] == filter_field:
+                assert field["name"] == filter_field["name"]
+            elif field["type"] == vector_field:
+                assert field["name"] == vector_field["name"]
+                assert field["params"] == vector_field["params"]
+
+    def test_describe_collection_after_index_created(self, connect, collection, get_simple_index):
+        connect.create_index(collection, default_float_vec_field_name, get_simple_index)
+        res = connect.describe_collection(collection)
+        for field in res["fields"]:
+            if field["name"] == default_float_vec_field_name:
+                index = field["indexes"][0]
+                assert index["index_type"] == get_simple_index["index_type"]
+                assert index["metric_type"] == get_simple_index["metric_type"]
+
+    @pytest.mark.level(2)
+    def test_describe_collection_without_connection(self, collection, dis_connect):
+        '''
+        target: test get collection info, without connection
+        method: calling get collection info with correct params, with a disconnected instance
+        expected: get collection info raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.describe_collection(collection)
+
+    def test_describe_collection_not_existed(self, connect):
+        '''
+        target: test if collection not created
+        method: generate a random collection name, create this collection then drop it,
+            then call describe_collection on the dropped collection
+        expected: exception raised
+        '''
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+        connect.describe_collection(collection_name)
+        connect.drop_collection(collection_name)
+        with pytest.raises(Exception) as e:
+            connect.describe_collection(collection_name)
+
+    @pytest.mark.level(2)
+    def test_describe_collection_multithread(self, connect):
+        '''
+        target: test create collection with multithread
+        method: create collection using multithread,
+        expected: collections are created
+        '''
+        threads_num = 4
+        threads = []
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+
+        def get_info():
+            connect.describe_collection(collection_name)
+
+        for i in range(threads_num):
+            t = TestThread(target=get_info)
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+
+    """
+    ******************************************************************
+      The following cases are used to test `describe_collection` function, and insert data in collection
+    ******************************************************************
+    """
+
+    def test_describe_collection_fields_after_insert(self, connect, get_filter_field, get_vector_field):
+        '''
+        target: test create normal collection with different fields, check info returned
+        method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
+        expected: no exception raised, and value returned correct
+        '''
+        filter_field = get_filter_field
+        vector_field = get_vector_field
+        collection_name = gen_unique_str(uid)
+        fields = {
+            "fields": [filter_field, vector_field],
+            # "segment_row_limit": default_segment_row_limit
+        }
+        connect.create_collection(collection_name, fields)
+        entities = gen_entities_by_fields(fields["fields"], default_nb, vector_field["params"]["dim"])
+        res_ids = connect.insert(collection_name, entities)
+        connect.flush([collection_name])
+        res = connect.describe_collection(collection_name)
+        assert res['auto_id'] == True
+        # assert res['segment_row_limit'] == default_segment_row_limit
+        assert len(res["fields"]) == 2
+        for field in res["fields"]:
+            if field["type"] == filter_field:
+                assert field["name"] == filter_field["name"]
+            elif field["type"] == vector_field:
+                assert field["name"] == vector_field["name"]
+                assert field["params"] == vector_field["params"]
+
+
+
+class TestDescribeCollectionInvalid(object):
+    """
+    Test describe collection with invalid params
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_collection_name(self, request):
+        yield request.param
+
+    @pytest.mark.level(2)
+    def test_describe_collection_with_invalid_collection_name(self, connect, get_collection_name):
+        collection_name = get_collection_name
+        with pytest.raises(Exception) as e:
+            connect.describe_collection(collection_name)
+
+    @pytest.mark.level(2)
+    def test_describe_collection_with_empty_collection_name(self, connect):
+        collection_name = ''
+        with pytest.raises(Exception) as e:
+            connect.describe_collection(collection_name)
+
+    @pytest.mark.level(2)
+    def test_describe_collection_with_none_collection_name(self, connect):
+        collection_name = None
+        with pytest.raises(Exception) as e:
+            connect.describe_collection(collection_name)
diff --git a/tests/python_test/collection/test_drop_collection.py b/tests/python_test/collection/test_drop_collection.py
new file mode 100644
index 000000000..a83be4ef2
--- /dev/null
+++ b/tests/python_test/collection/test_drop_collection.py
@@ -0,0 +1,103 @@
+import pdb
+import pytest
+import logging
+import itertools
+from time import sleep
+import threading
+from multiprocessing import Process
+from utils import *
+from constants import *
+
+uid = "drop_collection"
+
+class TestDropCollection:
+    """
+    ******************************************************************
+      The following cases are used to test `drop_collection` function
+    ******************************************************************
+    """
+    def test_drop_collection(self, connect, collection):
+        '''
+        target: test delete collection created with correct params 
+        method: create collection and then delete, 
+            assert the value returned by delete method
+        expected: status ok, and no collection in collections
+        '''
+        connect.drop_collection(collection)
+        time.sleep(2)
+        assert not connect.has_collection(collection)
+
+    def test_drop_collection_without_connection(self, collection, dis_connect):
+        '''
+        target: test describe collection, without connection
+        method: drop collection with correct params, with a disconnected instance
+        expected: drop raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.drop_collection(collection)
+
+    def test_drop_collection_not_existed(self, connect):
+        '''
+        target: test if collection not created
+        method: generate a random collection name which does not exist in db,
+            then call drop_collection and assert an exception is raised
+        expected: exception raised
+        '''
+        collection_name = gen_unique_str(uid)
+        with pytest.raises(Exception) as e:
+            connect.drop_collection(collection_name)
+
+    @pytest.mark.level(2)
+    def test_create_drop_collection_multithread(self, connect):
+        '''
+        target: test create and drop collection with multithread
+        method: create and drop collection using multithread, 
+        expected: collections are created, and dropped
+        '''
+        threads_num = 8 
+        threads = []
+        collection_names = []
+
+        def create():
+            collection_name = gen_unique_str(uid)
+            collection_names.append(collection_name)
+            connect.create_collection(collection_name, default_fields)
+            connect.drop_collection(collection_name)
+        for i in range(threads_num):
+            t = TestThread(target=create, args=())
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+        
+        for item in collection_names:
+            assert not connect.has_collection(item)
+
+
+class TestDropCollectionInvalid(object):
+    """
+    Test drop collection with invalid params
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_collection_name(self, request):
+        yield request.param
+
+    @pytest.mark.level(2)
+    def test_drop_collection_with_invalid_collection_name(self, connect, get_collection_name):
+        collection_name = get_collection_name
+        with pytest.raises(Exception) as e:
+            connect.drop_collection(collection_name)
+
+    def test_drop_collection_with_empty_collection_name(self, connect):
+        collection_name = ''
+        with pytest.raises(Exception) as e:
+            connect.drop_collection(collection_name)
+
+    def test_drop_collection_with_none_collection_name(self, connect):
+        collection_name = None
+        with pytest.raises(Exception) as e:
+            connect.drop_collection(collection_name)
diff --git a/tests/python_test/collection/test_has_collection.py b/tests/python_test/collection/test_has_collection.py
new file mode 100644
index 000000000..e85dc5b1a
--- /dev/null
+++ b/tests/python_test/collection/test_has_collection.py
@@ -0,0 +1,101 @@
+import pdb
+import pytest
+import logging
+import itertools
+import threading
+import time
+from multiprocessing import Process
+from utils import *
+from constants import *
+
+uid = "has_collection"
+
+class TestHasCollection:
+    """
+    ******************************************************************
+      The following cases are used to test `has_collection` function
+    ******************************************************************
+    """
+    def test_has_collection(self, connect, collection):
+        '''
+        target: test if the created collection existed
+        method: create collection, assert the value returned by has_collection method
+        expected: True
+        '''
+        assert connect.has_collection(collection)
+
+    @pytest.mark.level(2)
+    def test_has_collection_without_connection(self, collection, dis_connect):
+        '''
+        target: test has collection, without connection
+        method: calling has collection with correct params, with a disconnected instance
+        expected: has collection raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            assert dis_connect.has_collection(collection)
+
+    def test_has_collection_not_existed(self, connect):
+        '''
+        target: test if collection not created
+        method: random a collection name, create this collection then drop it,
+            assert the value returned by has_collection method
+        expected: False
+        '''
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+        assert connect.has_collection(collection_name)
+        connect.drop_collection(collection_name)
+        assert not connect.has_collection(collection_name)
+
+    @pytest.mark.level(2)
+    def test_has_collection_multithread(self, connect):
+        '''
+        target: test create collection with multithread
+        method: create collection using multithread,
+        expected: collections are created
+        '''
+        threads_num = 4
+        threads = []
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+
+        def has():
+            assert connect.has_collection(collection_name)
+            # assert not assert_collection(connect, collection_name)
+        for i in range(threads_num):
+            t = TestThread(target=has, args=())
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+
+
+class TestHasCollectionInvalid(object):
+    """
+    Test has collection with invalid params
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_collection_name(self, request):
+        yield request.param
+
+    @pytest.mark.level(2)
+    def test_has_collection_with_invalid_collection_name(self, connect, get_collection_name):
+        collection_name = get_collection_name
+        with pytest.raises(Exception) as e:
+            connect.has_collection(collection_name)
+
+    @pytest.mark.level(2)
+    def test_has_collection_with_empty_collection_name(self, connect):
+        collection_name = ''
+        with pytest.raises(Exception) as e:
+            connect.has_collection(collection_name)
+
+    @pytest.mark.level(2)
+    def test_has_collection_with_none_collection_name(self, connect):
+        collection_name = None
+        with pytest.raises(Exception) as e:
+            connect.has_collection(collection_name)
diff --git a/tests/python_test/collection/test_list_collections.py b/tests/python_test/collection/test_list_collections.py
new file mode 100644
index 000000000..084605f62
--- /dev/null
+++ b/tests/python_test/collection/test_list_collections.py
@@ -0,0 +1,94 @@
+import pytest
+import time
+from utils import *
+from constants import *
+
+uid = "list_collections"
+
+
+class TestListCollections:
+    """
+    ******************************************************************
+      The following cases are used to test `list_collections` function
+    ******************************************************************
+    """
+
+    def test_list_collections(self, connect, collection):
+        '''
+        target: test list collections
+        method: create collection, assert the value returned by list_collections method
+        expected: True
+        '''
+        assert collection in connect.list_collections()
+
+    def test_list_collections_multi_collections(self, connect):
+        '''
+        target: test list collections
+        method: create collection, assert the value returned by list_collections method
+        expected: True
+        '''
+        collection_num = 50
+        for i in range(collection_num):
+            collection_name = gen_unique_str(uid)
+            connect.create_collection(collection_name, default_fields)
+            assert collection_name in connect.list_collections()
+
+    @pytest.mark.level(2)
+    def test_list_collections_without_connection(self, dis_connect):
+        '''
+        target: test list collections, without connection
+        method: calling list collections with correct params, with a disconnected instance
+        expected: list collections raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.list_collections()
+
+    def test_list_collections_not_existed(self, connect):
+        '''
+        target: test if collection not created
+        method: generate a random collection name, create this collection then drop it,
+            assert presence in the value returned by list_collections method
+        expected: the dropped collection name is no longer listed
+        '''
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+        assert collection_name in connect.list_collections()
+        connect.drop_collection(collection_name)
+        assert collection_name not in connect.list_collections()
+
+    # TODO: make sure to run this case in the end
+    @pytest.mark.level(2)
+    def test_list_collections_no_collection(self, connect):
+        '''
+        target: test show collections is correct or not, if no collection in db
+        method: delete all collections,
+            assert the value returned by list_collections method is equal to []
+        expected: the status is ok, and the result is equal to []      
+        '''
+        result = connect.list_collections()
+        if result:
+            for collection_name in result:
+                assert connect.has_collection(collection_name)
+
+    @pytest.mark.level(2)
+    def test_list_collections_multithread(self, connect):
+        '''
+        target: test list collection with multithread
+        method: list collection using multithread,
+        expected: list collections correctly
+        '''
+        threads_num = 10
+        threads = []
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+
+        def _list():
+            assert collection_name in connect.list_collections()
+
+        for i in range(threads_num):
+            t = TestThread(target=_list)
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
diff --git a/tests/python_test/collection/test_load_collection.py b/tests/python_test/collection/test_load_collection.py
new file mode 100644
index 000000000..7bb7f1757
--- /dev/null
+++ b/tests/python_test/collection/test_load_collection.py
@@ -0,0 +1,573 @@
+import pdb
+import pytest
+import logging
+import itertools
+from time import sleep
+import threading
+from multiprocessing import Process
+from utils import *
+from constants import *
+
+uid = "load_collection"
+field_name = default_float_vec_field_name
+default_single_query = {
+    "bool": {
+        "must": [
+            {"vector": {field_name: {"topk": default_top_k, "query": gen_vectors(1, default_dim), "metric_type": "L2",
+                                     "params": {"nprobe": 10}}}}
+        ]
+    }
+}
+
+
+class TestLoadCollection:
+    """
+    ******************************************************************
+      The following cases are used to test `load_collection` function
+    ******************************************************************
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        if str(connect._cmd("mode")) == "CPU":
+            if request.param["index_type"] in index_cpu_not_support():
+                pytest.skip("sq8h not support in cpu mode")
+        return request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_binary_index()
+    )
+    def get_binary_index(self, request, connect):
+        logging.getLogger().info(request.param)
+        if request.param["index_type"] in binary_support():
+            return request.param
+        else:
+            pytest.skip("Skip index Temporary")
+
+    def test_load_collection_after_index(self, connect, collection, get_simple_index):
+        '''
+        target: test load collection, after index created
+        method: insert and create index, load collection with correct params
+        expected: no error raised
+        ''' 
+        connect.insert(collection, default_entities)
+        connect.flush([collection])
+        logging.getLogger().info(get_simple_index)
+        connect.create_index(collection, default_float_vec_field_name, get_simple_index)
+        connect.load_collection(collection)
+        connect.release_collection(collection)
+
+    @pytest.mark.level(2)
+    def test_load_collection_after_index_binary(self, connect, binary_collection, get_binary_index):
+        '''
+        target: test load binary_collection, after index created
+        method: insert and create index, load binary_collection with correct params
+        expected: no error raised
+        ''' 
+        connect.insert(binary_collection, default_binary_entities)
+        connect.flush([binary_collection])
+        for metric_type in binary_metrics():
+            logging.getLogger().info(metric_type)
+            get_binary_index["metric_type"] = metric_type
+            if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
+                with pytest.raises(Exception) as e:
+                    connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
+            else:
+                connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
+            connect.load_collection(binary_collection)
+        connect.release_collection(binary_collection)
+
+    def test_load_empty_collection(self, connect, collection):
+        '''
+        target: test load collection
+        method: no entities in collection, load collection with correct params
+        expected: load success
+        '''
+        connect.load_collection(collection)
+        connect.release_collection(collection)
+
+    @pytest.mark.level(2)
+    def test_load_collection_dis_connect(self, dis_connect, collection):
+        '''
+        target: test load collection, without connection
+        method: load collection with correct params, with a disconnected instance
+        expected: load raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.load_collection(collection)
+
+    @pytest.mark.level(2)
+    def test_release_collection_dis_connect(self, dis_connect, collection):
+        '''
+        target: test release collection, without connection
+        method: release collection with correct params, with a disconnected instance
+        expected: release raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.release_collection(collection)
+
+    @pytest.mark.level(2)
+    def test_load_collection_not_existed(self, connect, collection):
+        collection_name = gen_unique_str(uid)
+        with pytest.raises(Exception) as e:
+            connect.load_collection(collection_name)
+
+    @pytest.mark.level(2)
+    def test_release_collection_not_existed(self, connect, collection):
+        collection_name = gen_unique_str(uid)
+        with pytest.raises(Exception) as e:
+            connect.release_collection(collection_name)
+
+    def test_release_collection_not_load(self, connect, collection):
+        """
+        target: test release collection without load
+        method:
+        expected: raise exception
+        """
+        ids = connect.insert(collection, default_entities)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        with pytest.raises(Exception) as e:
+            connect.release_collection(collection)
+
+    @pytest.mark.level(2)
+    def test_load_release_collection(self, connect, collection):
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+        connect.insert(collection_name, default_entities)
+        connect.flush([collection_name])
+        connect.load_collection(collection_name)
+        connect.release_collection(collection_name)
+        connect.drop_collection(collection_name)
+        with pytest.raises(Exception) as e:
+            connect.load_collection(collection_name)
+        with pytest.raises(Exception) as e:
+            connect.release_collection(collection_name)
+
+    def test_release_collection_after_drop(self, connect, collection):
+        """
+        target: test release collection after drop
+        method: insert and flush, then release collection after load and drop
+        expected: raise exception
+        """
+        ids = connect.insert(collection, default_entities)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        connect.load_collection(collection)
+        connect.drop_collection(collection)
+        with pytest.raises(Exception) as e:
+            connect.release_collection(collection)
+
+    # TODO
+    def test_load_collection_without_flush(self, connect, collection):
+        """
+        target: test load collection without flush
+        method: insert entities without flush, then load collection
+        expected: load collection failed
+        """
+        ids = connect.insert(collection, default_entities)
+        assert len(ids) == default_nb
+        with pytest.raises(Exception) as e:
+            connect.load_collection(collection)
+
+    # TODO
+    def _test_load_collection_larger_than_memory(self):
+        """
+        target: test load collection when memory less than collection size
+        method: i don't know
+        expected: raise exception
+        """
+
+    def test_load_collection_release_part_partitions(self, connect, collection):
+        """
+        target: test release part partitions after load collection
+        method: load collection and release part partitions
+        expected: released partitions search empty
+        """
+        ids = connect.insert(collection, default_entities)
+        assert len(ids) == default_nb
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        connect.load_collection(collection)
+        connect.release_partitions(collection, [default_tag])
+        res = connect.search(collection, default_single_query, partition_tags=[default_tag])
+        assert len(res[0]) == 0
+        res = connect.search(collection, default_single_query, partition_tags=[default_partition_name])
+        assert len(res[0]) == default_top_k
+
+    def test_load_collection_release_all_partitions(self, connect, collection):
+        """
+        target: test release all partitions after load collection
+        method: load collection and release all partitions
+        expected: search empty
+        """
+        ids = connect.insert(collection, default_entities)
+        assert len(ids) == default_nb
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        connect.load_collection(collection)
+        connect.release_partitions(collection, [default_partition_name, default_tag])
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+    def test_load_partitions_release_collection(self, connect, collection):
+        """
+        target: test release collection after load partitions
+        method: insert entities into partitions, search empty after load partitions and release collection
+        expected: search result empty
+        """
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        connect.load_collection(collection)
+        connect.release_partitions(collection, [default_tag])
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+
+class TestReleaseAdvanced:
+    
+    def _test_release_collection_during_searching(self, connect, collection):
+        """
+        target: test release collection during searching 
+        method: insert entities into collection, flush and load collection, release collection during searching
+        expected: 
+        """
+        nq = 1000
+        top_k = 1
+        connect.insert(collection, default_entities)
+        connect.flush([collection])
+        connect.load_collection(collection)
+        query, _ = gen_query_vectors(field_name, default_entities, top_k, nq)
+        res = connect.search(collection, query, _async=True)
+        connect.release_collection(collection)
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+    def _test_release_partition_during_searching(self, connect, collection):
+        """
+        target: test release partition during searching 
+        method: insert entities into partition, flush and load partition, release partition during searching
+        expected: 
+        """
+        nq = 1000
+        top_k = 1
+        connect.create_partition(collection, default_tag)
+        query, _ = gen_query_vectors(field_name, default_entities, top_k, nq)
+        connect.insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush([collection])
+        connect.load_partitions(collection, [default_tag])
+        res = connect.search(collection, query, _async=True)
+        connect.release_partitions(collection, [default_tag])
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+    def _test_release_collection_during_searching_A(self, connect, collection):
+        """
+        target: test release collection during searching
+        method: insert entities into partition, flush and load partition, release collection during searching
+        expected:
+        """
+        nq = 1000
+        top_k = 1
+        connect.create_partition(collection, default_tag)
+        query, _ = gen_query_vectors(field_name, default_entities, top_k, nq) 
+        connect.insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush([collection])
+        connect.load_partitions(collection, [default_tag])
+        res = connect.search(collection, query, _async=True)
+        connect.release_collection(collection)
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+    def _test_release_collection_during_loading(self, connect, collection):
+        """
+        target: test release collection during loading
+        method: insert entities into collection, flush, release collection during loading
+        expected:
+        """
+        connect.insert(collection, default_entities)
+        connect.flush([collection])
+
+        def load(collection):
+            connect.load_collection(collection)
+        t = threading.Thread(target=load, args=(collection, ))
+        t.start()
+        connect.release_collection(collection)
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+    def _test_release_partition_during_loading(self, connect, collection):
+        """
+        target: test release partition during loading
+        method: insert entities into partition, flush, release partition during loading
+        expected:
+        """
+        connect.create_partition(collection, default_tag)
+        connect.insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush([collection])
+
+        def load(collection):
+            connect.load_collection(collection)
+        t = threading.Thread(target=load, args=(collection, ))
+        t.start()
+        connect.release_partitions(collection, [default_tag])
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+    def _test_release_collection_during_inserting(self, connect, collection):
+        """
+        target: test release collection during inserting
+        method: load collection, do release collection during inserting
+        expected:
+        """
+        connect.insert(collection, default_entities)
+        connect.flush([collection])
+        connect.load_collection(collection)
+
+        def insert(collection):
+            connect.insert(collection, default_entities)
+        t = threading.Thread(target=insert, args=(collection, ))
+        t.start()
+        connect.release_collection(collection)
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+    def _test_release_collection_during_indexing(self, connect, collection):
+        """
+        target: test release collection during building index
+        method: insert and flush, load collection, do release collection during creating index
+        expected:
+        """
+        pass
+
+    def _test_release_collection_during_droping_index(self, connect, collection):
+        """
+        target: test release collection during droping index
+        method: insert, create index and flush, load collection, do release collection during droping index
+        expected:
+        """
+        pass
+
+
+class TestLoadCollectionInvalid(object):
+    """
+    Test load collection with invalid params
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_collection_name(self, request):
+        yield request.param
+
+    @pytest.mark.level(2)
+    def test_load_collection_with_invalid_collection_name(self, connect, get_collection_name):
+        collection_name = get_collection_name
+        with pytest.raises(Exception) as e:
+            connect.load_collection(collection_name)
+
+    @pytest.mark.level(2)
+    def test_release_collection_with_invalid_collection_name(self, connect, get_collection_name):
+        collection_name = get_collection_name
+        with pytest.raises(Exception) as e:
+            connect.release_collection(collection_name)
+
+
+class TestLoadPartition:
+    """
+    ******************************************************************
+      The following cases are used to test `load_partitions` function
+    ******************************************************************
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        if str(connect._cmd("mode")) == "CPU":
+            if request.param["index_type"] in index_cpu_not_support():
+                pytest.skip("sq8h not support in cpu mode")
+        return request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_binary_index()
+    )
+    def get_binary_index(self, request, connect):
+        logging.getLogger().info(request.param)
+        if request.param["index_type"] in binary_support():
+            return request.param
+        else:
+            pytest.skip("Skip index Temporary")
+
+    def test_load_partition_after_index(self, connect, collection, get_simple_index):
+        '''
+        target: test load collection, after index created
+        method: insert and create index, load collection with correct params
+        expected: no error raised
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        connect.create_index(collection, default_float_vec_field_name, get_simple_index)
+        connect.load_partitions(collection, [default_tag])
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == default_top_k
+
+    @pytest.mark.level(2)
+    def test_load_partition_after_index_binary(self, connect, binary_collection, get_binary_index):
+        '''
+        target: test load binary_collection, after index created
+        method: insert and create index, load binary_collection with correct params
+        expected: no error raised
+        '''
+        connect.create_partition(binary_collection, default_tag)
+        ids = connect.insert(binary_collection, default_binary_entities, partition_tag=default_tag)
+        assert len(ids) == default_nb
+        connect.flush([binary_collection])
+        for metric_type in binary_metrics():
+            logging.getLogger().info(metric_type)
+            get_binary_index["metric_type"] = metric_type
+            if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
+                with pytest.raises(Exception) as e:
+                    connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
+            else:
+                connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
+            connect.load_partitions(binary_collection, [default_tag])
+
+    def test_load_empty_partition(self, connect, collection):
+        '''
+        target: test load collection
+        method: no entities in collection, load collection with correct params
+        expected: load success
+        '''
+        connect.create_partition(collection, default_tag)
+        connect.load_partitions(collection, [default_tag])
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+    @pytest.mark.level(2)
+    def test_load_collection_dis_connect(self, connect, dis_connect, collection):
+        '''
+        target: test load collection, without connection
+        method: load collection with correct params, with a disconnected instance
+        expected: load raise exception
+        '''
+        connect.create_partition(collection, default_tag)
+        with pytest.raises(Exception) as e:
+            dis_connect.load_partitions(collection, [default_tag])
+
+    @pytest.mark.level(2)
+    def test_release_partition_dis_connect(self, connect, dis_connect, collection):
+        '''
+        target: test release collection, without connection
+        method: release collection with correct params, with a disconnected instance
+        expected: release raise exception
+        '''
+        connect.create_partition(collection, default_tag)
+        connect.load_partitions(collection, [default_tag])
+        with pytest.raises(Exception) as e:
+            dis_connect.release_partitions(collection, [default_tag])
+
+    @pytest.mark.level(2)
+    def test_load_partition_not_existed(self, connect, collection):
+        partition_name = gen_unique_str(uid)
+        with pytest.raises(Exception) as e:
+            connect.load_partitions(collection, [partition_name])
+
+    @pytest.mark.level(2)
+    def test_release_partition_not_existed(self, connect, collection):
+        partition_name = gen_unique_str(uid)
+        with pytest.raises(Exception) as e:
+            connect.release_partitions(collection, [partition_name])
+
+    def test_release_partition_not_load(self, connect, collection):
+        """
+        target: test release collection without load
+        method:
+        expected: raise exception
+        """
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        with pytest.raises(Exception) as e:
+            connect.release_partitions(collection, [default_tag])
+
+    @pytest.mark.level(2)
+    def test_load_release_after_drop(self, connect, collection):
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush([collection])
+        connect.load_partitions(collection, [default_tag])
+        connect.release_partitions(collection, [default_tag])
+        connect.drop_partition(collection, default_tag)
+        with pytest.raises(Exception) as e:
+            connect.load_partitions(collection, [default_tag])
+        with pytest.raises(Exception) as e:
+            connect.release_partitions(collection, [default_tag])
+
+    def test_release_partition_after_drop(self, connect, collection):
+        """
+        target: test release collection after drop
+        method: insert and flush, then release collection after load and drop
+        expected: raise exception
+        """
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush([collection])
+        connect.load_partitions(collection, [default_tag])
+        connect.drop_partition(collection, default_tag)
+        with pytest.raises(Exception) as e:
+            connect.release_partitions(collection, [default_tag])
+
+    def test_load_release_after_collection_drop(self, connect, collection):
+        """
+        target: test release collection after drop
+        method: insert and flush, then release collection after load and drop
+        expected: raise exception
+        """
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush([collection])
+        connect.load_partitions(collection, [default_tag])
+        connect.release_partitions(collection, [default_tag])
+        connect.drop_collection(collection)
+        with pytest.raises(Exception) as e:
+            connect.load_partitions(collection, [default_tag])
+        with pytest.raises(Exception) as e:
+            connect.release_partitions(collection, [default_tag])
+
+
+class TestLoadPartitionInvalid(object):
+    """
+    Test load partition with invalid params
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_partition_name(self, request):
+        yield request.param
+
+    @pytest.mark.level(2)
+    def test_load_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
+        partition_name = get_partition_name
+        with pytest.raises(Exception) as e:
+            connect.load_partitions(collection, [partition_name])
+
+    @pytest.mark.level(2)
+    def test_release_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
+        partition_name = get_partition_name
+        with pytest.raises(Exception) as e:
+            connect.release_partitions(collection, [partition_name])
diff --git a/tests/python_test/conftest.py b/tests/python_test/conftest.py
new file mode 100644
index 000000000..456c3dd7a
--- /dev/null
+++ b/tests/python_test/conftest.py
@@ -0,0 +1,194 @@
+import pdb
+import logging
+import socket
+import pytest
+from utils import gen_unique_str
+from milvus import Milvus, DataType
+from utils import *
+
+timeout = 60
+dimension = 128
+delete_timeout = 60
+
+
+def pytest_addoption(parser):
+    parser.addoption("--ip", action="store", default="localhost")
+    parser.addoption("--service", action="store", default="")
+    parser.addoption("--port", action="store", default=19530)
+    parser.addoption("--http-port", action="store", default=19121)
+    parser.addoption("--handler", action="store", default="GRPC")
+    parser.addoption("--tag", action="store", default="all", help="only run tests matching the tag.")
+    parser.addoption('--dry-run', action='store_true', default=False)
+
+
+def pytest_configure(config):
+    # register an additional marker
+    config.addinivalue_line(
+        "markers", "tag(name): mark test to run only matching the tag"
+    )
+
+
+def pytest_runtest_setup(item):
+    tags = list()
+    for marker in item.iter_markers(name="tag"):
+        for tag in marker.args:
+            tags.append(tag)
+    if tags:
+        cmd_tag = item.config.getoption("--tag")
+        if cmd_tag != "all" and cmd_tag not in tags:
+            pytest.skip("test requires tag in {!r}".format(tags))
+
+
+def pytest_runtestloop(session):
+    if session.config.getoption('--dry-run'):
+        for item in session.items:
+            print(item.nodeid)
+        return True
+
+
+def check_server_connection(request):
+    ip = request.config.getoption("--ip")
+    port = request.config.getoption("--port")
+
+    connected = True
+    if ip and (ip not in ['localhost', '127.0.0.1']):
+        try:
+            socket.getaddrinfo(ip, port, 0, 0, socket.IPPROTO_TCP) 
+        except Exception as e:
+            print("Socket connect failed: %s" % str(e))
+            connected = False
+    return connected
+
+
+@pytest.fixture(scope="module")
+def connect(request):
+    ip = request.config.getoption("--ip")
+    service_name = request.config.getoption("--service")
+    port = request.config.getoption("--port")
+    http_port = request.config.getoption("--http-port")
+    handler = request.config.getoption("--handler")
+    if handler == "HTTP":
+        port = http_port
+    try:
+        milvus = get_milvus(host=ip, port=port, handler=handler)
+        # reset_build_index_threshold(milvus)
+    except Exception as e:
+        logging.getLogger().error(str(e))
+        pytest.exit("Milvus server can not connected, exit pytest ...")
+    def fin():
+        try:
+            milvus.close()
+            pass
+        except Exception as e:
+            logging.getLogger().info(str(e))
+    request.addfinalizer(fin)
+    return milvus
+
+
+@pytest.fixture(scope="module")
+def dis_connect(request):
+    ip = request.config.getoption("--ip")
+    service_name = request.config.getoption("--service")
+    port = request.config.getoption("--port")
+    http_port = request.config.getoption("--http-port")
+    handler = request.config.getoption("--handler")
+    if handler == "HTTP":
+        port = http_port
+    milvus = get_milvus(host=ip, port=port, handler=handler)
+    milvus.close()
+    return milvus
+
+
+@pytest.fixture(scope="module")
+def args(request):
+    ip = request.config.getoption("--ip")
+    service_name = request.config.getoption("--service")
+    port = request.config.getoption("--port")
+    http_port = request.config.getoption("--http-port")
+    handler = request.config.getoption("--handler")
+    if handler == "HTTP":
+        port = http_port
+    args = {"ip": ip, "port": port, "handler": handler, "service_name": service_name}
+    return args
+
+
+@pytest.fixture(scope="module")
+def milvus(request):
+    ip = request.config.getoption("--ip")
+    port = request.config.getoption("--port")
+    http_port = request.config.getoption("--http-port")
+    handler = request.config.getoption("--handler")
+    if handler == "HTTP":
+        port = http_port
+    return get_milvus(host=ip, port=port, handler=handler)
+
+
+@pytest.fixture(scope="function")
+def collection(request, connect):
+    ori_collection_name = getattr(request.module, "collection_id", "test")
+    collection_name = gen_unique_str(ori_collection_name)
+    try:
+        default_fields = gen_default_fields()
+        connect.create_collection(collection_name, default_fields)
+    except Exception as e:
+        pytest.exit(str(e))
+    def teardown():
+        if connect.has_collection(collection_name):
+            connect.drop_collection(collection_name, timeout=delete_timeout)
+    request.addfinalizer(teardown)
+    assert connect.has_collection(collection_name)
+    return collection_name
+
+
+# customised id
+@pytest.fixture(scope="function")
+def id_collection(request, connect):
+    ori_collection_name = getattr(request.module, "collection_id", "test")
+    collection_name = gen_unique_str(ori_collection_name)
+    try:
+        fields = gen_default_fields(auto_id=False)
+        connect.create_collection(collection_name, fields)
+    except Exception as e:
+        pytest.exit(str(e))
+    def teardown():
+        if connect.has_collection(collection_name):
+            connect.drop_collection(collection_name, timeout=delete_timeout)
+    request.addfinalizer(teardown)
+    assert connect.has_collection(collection_name)
+    return collection_name
+
+
+@pytest.fixture(scope="function")
+def binary_collection(request, connect):
+    ori_collection_name = getattr(request.module, "collection_id", "test")
+    collection_name = gen_unique_str(ori_collection_name)
+    try:
+        fields = gen_binary_default_fields()
+        connect.create_collection(collection_name, fields)
+    except Exception as e:
+        pytest.exit(str(e))
+    def teardown():
+        collection_names = connect.list_collections()
+        if connect.has_collection(collection_name):
+            connect.drop_collection(collection_name, timeout=delete_timeout)
+    request.addfinalizer(teardown)
+    assert connect.has_collection(collection_name)
+    return collection_name
+
+
+# customised id
+@pytest.fixture(scope="function")
+def binary_id_collection(request, connect):
+    ori_collection_name = getattr(request.module, "collection_id", "test")
+    collection_name = gen_unique_str(ori_collection_name)
+    try:
+        fields = gen_binary_default_fields(auto_id=False)
+        connect.create_collection(collection_name, fields)
+    except Exception as e:
+        pytest.exit(str(e))
+    def teardown():
+        if connect.has_collection(collection_name):
+            connect.drop_collection(collection_name, timeout=delete_timeout)
+    request.addfinalizer(teardown)
+    assert connect.has_collection(collection_name)
+    return collection_name
diff --git a/tests/python_test/constants.py b/tests/python_test/constants.py
new file mode 100644
index 000000000..d3eae745f
--- /dev/null
+++ b/tests/python_test/constants.py
@@ -0,0 +1,22 @@
import utils

# Shared test payloads, built once at import time from the utils generators.

# Default collection schemas: float-vector and binary-vector variants.
default_fields = utils.gen_default_fields()
default_binary_fields = utils.gen_binary_default_fields()

# Single-entity payloads (field-column layout; see gen_entities).
default_entity = utils.gen_entities(1)
default_raw_binary_vector, default_binary_entity = utils.gen_binary_entities(1)

# Single-entity payloads (row layout).
default_entity_row = utils.gen_entities_rows(1)
default_raw_binary_vector_row, default_binary_entity_row = utils.gen_binary_entities_rows(1)


# Batches of default_nb entities (field-column layout).
default_entities = utils.gen_entities(utils.default_nb)
default_raw_binary_vectors, default_binary_entities = utils.gen_binary_entities(utils.default_nb)


# Batches in the "_new" format — presumably the newer SDK insert layout; TODO confirm.
default_entities_new = utils.gen_entities_new(utils.default_nb)
default_raw_binary_vectors_new, default_binary_entities_new = utils.gen_binary_entities_new(utils.default_nb)


# Batches in row layout.
default_entities_rows = utils.gen_entities_rows(utils.default_nb)
default_raw_binary_vectors_rows, default_binary_entities_rows = utils.gen_binary_entities_rows(utils.default_nb)
\ No newline at end of file
diff --git a/tests/python_test/docker-entrypoint.sh b/tests/python_test/docker-entrypoint.sh
new file mode 100755
index 000000000..af9ba0ba6
--- /dev/null
+++ b/tests/python_test/docker-entrypoint.sh
@@ -0,0 +1,9 @@
#!/bin/bash

# Fail fast on any error.
set -e

# "start" keeps the container alive doing nothing — presumably so the test
# harness can exec into it afterwards; TODO confirm against the CI scripts.
if [ "$1" = 'start' ]; then
   tail -f /dev/null
fi

# Any other argument vector is executed verbatim (standard entrypoint pattern).
exec "$@"
\ No newline at end of file
diff --git a/tests/python_test/entity/test_delete.py b/tests/python_test/entity/test_delete.py
new file mode 100644
index 000000000..1cc78e402
--- /dev/null
+++ b/tests/python_test/entity/test_delete.py
@@ -0,0 +1,473 @@
+import time
+import random
+import pdb
+import copy
+import threading
+import logging
+from multiprocessing import Pool, Process
+import pytest
+from utils import *
+from constants import *
+
# Vector field targeted by all delete tests in this module.
field_name = default_float_vec_field_name
# Baseline top-10 L2 search request; tests deepcopy this and patch the "query" vectors.
default_single_query = {
    "bool": {
        "must": [
            {"vector": {field_name: {"topk": 10, "metric_type":"L2", "query": gen_vectors(1, default_dim), "params": {"nprobe": 10}}}}
        ]
    }
}
+
+
+class TestDeleteBase:
+    """
+    ******************************************************************
+      The following cases are used to test `delete_entity_by_id` function
+    ******************************************************************
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        if str(connect._cmd("mode")) == "GPU":
+            if not request.param["index_type"] not in ivf():
+                pytest.skip("Only support index_type: idmap/ivf")
+        if str(connect._cmd("mode")) == "CPU":
+            if request.param["index_type"] in index_cpu_not_support():
+                pytest.skip("CPU not support index_type: ivf_sq8h")
+        return request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=[
+            1,
+            2000
+        ],
+    )
+    def insert_count(self, request):
+        yield request.param
+
+    def test_delete_entity_id_not_exised(self, connect, collection):
+        '''
+        target: test delete entity, params entity_id not existed
+        method: add entity and delete
+        expected: status DELETED
+        '''
+        ids = connect.bulk_insert(collection, default_entity)
+        connect.flush([collection])
+        status = connect.delete_entity_by_id(collection, [0])
+        assert status
+
+    def test_delete_empty_collection(self, connect, collection):
+        '''
+        target: test delete entity, params collection_name not existed
+        method: add entity and delete
+        expected: status DELETED
+        '''
+        status = connect.delete_entity_by_id(collection, [0])
+        assert status
+
    def test_delete_entity_collection_not_existed(self, connect, collection):
        '''
        target: test delete entity, params collection_name not existed
        method: delete from a collection name that was never created
        expected: error raised
        '''
        collection_new = gen_unique_str()
        with pytest.raises(Exception) as e:
            status = connect.delete_entity_by_id(collection_new, [0])
+
+    def test_delete_entity_collection_not_existed(self, connect, collection):
+        '''
+        target: test delete entity, params collection_name not existed
+        method: add entity and delete
+        expected: error raised
+        '''
+        ids = connect.bulk_insert(collection, default_entity)
+        connect.flush([collection])
+        collection_new = gen_unique_str()
+        with pytest.raises(Exception) as e:
+            status = connect.delete_entity_by_id(collection_new, [0])
+
    def test_insert_delete(self, connect, collection, insert_count):
        '''
        target: test delete entity
        method: add entities and delete
        expected: no error raised
        '''
        entities = gen_entities(insert_count)
        ids = connect.bulk_insert(collection, entities)
        connect.flush([collection])
        delete_ids = [ids[0]]
        status = connect.delete_entity_by_id(collection, delete_ids)
        assert status
        # Flush so the delete is applied before counting.
        connect.flush([collection])
        res_count = connect.count_entities(collection)
        assert res_count == insert_count - 1
+
    def test_insert_delete_A(self, connect, collection):
        '''
        target: test delete entity
        method: add entities and delete one in collection, and one not in collection
        expected: no error raised
        '''
        ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        # id 1 presumably does not exist, so only ids[0] is actually removed.
        delete_ids = [ids[0], 1]
        status = connect.delete_entity_by_id(collection, delete_ids)
        assert status
        connect.flush([collection])
        res_count = connect.count_entities(collection)
        assert res_count == default_nb - 1

    def test_insert_delete_B(self, connect, id_collection):
        '''
        target: test delete entity
        method: add entities with the same ids, and delete the id in collection
        expected: no error raised, all entities deleted
        '''
        # Every row shares id 1, so deleting id 1 must remove all of them.
        ids = [1 for i in range(default_nb)]
        res_ids = connect.bulk_insert(id_collection, default_entities, ids)
        connect.flush([id_collection])
        delete_ids = [1]
        status = connect.delete_entity_by_id(id_collection, delete_ids)
        assert status
        connect.flush([id_collection])
        res_count = connect.count_entities(id_collection)
        assert res_count == 0
+
    def test_delete_exceed_limit(self, connect, collection):
        '''
        target: test delete entity
        method: add one entity and delete two ids (one real, one never assigned)
        expected: the call succeeds and the collection ends up empty
        '''
        # NOTE(review): despite the name, no error is expected here and the
        # returned status is never asserted — confirm the intended contract.
        ids = connect.bulk_insert(collection, default_entity)
        connect.flush([collection])
        delete_ids = [ids[0], 1]
        status = connect.delete_entity_by_id(collection, delete_ids)
        connect.flush([collection])
        res_count = connect.count_entities(collection)
        assert res_count == 0
+
    def test_flush_after_delete(self, connect, collection):
        '''
        target: test delete entity
        method: add entities and delete, then flush
        expected: entity deleted and no error raised
        '''
        ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        delete_ids = [ids[0], ids[-1]]
        status = connect.delete_entity_by_id(collection, delete_ids)
        assert status
        connect.flush([collection])
        res_count = connect.count_entities(collection)
        assert res_count == default_nb - len(delete_ids)

    def test_flush_after_delete_binary(self, connect, binary_collection):
        '''
        target: test delete entity
        method: add entities and delete, then flush
        expected: entity deleted and no error raised
        '''
        # Binary-vector variant of test_flush_after_delete.
        ids = connect.bulk_insert(binary_collection, default_binary_entities)
        connect.flush([binary_collection])
        delete_ids = [ids[0], ids[-1]]
        status = connect.delete_entity_by_id(binary_collection, delete_ids)
        assert status
        connect.flush([binary_collection])
        res_count = connect.count_entities(binary_collection)
        assert res_count == default_nb - len(delete_ids)

    def test_insert_delete_binary(self, connect, binary_collection):
        '''
        method: add entities and delete
        expected: status DELETED
        '''
        # NOTE(review): the returned status is never asserted here.
        ids = connect.bulk_insert(binary_collection, default_binary_entities)
        connect.flush([binary_collection])
        delete_ids = [ids[0], ids[-1]]
        status = connect.delete_entity_by_id(binary_collection, delete_ids)
+
    def test_insert_same_ids_after_delete(self, connect, id_collection):
        '''
        method: add entities and delete
        expected: status DELETED
        note: Not flush after delete
        '''
        insert_ids = [i for i in range(default_nb)]
        ids = connect.bulk_insert(id_collection, default_entities, insert_ids)
        connect.flush([id_collection])
        delete_ids = [ids[0], ids[-1]]
        status = connect.delete_entity_by_id(id_collection, delete_ids)
        assert status
        # Re-insert one of the just-deleted ids before flushing the delete.
        new_ids = connect.bulk_insert(id_collection, default_entity, [ids[0]])
        assert new_ids == [ids[0]]
        connect.flush([id_collection])
        res_count = connect.count_entities(id_collection)
        assert res_count == default_nb - 1

    def test_insert_same_ids_after_delete_binary(self, connect, binary_id_collection):
        '''
        method: add entities, with the same id and delete the ids
        expected: status DELETED, all id deleted
        '''
        # Binary-vector variant of test_insert_same_ids_after_delete.
        insert_ids = [i for i in range(default_nb)]
        ids = connect.bulk_insert(binary_id_collection, default_binary_entities, insert_ids)
        connect.flush([binary_id_collection])
        delete_ids = [ids[0], ids[-1]]
        status = connect.delete_entity_by_id(binary_id_collection, delete_ids)
        assert status
        new_ids = connect.bulk_insert(binary_id_collection, default_binary_entity, [ids[0]])
        assert new_ids == [ids[0]]
        connect.flush([binary_id_collection])
        res_count = connect.count_entities(binary_id_collection)
        assert res_count == default_nb - 1
+
+    def test_search_after_delete(self, connect, collection):
+        '''
+        target: test delete entity
+        method: add entities and delete, then search
+        expected: entity deleted and no error raised
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        delete_ids = [ids[0], ids[-1]]
+        status = connect.delete_entity_by_id(collection, delete_ids)
+        assert status
+        connect.flush([collection])
+        query = copy.deepcopy(default_single_query)
+        query["bool"]["must"][0]["vector"][field_name]["query"] =\
+            [default_entity[-1]["values"][0], default_entities[-1]["values"][1], default_entities[-1]["values"][-1]]
+        res = connect.search(collection, query)
+        logging.getLogger().debug(res)
+        assert len(res) == len(query["bool"]["must"][0]["vector"][field_name]["query"])
+        assert res[0]._distances[0] > epsilon
+        assert res[1]._distances[0] < epsilon
+        assert res[2]._distances[0] > epsilon
+
    def test_create_index_after_delete(self, connect, collection, get_simple_index):
        '''
        method: add entitys and delete, then create index
        expected: vectors deleted, index created
        '''
        ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        delete_ids = [ids[0], ids[-1]]
        status = connect.delete_entity_by_id(collection, delete_ids)
        # Index build over a segment with deleted rows must not fail.
        connect.create_index(collection, field_name, get_simple_index)
        # assert index info
+
+    def test_delete_multiable_times(self, connect, collection):
+        '''
+        method: add entities and delete id serveral times
+        expected: entities deleted
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        delete_ids = [ids[0], ids[-1]]
+        status = connect.delete_entity_by_id(collection, delete_ids)
+        assert status
+        connect.flush([collection])
+        for i in range(10):
+            status = connect.delete_entity_by_id(collection, delete_ids)
+            assert status
+
    def test_index_insert_batch_delete_get(self, connect, collection, get_simple_index):
        '''
        method: create index, insert entities, and delete
        expected: entities deleted
        '''
        connect.create_index(collection, field_name, get_simple_index)
        ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        delete_ids = [ids[0], ids[-1]]
        status = connect.delete_entity_by_id(collection, delete_ids)
        assert status
        connect.flush([collection])
        res_count = connect.count_entities(collection)
        assert res_count == default_nb - len(delete_ids)
        # Deleted ids must come back as None from get_entity_by_id.
        res_get = connect.get_entity_by_id(collection, delete_ids)
        assert res_get[0] is None

    # TODO: disable
    @pytest.mark.level(2)
    def _test_index_insert_single_delete_get(self, connect, id_collection):
        '''
        method: insert entities, and delete
        expected: entities deleted
        '''
        # Disabled: the leading underscore keeps pytest from collecting it.
        ids = [i for i in range(default_nb)]
        for i in range(default_nb):
            connect.bulk_insert(id_collection, default_entity, [ids[i]])
        connect.flush([id_collection])
        delete_ids = [ids[0], ids[-1]]
        status = connect.delete_entity_by_id(id_collection, delete_ids)
        assert status
        connect.flush([id_collection])
        res_count = connect.count_entities(id_collection)
        assert res_count == default_nb - len(delete_ids)
+
+    """
+    ******************************************************************
+      The following cases are used to test `delete_entity_by_id` function, with tags
+    ******************************************************************
+    """
+
+    def test_insert_tag_delete(self, connect, collection):
+        '''
+        method: add entitys with given tag, delete entities with the return ids
+        expected: entities deleted
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.bulk_insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush([collection])
+        delete_ids = [ids[0], ids[-1]]
+        status = connect.delete_entity_by_id(collection, delete_ids)
+        assert status
+        connect.flush([collection])
+        res_count = connect.count_entities(collection)
+        assert res_count == default_nb - 2
+
+    def test_insert_default_tag_delete(self, connect, collection):
+        '''
+        method: add entitys, delete entities with the return ids
+        expected: entities deleted
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        delete_ids = [ids[0], ids[-1]]
+        status = connect.delete_entity_by_id(collection, delete_ids)
+        assert status
+        connect.flush([collection])
+        res_count = connect.count_entities(collection)
+        assert res_count == default_nb - 2
+
    def test_insert_tags_delete(self, connect, collection):
        '''
        method: add entitys with given two tags, delete entities with the return ids
        expected: entities deleted
        '''
        tag_new = "tag_new"
        connect.create_partition(collection, default_tag)
        connect.create_partition(collection, tag_new)
        ids = connect.bulk_insert(collection, default_entities, partition_tag=default_tag)
        ids_new = connect.bulk_insert(collection, default_entities, partition_tag=tag_new)
        connect.flush([collection])
        # Delete one entity from each partition.
        delete_ids = [ids[0], ids_new[0]]
        status = connect.delete_entity_by_id(collection, delete_ids)
        assert status
        connect.flush([collection])
        res_count = connect.count_entities(collection)
        assert res_count == 2 * (default_nb - 1)

    def test_insert_tags_index_delete(self, connect, collection, get_simple_index):
        """
        method: add entitys with given tag, create index, delete entities with the return ids
        expected: entities deleted
        """
        # Same as test_insert_tags_delete, but with an index built before the delete.
        tag_new = "tag_new"
        connect.create_partition(collection, default_tag)
        connect.create_partition(collection, tag_new)
        ids = connect.bulk_insert(collection, default_entities, partition_tag=default_tag)
        ids_new = connect.bulk_insert(collection, default_entities, partition_tag=tag_new)
        connect.flush([collection])
        connect.create_index(collection, field_name, get_simple_index)
        delete_ids = [ids[0], ids_new[0]]
        status = connect.delete_entity_by_id(collection, delete_ids)
        assert status
        connect.flush([collection])
        res_count = connect.count_entities(collection)
        assert res_count == 2 * (default_nb - 1)
+
    def test_insert_delete_loop(self, connect, collection):
        """
        target: test loop insert and delete entities
        method: loop insert entities into two segments, and delete entities cross segments.
        expected: count is correct
        """
        loop = 2
        for i in range(loop):
            ids = connect.bulk_insert(collection, default_entities)
            connect.flush([collection])
            # Keep only the first and last 100 ids of each batch.
            status = connect.delete_entity_by_id(collection, ids[100:default_nb - 100])
            connect.flush([collection])
        res_count = connect.count_entities(collection)
        assert res_count == loop * 200

    def test_search_delete_loop(self, connect, collection):
        """
        target: test loop search and delete entities
        method: loop search and delete cross segments
        expected: ok
        """
        loop = 2
        ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        ni = default_nb // loop
        for i in range(loop):
            # Search must not fail while deletes are interleaved.
            res = connect.search(collection, default_single_query)
            status = connect.delete_entity_by_id(collection, ids[i * ni:(i + 1) * ni])
            assert status
            connect.flush([collection])
        res_count = connect.count_entities(collection)
        assert res_count == 0

    def test_count_delete_loop(self, connect, collection):
        """
        target: test loop search and delete entities
        method: loop search and delete cross segments
        expected: ok
        """
        loop = 2
        ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        ni = default_nb // loop
        for i in range(loop):
            # count_entities interleaved with deletes must not fail.
            connect.count_entities(collection)
            status = connect.delete_entity_by_id(collection, ids[i * ni:(i + 1) * ni])
            assert status
            connect.flush([collection])
        res_count = connect.count_entities(collection)
        assert res_count == 0
+
+
class TestDeleteInvalid(object):
    """Negative cases: delete with invalid entity ids or collection names."""

    @pytest.fixture(scope="function", params=gen_invalid_ints())
    def gen_entity_id(self, request):
        # An invalid (non-int64-like) entity id.
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_collection_name(self, request):
        # An invalid collection name.
        yield request.param

    @pytest.mark.level(1)
    def test_delete_entity_id_invalid(self, connect, collection, gen_entity_id):
        # A single invalid id must be rejected.
        with pytest.raises(Exception) as e:
            connect.delete_entity_by_id(collection, [gen_entity_id])

    def test_delete_entity_ids_invalid(self, connect, collection, gen_entity_id):
        # One invalid id in an otherwise valid list must still be rejected.
        with pytest.raises(Exception) as e:
            connect.delete_entity_by_id(collection, [1, gen_entity_id])

    @pytest.mark.level(2)
    def test_delete_entity_with_invalid_collection_name(self, connect, get_collection_name):
        # An invalid collection name must be rejected.
        with pytest.raises(Exception) as e:
            connect.delete_entity_by_id(get_collection_name, [1])
diff --git a/tests/python_test/entity/test_get_entity_by_id.py b/tests/python_test/entity/test_get_entity_by_id.py
new file mode 100644
index 000000000..afe6d479a
--- /dev/null
+++ b/tests/python_test/entity/test_get_entity_by_id.py
@@ -0,0 +1,666 @@
+import time
+import random
+import pdb
+import copy
+import logging
+from multiprocessing import Pool, Process
+import concurrent.futures
+from threading import current_thread
+import pytest
+from utils import *
+from constants import *
+
# Baseline top-10 search request over the default float-vector field;
# individual tests may deepcopy and patch the "query" vectors.
default_single_query = {
    "bool": {
        "must": [
            {"vector": {
                default_float_vec_field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "params": {"nprobe": 10}}}}
        ]
    }
}
+
+class TestGetBase:
+    """
+    ******************************************************************
+      The following cases are used to test `get_entity_by_id` function
+    ******************************************************************
+    """
+
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        # Skip index types the CPU-mode server cannot build (e.g. ivf_sq8h).
        if str(connect._cmd("mode")) == "CPU":
            if request.param["index_type"] in index_cpu_not_support():
                pytest.skip("sq8h not support in CPU mode")
        return request.param

    @pytest.fixture(
        scope="function",
        params=[
            1,
            500
        ],
    )
    def get_pos(self, request):
        # Used both as an index into the returned ids and as a slice length.
        yield request.param
+
    def test_get_entity(self, connect, collection, get_pos):
        '''
        target: test.get_entity_by_id, get one
        method: add entity, and get
        expected: entity returned equals insert
        '''
        ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        res_count = connect.count_entities(collection)
        assert res_count == default_nb
        get_ids = [ids[get_pos]]
        res = connect.get_entity_by_id(collection, get_ids)
        # default_entities[-1] is presumably the float-vector field — compare vectors.
        assert_equal_vector(res[0].get(default_float_vec_field_name), default_entities[-1]["values"][get_pos])

    def test_get_entity_multi_ids(self, connect, collection, get_pos):
        '''
        target: test.get_entity_by_id, get one
        method: add entity, and get
        expected: entity returned equals insert
        '''
        ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        get_ids = ids[:get_pos]
        res = connect.get_entity_by_id(collection, get_ids)
        # Results must come back in request order.
        for i in range(get_pos):
            assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i])
+
    def test_get_entity_parts_ids(self, connect, collection):
        '''
        target: test.get_entity_by_id, some ids in collection, some ids not
        method: add entity, and get
        expected: entity returned equals insert
        '''
        ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        # id 1 is not in the collection, so its result slot must be None.
        get_ids = [ids[0], 1, ids[-1]]
        res = connect.get_entity_by_id(collection, get_ids)
        assert_equal_vector(res[0].get(default_float_vec_field_name), default_entities[-1]["values"][0])
        assert_equal_vector(res[-1].get(default_float_vec_field_name), default_entities[-1]["values"][-1])
        assert res[1] is None

    def test_get_entity_limit(self, connect, collection, args):
        '''
        target: test.get_entity_by_id
        method: add entity, and get, limit > 1000
        expected: entity returned
        '''
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

        # Fetching all default_nb ids at once presumably exceeds the server's
        # per-call get limit and must raise — TODO confirm the limit value.
        ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        with pytest.raises(Exception) as e:
            res = connect.get_entity_by_id(collection, ids)
+
    def test_get_entity_same_ids(self, connect, id_collection):
        '''
        target: test.get_entity_by_id, with the same ids
        method: add entity, and get one id
        expected: entity returned equals insert
        '''
        # All rows share id 1; fetching id 1 should return exactly one entity.
        ids = [1 for i in range(default_nb)]
        res_ids = connect.bulk_insert(id_collection, default_entities, ids)
        connect.flush([id_collection])
        get_ids = [ids[0]]
        res = connect.get_entity_by_id(id_collection, get_ids)
        assert len(res) == 1
        assert_equal_vector(res[0].get(default_float_vec_field_name), default_entities[-1]["values"][0])

    def test_get_entity_params_same_ids(self, connect, id_collection):
        '''
        target: test.get_entity_by_id, with the same ids
        method: add entity, and get entity with the same ids
        expected: entity returned equals insert
        '''
        ids = [1]
        res_ids = connect.bulk_insert(id_collection, default_entity, ids)
        connect.flush([id_collection])
        # Requesting the same id twice must yield two (equal) results.
        get_ids = [1, 1]
        res = connect.get_entity_by_id(id_collection, get_ids)
        assert len(res) == len(get_ids)
        for i in range(len(get_ids)):
            logging.getLogger().info(i)
            assert_equal_vector(res[i].get(default_float_vec_field_name), default_entity[-1]["values"][0])

    def test_get_entities_params_same_ids(self, connect, collection):
        '''
        target: test.get_entity_by_id, with the same ids
        method: add entities, and get entity with the same ids
        expected: entity returned equals insert
        '''
        res_ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        get_ids = [res_ids[0], res_ids[0]]
        res = connect.get_entity_by_id(collection, get_ids)
        assert len(res) == len(get_ids)
        for i in range(len(get_ids)):
            assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][0])
+
+    """
+    ******************************************************************
+      The following cases are used to test `get_entity_by_id` function, with different metric type
+    ******************************************************************
+    """
+
+    def test_get_entity_parts_ids_binary(self, connect, binary_collection):
+        '''
+        target: test.get_entity_by_id, some ids in jac_collection, some ids not
+        method: add entity, and get
+        expected: entity returned equals insert
+        '''
+        ids = connect.bulk_insert(binary_collection, default_binary_entities)
+        connect.flush([binary_collection])
+        get_ids = [ids[0], 1, ids[-1]]
+        res = connect.get_entity_by_id(binary_collection, get_ids)
+        assert_equal_vector(res[0].get("binary_vector"), default_binary_entities[-1]["values"][0])
+        assert_equal_vector(res[-1].get("binary_vector"), default_binary_entities[-1]["values"][-1])
+        assert res[1] is None
+
+    """
+    ******************************************************************
+      The following cases are used to test `get_entity_by_id` function, with tags
+    ******************************************************************
+    """
+
+    def test_get_entities_tag(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: add entities with tag, get
+        expected: entity returned
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.bulk_insert(collection, default_entities, partition_tag = default_tag)
+        connect.flush([collection])
+        get_ids = ids[:get_pos]
+        res = connect.get_entity_by_id(collection, get_ids)
+        for i in range(get_pos):
+            assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i])
+
+    def test_get_entities_tag_default(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: add entities with default tag, get
+        expected: entity returned
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        get_ids = ids[:get_pos]
+        res = connect.get_entity_by_id(collection, get_ids)
+        for i in range(get_pos):
+            assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i])
+
+    def test_get_entities_tags_default(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: create partitions, add entities with default tag, get
+        expected: entity returned
+        '''
+        tag_new = "tag_new"
+        connect.create_partition(collection, default_tag)
+        connect.create_partition(collection, tag_new)
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        get_ids = ids[:get_pos]
+        res = connect.get_entity_by_id(collection, get_ids)
+        for i in range(get_pos):
+            assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i])
+
+    def test_get_entities_tags_A(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: create partitions, add entities with default tag, get
+        expected: entity returned
+        '''
+        tag_new = "tag_new"
+        connect.create_partition(collection, default_tag)
+        connect.create_partition(collection, tag_new)
+        ids = connect.bulk_insert(collection, default_entities, partition_tag = default_tag)
+        connect.flush([collection])
+        get_ids = ids[:get_pos]
+        res = connect.get_entity_by_id(collection, get_ids)
+        for i in range(get_pos):
+            assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i])
+
+    def test_get_entities_tags_B(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: create partitions, add entities with default tag, get
+        expected: entity returned
+        '''
+        tag_new = "tag_new"
+        connect.create_partition(collection, default_tag)
+        connect.create_partition(collection, tag_new)
+        new_entities = gen_entities(default_nb + 1)
+        ids = connect.bulk_insert(collection, default_entities, partition_tag = default_tag)
+        ids_new = connect.bulk_insert(collection, new_entities, partition_tag = tag_new)
+        connect.flush([collection])
+        get_ids = ids[:get_pos]
+        get_ids.extend(ids_new[:get_pos])
+        res = connect.get_entity_by_id(collection, get_ids)
+        for i in range(get_pos):
+            assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i])
+        for i in range(get_pos, get_pos * 2):
+            assert_equal_vector(res[i].get(default_float_vec_field_name), new_entities[-1]["values"][i - get_pos])
+
+    @pytest.mark.level(2)
+    def test_get_entities_indexed_tag(self, connect, collection, get_simple_index, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: add entities with tag, get
+        expected: entity returned
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.bulk_insert(collection, default_entities, partition_tag = default_tag)
+        connect.flush([collection])
+        connect.create_index(collection, default_float_vec_field_name, get_simple_index)
+        get_ids = ids[:get_pos]
+        res = connect.get_entity_by_id(collection, get_ids)
+        for i in range(get_pos):
+            assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i])
+
+    """
+    ******************************************************************
+      The following cases are used to test `get_entity_by_id` function, with fields params
+    ******************************************************************
+    """
+
+    def test_get_entity_field(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id, get one
+        method: add entity, and get
+        expected: entity returned equals insert
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        get_ids = [ids[get_pos]]
+        fields = ["int64"]
+        res = connect.get_entity_by_id(collection, get_ids, fields = fields)
+        # assert fields
+        res = res.dict()
+        assert res[0]["field"] == fields[0]
+        assert res[0]["values"] == [default_entities[0]["values"][get_pos]]
+        assert res[0]["type"] == DataType.INT64
+
+    def test_get_entity_fields(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id, get one
+        method: add entity, and get
+        expected: entity returned equals insert
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        get_ids = [ids[get_pos]]
+        fields = ["int64", "float", default_float_vec_field_name]
+        res = connect.get_entity_by_id(collection, get_ids, fields = fields)
+        # assert fields
+        res = res.dict()
+        assert len(res) == len(fields)
+        for field in res:
+            if field["field"] == fields[0]:
+                assert field["values"] == [default_entities[0]["values"][get_pos]]
+            elif field["field"] == fields[1]:
+                assert field["values"] == [default_entities[1]["values"][get_pos]]
+            else:
+                assert_equal_vector(field["values"][0], default_entities[-1]["values"][get_pos])
+
+    # TODO: assert exception
+    def test_get_entity_field_not_match(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id, get one
+        method: add entity, and get
+        expected: entity returned equals insert
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        get_ids = [ids[get_pos]]
+        fields = ["int1288"]
+        with pytest.raises(Exception) as e:
+            res = connect.get_entity_by_id(collection, get_ids, fields = fields)
+
+    # TODO: assert exception
+    def test_get_entity_fields_not_match(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id, get one
+        method: add entity, and get
+        expected: entity returned equals insert
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        get_ids = [ids[get_pos]]
+        fields = ["int1288"]
+        with pytest.raises(Exception) as e:
+            res = connect.get_entity_by_id(collection, get_ids, fields = fields)
+
+    def test_get_entity_id_not_exised(self, connect, collection):
+        '''
+        target: test get_entity_by_id with an entity id that does not exist
+        method: insert one entity, then fetch id 1
+        expected: None is returned for the missing id
+        '''
+        # NOTE(review): "exised" in the test name is a typo for "existed";
+        # renaming would change the pytest test id, so it is only flagged here.
+        ids = connect.bulk_insert(collection, default_entity)
+        connect.flush([collection])
+        # Assumes the auto-generated id is never 1 -- TODO confirm.
+        res = connect.get_entity_by_id(collection, [1])
+        assert res[0] is None
+
+    def test_get_entity_collection_not_existed(self, connect, collection):
+        '''
+        target: test get entity, params collection_name not existed
+        method: add entity and get
+        expected: error raised
+        '''
+        ids = connect.bulk_insert(collection, default_entity)
+        connect.flush([collection])
+        collection_new = gen_unique_str()
+        with pytest.raises(Exception) as e:
+            res = connect.get_entity_by_id(collection_new, [ids[0]])
+
+    """
+    ******************************************************************
+      The following cases are used to test `get_entity_by_id` function, after deleted
+    ******************************************************************
+    """
+
+    def test_get_entity_after_delete(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: add entities, and delete, get entity by the given id
+        expected: empty result
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        delete_ids = [ids[get_pos]]
+        status = connect.delete_entity_by_id(collection, delete_ids)
+        connect.flush([collection])
+        get_ids = [ids[get_pos]]
+        res = connect.get_entity_by_id(collection, get_ids)
+        assert res[0] is None
+
+    def test_get_entities_after_delete(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: add entities, and delete, get entity by the given id
+        expected: empty result
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        delete_ids = ids[:get_pos]
+        status = connect.delete_entity_by_id(collection, delete_ids)
+        connect.flush([collection])
+        get_ids = delete_ids
+        res = connect.get_entity_by_id(collection, get_ids)
+        for i in range(get_pos):
+            assert res[i] is None
+
+    def test_get_entities_after_delete_compact(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: add entities, and delete, get entity by the given id
+        expected: empty result
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        delete_ids = ids[:get_pos]
+        status = connect.delete_entity_by_id(collection, delete_ids)
+        connect.flush([collection])
+        connect.compact(collection)
+        get_ids = ids[:get_pos]
+        res = connect.get_entity_by_id(collection, get_ids)
+        for i in range(get_pos):
+            assert res[i] is None
+
+    def test_get_entities_indexed_batch(self, connect, collection, get_simple_index, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: add entities batch, create index, get
+        expected: entity returned
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        connect.create_index(collection, default_float_vec_field_name, get_simple_index)
+        get_ids = ids[:get_pos]
+        res = connect.get_entity_by_id(collection, get_ids)
+        for i in range(get_pos):
+            assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i])
+
+    @pytest.mark.level(2)
+    def test_get_entities_indexed_single(self, connect, collection, get_simple_index, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: add entities 1 entity/per request, create index, get
+        expected: entity returned
+        '''
+        ids = []
+        for i in range(default_nb):
+            ids.append(connect.bulk_insert(collection, default_entity)[0])
+        connect.flush([collection])
+        connect.create_index(collection, default_float_vec_field_name, get_simple_index)
+        get_ids = ids[:get_pos]
+        res = connect.get_entity_by_id(collection, get_ids)
+        for i in range(get_pos):
+            assert_equal_vector(res[i].get(default_float_vec_field_name), default_entity[-1]["values"][0])
+
+    def test_get_entities_with_deleted_ids(self, connect, id_collection):
+        '''
+        target: test.get_entity_by_id
+        method: add entities ids, and delete part, get entity include the deleted id
+        expected:
+        '''
+        ids = [i for i in range(default_nb)]
+        res_ids = connect.bulk_insert(id_collection, default_entities, ids)
+        connect.flush([id_collection])
+        status = connect.delete_entity_by_id(id_collection, [res_ids[1]])
+        connect.flush([id_collection])
+        get_ids = res_ids[:2]
+        res = connect.get_entity_by_id(id_collection, get_ids)
+        assert len(res) == len(get_ids)
+        assert_equal_vector(res[0].get(default_float_vec_field_name), default_entities[-1]["values"][0])
+        assert res[1] is None
+
+    # TODO: unable to set config
+    def _test_get_entities_after_delete_disable_autoflush(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: disable autoflush, add entities, and delete, get entity by the given id
+        expected: entities are still returned because the delete has not been flushed
+        '''
+        # Disabled (leading underscore): depends on toggling server-side
+        # autoflush config, which is not currently settable from the tests.
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        delete_ids = ids[:get_pos]
+        try:
+            disable_flush(connect)
+            status = connect.delete_entity_by_id(collection, delete_ids)
+            get_ids = ids[:get_pos]
+            res = connect.get_entity_by_id(collection, get_ids)
+            # With autoflush off the delete is not applied yet, so the
+            # original vectors are expected back.
+            for i in range(get_pos):
+                assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i])
+        finally:
+            # Re-enable flushing even if the assertions above fail.
+            enable_flush(connect)
+
+    def test_get_entities_after_delete_same_ids(self, connect, id_collection):
+        '''
+        target: test.get_entity_by_id
+        method: add entities with the same ids, and delete, get entity by the given id
+        expected: empty result
+        '''
+        ids = [i for i in range(default_nb)]
+        ids[0] = 1
+        res_ids = connect.bulk_insert(id_collection, default_entities, ids)
+        connect.flush([id_collection])
+        status = connect.delete_entity_by_id(id_collection, [1])
+        connect.flush([id_collection])
+        get_ids = [1]
+        res = connect.get_entity_by_id(id_collection, get_ids)
+        assert res[0] is None
+
+    def test_get_entity_after_delete_with_partition(self, connect, collection, get_pos):
+        '''
+        target: test.get_entity_by_id
+        method: add entities into partition, and delete, get entity by the given id
+        expected: get one entity
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.bulk_insert(collection, default_entities, partition_tag = default_tag)
+        connect.flush([collection])
+        status = connect.delete_entity_by_id(collection, [ids[get_pos]])
+        connect.flush([collection])
+        res = connect.get_entity_by_id(collection, [ids[get_pos]])
+        assert res[0] is None
+
+    def test_get_entity_by_id_multithreads(self, connect, collection):
+        # Fetch the same id range from several threads concurrently and
+        # verify each thread sees the inserted vectors.
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        get_id = ids[100:200]
+
+        def get():
+            res = connect.get_entity_by_id(collection, get_id)
+            assert len(res) == len(get_id)
+            for i in range(len(res)):
+                assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][100 + i])
+
+        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
+            future_results = {executor.submit(
+                get): i for i in range(10)}
+            for future in concurrent.futures.as_completed(future_results):
+                # result() re-raises any assertion error from the worker thread.
+                future.result()
+
+    @pytest.mark.level(2)
+    def test_get_entity_by_id_insert_multi_threads(self, connect, collection):
+        '''
+        target: test.get_entity_by_id
+        method: thread do insert and get
+        expected: the originally inserted ids remain readable while inserts run
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        get_id = ids[:1000]
+
+        def insert():
+            # logging.getLogger().info(current_thread().getName() + " insert")
+            step = 1000
+            for i in range(default_nb // step):
+                group_entities = gen_entities(step, False)
+                connect.bulk_insert(collection, group_entities)
+                connect.flush([collection])
+
+        def get():
+            # logging.getLogger().info(current_thread().getName() + " get")
+            res = connect.get_entity_by_id(collection, get_id)
+            assert len(res) == len(get_id)
+            for i in range(len(res)):
+                assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i])
+
+        with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
+            # Interleave reads and writes in a random order; calling result()
+            # immediately means the tasks actually run one at a time.
+            for i in range(20):
+                fun = random.choices([get, insert])[0]
+                future = executor.submit(fun)
+                future.result()
+
+    @pytest.mark.level(2)
+    def test_get_entity_by_id_insert_multi_threads_2(self, connect, collection):
+        '''
+        target: test.get_entity_by_id
+        method: thread do insert and get
+        expected: each inserted group can be read back concurrently
+        '''
+        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
+            def get(group_ids, group_entities):
+                # logging.getLogger().info(current_thread().getName() + " get")
+                res = connect.get_entity_by_id(collection, group_ids)
+                assert len(res) == len(group_ids)
+                for i in range(len(res)):
+                    assert_equal_vector(res[i].get(default_float_vec_field_name), group_entities[-1]["values"][i])
+
+            def insert(group_vectors):
+                # logging.getLogger().info(current_thread().getName() + " insert")
+                for group_vector in group_vectors:
+                    group_entities = [
+                        {"name": "int64", "type": DataType.INT64, "values": [i for i in range(step)]},
+                        {"name": "float", "type": DataType.FLOAT, "values": [float(i) for i in range(step)]},
+                        {"name": default_float_vec_field_name, "type": DataType.FLOAT_VECTOR, "values": group_vector}
+                    ]
+                    group_ids = connect.bulk_insert(collection, group_entities)
+                    connect.flush([collection])
+                    # Schedule a concurrent read-back of the group just inserted.
+                    executor.submit(get, group_ids, group_entities)
+
+            step = 100
+            vectors = gen_vectors(default_nb, default_dim, False)
+            # Split the vectors into groups of `step` for per-group inserts.
+            group_vectors = [vectors[i:i + step] for i in range(0, len(vectors), step)]
+            task = executor.submit(insert, group_vectors)
+            # Wait for the insert task; the get() futures are only awaited
+            # implicitly by the executor shutdown at `with` exit.
+            task.result()
+
+
+class TestGetInvalid(object):
+    """
+    Test get entities with invalid params
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_collection_name(self, request):
+        # Parametrized over a set of invalid collection-name strings.
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_field_name(self, request):
+        # Parametrized over a set of invalid field-name strings.
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_ints()
+    )
+    def get_entity_id(self, request):
+        # Parametrized over values that are not valid int64 entity ids.
+        yield request.param
+
+    @pytest.mark.level(2)
+    def test_insert_ids_invalid(self, connect, collection, get_entity_id):
+        '''
+        target: test get_entity_by_id with ids that are not valid int64
+        method: call get_entity_by_id with a list of invalid ids
+        expected: raise an exception
+        '''
+        # NOTE(review): despite "insert" in the name, this exercises
+        # get_entity_by_id; renaming would change the pytest test id.
+        entity_id = get_entity_id
+        ids = [entity_id for _ in range(default_nb)]
+        with pytest.raises(Exception):
+            connect.get_entity_by_id(collection, ids)
+
+    @pytest.mark.level(2)
+    def test_insert_parts_ids_invalid(self, connect, collection, get_entity_id):
+        '''
+        target: test get_entity_by_id with one invalid id among valid ones
+        method: call get_entity_by_id where only the last id is invalid
+        expected: raise an exception
+        '''
+        entity_id = get_entity_id
+        ids = [i for i in range(default_nb)]
+        ids[-1] = entity_id
+        with pytest.raises(Exception):
+            connect.get_entity_by_id(collection, ids)
+
+    @pytest.mark.level(2)
+    def test_get_entities_with_invalid_collection_name(self, connect, get_collection_name):
+        # Invalid collection names must be rejected by the server/SDK.
+        collection_name = get_collection_name
+        ids = [1]
+        with pytest.raises(Exception):
+            res = connect.get_entity_by_id(collection_name, ids)
+
+    @pytest.mark.level(2)
+    def test_get_entities_with_invalid_field_name(self, connect, collection, get_field_name):
+        # Invalid field names in `fields` must be rejected by the server/SDK.
+        field_name = get_field_name
+        ids = [1]
+        fields = [field_name]
+        with pytest.raises(Exception):
+            res = connect.get_entity_by_id(collection, ids, fields = fields)
diff --git a/tests/python_test/entity/test_insert.py b/tests/python_test/entity/test_insert.py
new file mode 100644
index 000000000..dc587a898
--- /dev/null
+++ b/tests/python_test/entity/test_insert.py
@@ -0,0 +1,1090 @@
+import logging
+import time
+import pdb
+import copy
+import threading
+from multiprocessing import Pool, Process
+import pytest
+from milvus import DataType
+from utils import *
+from constants import *
+
+# Timeout (seconds) applied to insert tests via @pytest.mark.timeout.
+ADD_TIMEOUT = 60
+uid = "test_insert"
+field_name = default_float_vec_field_name
+binary_field_name = default_binary_vec_field_name
+# Minimal vector search request reused by tests that only need some valid query.
+default_single_query = {
+    "bool": {
+        "must": [
+            {"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type": "L2",
+                                     "params": {"nprobe": 10}}}}
+        ]
+    }
+}
+
+
+class TestInsertBase:
+    """
+    ******************************************************************
+      The following cases are used to test `insert` function
+    ******************************************************************
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        # Skip index types the server cannot build when running in CPU mode.
+        if str(connect._cmd("mode")) == "CPU":
+            if request.param["index_type"] in index_cpu_not_support():
+                pytest.skip("CPU not support index_type: ivf_sq8h")
+        return request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_single_filter_fields()
+    )
+    def get_filter_field(self, request):
+        # Parametrized over the supported scalar (filter) field definitions.
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_single_vector_fields()
+    )
+    def get_vector_field(self, request):
+        # Parametrized over the supported vector field definitions.
+        yield request.param
+
+    def test_insert_with_empty_entity(self, connect, collection):
+        '''
+        target: test insert with empty entity list
+        method: set empty entity list as insert method params
+        expected: raises a ParamError exception
+        '''
+        entities = []
+        with pytest.raises(ParamError) as e:
+            status, ids = connect.insert(collection, entities)
+
+    def test_insert_with_None(self, connect, collection):
+        '''
+        target: test insert with None
+        method: set None as insert method params
+        expected: raises a ParamError
+        '''
+        entity = None
+        with pytest.raises(Exception) as e:
+            status, ids = connect.insert(collection, entity)
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_collection_not_existed(self, connect):
+        '''
+        target: test insert, with collection not existed
+        method: insert entity into a random named collection
+        expected: raise a BaseException
+        '''
+        collection_name = gen_unique_str(uid)
+        with pytest.raises(Exception) as e:
+            connect.insert(collection_name, default_entities)
+
+    @pytest.mark.level(2)
+    def test_insert_without_connect(self, dis_connect, collection):
+        '''
+        target: test insert entities without connection
+        method: create collection and insert entities in it, check if inserted successfully
+        expected: raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            ids = dis_connect.insert(collection, default_entities)
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_drop_collection(self, connect, collection):
+        '''
+        target: test delete collection after insert entities
+        method: insert entities and drop collection
+        expected: has_collection false
+        '''
+        ids = connect.insert(collection, default_entity_row)
+        assert len(ids) == 1
+        connect.drop_collection(collection)
+        assert connect.has_collection(collection) == False
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_flush_drop_collection(self, connect, collection):
+        '''
+        target: test drop collection after insert entities for a while
+        method: insert entities, sleep, and delete collection
+        expected: has_collection false
+        '''
+        ids = connect.insert(collection, default_entity_row)
+        assert len(ids) == 1
+        connect.flush([collection])
+        connect.drop_collection(collection)
+        assert connect.has_collection(collection) == False
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_create_index(self, connect, collection, get_simple_index):
+        '''
+        target: test build index insert after entities
+        method: insert entities and build index
+        expected: no error raised
+        '''
+        ids = connect.insert(collection, default_entities)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        connect.create_index(collection, field_name, get_simple_index)
+        index = connect.describe_index(collection, field_name)
+        assert index == get_simple_index
+        # fields = info["fields"]
+        # for field in fields:
+        #     if field["name"] == field_name:
+        #         assert field["indexes"][0] == get_simple_index
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_after_create_index(self, connect, collection, get_simple_index):
+        '''
+        target: test build index insert after vector
+        method: insert entities and build index
+        expected: no error raised
+        '''
+        connect.create_index(collection, field_name, get_simple_index)
+        ids = connect.insert(collection, default_entities)
+        assert len(ids) == default_nb
+        index = connect.describe_index(collection, field_name)
+        assert index == get_simple_index
+
+    # TODO
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_search(self, connect, collection):
+        '''
+        target: test search entity after insert entity after a while
+        method: insert entity, sleep, and search collection
+        expected: no error raised 
+        '''
+        ids = connect.insert(collection, default_entities)
+        connect.flush([collection])
+        connect.load_collection(collection)
+        res = connect.search(collection, default_single_query)
+        assert res
+
+    @pytest.mark.skip("No segment_row_limit")
+    def test_insert_segment_row_count(self, connect, collection):
+        nb = default_segment_row_limit + 1
+        res_ids = connect.insert(collection, gen_entities(nb))
+        connect.flush([collection])
+        assert len(res_ids) == nb
+        stats = connect.get_collection_stats(collection)
+        assert len(stats['partitions'][0]['segments']) == 2
+        for segment in stats['partitions'][0]['segments']:
+            assert segment['row_count'] in [default_segment_row_limit, 1]
+
+    @pytest.fixture(
+        scope="function",
+        params=[
+            1,
+            2000
+        ],
+    )
+    def insert_count(self, request):
+        # Entity counts used to exercise both single-row and batch inserts.
+        yield request.param
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_ids(self, connect, id_collection, insert_count):
+        '''
+        target: test insert entities in collection, use customize ids
+        method: create collection and insert entities in it, check the ids returned and the collection length after entities inserted
+        expected: the length of ids and the collection row count
+        '''
+        nb = insert_count
+        ids = [i for i in range(nb)]
+        res_ids = connect.insert(id_collection, gen_entities(nb), ids)
+        connect.flush([id_collection])
+        assert len(res_ids) == nb
+        assert res_ids == ids
+        stats = connect.get_collection_stats(id_collection)
+        assert stats["row_count"] == nb
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_the_same_ids(self, connect, id_collection, insert_count):
+        '''
+        target: test insert vectors in collection, use customize the same ids
+        method: create collection and insert vectors in it, check the ids returned and the collection length after vectors inserted
+        expected: the length of ids and the collection row count
+        '''
+        nb = insert_count
+        ids = [1 for i in range(nb)]
+        res_ids = connect.insert(id_collection, gen_entities(nb), ids)
+        connect.flush([id_collection])
+        assert len(res_ids) == nb
+        assert res_ids == ids
+        stats = connect.get_collection_stats(id_collection)
+        assert stats["row_count"] == nb
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
+        '''
+        target: test create normal collection with different fields, insert entities into id with ids
+        method: create collection with diff fields: metric/field_type/..., insert, and count
+        expected: row count correct
+        '''
+        nb = 5
+        filter_field = get_filter_field
+        vector_field = get_vector_field
+        collection_name = gen_unique_str("test_collection")
+        fields = {
+            "fields": [filter_field, vector_field],
+            "auto_id": True
+        }
+        connect.create_collection(collection_name, fields)
+        ids = [i for i in range(nb)]
+        entities = gen_entities_by_fields(fields["fields"], nb, dim)
+        res_ids = connect.insert(collection_name, entities, ids)
+        assert res_ids == ids
+        connect.flush([collection_name])
+        stats = connect.get_collection_stats(id_collection)
+        assert stats["row_count"] == nb
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_ids_not_match(self, connect, id_collection, insert_count):
+        '''
+        target: test insert entities in collection without ids
+        method: create id_collection and insert entities without
+        expected: exception raised
+        '''
+        nb = insert_count
+        with pytest.raises(Exception) as e:
+            connect.insert(id_collection, gen_entities(nb))
+
+    # TODO
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_twice_ids_no_ids(self, connect, collection):
+        '''
+        target: check the result of insert, with params ids and no ids
+        method: test insert vectors twice, use customize ids first, and then use no ids
+        expected:  BaseException raised
+        '''
+        ids = [i for i in range(default_nb)]
+        res_ids = connect.insert(id_collection, default_entities, ids)
+        with pytest.raises(Exception) as e:
+            res_ids_new = connect.insert(id_collection, default_entities)
+
+    # TODO: assert exception && enable
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_twice_not_ids_ids(self, connect, id_collection):
+        '''
+        target: check the result of insert, with params ids and no ids
+        method: test insert vectors twice, use not ids first, and then use customize ids
+        expected:  error raised
+        '''
+        # NOTE(review): the docstring describes two inserts, but only the
+        # no-ids insert is performed (it already raises on an id-collection).
+        with pytest.raises(Exception) as e:
+            res_ids = connect.insert(id_collection, default_entities)
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_ids_length_not_match_batch(self, connect, id_collection):
+        '''
+        target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
+        method: create collection and insert vectors in it
+        expected: raise an exception
+        '''
+        ids = [i for i in range(1, default_nb)]
+        logging.getLogger().info(len(ids))
+        with pytest.raises(Exception) as e:
+            res_ids = connect.insert(id_collection, default_entities, ids)
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_ids_length_not_match_single(self, connect, id_collection):
+        '''
+        target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
+        method: create collection and insert vectors in it
+        expected: raise an exception
+        '''
+        ids = [i for i in range(1, default_nb)]
+        logging.getLogger().info(len(ids))
+        with pytest.raises(Exception) as e:
+            res_ids = connect.insert(id_collection, default_entity, ids)
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_partition(self, connect, collection):
+        '''
+        target: test insert entities in collection created before
+        method: create collection and insert entities in it, with the partition_tag param
+        expected: the collection row count equals to nq
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        assert len(ids) == default_nb
+        assert connect.has_partition(collection, default_tag)
+        connect.flush([collection_name])
+        stats = connect.get_collection_stats(id_collection)
+        assert stats["row_count"] == default_nb
+
+    # TODO
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_partition_with_ids(self, connect, id_collection):
+        '''
+        target: test insert entities in collection created before, insert with ids
+        method: create collection and insert entities in it, with the partition_tag param
+        expected: the collection row count equals to nq
+        '''
+        connect.create_partition(id_collection, default_tag)
+        ids = [i for i in range(default_nb)]
+        res_ids = connect.insert(id_collection, gen_entities(default_nb, _id=False), partition_tag=default_tag)
+        assert res_ids == ids
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_default_partition(self, connect, collection):
+        '''
+        target: test insert entities into default partition
+        method: create partition and insert info collection without tag params
+        expected: the collection row count equals to nb
+        '''
+        default_tag = "_default"
+        with pytest.raises(Exception) as e:
+            connect.create_partition(collection, default_tag)
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_partition_not_existed(self, connect, collection):
+        '''
+        target: test insert entities in collection created before
+        method: create collection and insert entities in it, with the not existed partition_tag param
+        expected: error raised
+        '''
+        tag = gen_unique_str()
+        with pytest.raises(Exception) as e:
+            ids = connect.insert(collection, default_entities, partition_tag=tag)
+
+    @pytest.mark.timeout(ADD_TIMEOUT)
+    def test_insert_partition_repeatedly(self, connect, collection):
+        '''
+        target: test insert entities in collection created before
+        method: create collection and insert entities in it repeatly, with the partition_tag param
+        expected: the collection row count equals to nq
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush([collection])
+        res = connect.get_collection_stats(collection)
+        assert res["row_count"] == 2 * default_nb
+
+    def test_insert_dim_not_matched(self, connect, collection):
+        '''
+        target: test insert entities, the vector dimension is not equal to the collection dimension
+        method: the entities dimension is half of the collection dimension, check the status
+        expected: error raised
+        '''
+        vectors = gen_vectors(default_nb, int(default_dim) // 2)
+        insert_entities = copy.deepcopy(default_entities)
+        insert_entities[-1][default_float_vec_field_name] = vectors
+        with pytest.raises(Exception) as e:
+            ids = connect.insert(collection, insert_entities)
+
+    def test_insert_with_field_name_not_match(self, connect, collection):
+        '''
+        target: test insert entities, with the entity field name updated
+        method: update entity field name
+        expected: error raised
+        '''
+        tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
+        with pytest.raises(Exception):
+            connect.insert(collection, tmp_entity)
+
+    @pytest.mark.level(2)
+    def test_insert_with_field_type_not_match(self, connect, collection):
+        '''
+        target: test insert entities, with the entity field type updated
+        method: update entity field type
+        expected: error raised
+        '''
+        tmp_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
+        with pytest.raises(Exception):
+            connect.insert(collection, tmp_entity)
+
+    @pytest.mark.level(2)
+    def test_insert_with_field_value_not_match(self, connect, collection):
+        '''
+        target: test insert entities, with the entity field value updated
+        method: update entity field value
+        expected: error raised
+        '''
+        tmp_entity = update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
+        with pytest.raises(Exception):
+            connect.insert(collection, tmp_entity)
+
+    def test_insert_with_field_more(self, connect, collection):
+        '''
+        target: test insert entities, with more fields than collection schema
+        method: add entity field
+        expected: error raised
+        '''
+        tmp_entity = add_field(copy.deepcopy(default_entity))
+        with pytest.raises(Exception):
+            connect.insert(collection, tmp_entity)
+
+    def test_insert_with_field_vector_more(self, connect, collection):
+        '''
+        target: test insert entities, with more fields than collection schema
+        method: add entity vector field
+        expected: error raised
+        '''
+        tmp_entity = add_vector_field(default_nb, default_dim)
+        with pytest.raises(Exception):
+            connect.insert(collection, tmp_entity)
+
+    def test_insert_with_field_less(self, connect, collection):
+        '''
+        target: test insert entities, with less fields than collection schema
+        method: remove entity field
+        expected: error raised
+        '''
+        tmp_entity = remove_field(copy.deepcopy(default_entity))
+        with pytest.raises(Exception):
+            connect.insert(collection, tmp_entity)
+
+    def test_insert_with_field_vector_less(self, connect, collection):
+        '''
+        target: test insert entities, with less fields than collection schema
+        method: remove entity vector field
+        expected: error raised
+        '''
+        tmp_entity = remove_vector_field(copy.deepcopy(default_entity))
+        with pytest.raises(Exception):
+            connect.insert(collection, tmp_entity)
+
+    def test_insert_with_no_field_vector_value(self, connect, collection):
+        '''
+        target: test insert entities, with no vector field value
+        method: remove entity values of vector field
+        expected: error raised
+        '''
+        tmp_entity = copy.deepcopy(default_entity)
+        del tmp_entity[-1]["values"]
+        with pytest.raises(Exception):
+            connect.insert(collection, tmp_entity)
+
+    def test_insert_with_no_field_vector_type(self, connect, collection):
+        '''
+        target: test insert entities, with no vector field type
+        method: remove entity vector field
+        expected: error raised
+        '''
+        tmp_entity = copy.deepcopy(default_entity)
+        del tmp_entity[-1]["type"]
+        with pytest.raises(Exception):
+            connect.insert(collection, tmp_entity)
+
+    def test_insert_with_no_field_vector_name(self, connect, collection):
+        '''
+        target: test insert entities, with no vector field name
+        method: remove entity vector field
+        expected: error raised
+        '''
+        tmp_entity = copy.deepcopy(default_entity)
+        del tmp_entity[-1]["name"]
+        with pytest.raises(Exception):
+            connect.insert(collection, tmp_entity)
+
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(30)
+    def test_collection_insert_rows_count_multi_threading(self, args, collection):
+        '''
+        target: test collection rows_count is correct or not with multi threading
+        method: create collection and insert entities in it(idmap),
+            assert the value returned by count_entities method is equal to length of entities
+        expected: the count is equal to the length of entities
+        '''
+        if args["handler"] == "HTTP":
+            pytest.skip("Skip test in http mode")
+        thread_num = 8
+        threads = []
+        milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
+
+        def insert(thread_i):
+            logging.getLogger().info("In thread-%d" % thread_i)
+            res_ids = milvus.insert(collection, default_entities)
+            milvus.flush([collection])
+
+        for i in range(thread_num):
+            x = threading.Thread(target=insert, args=(i,))
+            threads.append(x)
+            x.start()
+        for th in threads:
+            th.join()
+        res_count = milvus.count_entities(collection)
+        assert res_count == thread_num * default_nb
+
    # TODO: unable to set config
    @pytest.mark.level(2)
    def _test_insert_disable_auto_flush(self, connect, collection):
        '''
        target: test insert entities, with disable autoflush
        method: disable autoflush and insert, get entity
        expected: the count is equal to 0
        '''
        # Disabled (leading underscore) until auto-flush can be toggled via config.
        delete_nums = 500
        disable_flush(connect)
        ids = connect.insert(collection, default_entities)
        # With auto-flush disabled the inserted rows are not yet persisted, so the
        # fetch is expected to return placeholder entries (None) for each id.
        # NOTE(review): the docstring says "count equal to 0" but the assertions
        # expect delete_nums placeholder results — confirm the intended contract.
        res = connect.get_entity_by_id(collection, ids[:delete_nums])
        assert len(res) == delete_nums
        assert res[0] is None
+
+
class TestInsertBinary:
    """
    ******************************************************************
      The following cases are used to test `insert` with binary vectors
    ******************************************************************
    """

    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_binary_index(self, request):
        # Binary collections in these tests are searched with the JACCARD metric.
        request.param["metric_type"] = "JACCARD"
        return request.param

    def test_insert_binary_entities(self, connect, binary_collection):
        '''
        target: test insert entities in binary collection
        method: create collection and insert binary entities in it
        expected: the collection row count equals to nb
        '''
        ids = connect.insert(binary_collection, default_binary_entities)
        assert len(ids) == default_nb
        # Flush only the collection under test (a bare flush() would flush every
        # collection), consistent with the other tests in this file.
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats["row_count"] == default_nb

    def test_insert_binary_partition(self, connect, binary_collection):
        '''
        target: test insert entities and create partition tag
        method: create collection and insert binary entities in it, with the partition_tag param
        expected: the collection row count equals to nb
        '''
        connect.create_partition(binary_collection, default_tag)
        ids = connect.insert(binary_collection, default_binary_entities, partition_tag=default_tag)
        assert len(ids) == default_nb
        assert connect.has_partition(binary_collection, default_tag)
        # Flush before reading stats; without it row_count may not yet include
        # the new rows (the sibling tests all flush first).
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats["row_count"] == default_nb

    def test_insert_binary_multi_times(self, connect, binary_collection):
        '''
        target: test insert entities multi times and final flush
        method: create collection and insert binary entity multi and final flush
        expected: the collection row count equals to nb
        '''
        for i in range(default_nb):
            ids = connect.insert(binary_collection, default_binary_entity)
            assert len(ids) == 1
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats["row_count"] == default_nb

    def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
        '''
        target: test insert binary entities after build index
        method: build index and insert entities
        expected: no error raised
        '''
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        ids = connect.insert(binary_collection, default_binary_entities)
        assert len(ids) == default_nb
        connect.flush([binary_collection])
        index = connect.describe_index(binary_collection, binary_field_name)
        assert index == get_binary_index

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
        '''
        target: test build index insert after vector
        method: insert vector and build index
        expected: no error raised
        '''
        ids = connect.insert(binary_collection, default_binary_entities)
        assert len(ids) == default_nb
        connect.flush([binary_collection])
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        index = connect.describe_index(binary_collection, binary_field_name)
        assert index == get_binary_index

    def test_insert_binary_search(self, connect, binary_collection):
        '''
        target: test search vector after insert vector after a while
        method: insert vector, sleep, and search collection
        expected: no error raised
        '''
        ids = connect.insert(binary_collection, default_binary_entities)
        connect.flush([binary_collection])
        query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, 1, metric_type="JACCARD")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        logging.getLogger().debug(res)
        assert res
+
+
class TestInsertAsync:
    # Tests for the asynchronous insert path (_async=True). Skipped entirely for
    # the HTTP handler, which does not support async operations.
    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

    @pytest.fixture(
        scope="function",
        params=[
            1,
            1000
        ],
    )
    def insert_count(self, request):
        # Number of entities to insert: one tiny batch and one larger one.
        yield request.param

    def check_status(self, result):
        # Callback asserting a falsy result.
        # NOTE(review): a successful insert callback would normally receive the
        # ids (truthy) — confirm the SDK's callback argument contract.
        logging.getLogger().info("In callback check status")
        assert not result

    def check_result(self, result):
        # Callback asserting a truthy result (e.g. the inserted ids).
        logging.getLogger().info("In callback check status")
        assert result

    def test_insert_async(self, connect, collection, insert_count):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        nb = insert_count
        # _async=True returns a future; .result() blocks until the ids arrive.
        future = connect.insert(collection, gen_entities(nb), _async=True)
        ids = future.result()
        connect.flush([collection])
        assert len(ids) == nb

    @pytest.mark.level(2)
    def test_insert_async_false(self, connect, collection, insert_count):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        nb = insert_count
        # _async=False behaves like a plain synchronous insert: ids, not a future.
        ids = connect.insert(collection, gen_entities(nb), _async=False)
        # ids = future.result()
        connect.flush([collection])
        assert len(ids) == nb

    def test_insert_async_callback(self, connect, collection, insert_count):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        nb = insert_count
        future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_status)
        # done() waits for completion so the callback has a chance to run.
        future.done()

    @pytest.mark.level(2)
    def test_insert_async_long(self, connect, collection):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        nb = 50000
        future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
        result = future.result()
        assert len(result) == nb
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        logging.getLogger().info(stats)
        assert stats["row_count"] == nb

    @pytest.mark.level(2)
    def test_insert_async_callback_timeout(self, connect, collection):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        nb = 100000
        # A 1s timeout on a 100k-entity insert is expected to expire, so
        # future.result() should raise and nothing should be persisted.
        future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
        with pytest.raises(Exception) as e:
            result = future.result()
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == 0

    def test_insert_async_invalid_params(self, connect):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        # Inserting into a collection that was never created: the error is
        # expected to surface when the future is resolved, not at call time.
        collection_new = gen_unique_str()
        future = connect.insert(collection_new, default_entities, _async=True)
        with pytest.raises(Exception) as e:
            result = future.result()

    def test_insert_async_invalid_params_raise_exception(self, connect, collection):
        '''
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        '''
        # An empty entity list is invalid input; resolving the future raises.
        entities = []
        future = connect.insert(collection, entities, _async=True)
        with pytest.raises(Exception) as e:
            future.result()
+
+
class TestInsertMultiCollections:
    """
    ******************************************************************
      The following cases are used to test `insert` function
    ******************************************************************
    """

    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        logging.getLogger().info(request.param)
        # Some index types (e.g. SQ8H) require GPU support and are skipped in CPU mode.
        if str(connect._cmd("mode")) == "CPU":
            if request.param["index_type"] in index_cpu_not_support():
                pytest.skip("sq8h not support in CPU mode")
        return request.param

    def test_insert_entity_multi_collections(self, connect):
        '''
        target: test insert entities
        method: create 10 collections and insert entities into them in turn
        expected: row count
        '''
        collection_num = 10
        collection_list = []
        for i in range(collection_num):
            collection_name = gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            ids = connect.insert(collection_name, default_entities)
            connect.flush([collection_name])
            assert len(ids) == default_nb
            stats = connect.get_collection_stats(collection_name)
            assert stats["row_count"] == default_nb

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_drop_collection_insert_entity_another(self, connect, collection):
        '''
        target: test insert vector to collection_1 after collection_2 deleted
        method: delete collection_2 and insert vector to collection_1
        expected: row count equals the length of entities inserted
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.drop_collection(collection)
        ids = connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        assert len(ids) == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.create_index(collection, field_name, get_simple_index)
        ids = connect.insert(collection_name, default_entity)
        assert len(ids) == 1
        # connect.drop_collection(collection_name)

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        ids = connect.insert(collection, default_entity)
        connect.create_index(collection_name, field_name, get_simple_index)
        index = connect.describe_index(collection_name, field_name)
        assert index == get_simple_index
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1 for a while
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        ids = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        stats = connect.get_collection_stats(collection)
        assert stats["row_count"] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_search_entity_insert_vector_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        res = connect.search(collection, default_single_query)
        logging.getLogger().debug(res)
        ids = connect.insert(collection_name, default_entity)
        connect.flush()
        stats = connect.get_collection_stats(collection_name)
        assert stats["row_count"] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_entity_search_entity_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        ids = connect.insert(collection, default_entity)
        result = connect.search(collection_name, default_single_query)

    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_entity_sleep_search_entity_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2 a while
        method: search collection, sleep, and insert entity
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        ids = connect.insert(collection, default_entity)
        connect.flush([collection])
        result = connect.search(collection_name, default_single_query)

    @pytest.mark.timeout(ADD_TIMEOUT)
    def _test_insert_entity_during_release_collection(self, connect, collection):
        '''
        target: test insert entity during release
        method: release collection async, then do insert operation
        expected: insert ok
        '''
        for i in range(10):
            connect.insert(collection, default_entities)
        connect.flush([collection])
        connect.load_collection(collection)

        def release(collection):
            connect.release_collection(collection)

        # Bug fix: Thread takes its argument tuple via the `args` keyword;
        # passing it positionally was a TypeError (the second positional
        # parameter of Thread is `group`, not `args`).
        t = threading.Thread(target=release, args=(collection,))
        t.start()
        ids = connect.insert(collection, default_entities)
        assert len(ids) == default_nb
+
+
class TestInsertInvalid(object):
    """
    Test inserting vectors with invalid collection names
    """

    # Each fixture below parametrizes a test over a set of invalid values
    # (strings, ints, or vectors) produced by the shared generators in utils.
    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param

    def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
        '''
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        '''
        entity_id = get_entity_id
        ids = [entity_id for _ in range(default_nb)]
        with pytest.raises(Exception):
            connect.insert(id_collection, default_entities, ids)

    def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
        # Inserting into a collection with an invalid name must fail.
        collection_name = get_collection_name
        with pytest.raises(Exception):
            connect.insert(collection_name, default_entity)

    def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
        # An invalid partition tag must be rejected; a None tag is treated as
        # "no tag" and is expected to succeed.
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        if tag_name is not None:
            with pytest.raises(Exception):
                connect.insert(collection, default_entity, partition_tag=tag_name)
        else:
            connect.insert(collection, default_entity, partition_tag=tag_name)

    def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
        # Renaming "int64" to an invalid field name must cause insert to fail.
        tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
        # Setting the 'float' field's type to an invalid value must fail.
        field_type = get_field_type
        tmp_entity = update_field_type(copy.deepcopy(default_entity), 'float', field_type)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
        # NOTE(review): this calls update_field_type with a *value*; the test
        # name suggests update_field_value was intended — confirm against the
        # helper definitions in utils.
        field_value = get_field_int_value
        tmp_entity = update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)

    def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
        # Corrupt a single vector component with an invalid value; insert must fail.
        tmp_entity = copy.deepcopy(default_entity)
        src_vector = tmp_entity[-1]["values"]
        src_vector[0][1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
+
+
class TestInsertInvalidBinary(object):
    """
    Test inserting binary vectors with invalid parameters
    """

    # Each fixture below parametrizes a test over a set of invalid values
    # (strings, ints, or vectors) produced by the shared generators in utils.
    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
        # Renaming "int64" to an invalid field name must cause insert to fail.
        tmp_entity = update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
        # Setting the 'int64' field's type to an invalid value must fail.
        tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
        # Corrupt a single vector component of one entity; insert must fail.
        tmp_entity = copy.deepcopy(default_binary_entity)
        src_vector = tmp_entity[-1]["values"]
        src_vector[0][1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.level(2)
    def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
        '''
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        '''
        entity_id = get_entity_id
        ids = [entity_id for _ in range(default_nb)]
        with pytest.raises(Exception):
            connect.insert(binary_id_collection, default_binary_entities, ids)

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
        # Setting the 'int64' field's type to an invalid type must fail.
        field_type = get_field_type
        tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)

    @pytest.mark.level(2)
    def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
        # Bug fix: this method was previously named the same as the
        # single-entity test above, so Python silently dropped the first
        # definition and one test never ran. Renamed (it operates on the
        # multi-entity default_binary_entities) so both tests are collected.
        tmp_entity = copy.deepcopy(default_binary_entities)
        src_vector = tmp_entity[-1]["values"]
        src_vector[1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)
diff --git a/tests/python_test/entity/test_list_id_in_segment.py b/tests/python_test/entity/test_list_id_in_segment.py
new file mode 100644
index 000000000..dd95c8695
--- /dev/null
+++ b/tests/python_test/entity/test_list_id_in_segment.py
@@ -0,0 +1,318 @@
+import time
+import random
+import pdb
+import threading
+import logging
+from multiprocessing import Pool, Process
+import pytest
+from utils import *
+from constants import *
+
+uid = "list_id_in_segment"
+
def get_segment_id(connect, collection, nb=1, vec_type='float', index_params=None):
    """Insert ``nb`` entities, flush, optionally build an index, and return
    ``(inserted_ids, segment_id)`` for the first segment of the first partition."""
    if vec_type == 'float':
        entities = gen_entities(nb)
        vec_field = default_float_vec_field_name
    else:
        _, entities = gen_binary_entities(nb)
        vec_field = default_binary_vec_field_name
    ids = connect.bulk_insert(collection, entities)
    connect.flush([collection])
    if index_params:
        connect.create_index(collection, vec_field, index_params)
    stats = connect.get_collection_stats(collection)
    return ids, stats["partitions"][0]["segments"][0]["id"]
+
+
class TestListIdInSegmentBase:

    """
    ******************************************************************
      The following cases are used to test `list_id_in_segment` function
    ******************************************************************
    """
    def test_list_id_in_segment_collection_name_None(self, connect, collection):
        '''
        target: get vector ids where collection name is None
        method: call list_id_in_segment with the collection_name: None
        expected: exception raised
        '''
        collection_name = None
        ids, segment_id = get_segment_id(connect, collection)
        with pytest.raises(Exception) as e:
            connect.list_id_in_segment(collection_name, segment_id)

    def test_list_id_in_segment_collection_name_not_existed(self, connect, collection):
        '''
        target: get vector ids where collection name does not exist
        method: call list_id_in_segment with a random collection_name, which is not in db
        expected: status not ok
        '''
        collection_name = gen_unique_str(uid)
        ids, segment_id = get_segment_id(connect, collection)
        with pytest.raises(Exception) as e:
            vector_ids = connect.list_id_in_segment(collection_name, segment_id)

    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param

    @pytest.mark.level(2)
    def test_list_id_in_segment_collection_name_invalid(self, connect, collection, get_collection_name):
        '''
        target: get vector ids where collection name is invalid
        method: call list_id_in_segment with invalid collection_name
        expected: status not ok
        '''
        collection_name = get_collection_name
        ids, segment_id = get_segment_id(connect, collection)
        with pytest.raises(Exception) as e:
            connect.list_id_in_segment(collection_name, segment_id)

    def test_list_id_in_segment_name_None(self, connect, collection):
        '''
        target: get vector ids where segment name is None
        method: call list_id_in_segment with the name: None
        expected: exception raised
        '''
        ids, segment_id = get_segment_id(connect, collection)
        segment = None
        with pytest.raises(Exception) as e:
            vector_ids = connect.list_id_in_segment(collection, segment)

    def test_list_id_in_segment_name_not_existed(self, connect, collection):
        '''
        target: get vector ids where segment name does not exist
        method: call list_id_in_segment with a random segment name
        expected: status not ok
        '''
        ids, seg_id = get_segment_id(connect, collection)
        # offset far past the real segment id so the lookup cannot succeed
        with pytest.raises(Exception) as e:
            vector_ids = connect.list_id_in_segment(collection, seg_id + 10000)

    @pytest.mark.level(2)
    def test_list_id_in_segment_without_index_A(self, connect, collection):
        '''
        target: get vector ids when there is no index
        method: call list_id_in_segment and check if the segment contains vectors
        expected: status ok
        '''
        nb = 1
        ids, seg_id = get_segment_id(connect, collection, nb=nb)
        vector_ids = connect.list_id_in_segment(collection, seg_id)
        # vector_ids should match ids
        assert len(vector_ids) == nb
        assert vector_ids[0] == ids[0]

    @pytest.mark.level(2)
    def test_list_id_in_segment_without_index_B(self, connect, collection):
        '''
        target: get vector ids when there is no index but with partition
        method: create partition, add vectors to it and call list_id_in_segment, check if the segment contains vectors
        expected: status ok
        '''
        nb = 10
        entities = gen_entities(nb)
        connect.create_partition(collection, default_tag)
        ids = connect.bulk_insert(collection, entities, partition_tag=default_tag)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        # partition index 1 is the newly created tag (0 is the default partition)
        assert stats["partitions"][1]["tag"] == default_tag
        vector_ids = connect.list_id_in_segment(collection, stats["partitions"][1]["segments"][0]["id"])
        # vector_ids should match ids
        assert len(vector_ids) == nb
        for i in range(nb):
            assert vector_ids[i] == ids[i]

    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        if str(connect._cmd("mode")) == "CPU":
            if request.param["index_type"] in index_cpu_not_support():
                pytest.skip("CPU not support index_type: ivf_sq8h")
        return request.param

    @pytest.mark.level(2)
    def test_list_id_in_segment_with_index_A(self, connect, collection, get_simple_index):
        '''
        target: get vector ids when there is index
        method: call list_id_in_segment and check if the segment contains vectors
        expected: status ok
        '''
        ids, seg_id = get_segment_id(connect, collection, nb=default_nb, index_params=get_simple_index)
        try:
            connect.list_id_in_segment(collection, seg_id)
        except Exception as e:
            assert False, str(e)
        # TODO: assert the returned ids once the expected post-index layout is defined

    @pytest.mark.level(2)
    def test_list_id_in_segment_with_index_B(self, connect, collection, get_simple_index):
        '''
        target: get vector ids when there is index and with partition
        method: create partition, add vectors to it and call list_id_in_segment, check if the segment contains vectors
        expected: status ok
        '''
        connect.create_partition(collection, default_tag)
        ids = connect.bulk_insert(collection, default_entities, partition_tag=default_tag)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats["partitions"][1]["tag"] == default_tag
        try:
            connect.list_id_in_segment(collection, stats["partitions"][1]["segments"][0]["id"])
        except Exception as e:
            assert False, str(e)
        # vector_ids should match ids
        # TODO

    def test_list_id_in_segment_after_delete_vectors_A(self, connect, collection):
        '''
        target: get vector ids after vectors are deleted
        method: add vectors and delete a few, call list_id_in_segment
        expected: status ok, vector_ids decreased after vectors deleted

        NOTE(review): renamed with an _A suffix — this method and the one
        below originally shared the same name, so Python kept only the later
        definition and pytest never collected this one.
        '''
        nb = 2
        ids, seg_id = get_segment_id(connect, collection, nb=nb)
        delete_ids = [ids[0]]
        status = connect.delete_entity_by_id(collection, delete_ids)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        vector_ids = connect.list_id_in_segment(collection, stats["partitions"][0]["segments"][0]["id"])
        assert len(vector_ids) == 1
        assert vector_ids[0] == ids[1]

    @pytest.mark.level(2)
    def test_list_id_in_segment_after_delete_vectors_B(self, connect, collection):
        '''
        target: get vector ids after vectors are deleted
        method: add vectors and delete a few, call list_id_in_segment
        expected: vector_ids decreased after vectors deleted

        NOTE(review): renamed with a _B suffix to resolve the duplicate name
        with the test above.
        '''
        nb = 60
        delete_length = 10
        ids, seg_id = get_segment_id(connect, collection, nb=nb)
        delete_ids = ids[:delete_length]
        status = connect.delete_entity_by_id(collection, delete_ids)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        vector_ids = connect.list_id_in_segment(collection, stats["partitions"][0]["segments"][0]["id"])
        assert len(vector_ids) == nb - delete_length
        assert vector_ids[0] == ids[delete_length]

    @pytest.mark.level(2)
    def test_list_id_in_segment_with_index_ip(self, connect, collection, get_simple_index):
        '''
        target: get vector ids when there is index
        method: call list_id_in_segment and check if the segment contains vectors
        expected: ids returned in ids inserted
        '''
        get_simple_index["metric_type"] = "IP"
        ids, seg_id = get_segment_id(connect, collection, nb=default_nb, index_params=get_simple_index)
        vector_ids = connect.list_id_in_segment(collection, seg_id)
        # TODO:
        # only the first segment is checked; later rows may live in other segments
        segment_row_limit = connect.get_collection_info(collection)["segment_row_limit"]
        assert vector_ids[0:segment_row_limit] == ids[0:segment_row_limit]
+
class TestListIdInSegmentBinary:
    """
    ******************************************************************
      The following cases are used to test `list_id_in_segment` function
    ******************************************************************
    """
    @pytest.mark.level(2)
    def test_list_id_in_segment_without_index_A(self, connect, binary_collection):
        '''
        target: get vector ids when there is no index
        method: call list_id_in_segment and check if the segment contains vectors
        expected: status ok
        '''
        nb = 10
        vectors, entities = gen_binary_entities(nb)
        ids = connect.bulk_insert(binary_collection, entities)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        vector_ids = connect.list_id_in_segment(binary_collection, stats["partitions"][0]["segments"][0]["id"])
        # vector_ids should match ids
        assert len(vector_ids) == nb
        for i in range(nb):
            assert vector_ids[i] == ids[i]

    @pytest.mark.level(2)
    def test_list_id_in_segment_without_index_B(self, connect, binary_collection):
        '''
        target: get vector ids when there is no index but with partition
        method: create partition, add vectors to it and call list_id_in_segment, check if the segment contains vectors
        expected: status ok
        '''
        connect.create_partition(binary_collection, default_tag)
        nb = 10
        vectors, entities = gen_binary_entities(nb)
        ids = connect.bulk_insert(binary_collection, entities, partition_tag=default_tag)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        # partition index 1 is the newly created tag (0 is the default partition)
        vector_ids = connect.list_id_in_segment(binary_collection, stats["partitions"][1]["segments"][0]["id"])
        # vector_ids should match ids
        assert len(vector_ids) == nb
        for i in range(nb):
            assert vector_ids[i] == ids[i]

    # Parametrized binary index; forces JACCARD metric and skips index types
    # that do not support binary vectors.
    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_jaccard_index(self, request, connect):
        logging.getLogger().info(request.param)
        if request.param["index_type"] in binary_support():
            request.param["metric_type"] = "JACCARD"
            return request.param
        else:
            pytest.skip("not support")

    def test_list_id_in_segment_with_index_A(self, connect, binary_collection, get_jaccard_index):
        '''
        target: get vector ids when there is index
        method: call list_id_in_segment and check if the segment contains vectors
        expected: status ok
        '''
        ids, seg_id = get_segment_id(connect, binary_collection, nb=default_nb, index_params=get_jaccard_index, vec_type='binary')
        vector_ids = connect.list_id_in_segment(binary_collection, seg_id)
        # TODO: no assertion yet — only checks the call does not raise

    def test_list_id_in_segment_with_index_B(self, connect, binary_collection, get_jaccard_index):
        '''
        target: get vector ids when there is index and with partition
        method: create partition, add vectors to it and call list_id_in_segment, check if the segment contains vectors
        expected: status ok
        '''
        connect.create_partition(binary_collection, default_tag)
        ids = connect.bulk_insert(binary_collection, default_binary_entities, partition_tag=default_tag)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats["partitions"][1]["tag"] == default_tag
        vector_ids = connect.list_id_in_segment(binary_collection, stats["partitions"][1]["segments"][0]["id"])
        # vector_ids should match ids
        # TODO

    def test_list_id_in_segment_after_delete_vectors(self, connect, binary_collection, get_jaccard_index):
        '''
        target: get vector ids after vectors are deleted
        method: add vectors and delete a few, call list_id_in_segment
        expected: status ok, vector_ids decreased after vectors deleted
        '''
        nb = 2
        ids, seg_id = get_segment_id(connect, binary_collection, nb=nb, vec_type='binary', index_params=get_jaccard_index)
        delete_ids = [ids[0]]
        status = connect.delete_entity_by_id(binary_collection, delete_ids)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        # only the surviving entity should remain in the segment
        vector_ids = connect.list_id_in_segment(binary_collection, stats["partitions"][0]["segments"][0]["id"])
        assert len(vector_ids) == 1
        assert vector_ids[0] == ids[1]
diff --git a/tests/python_test/entity/test_search.py b/tests/python_test/entity/test_search.py
new file mode 100644
index 000000000..69b131e13
--- /dev/null
+++ b/tests/python_test/entity/test_search.py
@@ -0,0 +1,1782 @@
+import time
+import pdb
+import copy
+import logging
+from multiprocessing import Pool, Process
+import pytest
+import numpy as np
+
+from milvus import DataType
+from utils import *
+from constants import *
+
# Common constants shared by the search tests.
uid = "test_search"
nq = 1
# tolerance when comparing an L2 distance against an exact match (0.0)
epsilon = 0.001
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
search_param = {"nprobe": 1}

# Pre-generated data reused across tests: one float entity, a default batch of
# float entities, and a batch of binary entities plus their raw vectors, with
# ready-made query payloads for both field types.
entity = gen_entities(1, is_normal=True)
entities = gen_entities(default_nb, is_normal=True)
raw_vectors, binary_entities = gen_binary_entities(default_nb)
default_query, default_query_vecs = gen_query_vectors(field_name, entities, default_top_k, nq)
default_binary_query, default_binary_query_vecs = gen_query_vectors(binary_field_name, binary_entities, default_top_k,
                                                                    nq)
+
+
def init_data(connect, collection, nb=1200, partition_tags=None, auto_id=True):
    '''
    Generate entities, insert them into ``collection``, and flush.

    Reuses the module-level ``entities`` batch when ``nb`` equals the default
    batch size (1200) so the data is not regenerated for every test.
    When ``auto_id`` is False, explicit ids 0..nb-1 are supplied.
    Returns ``(inserted_entities, ids)``.
    '''
    # NOTE: the original declared `global entities` although it only reads the
    # module-level value; the statement was redundant and has been dropped.
    if nb == 1200:
        insert_entities = entities
    else:
        insert_entities = gen_entities(nb, is_normal=True)
    if partition_tags is None:
        if auto_id:
            ids = connect.insert(collection, insert_entities)
        else:
            ids = connect.insert(collection, insert_entities, ids=list(range(nb)))
    else:
        if auto_id:
            ids = connect.insert(collection, insert_entities, partition_tag=partition_tags)
        else:
            ids = connect.insert(collection, insert_entities, ids=list(range(nb)), partition_tag=partition_tags)
    connect.flush([collection])
    return insert_entities, ids
+
+
def init_binary_data(connect, collection, nb=1200, insert=True, partition_tags=None):
    '''
    Generate binary entities and optionally insert them into ``collection``.

    Reuses the module-level ``binary_entities``/``raw_vectors`` when ``nb``
    equals the default batch size (1200). When ``insert`` is falsy, nothing is
    written and the returned ids list is empty.
    Returns ``(raw_vectors, entities, ids)``.
    '''
    ids = []
    # NOTE: the original declared `global binary_entities` / `global raw_vectors`
    # although both are only read; the statements were redundant and dropped.
    if nb == 1200:
        insert_entities = binary_entities
        insert_raw_vectors = raw_vectors
    else:
        insert_raw_vectors, insert_entities = gen_binary_entities(nb)
    if insert:  # idiomatic truth test instead of `is True`
        if partition_tags is None:
            ids = connect.insert(collection, insert_entities)
        else:
            ids = connect.insert(collection, insert_entities, partition_tag=partition_tags)
        connect.flush([collection])
    return insert_raw_vectors, insert_entities, ids
+
+
+class TestSearchBase:
+    """
+    generate valid create_index params
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_index()
+    )
+    def get_index(self, request, connect):
+        if str(connect._cmd("mode")) == "CPU":
+            if request.param["index_type"] in index_cpu_not_support():
+                pytest.skip("sq8h not support in CPU mode")
+        return request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        if str(connect._cmd("mode")) == "CPU":
+            if request.param["index_type"] in index_cpu_not_support():
+                pytest.skip("sq8h not support in CPU mode")
+        return request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_binary_index()
+    )
+    def get_jaccard_index(self, request, connect):
+        logging.getLogger().info(request.param)
+        if request.param["index_type"] in binary_support():
+            return request.param
+        else:
+            pytest.skip("Skip index Temporary")
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_binary_index()
+    )
+    def get_hamming_index(self, request, connect):
+        logging.getLogger().info(request.param)
+        if request.param["index_type"] in binary_support():
+            return request.param
+        else:
+            pytest.skip("Skip index Temporary")
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_binary_index()
+    )
+    def get_structure_index(self, request, connect):
+        logging.getLogger().info(request.param)
+        if request.param["index_type"] == "FLAT":
+            return request.param
+        else:
+            pytest.skip("Skip index Temporary")
+
+    """
+    generate top-k params
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=[1, 10]
+    )
+    def get_top_k(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=[1, 10, 1100]
+    )
+    def get_nq(self, request):
+        yield request.param
+
+    def test_search_flat(self, connect, collection, get_top_k, get_nq):
+        '''
+        target: test basic search function, all the search params is correct, change top-k value
+        method: search with the given vectors, check the result
+        expected: the length of the result is top_k
+        '''
+        top_k = get_top_k
+        nq = get_nq
+        entities, ids = init_data(connect, collection)
+        query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
+        if top_k <= max_top_k:
+            connect.load_collection(collection)
+            res = connect.search(collection, query)
+            assert len(res[0]) == top_k
+            assert res[0]._distances[0] <= epsilon
+            assert check_id_result(res[0], ids[0])
+        else:
+            with pytest.raises(Exception) as e:
+                res = connect.search(collection, query)
+
+    def test_search_flat_top_k(self, connect, collection, get_nq):
+        '''
+        target: test basic search function, all the search params is correct, change top-k value
+        method: search with the given vectors, check the result
+        expected: the length of the result is top_k
+        '''
+        top_k = 16385
+        nq = get_nq
+        entities, ids = init_data(connect, collection)
+        query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
+        if top_k <= max_top_k:
+            connect.load_collection(collection)
+            res = connect.search(collection, query)
+            assert len(res[0]) == top_k
+            assert res[0]._distances[0] <= epsilon
+            assert check_id_result(res[0], ids[0])
+        else:
+            with pytest.raises(Exception) as e:
+                res = connect.search(collection, query)
+
    def test_search_field(self, connect, collection, get_top_k, get_nq):
        '''
        target: test basic search function, all the search params is correct, change top-k value
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        '''
        top_k = get_top_k
        nq = get_nq
        entities, ids = init_data(connect, collection)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
        if top_k <= max_top_k:
            connect.load_collection(collection)
            # restricting output to the vector field must still return top_k hits
            res = connect.search(collection, query, fields=["float_vector"])
            assert len(res[0]) == top_k
            assert res[0]._distances[0] <= epsilon
            assert check_id_result(res[0], ids[0])
            # requesting a scalar field returns its value with every hit
            res = connect.search(collection, query, fields=["float"])
            for i in range(nq):
                # assumes entities[1] is the 'float' field — TODO confirm field order
                assert entities[1]["values"][:nq][i] in [r.entity.get('float') for r in res[i]]
        else:
            # an over-limit top_k must be rejected by the server
            with pytest.raises(Exception):
                connect.search(collection, query)
+
+    @pytest.mark.skip("delete_entity_by_id not ready")
+    def test_search_after_delete(self, connect, collection, get_top_k, get_nq):
+        '''
+        target: test basic search function before and after deletion, all the search params is
+                correct, change top-k value.
+                check issue <a href="https://github.com/milvus-io/milvus/issues/4200">#4200</a>
+        method: search with the given vectors, check the result
+        expected: the deleted entities do not exist in the result.
+        '''
+        top_k = get_top_k
+        nq = get_nq
+
+        entities, ids = init_data(connect, collection, nb=10000)
+        first_int64_value = entities[0]["values"][0]
+        first_vector = entities[2]["values"][0]
+
+        search_param = get_search_param("FLAT")
+        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
+        vecs[:] = []
+        vecs.append(first_vector)
+
+        res = None
+        if top_k > max_top_k:
+            with pytest.raises(Exception):
+                connect.search(collection, query, fields=['int64'])
+            pytest.skip("top_k value is larger than max_topp_k")
+        else:
+            res = connect.search(collection, query, fields=['int64'])
+            assert len(res) == 1
+            assert len(res[0]) >= top_k
+            assert res[0][0].id == ids[0]
+            assert res[0][0].entity.get("int64") == first_int64_value
+            assert res[0]._distances[0] < epsilon
+            assert check_id_result(res[0], ids[0])
+
+        connect.delete_entity_by_id(collection, ids[:1])
+        connect.flush([collection])
+
+        res2 = connect.search(collection, query, fields=['int64'])
+        assert len(res2) == 1
+        assert len(res2[0]) >= top_k
+        assert res2[0][0].id != ids[0]
+        if top_k > 1:
+            assert res2[0][0].id == res[0][1].id
+            assert res2[0][0].entity.get("int64") == res[0][1].entity.get("int64")
+
    # TODO:
    @pytest.mark.level(2)
    def test_search_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
        '''
        target: test basic search function, all the search params is correct, test all index params, and build
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        '''
        top_k = get_top_k
        nq = get_nq

        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        entities, ids = init_data(connect, collection)
        connect.create_index(collection, field_name, get_simple_index)
        # pick search params matching the index type just built
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
        if top_k > max_top_k:
            # an over-limit top_k must be rejected by the server
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query)
        else:
            connect.load_collection(collection)
            res = connect.search(collection, query)
            assert len(res) == nq
            assert len(res[0]) >= top_k
            # the query vectors come from the inserted data, so the best hit is an exact match
            assert res[0]._distances[0] < epsilon
            assert check_id_result(res[0], ids[0])
+
    def test_search_after_index_different_metric_type(self, connect, collection, get_simple_index):
        '''
        target: test search with different metric_type
        method: build index with L2, and search using IP
        expected: search ok
        '''
        search_metric_type = "IP"
        index_type = get_simple_index["index_type"]
        entities, ids = init_data(connect, collection)
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, metric_type=search_metric_type,
                                        search_params=search_param)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == default_top_k
        # IP similarity sorts descending, so the first distance exceeds the last
        assert res[0]._distances[0] > res[0]._distances[default_top_k - 1]
+
    @pytest.mark.level(2)
    def test_search_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
        '''
        target: test basic search function, all the search params is correct, test all index params, and build
        method: add vectors into collection, search with the given vectors, check the result
        expected: the length of the result is top_k, search collection with partition tag return empty
        '''
        top_k = get_top_k
        nq = get_nq

        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        # the partition is created but never written to — it stays empty
        connect.create_partition(collection, default_tag)
        entities, ids = init_data(connect, collection)
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
        if top_k > max_top_k:
            # an over-limit top_k must be rejected by the server
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query)
        else:
            connect.load_collection(collection)
            res = connect.search(collection, query)
            assert len(res) == nq
            assert len(res[0]) >= top_k
            assert res[0]._distances[0] < epsilon
            assert check_id_result(res[0], ids[0])
            # searching only the empty partition must return no hits
            connect.release(collection)
            connect.load_partitions(collection, [default_tag])
            res = connect.search(collection, query, partition_tags=[default_tag])
            assert len(res[0]) == 0
+
    @pytest.mark.level(2)
    def test_search_index_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
        '''
        target: test basic search function, all the search params is correct, test all index params, and build
        method: search with the given vectors, check the result
        expected: the length of the result is top_k
        '''
        top_k = get_top_k
        nq = get_nq

        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        connect.create_partition(collection, default_tag)
        entities, ids = init_data(connect, collection, partition_tags=default_tag)
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
        # search both with the exact tag and with a tag list containing an extra name
        for tags in [[default_tag], [default_tag, "new_tag"]]:
            if top_k > max_top_k:
                # an over-limit top_k must be rejected by the server
                with pytest.raises(Exception) as e:
                    res = connect.search(collection, query, partition_tags=tags)
            else:
                connect.load_partitions(collection, tags)
                res = connect.search(collection, query, partition_tags=tags)
                assert len(res) == nq
                assert len(res[0]) >= top_k
                assert res[0]._distances[0] < epsilon
                assert check_id_result(res[0], ids[0])
+
+    @pytest.mark.level(2)
+    def test_search_index_partition_not_existed(self, connect, collection, get_top_k, get_nq):
+        '''
+        target: test basic search function, all the search params is correct, test all index params, and build
+        method: search with the given vectors and tag (tag name not existed in collection), check the result
+        expected: error raised
+        '''
+        top_k = get_top_k
+        nq = get_nq
+        entities, ids = init_data(connect, collection)
+        connect.create_index(collection, field_name, get_simple_index)
+        query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
+        if top_k > max_top_k:
+            with pytest.raises(Exception) as e:
+                res = connect.search(collection, query, partition_tags=["new_tag"])
+        else:
+            connect.load_partitions(collection, ["new_tag"])
+            res = connect.search(collection, query, partition_tags=["new_tag"])
+            assert len(res) == nq
+            assert len(res[0]) == 0
+
    @pytest.mark.level(2)
    def test_search_index_partitions(self, connect, collection, get_simple_index, get_top_k):
        '''
        target: test basic search function, all the search params is correct, test all index params, and build
        method: search collection with the given vectors and tags, check the result
        expected: the length of the result is top_k
        '''
        top_k = get_top_k
        nq = 2
        new_tag = "new_tag"
        index_type = get_simple_index["index_type"]
        if index_type in skip_pq():
            pytest.skip("Skip PQ")
        # two partitions with disjoint data: default_tag gets `entities`,
        # new_tag gets a freshly generated batch
        connect.create_partition(collection, default_tag)
        connect.create_partition(collection, new_tag)
        entities, ids = init_data(connect, collection, partition_tags=default_tag)
        new_entities, new_ids = init_data(connect, collection, nb=6001, partition_tags=new_tag)
        connect.create_index(collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        # queries come from `entities`, i.e. the default_tag partition
        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
        if top_k > max_top_k:
            # an over-limit top_k must be rejected by the server
            with pytest.raises(Exception) as e:
                res = connect.search(collection, query)
        else:
            connect.load_collection(collection)
            res = connect.search(collection, query)
            # searching everything finds exact matches from default_tag only
            assert check_id_result(res[0], ids[0])
            assert not check_id_result(res[1], new_ids[0])
            assert res[0]._distances[0] < epsilon
            assert res[1]._distances[0] < epsilon
            # restricted to new_tag there is no exact match for these queries
            res = connect.search(collection, query, partition_tags=[new_tag])
            assert res[0]._distances[0] > epsilon
            assert res[1]._distances[0] > epsilon
            connect.release_collection(collection)
+
+    @pytest.mark.level(2)
+    def test_search_index_partitions_B(self, connect, collection, get_simple_index, get_top_k):
+        '''
+        target: test basic search function, all the search params is correct, test all index params, and build
+        method: search collection with the given vectors and tags, check the result
+        expected: the length of the result is top_k
+        '''
+        top_k = get_top_k
+        nq = 2
+        tag = "tag"
+        new_tag = "new_tag"
+        index_type = get_simple_index["index_type"]
+        if index_type in skip_pq():
+            pytest.skip("Skip PQ")
+        connect.create_partition(collection, tag)
+        connect.create_partition(collection, new_tag)
+        entities, ids = init_data(connect, collection, partition_tags=tag)
+        new_entities, new_ids = init_data(connect, collection, nb=6001, partition_tags=new_tag)
+        connect.create_index(collection, field_name, get_simple_index)
+        search_param = get_search_param(index_type)
+        query, vecs = gen_query_vectors(field_name, new_entities, top_k, nq, search_params=search_param)
+        if top_k > max_top_k:
+            with pytest.raises(Exception) as e:
+                res = connect.search(collection, query)
+        else:
+            connect.load_collection(collection)
+            res = connect.search(collection, query, partition_tags=["(.*)tag"])
+            assert not check_id_result(res[0], ids[0])
+            assert res[0]._distances[0] < epsilon
+            assert res[1]._distances[0] < epsilon
+            res = connect.search(collection, query, partition_tags=["new(.*)"])
+            assert res[0]._distances[0] < epsilon
+            assert res[1]._distances[0] < epsilon
+            connect.release_collection(collection)
+
+    #
+    # test for ip metric
+    #
+    @pytest.mark.level(2)
+    def test_search_ip_flat(self, connect, collection, get_simple_index, get_top_k, get_nq):
+        '''
+        target: test basic search function, all the search params is correct, change top-k value
+        method: search with the given vectors, check the result
+        expected: the length of the result is top_k
+        '''
+        top_k = get_top_k
+        nq = get_nq
+        entities, ids = init_data(connect, collection)
+        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP")
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res[0]) == top_k
+        assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
+        assert check_id_result(res[0], ids[0])
+
+    @pytest.mark.level(2)
+    def test_search_ip_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
+        '''
+        target: test basic search function, all the search params is correct, test all index params, and build
+        method: search with the given vectors, check the result
+        expected: the length of the result is top_k
+        '''
+        top_k = get_top_k
+        nq = get_nq
+
+        index_type = get_simple_index["index_type"]
+        if index_type in skip_pq():
+            pytest.skip("Skip PQ")
+        entities, ids = init_data(connect, collection)
+        get_simple_index["metric_type"] = "IP"
+        connect.create_index(collection, field_name, get_simple_index)
+        search_param = get_search_param(index_type)
+        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) >= top_k
+        assert check_id_result(res[0], ids[0])
+        assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
+
+    @pytest.mark.level(2)
+    def test_search_ip_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
+        '''
+        target: test basic search function, all the search params is correct, test all index params, and build
+        method: add vectors into collection, search with the given vectors, check the result
+        expected: the length of the result is top_k, search collection with partition tag return empty
+        '''
+        top_k = get_top_k
+        nq = get_nq
+        metric_type = "IP"
+        index_type = get_simple_index["index_type"]
+        if index_type in skip_pq():
+            pytest.skip("Skip PQ")
+        connect.create_partition(collection, default_tag)
+        entities, ids = init_data(connect, collection)
+        get_simple_index["metric_type"] = metric_type
+        connect.create_index(collection, field_name, get_simple_index)
+        search_param = get_search_param(index_type)
+        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type=metric_type,
+                                        search_params=search_param)
+        if top_k > max_top_k:
+            with pytest.raises(Exception) as e:
+                res = connect.search(collection, query)
+        else:
+            connect.load_collection(collection)
+            res = connect.search(collection, query)
+            assert len(res) == nq
+            assert len(res[0]) >= top_k
+            assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
+            assert check_id_result(res[0], ids[0])
+            res = connect.search(collection, query, partition_tags=[default_tag])
+            assert len(res[0]) == 0
+
+    @pytest.mark.level(2)
+    def test_search_ip_index_partitions(self, connect, collection, get_simple_index, get_top_k):
+        '''
+        target: test basic search function, all the search params is correct, test all index params, and build
+        method: search collection with the given vectors and tags, check the result
+        expected: the length of the result is top_k
+        '''
+        top_k = get_top_k
+        nq = 2
+        metric_type = "IP"
+        new_tag = "new_tag"
+        index_type = get_simple_index["index_type"]
+        if index_type in skip_pq():
+            pytest.skip("Skip PQ")
+        connect.create_partition(collection, default_tag)
+        connect.create_partition(collection, new_tag)
+        entities, ids = init_data(connect, collection, partition_tags=default_tag)
+        new_entities, new_ids = init_data(connect, collection, nb=6001, partition_tags=new_tag)
+        get_simple_index["metric_type"] = metric_type
+        connect.create_index(collection, field_name, get_simple_index)
+        search_param = get_search_param(index_type)
+        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert check_id_result(res[0], ids[0])
+        assert not check_id_result(res[1], new_ids[0])
+        assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
+        assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
+        res = connect.search(collection, query, partition_tags=["new_tag"])
+        assert res[0]._distances[0] < 1 - gen_inaccuracy(res[0]._distances[0])
+        # TODO:
+        # assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
+
+    @pytest.mark.level(2)
+    def test_search_without_connect(self, dis_connect, collection):
+        '''
+        target: test search vectors without connection
+        method: use dis connected instance, call search method and check if search successfully
+        expected: raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            res = dis_connect.search(collection, default_query)
+
+    def test_search_collection_not_existed(self, connect):
+        '''
+        target: search collection not existed
+        method: search with the random collection_name, which is not in db
+        expected: status not ok
+        '''
+        collection_name = gen_unique_str(uid)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection_name, default_query)
+
+    def test_search_distance_l2(self, connect, collection):
+        '''
+        target: search collection, and check the result: distance
+        method: compare the return distance value with value computed with Euclidean
+        expected: the return distance equals to the computed value
+        '''
+        nq = 2
+        search_param = {"nprobe": 1}
+        entities, ids = init_data(connect, collection, nb=nq)
+        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
+                                        search_params=search_param)
+        inside_query, inside_vecs = gen_query_vectors(field_name, entities, default_top_k, nq,
+                                                      search_params=search_param)
+        distance_0 = l2(vecs[0], inside_vecs[0])
+        distance_1 = l2(vecs[0], inside_vecs[1])
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert abs(np.sqrt(res[0]._distances[0]) - min(distance_0, distance_1)) <= gen_inaccuracy(res[0]._distances[0])
+
+    # TODO
+    def test_search_distance_l2_after_index(self, connect, id_collection, get_simple_index):
+        '''
+        target: search collection, and check the result: distance
+        method: compare the return distance value with value computed with Inner product
+        expected: the return distance equals to the computed value
+        '''
+        index_type = get_simple_index["index_type"]
+        nq = 2
+        entities, ids = init_data(connect, id_collection, auto_id=False)
+        connect.create_index(id_collection, field_name, get_simple_index)
+        search_param = get_search_param(index_type)
+        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
+                                        search_params=search_param)
+        inside_vecs = entities[-1]["values"]
+        min_distance = 1.0
+        min_id = None
+        for i in range(default_nb):
+            tmp_dis = l2(vecs[0], inside_vecs[i])
+            if min_distance > tmp_dis:
+                min_distance = tmp_dis
+                min_id = ids[i]
+        connect.load_collection(id_collection)
+        res = connect.search(id_collection, query)
+        tmp_epsilon = epsilon
+        check_id_result(res[0], min_id)
+        # if index_type in ["ANNOY", "IVF_PQ"]:
+        #     tmp_epsilon = 0.1
+        # TODO:
+        # assert abs(np.sqrt(res[0]._distances[0]) - min_distance) <= tmp_epsilon
+
+    @pytest.mark.level(2)
+    def test_search_distance_ip(self, connect, collection):
+        '''
+        target: search collection, and check the result: distance
+        method: compare the return distance value with value computed with Inner product
+        expected: the return distance equals to the computed value
+        '''
+        nq = 2
+        metirc_type = "IP"
+        search_param = {"nprobe": 1}
+        entities, ids = init_data(connect, collection, nb=nq)
+        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
+                                        metric_type=metirc_type,
+                                        search_params=search_param)
+        inside_query, inside_vecs = gen_query_vectors(field_name, entities, default_top_k, nq,
+                                                      search_params=search_param)
+        distance_0 = ip(vecs[0], inside_vecs[0])
+        distance_1 = ip(vecs[0], inside_vecs[1])
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert abs(res[0]._distances[0] - max(distance_0, distance_1)) <= epsilon
+
+    def test_search_distance_ip_after_index(self, connect, id_collection, get_simple_index):
+        '''
+        target: search collection, and check the result: distance
+        method: compare the return distance value with value computed with Inner product
+        expected: the return distance equals to the computed value
+        '''
+        index_type = get_simple_index["index_type"]
+        nq = 2
+        metirc_type = "IP"
+        entities, ids = init_data(connect, id_collection, auto_id=False)
+        get_simple_index["metric_type"] = metirc_type
+        connect.create_index(id_collection, field_name, get_simple_index)
+        search_param = get_search_param(index_type)
+        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
+                                        metric_type=metirc_type,
+                                        search_params=search_param)
+        inside_vecs = entities[-1]["values"]
+        max_distance = 0
+        max_id = None
+        for i in range(default_nb):
+            tmp_dis = ip(vecs[0], inside_vecs[i])
+            if max_distance < tmp_dis:
+                max_distance = tmp_dis
+                max_id = ids[i]
+        connect.load_collection(id_collection)
+        res = connect.search(id_collection, query)
+        tmp_epsilon = epsilon
+        check_id_result(res[0], max_id)
+        # if index_type in ["ANNOY", "IVF_PQ"]:
+        #     tmp_epsilon = 0.1
+        # TODO:
+        # assert abs(res[0]._distances[0] - max_distance) <= tmp_epsilon
+
+    def test_search_distance_jaccard_flat_index(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: compare the return distance value with value computed with L2
+        expected: the return distance equals to the computed value
+        '''
+        nq = 1
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        distance_0 = jaccard(query_int_vectors[0], int_vectors[0])
+        distance_1 = jaccard(query_int_vectors[0], int_vectors[1])
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="JACCARD")
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
+
+    @pytest.mark.level(2)
+    def test_search_binary_flat_with_L2(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: compare the return distance value with value computed with L2
+        expected: the return distance equals to the computed value
+        '''
+        nq = 1
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="L2")
+        with pytest.raises(Exception) as e:
+            connect.search(binary_collection, query)
+
+    @pytest.mark.level(2)
+    def test_search_distance_hamming_flat_index(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: compare the return distance value with value computed with Inner product
+        expected: the return distance equals to the computed value
+        '''
+        nq = 1
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        distance_0 = hamming(query_int_vectors[0], int_vectors[0])
+        distance_1 = hamming(query_int_vectors[0], int_vectors[1])
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="HAMMING")
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert abs(res[0][0].distance - min(distance_0, distance_1).astype(float)) <= epsilon
+
+    # TODO
+    @pytest.mark.level(2)
+    def test_search_distance_substructure_flat_index(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: search with new random binary entities and SUBSTRUCTURE metric type
+        expected: the return distance equals to the computed value
+        '''
+        nq = 1
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        distance_0 = substructure(query_int_vectors[0], int_vectors[0])
+        distance_1 = substructure(query_int_vectors[0], int_vectors[1])
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
+                                        metric_type="SUBSTRUCTURE")
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert len(res[0]) == 0
+
+    @pytest.mark.level(2)
+    def test_search_distance_substructure_flat_index_B(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: search with entities that related to inserted entities
+        expected: the return distance equals to the computed value
+        '''
+        top_k = 3
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_vecs = gen_binary_sub_vectors(int_vectors, 2)
+        query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUBSTRUCTURE",
+                                        replace_vecs=query_vecs)
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert res[0][0].distance <= epsilon
+        assert res[0][0].id == ids[0]
+        assert res[1][0].distance <= epsilon
+        assert res[1][0].id == ids[1]
+
+    @pytest.mark.level(2)
+    def test_search_distance_superstructure_flat_index(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: compare the return distance value with value computed with Inner product
+        expected: the return distance equals to the computed value
+        '''
+        nq = 1
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        distance_0 = superstructure(query_int_vectors[0], int_vectors[0])
+        distance_1 = superstructure(query_int_vectors[0], int_vectors[1])
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
+                                        metric_type="SUPERSTRUCTURE")
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert len(res[0]) == 0
+
+    @pytest.mark.level(2)
+    def test_search_distance_superstructure_flat_index_B(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: compare the return distance value with value computed with SUPER
+        expected: the return distance equals to the computed value
+        '''
+        top_k = 3
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_vecs = gen_binary_super_vectors(int_vectors, 2)
+        query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUPERSTRUCTURE",
+                                        replace_vecs=query_vecs)
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert len(res[0]) == 2
+        assert len(res[1]) == 2
+        assert res[0][0].id in ids
+        assert res[0][0].distance <= epsilon
+        assert res[1][0].id in ids
+        assert res[1][0].distance <= epsilon
+
+    @pytest.mark.level(2)
+    def test_search_distance_tanimoto_flat_index(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: compare the return distance value with value computed with Inner product
+        expected: the return distance equals to the computed value
+        '''
+        nq = 1
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        distance_0 = tanimoto(query_int_vectors[0], int_vectors[0])
+        distance_1 = tanimoto(query_int_vectors[0], int_vectors[1])
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="TANIMOTO")
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert abs(res[0][0].distance - min(distance_0, distance_1)) <= epsilon
+
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(30)
+    def test_search_concurrent_multithreads(self, connect, args):
+        '''
+        target: test concurrent search with multiprocessess
+        method: search with 10 processes, each process uses dependent connection
+        expected: status ok and the returned vectors should be query_records
+        '''
+        nb = 100
+        top_k = 10
+        threads_num = 4
+        threads = []
+        collection = gen_unique_str(uid)
+        uri = "tcp://%s:%s" % (args["ip"], args["port"])
+        # create collection
+        milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
+        milvus.create_collection(collection, default_fields)
+        entities, ids = init_data(milvus, collection)
+        connect.load_collection(collection)
+
+        def search(milvus):
+            res = milvus.search(collection, default_query)
+            assert len(res) == 1
+            assert res[0]._entities[0].id in ids
+            assert res[0]._distances[0] < epsilon
+
+        for i in range(threads_num):
+            milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
+            t = TestThread(target=search, args=(milvus,))
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(30)
+    def test_search_concurrent_multithreads_single_connection(self, connect, args):
+        '''
+        target: test concurrent search with multiprocessess
+        method: search with 10 processes, each process uses dependent connection
+        expected: status ok and the returned vectors should be query_records
+        '''
+        nb = 100
+        top_k = 10
+        threads_num = 4
+        threads = []
+        collection = gen_unique_str(uid)
+        uri = "tcp://%s:%s" % (args["ip"], args["port"])
+        # create collection
+        milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
+        milvus.create_collection(collection, default_fields)
+        entities, ids = init_data(milvus, collection)
+        connect.load_collection(collection)
+
+        def search(milvus):
+            res = milvus.search(collection, default_query)
+            assert len(res) == 1
+            assert res[0]._entities[0].id in ids
+            assert res[0]._distances[0] < epsilon
+
+        for i in range(threads_num):
+            t = TestThread(target=search, args=(milvus,))
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+
+    @pytest.mark.level(2)
+    def test_search_multi_collections(self, connect, args):
+        '''
+        target: test search multi collections of L2
+        method: add vectors into 10 collections, and search
+        expected: search status ok, the length of result
+        '''
+        num = 10
+        top_k = 10
+        nq = 20
+        for i in range(num):
+            collection = gen_unique_str(uid + str(i))
+            connect.create_collection(collection, default_fields)
+            entities, ids = init_data(connect, collection)
+            assert len(ids) == default_nb
+            query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
+            connect.load_collection(collection)
+            res = connect.search(collection, query)
+            assert len(res) == nq
+            for i in range(nq):
+                assert check_id_result(res[i], ids[i])
+                assert res[i]._distances[0] < epsilon
+                assert res[i]._distances[1] > epsilon
+
+    def test_query_entities_with_field_less_than_top_k(self, connect, id_collection):
+        """
+        target: test search with field, and let return entities less than topk
+        method: insert entities and build ivf_ index, and search with field, n_probe=1
+        expected:
+        """
+        entities, ids = init_data(connect, id_collection, auto_id=False)
+        simple_index = {"index_type": "IVF_FLAT", "params": {"nlist": 200}, "metric_type": "L2"}
+        connect.create_index(id_collection, field_name, simple_index)
+        # logging.getLogger().info(connect.get_collection_info(id_collection))
+        top_k = 300
+        default_query, default_query_vecs = gen_query_vectors(field_name, entities, top_k, nq,
+                                                              search_params={"nprobe": 1})
+        expr = {"must": [gen_default_vector_expr(default_query)]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(id_collection)
+        res = connect.search(id_collection, query, fields=["int64"])
+        assert len(res) == nq
+        for r in res[0]:
+            assert getattr(r.entity, "int64") == getattr(r.entity, "id")
+
+
+class TestSearchDSL(object):
+    """
+    ******************************************************************
+    #  The following cases are used to build invalid query expr
+    ******************************************************************
+    """
+
+    def test_query_no_must(self, connect, collection):
+        '''
+        method: build query without must expr
+        expected: error raised
+        '''
+        # entities, ids = init_data(connect, collection)
+        query = update_query_expr(default_query, keep_old=False)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    def test_query_no_vector_term_only(self, connect, collection):
+        '''
+        method: build query without vector only term
+        expected: error raised
+        '''
+        # entities, ids = init_data(connect, collection)
+        expr = {
+            "must": [gen_default_term_expr]
+        }
+        query = update_query_expr(default_query, keep_old=False, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    def test_query_no_vector_range_only(self, connect, collection):
+        '''
+        method: build query without vector only range
+        expected: error raised
+        '''
+        # entities, ids = init_data(connect, collection)
+        expr = {
+            "must": [gen_default_range_expr]
+        }
+        query = update_query_expr(default_query, keep_old=False, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    def test_query_vector_only(self, connect, collection):
+        entities, ids = init_data(connect, collection)
+        connect.load_collection(collection)
+        res = connect.search(collection, default_query)
+        assert len(res) == nq
+        assert len(res[0]) == default_top_k
+
+    def test_query_wrong_format(self, connect, collection):
+        '''
+        method: build query without must expr, with wrong expr name
+        expected: error raised
+        '''
+        # entities, ids = init_data(connect, collection)
+        expr = {
+            "must1": [gen_default_term_expr]
+        }
+        query = update_query_expr(default_query, keep_old=False, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    def test_query_empty(self, connect, collection):
+        '''
+        method: search with empty query
+        expected: error raised
+        '''
+        query = {}
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    """
+    ******************************************************************
+    #  The following cases are used to build valid query expr
+    ******************************************************************
+    """
+
+    @pytest.mark.level(2)
+    def test_query_term_value_not_in(self, connect, collection):
+        '''
+        method: build query with vector and term expr, with no term can be filtered
+        expected: filter pass
+        '''
+        entities, ids = init_data(connect, collection)
+        expr = {
+            "must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[100000])]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+        # TODO:
+
+    # TODO:
+    @pytest.mark.level(2)
+    def test_query_term_value_all_in(self, connect, collection):
+        '''
+        method: build query with vector and term expr, with all term can be filtered
+        expected: filter pass
+        '''
+        entities, ids = init_data(connect, collection)
+        expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[1])]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 1
+        # TODO:
+
+    # TODO:
+    @pytest.mark.level(2)
+    def test_query_term_values_not_in(self, connect, collection):
+        '''
+        method: build query with vector and term expr, with no term can be filtered
+        expected: filter pass
+        '''
+        entities, ids = init_data(connect, collection)
+        expr = {"must": [gen_default_vector_expr(default_query),
+                         gen_default_term_expr(values=[i for i in range(100000, 100010)])]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+        # TODO:
+
+    def test_query_term_values_all_in(self, connect, collection):
+        '''
+        method: build query with vector and term expr, with all term can be filtered
+        expected: filter pass
+        '''
+        entities, ids = init_data(connect, collection)
+        expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr()]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == default_top_k
+        # The default term expr presumably covers the first half of the inserted
+        # ids, so every returned id must come from ids[:default_nb // 2].
+        limit = default_nb // 2
+        for i in range(nq):
+            for result in res[i]:
+                logging.getLogger().info(result.id)
+                assert result.id in ids[:limit]
+        # TODO:
+
+    def test_query_term_values_parts_in(self, connect, collection):
+        '''
+        method: build query with vector and term expr, with parts of term can be filtered
+        expected: filter pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # Half of the term values (default_nb//2 .. default_nb-1) overlap the
+        # inserted data; the rest are out of range. Enough remain to fill top_k.
+        expr = {"must": [gen_default_vector_expr(default_query),
+                         gen_default_term_expr(
+                             values=[i for i in range(default_nb // 2, default_nb + default_nb // 2)])]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == default_top_k
+        # TODO:
+
+    # TODO:
+    @pytest.mark.level(2)
+    def test_query_term_values_repeat(self, connect, collection):
+        '''
+        method: build query with vector and term expr, with the same values
+        expected: filter pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # The value 1 repeated (default_nb - 1) times: duplicates should be
+        # deduplicated server-side, leaving a single matching entity.
+        expr = {
+            "must": [gen_default_vector_expr(default_query),
+                     gen_default_term_expr(values=[1 for i in range(1, default_nb)])]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 1
+        # TODO:
+
+    def test_query_term_value_empty(self, connect, collection):
+        '''
+        method: build query with term value empty
+        expected: return null
+        '''
+        # An empty term value list matches nothing; note that no data is
+        # inserted here either, so empty result sets are expected.
+        expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[])]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+
+    def test_query_complex_dsl(self, connect, collection):
+        '''
+        method: query with complicated dsl
+        expected: no error raised
+        '''
+        # Nested bool expr: (term OR range) AND vector. Only checks that the
+        # search call succeeds; result content is not asserted.
+        expr = {"must": [
+            {"must": [{"should": [gen_default_term_expr(values=[1]), gen_default_range_expr()]}]},
+            {"must": [gen_default_vector_expr(default_query)]}
+        ]}
+        logging.getLogger().info(expr)
+        query = update_query_expr(default_query, expr=expr)
+        logging.getLogger().info(query)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        logging.getLogger().info(res)
+
+    """
+    ******************************************************************
+    #  The following cases are used to build invalid term query expr
+    ******************************************************************
+    """
+
+    # TODO
+    @pytest.mark.level(2)
+    def test_query_term_key_error(self, connect, collection):
+        '''
+        method: build query with term key error
+        expected: Exception raised
+        '''
+        # Misspelled keyword "terrm" must be rejected by the server/SDK.
+        expr = {"must": [gen_default_vector_expr(default_query),
+                         gen_default_term_expr(keyword="terrm", values=[i for i in range(default_nb // 2)])]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_term()
+    )
+    def get_invalid_term(self, request):
+        # Parametrized fixture: yields each malformed term expr in turn.
+        return request.param
+
+    @pytest.mark.level(2)
+    def test_query_term_wrong_format(self, connect, collection, get_invalid_term):
+        '''
+        method: build query with wrong format term
+        expected: Exception raised
+        '''
+        entities, ids = init_data(connect, collection)
+        # Each malformed term expr from the fixture must make search fail.
+        term = get_invalid_term
+        expr = {"must": [gen_default_vector_expr(default_query), term]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    # TODO
+    @pytest.mark.level(2)
+    def test_query_term_field_named_term(self, connect, collection):
+        '''
+        method: build query with field named "term"
+        expected: error raised
+        '''
+        term_fields = add_field_default(default_fields, field_name="term")
+        collection_term = gen_unique_str("term")
+        connect.create_collection(collection_term, term_fields)
+        term_entities = add_field(entities, field_name="term")
+        ids = connect.insert(collection_term, term_entities)
+        assert len(ids) == default_nb
+        connect.flush([collection_term])
+        # count = connect.count_entities(collection_term)
+        # assert count == default_nb
+        stats = connect.get_collection_stats(collection_term)
+        assert stats["row_count"] == default_nb
+        term_param = {"term": {"term": {"values": [i for i in range(default_nb // 2)]}}}
+        expr = {"must": [gen_default_vector_expr(default_query),
+                         term_param]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection_term, query)
+        assert len(res) == nq
+        assert len(res[0]) == default_top_k
+        connect.drop_collection(collection_term)
+
+    @pytest.mark.level(2)
+    def test_query_term_one_field_not_existed(self, connect, collection):
+        '''
+        method: build query with two fields term, one of it not existed
+        expected: exception raised
+        '''
+        entities, ids = init_data(connect, collection)
+        term = gen_default_term_expr()
+        # Add a second term entry on a non-existent field "a"; search must fail.
+        term["term"].update({"a": [0]})
+        expr = {"must": [gen_default_vector_expr(default_query), term]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    """
+    ******************************************************************
+    #  The following cases are used to build valid range query expr
+    ******************************************************************
+    """
+
+    # TODO
+    def test_query_range_key_error(self, connect, collection):
+        '''
+        method: build query with range key error
+        expected: Exception raised
+        '''
+        # NOTE(review): local name `range` shadows the builtin within this test.
+        range = gen_default_range_expr(keyword="ranges")
+        expr = {"must": [gen_default_vector_expr(default_query), range]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_range()
+    )
+    def get_invalid_range(self, request):
+        # Parametrized fixture: yields each malformed range expr in turn.
+        return request.param
+
+    # TODO
+    @pytest.mark.level(2)
+    def test_query_range_wrong_format(self, connect, collection, get_invalid_range):
+        '''
+        method: build query with wrong format range
+        expected: Exception raised
+        '''
+        entities, ids = init_data(connect, collection)
+        # Each malformed range expr from the fixture must make search fail.
+        range = get_invalid_range
+        expr = {"must": [gen_default_vector_expr(default_query), range]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    @pytest.mark.level(2)
+    def test_query_range_string_ranges(self, connect, collection):
+        '''
+        method: build query with invalid ranges
+        expected: raise Exception
+        '''
+        entities, ids = init_data(connect, collection)
+        # String bounds on a numeric field are invalid and must be rejected.
+        ranges = {"GT": "0", "LT": "1000"}
+        range = gen_default_range_expr(ranges=ranges)
+        expr = {"must": [gen_default_vector_expr(default_query), range]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    @pytest.mark.level(2)
+    def test_query_range_invalid_ranges(self, connect, collection):
+        '''
+        method: build query with invalid ranges
+        expected: 0
+        '''
+        entities, ids = init_data(connect, collection)
+        # Empty interval (GT > LT): no entity can satisfy it, so zero hits.
+        ranges = {"GT": default_nb, "LT": 0}
+        range = gen_default_range_expr(ranges=ranges)
+        expr = {"must": [gen_default_vector_expr(default_query), range]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res[0]) == 0
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_valid_ranges()
+    )
+    def get_valid_ranges(self, request):
+        # Parametrized fixture: yields each well-formed range dict in turn.
+        return request.param
+
+    @pytest.mark.level(2)
+    def test_query_range_valid_ranges(self, connect, collection, get_valid_ranges):
+        '''
+        method: build query with valid ranges
+        expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # Each valid range presumably matches enough entities to fill top_k.
+        ranges = get_valid_ranges
+        range = gen_default_range_expr(ranges=ranges)
+        expr = {"must": [gen_default_vector_expr(default_query), range]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == default_top_k
+
+    def test_query_range_one_field_not_existed(self, connect, collection):
+        '''
+        method: build query with two fields ranges, one of fields not existed
+        expected: exception raised
+        '''
+        entities, ids = init_data(connect, collection)
+        range = gen_default_range_expr()
+        # Add a second range on a non-existent field "a"; search must fail.
+        range["range"].update({"a": {"GT": 1, "LT": default_nb // 2}})
+        expr = {"must": [gen_default_vector_expr(default_query), range]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    """
+    ************************************************************************
+    #  The following cases are used to build query expr multi range and term
+    ************************************************************************
+    """
+
+    # TODO
+    @pytest.mark.level(2)
+    def test_query_multi_term_has_common(self, connect, collection):
+        '''
+        method: build query with multi term with same field, and values has common
+        expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # Two term exprs on the same field whose value sets intersect; the
+        # intersection presumably still covers at least top_k entities.
+        term_first = gen_default_term_expr()
+        term_second = gen_default_term_expr(values=[i for i in range(default_nb // 3)])
+        expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == default_top_k
+
+    # TODO
+    @pytest.mark.level(2)
+    def test_query_multi_term_no_common(self, connect, collection):
+        '''
+         method: build query with multi range with same field, and ranges no common
+         expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # Two term exprs on the same field with disjoint value sets: their
+        # conjunction is empty, so zero hits per query.
+        term_first = gen_default_term_expr()
+        term_second = gen_default_term_expr(values=[i for i in range(default_nb // 2, default_nb + default_nb // 2)])
+        expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+
+    # TODO
+    def test_query_multi_term_different_fields(self, connect, collection):
+        '''
+         method: build query with multi range with same field, and ranges no common
+         expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # Term exprs on two different fields ("int64" default and "float") whose
+        # matching entity sets do not overlap, so zero hits per query.
+        term_first = gen_default_term_expr()
+        term_second = gen_default_term_expr(field="float",
+                                            values=[float(i) for i in range(default_nb // 2, default_nb)])
+        expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+
+    # TODO
+    @pytest.mark.level(2)
+    def test_query_single_term_multi_fields(self, connect, collection):
+        '''
+        method: build query with multi term, different field each term
+        expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # A single term expr carrying two fields at once; the server is
+        # expected to reject this shape (hence pytest.raises below).
+        term_first = {"int64": {"values": [i for i in range(default_nb // 2)]}}
+        term_second = {"float": {"values": [float(i) for i in range(default_nb // 2, default_nb)]}}
+        term = update_term_expr({"term": {}}, [term_first, term_second])
+        expr = {"must": [gen_default_vector_expr(default_query), term]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    # TODO
+    @pytest.mark.level(2)
+    def test_query_multi_range_has_common(self, connect, collection):
+        '''
+        method: build query with multi range with same field, and ranges has common
+        expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # Two overlapping ranges on the same field; the intersection presumably
+        # still matches at least top_k entities.
+        range_one = gen_default_range_expr()
+        range_two = gen_default_range_expr(ranges={"GT": 1, "LT": default_nb // 3})
+        expr = {"must": [gen_default_vector_expr(default_query), range_one, range_two]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == default_top_k
+
+    # TODO
+    @pytest.mark.level(2)
+    def test_query_multi_range_no_common(self, connect, collection):
+        '''
+         method: build query with multi range with same field, and ranges no common
+        expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # Disjoint ranges on the same field: empty conjunction, zero hits.
+        range_one = gen_default_range_expr()
+        range_two = gen_default_range_expr(ranges={"GT": default_nb // 2, "LT": default_nb})
+        expr = {"must": [gen_default_vector_expr(default_query), range_one, range_two]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+
+    # TODO
+    @pytest.mark.level(2)
+    def test_query_multi_range_different_fields(self, connect, collection):
+        '''
+        method: build query with multi range, different field each range
+        expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # Ranges on two different fields whose matching entity sets do not
+        # overlap, so zero hits per query.
+        range_first = gen_default_range_expr()
+        range_second = gen_default_range_expr(field="float", ranges={"GT": default_nb // 2, "LT": default_nb})
+        expr = {"must": [gen_default_vector_expr(default_query), range_first, range_second]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+
+    # TODO
+    @pytest.mark.level(2)
+    def test_query_single_range_multi_fields(self, connect, collection):
+        '''
+        method: build query with multi range, different field each range
+        expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # A single range expr carrying two fields at once; the server is
+        # expected to reject this shape (hence pytest.raises below).
+        range_first = {"int64": {"GT": 0, "LT": default_nb // 2}}
+        range_second = {"float": {"GT": default_nb / 2, "LT": float(default_nb)}}
+        range = update_range_expr({"range": {}}, [range_first, range_second])
+        expr = {"must": [gen_default_vector_expr(default_query), range]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    """
+    ******************************************************************
+    #  The following cases are used to build query expr both term and range
+    ******************************************************************
+    """
+
+    # TODO
+    @pytest.mark.level(2)
+    def test_query_single_term_range_has_common(self, connect, collection):
+        '''
+        method: build query with single term single range
+        expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # Term and range on the same field with overlapping coverage; overlap
+        # presumably matches at least top_k entities.
+        term = gen_default_term_expr()
+        range = gen_default_range_expr(ranges={"GT": -1, "LT": default_nb // 2})
+        expr = {"must": [gen_default_vector_expr(default_query), term, range]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == default_top_k
+
+    # TODO
+    def test_query_single_term_range_no_common(self, connect, collection):
+        '''
+        method: build query with single term single range
+        expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        # Term and range with disjoint coverage: empty conjunction, zero hits.
+        term = gen_default_term_expr()
+        range = gen_default_range_expr(ranges={"GT": default_nb // 2, "LT": default_nb})
+        expr = {"must": [gen_default_vector_expr(default_query), term, range]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+
+    """
+    ******************************************************************
+    #  The following cases are used to build multi vectors query expr
+    ******************************************************************
+    """
+
+    # TODO
+    def test_query_multi_vectors_same_field(self, connect, collection):
+        '''
+        method: build query with two vectors same field
+        expected: error raised
+        '''
+        entities, ids = init_data(connect, collection)
+        # Two vector sub-queries on the same field inside one "must" clause;
+        # the server is expected to reject this (hence pytest.raises below).
+        vector1 = default_query
+        vector2 = gen_query_vectors(field_name, entities, default_top_k, nq=2)
+        expr = {
+            "must": [vector1, vector2]
+        }
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+
+class TestSearchDSLBools(object):
+    # Negative tests for the boolean wrappers of the query DSL: queries built
+    # without a "must" clause (or with an unknown bool key) must be rejected.
+    """
+    ******************************************************************
+    #  The following cases are used to build invalid query expr
+    ******************************************************************
+    """
+
+    @pytest.mark.level(2)
+    def test_query_no_bool(self, connect, collection):
+        '''
+        method: build query without bool expr
+        expected: error raised
+        '''
+        entities, ids = init_data(connect, collection)
+        # Top-level key "bool1" is not a recognized bool operator.
+        expr = {"bool1": {}}
+        query = expr
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    def test_query_should_only_term(self, connect, collection):
+        '''
+        method: build query without must, with should.term instead
+        expected: error raised
+        '''
+        # NOTE(review): `gen_default_term_expr` is passed uncalled (no parens);
+        # since the test only expects failure this still passes -- confirm intent.
+        expr = {"should": gen_default_term_expr}
+        query = update_query_expr(default_query, keep_old=False, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    def test_query_should_only_vector(self, connect, collection):
+        '''
+        method: build query without must, with should.vector instead
+        expected: error raised
+        '''
+        expr = {"should": default_query["bool"]["must"]}
+        query = update_query_expr(default_query, keep_old=False, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    def test_query_must_not_only_term(self, connect, collection):
+        '''
+        method: build query without must, with must_not.term instead
+        expected: error raised
+        '''
+        # NOTE(review): function object passed uncalled here as well.
+        expr = {"must_not": gen_default_term_expr}
+        query = update_query_expr(default_query, keep_old=False, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    def test_query_must_not_vector(self, connect, collection):
+        '''
+        method: build query without must, with must_not.vector instead
+        expected: error raised
+        '''
+        expr = {"must_not": default_query["bool"]["must"]}
+        query = update_query_expr(default_query, keep_old=False, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    def test_query_must_should(self, connect, collection):
+        '''
+        method: build query must, and with should.term
+        expected: error raised
+        '''
+        # keep_old=True keeps the original "must" clause alongside "should".
+        expr = {"should": gen_default_term_expr}
+        query = update_query_expr(default_query, keep_old=True, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+
+"""
+******************************************************************
+#  The following cases are used to test `search` function 
+#  with invalid collection_name, or invalid query expr
+******************************************************************
+"""
+
+
+class TestSearchInvalid(object):
+    # Negative tests: search must raise for invalid collection names,
+    # partitions, field names, top_k values, and search params.
+    """
+    Test search collection with invalid collection names
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_collection_name(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_invalid_partition(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_invalid_field(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        # Skip GPU-only index types when the server runs in CPU mode.
+        if str(connect._cmd("mode")) == "CPU":
+            if request.param["index_type"] in index_cpu_not_support():
+                pytest.skip("sq8h not support in CPU mode")
+        return request.param
+
+    @pytest.mark.level(2)
+    def test_search_with_invalid_collection(self, connect, get_collection_name):
+        collection_name = get_collection_name
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection_name, default_query)
+
+    @pytest.mark.level(2)
+    def test_search_with_invalid_partition(self, connect, collection, get_invalid_partition):
+        # tag = " "
+        tag = get_invalid_partition
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, default_query, partition_tags=tag)
+
+    @pytest.mark.level(2)
+    def test_search_with_invalid_field_name(self, connect, collection, get_invalid_field):
+        fields = [get_invalid_field]
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, default_query, fields=fields)
+
+    @pytest.mark.level(1)
+    def test_search_with_not_existed_field(self, connect, collection):
+        fields = [gen_unique_str("field_name")]
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, default_query, fields=fields)
+
+    """
+    Test search collection with invalid query
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_ints()
+    )
+    def get_top_k(self, request):
+        yield request.param
+
+    @pytest.mark.level(1)
+    def test_search_with_invalid_top_k(self, connect, collection, get_top_k):
+        '''
+        target: test search function, with the wrong top_k
+        method: search with top_k
+        expected: raise an error, and the connection is normal
+        '''
+        top_k = get_top_k
+        # NOTE(review): this mutates the module-level `default_query` in place
+        # and never restores it -- possible cross-test pollution; confirm.
+        default_query["bool"]["must"][0]["vector"][field_name]["topk"] = top_k
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, default_query)
+
+    """
+    Test search collection with invalid search params
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invaild_search_params()
+    )
+    def get_search_params(self, request):
+        yield request.param
+
+    @pytest.mark.level(2)
+    def test_search_with_invalid_params(self, connect, collection, get_simple_index, get_search_params):
+        '''
+        target: test search function, with the wrong nprobe
+        method: search with nprobe
+        expected: raise an error, and the connection is normal
+        '''
+        search_params = get_search_params
+        index_type = get_simple_index["index_type"]
+        # FLAT ignores search params, so invalid ones cannot fail there.
+        if index_type in ["FLAT"]:
+            pytest.skip("skip in FLAT index")
+        if index_type != search_params["index_type"]:
+            pytest.skip("skip if index_type not matched")
+        entities, ids = init_data(connect, collection)
+        connect.create_index(collection, field_name, get_simple_index)
+        query, vecs = gen_query_vectors(field_name, entities, default_top_k, 1,
+                                        search_params=search_params["search_params"])
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    @pytest.mark.level(2)
+    def test_search_with_invalid_params_binary(self, connect, binary_collection):
+        '''
+        target: test search function, with the wrong nprobe
+        method: search with nprobe
+        expected: raise an error, and the connection is normal
+        '''
+        nq = 1
+        index_type = "BIN_IVF_FLAT"
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        connect.create_index(binary_collection, binary_field_name,
+                             {"index_type": index_type, "metric_type": "JACCARD", "params": {"nlist": 128}})
+        # nprobe=0 is out of range and must be rejected.
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
+                                        search_params={"nprobe": 0}, metric_type="JACCARD")
+        with pytest.raises(Exception) as e:
+            res = connect.search(binary_collection, query)
+
+    @pytest.mark.level(2)
+    def test_search_with_empty_params(self, connect, collection, args, get_simple_index):
+        '''
+        target: test search function, with empty search params
+        method: search with params
+        expected: raise an error, and the connection is normal
+        '''
+        index_type = get_simple_index["index_type"]
+        if args["handler"] == "HTTP":
+            pytest.skip("skip in http mode")
+        if index_type == "FLAT":
+            pytest.skip("skip in FLAT index")
+        entities, ids = init_data(connect, collection)
+        connect.create_index(collection, field_name, get_simple_index)
+        query, vecs = gen_query_vectors(field_name, entities, default_top_k, 1, search_params={})
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+
+def check_id_result(result, id):
+    # Return True if `id` appears among the top hits of `result`.
+    # Only the first `limit_in` entries are considered when the result list is
+    # at least that long; otherwise the whole list is checked.
+    limit_in = 5
+    ids = [entity.id for entity in result]
+    if len(result) >= limit_in:
+        return id in ids[:limit_in]
+    else:
+        return id in ids
diff --git a/tests/python_test/pytest.ini b/tests/python_test/pytest.ini
new file mode 100644
index 000000000..1a720a2d0
--- /dev/null
+++ b/tests/python_test/pytest.ini
@@ -0,0 +1,14 @@
+[pytest]
+log_format = [%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)
+log_date_format = %Y-%m-%d %H:%M:%S
+
+log_cli = true
+log_level = 20
+
+timeout = 360
+
+markers = 
+    level: test level
+    serial
+
+#level = 1
diff --git a/tests/python_test/requirements.txt b/tests/python_test/requirements.txt
new file mode 100644
index 000000000..69f16f1c8
--- /dev/null
+++ b/tests/python_test/requirements.txt
@@ -0,0 +1,12 @@
+numpy>=1.18.0
+pylint==2.5.0
+pytest==4.5.0
+pytest-timeout==1.3.3
+pytest-repeat==0.8.0
+allure-pytest==2.7.0
+pytest-print==0.1.2
+pytest-level==0.1.1
+pytest-xdist==1.23.2
+scikit-learn>=0.19.1
+kubernetes==10.0.1
+pymilvus-test>=0.5.0,<0.6.0
\ No newline at end of file
diff --git a/tests/python_test/requirements_cluster.txt b/tests/python_test/requirements_cluster.txt
new file mode 100644
index 000000000..a4f56b4f7
--- /dev/null
+++ b/tests/python_test/requirements_cluster.txt
@@ -0,0 +1,25 @@
+astroid==2.2.5
+atomicwrites==1.3.0
+attrs==19.1.0
+importlib-metadata==0.15
+isort==4.3.20
+lazy-object-proxy==1.4.1
+mccabe==0.6.1
+more-itertools==7.0.0
+numpy==1.16.3
+pluggy==0.12.0
+py==1.8.0
+pylint==2.5.0
+pytest==4.5.0
+pytest-timeout==1.3.3
+pytest-repeat==0.8.0
+allure-pytest==2.7.0
+pytest-print==0.1.2
+pytest-level==0.1.1
+six==1.12.0
+thrift==0.11.0
+typed-ast==1.3.5
+wcwidth==0.1.7
+wrapt==1.11.1
+zipp==0.5.1
+pymilvus>=0.2.0
diff --git a/tests/python_test/run.sh b/tests/python_test/run.sh
new file mode 100644
index 000000000..cee5b061f
--- /dev/null
+++ b/tests/python_test/run.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+
+pytest . $@
\ No newline at end of file
diff --git a/tests/python_test/stability/test_mysql.py b/tests/python_test/stability/test_mysql.py
new file mode 100644
index 000000000..7853a220f
--- /dev/null
+++ b/tests/python_test/stability/test_mysql.py
@@ -0,0 +1,43 @@
+import time
+import random
+import pdb
+import threading
+import logging
+from multiprocessing import Pool, Process
+import pytest
+from utils import *
+
+class TestMysql:
+    """
+    ******************************************************************
+      The following cases are used to test mysql failure
+    ******************************************************************
+    """
+    @pytest.fixture(scope="function", autouse=True)
+    def skip_check(self, connect, args):
+        # Restart/failure cases are not applicable when running against a
+        # mishards (shards) deployment.
+        if args["service_name"].find("shards") != -1:
+            reason = "Skip restart cases in shards mode"
+            logging.getLogger().info(reason)
+            pytest.skip(reason)
+
+    # Disabled (leading underscore). NOTE(review): uses the old SDK's
+    # status-returning API (bulk_insert/count_entities returning Status) and
+    # IndexType enum, which do not match the new SDK used elsewhere -- update
+    # before re-enabling.
+    def _test_kill_mysql_during_index(self, connect, collection, args):
+        big_nb = 20000
+        index_param = {"nlist": 1024, "m": 16}
+        index_type = IndexType.IVF_PQ
+        vectors = gen_vectors(big_nb, default_dim)
+        status, ids = connect.bulk_insert(collection, vectors, ids=[i for i in range(big_nb)])
+        status = connect.flush([collection])
+        assert status.OK()
+        status, res_count = connect.count_entities(collection)
+        logging.getLogger().info(res_count)
+        assert status.OK()
+        assert res_count == big_nb
+        logging.getLogger().info("Start create index async")
+        status = connect.create_index(collection, index_type, index_param, _async=True)
+        time.sleep(2)
+        logging.getLogger().info("Start play mysql failure")
+        # pass
+        new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
+        status, res_count = new_connect.count_entities(collection)
+        assert status.OK()
+        assert res_count == big_nb
diff --git a/tests/python_test/stability/test_restart.py b/tests/python_test/stability/test_restart.py
new file mode 100644
index 000000000..bfdc655b4
--- /dev/null
+++ b/tests/python_test/stability/test_restart.py
@@ -0,0 +1,315 @@
+import time
+import random
+import pdb
+import threading
+import logging
+import json
+from multiprocessing import Pool, Process
+import pytest
+from utils import *
+
+
# Prefix used for uniquely-named collections created by these cases.
uid = "wal"
# Generic per-test timeout, in seconds.
TIMEOUT = 120
# Pause between consecutive inserts in long-running scenarios, in seconds.
insert_interval_time = 1.5
# Row count of the "big" data set used to keep flush/index in flight
# long enough to restart the server mid-operation.
big_nb = 100000
field_name = "float_vector"
big_entities = gen_entities(big_nb)
# Index built by the indexing-recovery cases.
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
+
+
class TestRestartBase:
    """
    ******************************************************************
      The following cases verify recovery after a server restart.
      All cases are currently disabled via the leading underscore.
    ******************************************************************
    """
    @pytest.fixture(scope="module", autouse=True)
    def skip_check(self, args):
        # Restart cases require a restartable service name and cannot run
        # in shards mode, where restarting one service is meaningless.
        logging.getLogger().info(args)
        if "service_name" not in args or not args["service_name"]:
            reason = "Skip if service name not provided"
            logging.getLogger().info(reason)
            pytest.skip(reason)
        if args["service_name"].find("shards") != -1:
            reason = "Skip restart cases in shards mode"
            logging.getLogger().info(reason)
            pytest.skip(reason)

    @pytest.mark.level(2)
    def _test_insert_flush(self, connect, collection, args):
        '''
        target: return the same row count after server restart
        method: create collection, insert/flush twice, restart server and assert row count
        expected: row count keeps the same
        '''
        ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        ids = connect.bulk_insert(collection, default_entities)
        connect.flush([collection])
        res_count = connect.count_entities(collection)
        logging.getLogger().info(res_count)
        # BUG fix: the original compared against `nb`, which is undefined in
        # this module; `default_entities` holds `default_nb` rows, so two
        # inserts yield 2 * default_nb entities. TODO confirm against utils.
        assert res_count == 2 * default_nb
        # restart server
        logging.getLogger().info("Start restart server")
        assert restart_server(args["service_name"])
        # assert row count again on a fresh connection
        new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
        res_count = new_connect.count_entities(collection)
        logging.getLogger().info(res_count)
        assert res_count == 2 * default_nb

    @pytest.mark.level(2)
    def _test_insert_during_flushing(self, connect, collection, args):
        '''
        target: flushing will recover
        method: create collection, insert, restart server while the async
                flush is in flight, then wait for the row count to converge
        expected: row count eventually equals big_nb
        '''
        # disable_autoflush()
        ids = connect.bulk_insert(collection, big_entities)
        connect.flush([collection], _async=True)
        res_count = connect.count_entities(collection)
        logging.getLogger().info(res_count)
        if res_count < big_nb:
            # restart server while flushing is still incomplete
            assert restart_server(args["service_name"])
            # assert row count again
            new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
            res_count_2 = new_connect.count_entities(collection)
            logging.getLogger().info(res_count_2)
            timeout = 300
            start_time = time.time()
            # poll until the recovered flush completes or we time out
            while new_connect.count_entities(collection) != big_nb and (time.time() - start_time < timeout):
                time.sleep(10)
                logging.getLogger().info(new_connect.count_entities(collection))
            res_count_3 = new_connect.count_entities(collection)
            logging.getLogger().info(res_count_3)
            assert res_count_3 == big_nb

    @pytest.mark.level(2)
    def _test_delete_during_flushing(self, connect, collection, args):
        '''
        target: flushing will recover
        method: create collection, delete part of the entities, restart the
                server while the async flush is in flight, then wait for the
                row count to converge
        expected: row count equals (big_nb - delete_length)
        '''
        # disable_autoflush()
        ids = connect.bulk_insert(collection, big_entities)
        connect.flush([collection])
        delete_length = 1000
        delete_ids = ids[big_nb//4:big_nb//4+delete_length]
        delete_res = connect.delete_entity_by_id(collection, delete_ids)
        connect.flush([collection], _async=True)
        res_count = connect.count_entities(collection)
        logging.getLogger().info(res_count)
        # restart server
        assert restart_server(args["service_name"])
        # assert row count again
        new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
        res_count_2 = new_connect.count_entities(collection)
        logging.getLogger().info(res_count_2)
        timeout = 100
        start_time = time.time()
        while new_connect.count_entities(collection) != big_nb - delete_length and (time.time() - start_time < timeout):
            time.sleep(10)
            logging.getLogger().info(new_connect.count_entities(collection))
        # only assert when the count actually converged within the timeout
        if new_connect.count_entities(collection) == big_nb - delete_length:
            time.sleep(10)
            res_count_3 = new_connect.count_entities(collection)
            logging.getLogger().info(res_count_3)
            assert res_count_3 == big_nb - delete_length

    @pytest.mark.level(2)
    def _test_during_indexed(self, connect, collection, args):
        '''
        target: index state survives restart
        method: create collection, build index, restart server, then check
                row count and per-file index metadata
        expected: row count equals big_nb and index files keep their type
        '''
        # disable_autoflush()
        ids = connect.bulk_insert(collection, big_entities)
        connect.flush([collection])
        connect.create_index(collection, field_name, default_index)
        res_count = connect.count_entities(collection)
        logging.getLogger().info(res_count)
        stats = connect.get_collection_stats(collection)
        # restart server
        assert restart_server(args["service_name"])
        # assert row count again
        new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
        assert new_connect.count_entities(collection) == big_nb
        stats = connect.get_collection_stats(collection)
        # every non-raw file of the vector field must still carry the index
        for file in stats["partitions"][0]["segments"][0]["files"]:
            if file["field"] == field_name and file["name"] != "_raw":
                assert file["data_size"] > 0
                assert file["index_type"] == default_index["index_type"]

    @pytest.mark.level(2)
    def _test_during_indexing(self, connect, collection, args):
        '''
        target: index build resumes after restart
        method: create collection, insert loop * big_nb entities, start an
                async index build, restart server, then check row count and
                the server-reported indexing state
        expected: row count equals loop * big_nb; server continues building
                  the index after restart
        '''
        # disable_autoflush()
        loop = 5
        for i in range(loop):
            ids = connect.bulk_insert(collection, big_entities)
        connect.flush([collection])
        connect.create_index(collection, field_name, default_index, _async=True)
        res_count = connect.count_entities(collection)
        logging.getLogger().info(res_count)
        stats = connect.get_collection_stats(collection)
        # restart server
        assert restart_server(args["service_name"])
        # assert row count again
        new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
        res_count_2 = new_connect.count_entities(collection)
        logging.getLogger().info(res_count_2)
        assert res_count_2 == loop * big_nb
        # the server must report that indexing resumed after the restart
        # (idiom fix: `is True` instead of `== True`; stale commented-out
        # index-file verification loop removed — see VCS history)
        status = new_connect._cmd("status")
        assert json.loads(status)["indexing"] is True

    @pytest.mark.level(2)
    def _test_delete_flush_during_compacting(self, connect, collection, args):
        '''
        target: verify server work after restart during compaction
        method: create collection, delete/flush, start async compact, restart
            server, assert row count; then call `compact` again and expect it
            to shrink the segment
        expected: row count equals (big_nb - delete_length * loop)
        '''
        # disable_autoflush()
        ids = connect.bulk_insert(collection, big_entities)
        connect.flush([collection])
        delete_length = 1000
        loop = 10
        for i in range(loop):
            delete_ids = ids[i*delete_length:(i+1)*delete_length]
            delete_res = connect.delete_entity_by_id(collection, delete_ids)
        connect.flush([collection])
        connect.compact(collection, _async=True)
        res_count = connect.count_entities(collection)
        logging.getLogger().info(res_count)
        assert res_count == big_nb - delete_length*loop
        info = connect.get_collection_stats(collection)
        size_old = info["partitions"][0]["segments"][0]["data_size"]
        logging.getLogger().info(size_old)
        # restart server
        assert restart_server(args["service_name"])
        # assert row count again
        new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
        res_count_2 = new_connect.count_entities(collection)
        logging.getLogger().info(res_count_2)
        assert res_count_2 == big_nb - delete_length*loop
        # a fresh synchronous compact must reduce the segment size
        info = connect.get_collection_stats(collection)
        size_before = info["partitions"][0]["segments"][0]["data_size"]
        status = connect.compact(collection)
        assert status.OK()
        info = connect.get_collection_stats(collection)
        size_after = info["partitions"][0]["segments"][0]["data_size"]
        assert size_before > size_after

    @pytest.mark.level(2)
    def _test_insert_during_flushing_multi_collections(self, connect, args):
        '''
        target: flushing will recover
        method: create several collections, insert into each, restart server
                while the async flush is in flight, then wait until every
                collection reaches the expected row count
        expected: each collection eventually holds big_nb entities
        '''
        # disable_autoflush()
        collection_num = 2
        collection_list = []
        for i in range(collection_num):
            collection_name = gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            ids = connect.bulk_insert(collection_name, big_entities)
        connect.flush(collection_list, _async=True)
        res_count = connect.count_entities(collection_list[-1])
        logging.getLogger().info(res_count)
        if res_count < big_nb:
            # restart server
            assert restart_server(args["service_name"])
            # assert row count again
            new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
            res_count_2 = new_connect.count_entities(collection_list[-1])
            logging.getLogger().info(res_count_2)
            timeout = 300
            start_time = time.time()
            while time.time() - start_time < timeout:
                count_list = []
                break_flag = True
                for index, name in enumerate(collection_list):
                    tmp_count = new_connect.count_entities(name)
                    count_list.append(tmp_count)
                    logging.getLogger().info(count_list)
                    if tmp_count != big_nb:
                        break_flag = False
                        break
                # idiom fix: truthiness check instead of `== True`
                if break_flag:
                    break
                time.sleep(10)
            for name in collection_list:
                assert new_connect.count_entities(name) == big_nb

    @pytest.mark.level(2)
    def _test_insert_during_flushing_multi_partitions(self, connect, collection, args):
        '''
        target: flushing will recover
        method: create collection with two partitions, insert into each,
                restart server while the async flush is in flight, then wait
                for the row count to converge
        expected: row count equals big_nb * 2
        '''
        # disable_autoflush()
        partitions_num = 2
        partitions = []
        for i in range(partitions_num):
            tag_tmp = gen_unique_str()
            partitions.append(tag_tmp)
            connect.create_partition(collection, tag_tmp)
            ids = connect.bulk_insert(collection, big_entities, partition_tag=tag_tmp)
        connect.flush([collection], _async=True)
        res_count = connect.count_entities(collection)
        logging.getLogger().info(res_count)
        if res_count < big_nb:
            # restart server
            assert restart_server(args["service_name"])
            # assert row count again
            new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
            res_count_2 = new_connect.count_entities(collection)
            logging.getLogger().info(res_count_2)
            timeout = 300
            start_time = time.time()
            while new_connect.count_entities(collection) != big_nb * 2 and (time.time() - start_time < timeout):
                time.sleep(10)
                logging.getLogger().info(new_connect.count_entities(collection))
            res_count_3 = new_connect.count_entities(collection)
            logging.getLogger().info(res_count_3)
            assert res_count_3 == big_nb * 2
diff --git a/tests/python_test/test_compact.py b/tests/python_test/test_compact.py
new file mode 100644
index 000000000..26929e12b
--- /dev/null
+++ b/tests/python_test/test_compact.py
@@ -0,0 +1,722 @@
+import time
+import pdb
+import threading
+import logging
+from multiprocessing import Pool, Process
+import pytest
+from utils import *
+from constants import *
+
# Per-test timeout (seconds) for all compact cases.
COMPACT_TIMEOUT = 180
# Field names of the float / binary vector fields in the default schema.
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
# Default top-10 L2 query against the float vector field.
default_single_query = {
    "bool": {
        "must": [
            {"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type":"L2",
                                     "params": {"nprobe": 10}}}}
        ]
    }
}
# Default top-10 JACCARD query against the binary vector field.
default_binary_single_query = {
    "bool": {
        "must": [
            {"vector": {binary_field_name: {"topk": 10, "query": gen_binary_vectors(1, default_dim),
                                            "metric_type":"JACCARD", "params": {"nprobe": 10}}}}
        ]
    }
}
# Pre-built binary query (and its raw vectors) for search-after-compact cases.
default_query, default_query_vecs = gen_query_vectors(binary_field_name, default_binary_entities, 1, 2)
+
+
def ip_query():
    """Return a deep copy of the default single-vector query using IP metric."""
    query = copy.deepcopy(default_single_query)
    # switch the metric of the (single) vector clause from L2 to IP
    vector_clause = query["bool"]["must"][0]["vector"][field_name]
    vector_clause["metric_type"] = "IP"
    return query
+
+
+class TestCompactBase:
+    """
+    ******************************************************************
+      The following cases are used to test `compact` function
+    ******************************************************************
+    """
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_compact_collection_name_None(self, connect, collection):
+        '''
+        target: compact collection where collection name is None
+        method: compact with the collection_name: None
+        expected: exception raised
+        '''
+        collection_name = None
+        with pytest.raises(Exception) as e:
+            status = connect.compact(collection_name)
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_compact_collection_name_not_existed(self, connect, collection):
+        '''
+        target: compact collection not existed
+        method: compact with a random collection_name, which is not in db
+        expected: exception raised
+        '''
+        collection_name = gen_unique_str("not_existed")
+        with pytest.raises(Exception) as e:
+            status = connect.compact(collection_name)
+    
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_collection_name(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_ints()
+    )
+    def get_threshold(self, request):
+        yield request.param
+
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_compact_collection_name_invalid(self, connect, get_collection_name):
+        '''
+        target: compact collection with invalid name
+        method: compact with invalid collection_name
+        expected: exception raised
+        '''
+        collection_name = get_collection_name
+        with pytest.raises(Exception) as e:
+            status = connect.compact(collection_name)
+            # assert not status.OK()
+
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_compact_threshold_invalid(self, connect, collection, get_threshold):
+        '''
+        target: compact collection with invalid name
+        method: compact with invalid threshold
+        expected: exception raised
+        '''
+        threshold = get_threshold
+        if threshold != None:
+            with pytest.raises(Exception) as e:
+                status = connect.compact(collection, threshold)
+
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_add_entity_and_compact(self, connect, collection):
+        '''
+        target: test add entity and compact
+        method: add entity and compact collection
+        expected: data_size before and after Compact
+        '''
+        # vector = gen_single_vector(dim)
+        ids = connect.bulk_insert(collection, default_entity)
+        assert len(ids) == 1
+        connect.flush([collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(collection)
+        logging.getLogger().info(info)
+        size_before = info["partitions"][0]["segments"][0]["data_size"]
+        status = connect.compact(collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(collection)
+        size_after = info["partitions"][0]["segments"][0]["data_size"]
+        assert(size_before == size_after)
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_insert_and_compact(self, connect, collection):
+        '''
+        target: test add entities and compact 
+        method: add entities and compact collection
+        expected: data_size before and after Compact
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(collection)
+        # assert status.OK()
+        size_before = info["partitions"][0]["segments"][0]["data_size"]
+        status = connect.compact(collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(collection)
+        # assert status.OK()
+        size_after = info["partitions"][0]["segments"][0]["data_size"]
+        assert(size_before == size_after)
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_insert_delete_part_and_compact(self, connect, collection):
+        '''
+        target: test add entities, delete part of them and compact
+        method: add entities, delete a few and compact collection
+        expected: status ok, data size maybe is smaller after compact
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        delete_ids = [ids[0], ids[-1]]
+        status = connect.delete_entity_by_id(collection, delete_ids)
+        assert status.OK()
+        connect.flush([collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(collection)
+        logging.getLogger().info(info["partitions"])
+        size_before = info["partitions"][0]["data_size"]
+        logging.getLogger().info(size_before)
+        status = connect.compact(collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(collection)
+        logging.getLogger().info(info["partitions"])
+        size_after = info["partitions"][0]["data_size"]
+        logging.getLogger().info(size_after)
+        assert(size_before >= size_after)
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_insert_delete_part_and_compact_threshold(self, connect, collection):
+        '''
+        target: test add entities, delete part of them and compact
+        method: add entities, delete a few and compact collection
+        expected: status ok, data size maybe is smaller after compact
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        delete_ids = [ids[0], ids[-1]]
+        status = connect.delete_entity_by_id(collection, delete_ids)
+        assert status.OK()
+        connect.flush([collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(collection)
+        logging.getLogger().info(info["partitions"])
+        size_before = info["partitions"][0]["data_size"]
+        logging.getLogger().info(size_before)
+        status = connect.compact(collection, 0.1)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(collection)
+        logging.getLogger().info(info["partitions"])
+        size_after = info["partitions"][0]["data_size"]
+        logging.getLogger().info(size_after)
+        assert(size_before >= size_after)
+
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_insert_delete_all_and_compact(self, connect, collection):
+        '''
+        target: test add entities, delete them and compact 
+        method: add entities, delete all and compact collection
+        expected: status ok, no data size in collection info because collection is empty
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        status = connect.delete_entity_by_id(collection, ids)
+        assert status.OK()
+        connect.flush([collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(collection)
+        status = connect.compact(collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(collection)
+        logging.getLogger().info(info["partitions"])
+        assert not info["partitions"][0]["segments"]
+
+    # TODO: enable
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_insert_partition_delete_half_and_compact(self, connect, collection):
+        '''
+        target: test add entities into partition, delete them and compact 
+        method: add entities, delete half of entities in partition and compact collection
+        expected: status ok, data_size less than the older version
+        '''
+        connect.create_partition(collection, default_tag)
+        assert connect.has_partition(collection, default_tag)
+        ids = connect.bulk_insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush([collection])
+        info = connect.get_collection_stats(collection)
+        logging.getLogger().info(info["partitions"])
+        delete_ids = ids[:default_nb//2]
+        status = connect.delete_entity_by_id(collection, delete_ids)
+        assert status.OK()
+        connect.flush([collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(collection)
+        logging.getLogger().info(info["partitions"])
+        status = connect.compact(collection)
+        assert status.OK()
+        # get collection info after compact
+        info_after = connect.get_collection_stats(collection)
+        logging.getLogger().info(info_after["partitions"])
+        assert info["partitions"][1]["segments"][0]["data_size"] >= info_after["partitions"][1]["segments"][0]["data_size"]
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        if str(connect._cmd("mode")) == "GPU":
+            if not request.param["index_type"] not in ivf():
+                pytest.skip("Only support index_type: idmap/ivf")
+        if str(connect._cmd("mode")) == "CPU":
+            if request.param["index_type"] in index_cpu_not_support():
+                pytest.skip("CPU not support index_type: ivf_sq8h")
+        return request.param
+
+    @pytest.mark.level(2)
+    def test_compact_after_index_created(self, connect, collection, get_simple_index):
+        '''
+        target: test compact collection after index created
+        method: add entities, create index, delete part of entities and compact
+        expected: status ok, index description no change, data size smaller after compact
+        '''
+        count = 10
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        connect.create_index(collection, field_name, get_simple_index)
+        connect.flush([collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(collection)
+        size_before = info["partitions"][0]["segments"][0]["data_size"]
+        delete_ids = ids[:default_nb//2]
+        status = connect.delete_entity_by_id(collection, delete_ids)
+        assert status.OK()
+        connect.flush([collection])
+        status = connect.compact(collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(collection)
+        size_after = info["partitions"][0]["segments"][0]["data_size"]
+        assert(size_before >= size_after)
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_add_entity_and_compact_twice(self, connect, collection):
+        '''
+        target: test add entity and compact twice
+        method: add entity and compact collection twice
+        expected: status ok, data size no change
+        '''
+        ids = connect.bulk_insert(collection, default_entity)
+        connect.flush([collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(collection)
+        size_before = info["partitions"][0]["segments"][0]["data_size"]
+        status = connect.compact(collection)
+        assert status.OK()
+        connect.flush([collection])
+        # get collection info after compact
+        info = connect.get_collection_stats(collection)
+        size_after = info["partitions"][0]["segments"][0]["data_size"]
+        assert(size_before == size_after)
+        status = connect.compact(collection)
+        assert status.OK()
+        # get collection info after compact twice
+        info = connect.get_collection_stats(collection)
+        size_after_twice = info["partitions"][0]["segments"][0]["data_size"]
+        assert(size_after == size_after_twice)
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_insert_delete_part_and_compact_twice(self, connect, collection):
+        '''
+        target: test add entities, delete part of them and compact twice
+        method: add entities, delete part and compact collection twice
+        expected: status ok, data size smaller after first compact, no change after second
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        delete_ids = [ids[0], ids[-1]]
+        status = connect.delete_entity_by_id(collection, delete_ids)
+        assert status.OK()
+        connect.flush([collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(collection)
+        size_before = info["partitions"][0]["data_size"]
+        status = connect.compact(collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(collection)
+        size_after = info["partitions"][0]["data_size"]
+        assert(size_before >= size_after)
+        status = connect.compact(collection)
+        assert status.OK()
+        # get collection info after compact twice
+        info = connect.get_collection_stats(collection)
+        size_after_twice = info["partitions"][0]["data_size"]
+        assert(size_after == size_after_twice)
+
    @pytest.mark.timeout(COMPACT_TIMEOUT)
    def test_compact_multi_collections(self, connect):
        '''
        target: test compact works or not with multiple collections
        method: create 20 collections, add entities into them and compact in turn
        expected: status ok
        '''
        nb = 100
        num_collections = 20
        entities = gen_entities(nb)
        collection_list = []
        # create all collections up front, then exercise each one in turn
        for i in range(num_collections):
            collection_name = gen_unique_str("test_compact_multi_collection_%d" % i)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
        for i in range(num_collections):
            ids = connect.bulk_insert(collection_list[i], entities)
            # NOTE(review): no flush before delete/compact here, unlike the
            # sibling cases — presumably relying on auto-flush; verify.
            connect.delete_entity_by_id(collection_list[i], ids[:nb//2])
            status = connect.compact(collection_list[i])
            assert status.OK()
            connect.drop_collection(collection_list[i])
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_add_entity_after_compact(self, connect, collection):
+        '''
+        target: test add entity after compact
+        method: after compact operation, add entity
+        expected: status ok, entity added
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(collection)
+        size_before = info["partitions"][0]["segments"][0]["data_size"]
+        status = connect.compact(collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(collection)
+        size_after = info["partitions"][0]["segments"][0]["data_size"]
+        assert(size_before == size_after)
+        ids = connect.bulk_insert(collection, default_entity)
+        connect.flush([collection])
+        res = connect.count_entities(collection)
+        assert res == default_nb+1
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_index_creation_after_compact(self, connect, collection, get_simple_index):
+        '''
+        target: test index creation after compact
+        method: after compact operation, create index
+        expected: status ok, index description no change
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        connect.flush([collection])
+        status = connect.delete_entity_by_id(collection, ids[:10])
+        assert status.OK()
+        connect.flush([collection])
+        status = connect.compact(collection)
+        assert status.OK()
+        status = connect.create_index(collection, field_name, get_simple_index)
+        assert status.OK()
+        # status, result = connect.get_index_info(collection)
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_delete_entities_after_compact(self, connect, collection):
+        '''
+        target: test delete entities after compact
+        method: after compact operation, delete entities
+        expected: status ok, entities deleted
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        status = connect.compact(collection)
+        assert status.OK()
+        connect.flush([collection])
+        status = connect.delete_entity_by_id(collection, ids)
+        assert status.OK()
+        connect.flush([collection])
+        assert connect.count_entities(collection) == 0
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_search_after_compact(self, connect, collection):
+        '''
+        target: test search after compact
+        method: after compact operation, search vector
+        expected: status ok
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        status = connect.compact(collection)
+        assert status.OK()
+        query = copy.deepcopy(default_single_query)
+        query["bool"]["must"][0]["vector"][field_name]["query"] = [default_entity[-1]["values"][0],
+                                                                   default_entities[-1]["values"][0],
+                                                                   default_entities[-1]["values"][-1]]
+        res = connect.search(collection, query)
+        logging.getLogger().debug(res)
+        assert len(res) == len(query["bool"]["must"][0]["vector"][field_name]["query"])
+        assert res[0]._distances[0] > epsilon
+        assert res[1]._distances[0] < epsilon
+        assert res[2]._distances[0] < epsilon
+
+
+class TestCompactBinary:
+    """
+    ******************************************************************
+      The following cases are used to test `compact` function
+    ******************************************************************
+    """
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_add_entity_and_compact(self, connect, binary_collection):
+        '''
+        target: test add binary vector and compact
+        method: add vector and compact collection
+        expected: status ok, vector added
+        '''
+        ids = connect.bulk_insert(binary_collection, default_binary_entity)
+        assert len(ids) == 1
+        connect.flush([binary_collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(binary_collection)
+        size_before = info["partitions"][0]["segments"][0]["data_size"]
+        status = connect.compact(binary_collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(binary_collection)
+        size_after = info["partitions"][0]["segments"][0]["data_size"]
+        assert(size_before == size_after)
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_insert_and_compact(self, connect, binary_collection):
+        '''
+        target: test add entities with binary vector and compact
+        method: add entities and compact collection
+        expected: status ok, entities added
+        '''
+        ids = connect.bulk_insert(binary_collection, default_binary_entities)
+        assert len(ids) == default_nb
+        connect.flush([binary_collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(binary_collection)
+        size_before = info["partitions"][0]["segments"][0]["data_size"]
+        status = connect.compact(binary_collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(binary_collection)
+        size_after = info["partitions"][0]["segments"][0]["data_size"]
+        assert(size_before == size_after)
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_insert_delete_part_and_compact(self, connect, binary_collection):
+        '''
+        target: test add entities, delete part of them and compact 
+        method: add entities, delete a few and compact collection
+        expected: status ok, data size is smaller after compact
+        '''
+        ids = connect.bulk_insert(binary_collection, default_binary_entities)
+        assert len(ids) == default_nb
+        connect.flush([binary_collection])
+        delete_ids = [ids[0], ids[-1]]
+        status = connect.delete_entity_by_id(binary_collection, delete_ids)
+        assert status.OK()
+        connect.flush([binary_collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(binary_collection)
+        logging.getLogger().info(info["partitions"])
+        size_before = info["partitions"][0]["data_size"]
+        logging.getLogger().info(size_before)
+        status = connect.compact(binary_collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(binary_collection)
+        logging.getLogger().info(info["partitions"])
+        size_after = info["partitions"][0]["data_size"]
+        logging.getLogger().info(size_after)
+        assert(size_before >= size_after)
+    
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_insert_delete_all_and_compact(self, connect, binary_collection):
+        '''
+        target: test add entities, delete them and compact 
+        method: add entities, delete all and compact collection
+        expected: status ok, no data size in collection info because collection is empty
+        '''
+        ids = connect.bulk_insert(binary_collection, default_binary_entities)
+        assert len(ids) == default_nb
+        connect.flush([binary_collection])
+        status = connect.delete_entity_by_id(binary_collection, ids)
+        assert status.OK()
+        connect.flush([binary_collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(binary_collection)
+        status = connect.compact(binary_collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(binary_collection)
+        assert status.OK()
+        logging.getLogger().info(info["partitions"])
+        assert not info["partitions"][0]["segments"]
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_add_entity_and_compact_twice(self, connect, binary_collection):
+        '''
+        target: test add entity and compact twice
+        method: add entity and compact collection twice
+        expected: status ok
+        '''
+        ids = connect.bulk_insert(binary_collection, default_binary_entity)
+        assert len(ids) == 1
+        connect.flush([binary_collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(binary_collection)
+        size_before = info["partitions"][0]["segments"][0]["data_size"]
+        status = connect.compact(binary_collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(binary_collection)
+        size_after = info["partitions"][0]["segments"][0]["data_size"]
+        assert(size_before == size_after)
+        status = connect.compact(binary_collection)
+        assert status.OK()
+        # get collection info after compact twice
+        info = connect.get_collection_stats(binary_collection)
+        size_after_twice = info["partitions"][0]["segments"][0]["data_size"]
+        assert(size_after == size_after_twice)
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_insert_delete_part_and_compact_twice(self, connect, binary_collection):
+        '''
+        target: test add entities, delete part of them and compact twice
+        method: add entities, delete part and compact collection twice
+        expected: status ok, data size smaller after first compact, no change after second
+        '''
+        ids = connect.bulk_insert(binary_collection, default_binary_entities)
+        assert len(ids) == default_nb
+        connect.flush([binary_collection])
+        delete_ids = [ids[0], ids[-1]]
+        status = connect.delete_entity_by_id(binary_collection, delete_ids)
+        assert status.OK()
+        connect.flush([binary_collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(binary_collection)
+        size_before = info["partitions"][0]["data_size"]
+        status = connect.compact(binary_collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(binary_collection)
+        size_after = info["partitions"][0]["data_size"]
+        assert(size_before >= size_after)
+        status = connect.compact(binary_collection)
+        assert status.OK()
+        # get collection info after compact twice
+        info = connect.get_collection_stats(binary_collection)
+        size_after_twice = info["partitions"][0]["data_size"]
+        assert(size_after == size_after_twice)
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_compact_multi_collections(self, connect):
+        '''
+        target: test compact works or not with multiple collections
+        method: create 10 collections, add entities into them and compact in turn
+        expected: status ok
+        '''
+        nq = 100
+        num_collections = 10
+        tmp, entities = gen_binary_entities(nq)
+        collection_list = []
+        for i in range(num_collections):
+            collection_name = gen_unique_str("test_compact_multi_collection_%d" % i)
+            collection_list.append(collection_name)
+            connect.create_collection(collection_name, default_binary_fields)
+        for i in range(num_collections):
+            ids = connect.bulk_insert(collection_list[i], entities)
+            assert len(ids) == nq
+            status = connect.delete_entity_by_id(collection_list[i], [ids[0], ids[-1]])
+            assert status.OK()
+            connect.flush([collection_list[i]])
+            status = connect.compact(collection_list[i])
+            assert status.OK()
+            status = connect.drop_collection(collection_list[i])
+            assert status.OK()
+
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_add_entity_after_compact(self, connect, binary_collection):
+        '''
+        target: test add entity after compact
+        method: after compact operation, add entity
+        expected: status ok, entity added
+        '''
+        ids = connect.bulk_insert(binary_collection, default_binary_entities)
+        connect.flush([binary_collection])
+        # get collection info before compact
+        info = connect.get_collection_stats(binary_collection)
+        size_before = info["partitions"][0]["segments"][0]["data_size"]
+        status = connect.compact(binary_collection)
+        assert status.OK()
+        # get collection info after compact
+        info = connect.get_collection_stats(binary_collection)
+        size_after = info["partitions"][0]["segments"][0]["data_size"]
+        assert(size_before == size_after)
+        ids = connect.bulk_insert(binary_collection, default_binary_entity)
+        connect.flush([binary_collection])
+        res = connect.count_entities(binary_collection)
+        assert res == default_nb + 1
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_delete_entities_after_compact(self, connect, binary_collection):
+        '''
+        target: test delete entities after compact
+        method: after compact operation, delete entities
+        expected: status ok, entities deleted
+        '''
+        ids = connect.bulk_insert(binary_collection, default_binary_entities)
+        connect.flush([binary_collection])
+        status = connect.compact(binary_collection)
+        assert status.OK()
+        connect.flush([binary_collection])
+        status = connect.delete_entity_by_id(binary_collection, ids)
+        assert status.OK()
+        connect.flush([binary_collection])
+        res = connect.count_entities(binary_collection)
+        assert res == 0
+
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_search_after_compact(self, connect, binary_collection):
+        '''
+        target: test search after compact
+        method: after compact operation, search vector
+        expected: status ok
+        '''
+        ids = connect.bulk_insert(binary_collection, default_binary_entities)
+        assert len(ids) == default_nb
+        connect.flush([binary_collection])
+        status = connect.compact(binary_collection)
+        assert status.OK()
+        query_vecs = [default_raw_binary_vectors[0]]
+        distance = jaccard(query_vecs[0], default_raw_binary_vectors[0])
+        query = copy.deepcopy(default_binary_single_query)
+        query["bool"]["must"][0]["vector"][binary_field_name]["query"] = [default_binary_entities[-1]["values"][0],
+                                                                          default_binary_entities[-1]["values"][-1]]
+
+        res = connect.search(binary_collection, query)
+        assert abs(res[0]._distances[0]-distance) <= epsilon
+
+    @pytest.mark.timeout(COMPACT_TIMEOUT)
+    def test_search_after_compact_ip(self, connect, collection):
+        '''
+        target: test search after compact
+        method: after compact operation, search vector
+        expected: status ok
+        '''
+        ids = connect.bulk_insert(collection, default_entities)
+        assert len(ids) == default_nb
+        connect.flush([collection])
+        status = connect.compact(collection)
+        query = ip_query()
+        query["bool"]["must"][0]["vector"][field_name]["query"] = [default_entity[-1]["values"][0],
+                                                                   default_entities[-1]["values"][0],
+                                                                   default_entities[-1]["values"][-1]]
+        res = connect.search(collection, query)
+        logging.getLogger().info(res)
+        assert len(res) == len(query["bool"]["must"][0]["vector"][field_name]["query"])
+        assert res[0]._distances[0] < 1 - epsilon
+        assert res[1]._distances[0] > 1 - epsilon
+        assert res[2]._distances[0] > 1 - epsilon
diff --git a/tests/python_test/test_config.py b/tests/python_test/test_config.py
new file mode 100644
index 000000000..664553a0a
--- /dev/null
+++ b/tests/python_test/test_config.py
@@ -0,0 +1,1402 @@
+import time
+import random
+import pdb
+import threading
+import logging
+from multiprocessing import Pool, Process
+import pytest
+from utils import *
+import ujson
+
+CONFIG_TIMEOUT = 80
+
+class TestCacheConfig:
+    """
+    ******************************************************************
+      The following cases are used to test `get_config` function
+    ******************************************************************
+    """
+    @pytest.fixture(scope="function", autouse=True)
+    def skip_http_check(self, args):
+        if args["handler"] == "HTTP":
+            pytest.skip("skip in http mode")
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def reset_configs(self, connect):
+        '''
+        reset configs so the tests are stable
+        '''
+        relpy = connect.set_config("cache.cache_size", '4GB')
+        config_value = connect.get_config("cache.cache_size")
+        assert config_value == '4GB'
+        #relpy = connect.set_config("cache", "insert_buffer_size", '2GB')
+        #config_value = connect.get_config("cache", "insert_buffer_size")
+        #assert config_value == '1073741824'
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_cache_size_invalid_parent_key(self, connect, collection):
+        '''
+        target: get invalid parent key
+        method: call get_config without parent_key: cache
+        expected: status not ok
+        '''
+        invalid_configs = ["Cache_config", "cache config", "cache_Config", "cacheconfig"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config(config+str(".cache_size"))
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_cache_size_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: cache_size
+        expected: status not ok
+        '''
+        invalid_configs = ["Cpu_cache_size", "cpu cache_size", "cpucachecapacity"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("cache."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_cache_size_valid(self, connect, collection):
+        '''
+        target: get cache_size
+        method: call get_config correctly
+        expected: status ok
+        '''
+        config_value = connect.get_config("cache.cache_size")
+        assert config_value
+
+    @pytest.mark.level(2)
+    def test_get_insert_buffer_size_invalid_parent_key(self, connect, collection):
+        '''
+        target: get invalid parent key
+        method: call get_config without parent_key: cache
+        expected: status not ok
+        '''
+        invalid_configs = ["Cache_config", "cache config", "cache_Config", "cacheconfig"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config(config+".insert_buffer_size")
+
+    @pytest.mark.level(2)
+    def test_get_insert_buffer_size_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: insert_buffer_size
+        expected: status not ok
+        '''
+        invalid_configs = ["Insert_buffer size", "insert buffer_size", "insertbuffersize"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("cache."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_insert_buffer_size_valid(self, connect, collection):
+        '''
+        target: get insert_buffer_size
+        method: call get_config correctly
+        expected: status ok
+        '''
+        config_value = connect.get_config("cache.insert_buffer_size")
+        assert config_value
+
+    @pytest.mark.level(2)
+    def test_get_preload_collection_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: preload_collection
+        expected: status not ok
+        '''
+        invalid_configs = ["preloadtable", "preload collection "]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("cache."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_preload_collection_valid(self, connect, collection):
+        '''
+        target: get preload_collection
+        method: call get_config correctly
+        expected: status ok
+        '''
+        config_value = connect.get_config("cache.preload_collection")
+        assert config_value == ''
+
+    """
+    ******************************************************************
+      The following cases are used to test `set_config` function
+    ******************************************************************
+    """
+    def get_memory_available(self, connect):
+        info = connect._cmd("get_system_info")
+        mem_info = ujson.loads(info)
+        mem_total = int(mem_info["memory_total"])
+        mem_used = int(mem_info["memory_used"])
+        logging.getLogger().info(mem_total)
+        logging.getLogger().info(mem_used)
+        mem_available = mem_total - mem_used
+        return int(mem_available / 1024 / 1024 / 1024)
+
+    def get_memory_total(self, connect):
+        info = connect._cmd("get_system_info")
+        mem_info = ujson.loads(info)
+        mem_total = int(mem_info["memory_total"])
+        return int(mem_total / 1024 / 1024 / 1024)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_cache_size_invalid_parent_key(self, connect, collection):
+        '''
+        target: set invalid parent key
+        method: call set_config without parent_key: cache
+        expected: status not ok
+        '''
+        self.reset_configs(connect)
+        invalid_configs = ["Cache_config", "cache config", "cache_Config", "cacheconfig"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config(config+".cache_size", '4294967296')
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_cache_invalid_child_key(self, connect, collection):
+        '''
+        target: set invalid child key
+        method: call set_config with invalid child_key
+        expected: status not ok
+        '''
+        self.reset_configs(connect)
+        invalid_configs = ["abc", 1]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("cache."+config, '4294967296')
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_cache_size_valid(self, connect, collection):
+        '''
+        target: set cache_size
+        method: call set_config correctly
+        expected: status ok, set successfully
+        '''
+        self.reset_configs(connect)
+        relpy = connect.set_config("cache.cache_size", '2147483648')
+        config_value = connect.get_config("cache.cache_size")
+        assert config_value == '2GB'
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.level(2)
+    def test_set_cache_size_valid_multiple_times(self, connect, collection):
+        '''
+        target: set cache_size
+        method: call set_config correctly and repeatedly
+        expected: status ok
+        '''
+        self.reset_configs(connect)
+        for i in range(20):
+            relpy = connect.set_config("cache.cache_size", '4294967296')
+            config_value = connect.get_config("cache.cache_size")
+            assert config_value == '4294967296'
+        for i in range(20):
+            relpy = connect.set_config("cache.cache_size", '2147483648')
+            config_value = connect.get_config("cache.cache_size")
+            assert config_value == '2147483648'
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.level(2)
+    def test_set_insert_buffer_size_invalid_parent_key(self, connect, collection):
+        '''
+        target: set invalid parent key
+        method: call set_config without parent_key: cache
+        expected: status not ok
+        '''
+        self.reset_configs(connect)
+        invalid_configs = ["Cache_config", "cache config", "cache_Config", "cacheconfig"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config(config+".insert_buffer_size", '1073741824')
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_insert_buffer_size_valid(self, connect, collection):
+        '''
+        target: set insert_buffer_size
+        method: call get_config correctly
+        expected: status ok, set successfully
+        '''
+        self.reset_configs(connect)
+        relpy = connect.set_config("cache.insert_buffer_size", '2GB')
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.level(2)
+    def test_set_insert_buffer_size_valid_multiple_times(self, connect, collection):
+        '''
+        target: set insert_buffer_size
+        method: call get_config correctly and repeatedly
+        expected: status ok
+        '''
+        self.reset_configs(connect)
+        for i in range(20):
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("cache.insert_buffer_size", '1GB')
+        for i in range(20):
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("cache.insert_buffer_size", '2GB')
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_cache_out_of_memory_value_A(self, connect, collection):
+        '''
+        target: set cache_size / insert_buffer_size to be out-of-memory
+        method: call set_config with child values bigger than current system memory
+        expected: status not ok (cache_size + insert_buffer_size < system memory)
+        '''
+        self.reset_configs(connect)
+        mem_total = self.get_memory_total(connect)
+        logging.getLogger().info(mem_total)
+        with pytest.raises(Exception) as e:
+            relpy = connect.set_config("cache.cache_size", str(int(mem_total + 1)+''))
+
+
+
+class TestGPUConfig:
+    """
+    ******************************************************************
+      The following cases are used to test `get_config` function
+    ******************************************************************
+    """
+    @pytest.fixture(scope="function", autouse=True)
+    def skip_http_check(self, args):
+        if args["handler"] == "HTTP":
+            pytest.skip("skip in http mode")
+
+    @pytest.mark.level(2)
+    def test_get_gpu_search_threshold_invalid_parent_key(self, connect, collection):
+        '''
+        target: get invalid parent key
+        method: call get_config without parent_key: gpu
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        invalid_configs = ["Engine_config", "engine config"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config(config+".gpu_search_threshold")
+
+    @pytest.mark.level(2)
+    def test_get_gpu_search_threshold_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: gpu_search_threshold
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        invalid_configs = ["Gpu_search threshold", "gpusearchthreshold"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("gpu."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_gpu_search_threshold_valid(self, connect, collection):
+        '''
+        target: get gpu_search_threshold
+        method: call get_config correctly
+        expected: status ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        config_value = connect.get_config("gpu.gpu_search_threshold")
+        assert config_value
+
+    """
+    ******************************************************************
+      The following cases are used to test `set_config` function
+    ******************************************************************
+    """
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_gpu_invalid_child_key(self, connect, collection):
+        '''
+        target: set invalid child key
+        method: call set_config with invalid child_key
+        expected: status not ok
+        '''
+        invalid_configs = ["abc", 1]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("gpu."+config, 1000)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_gpu_search_threshold_invalid_parent_key(self, connect, collection):
+        '''
+        target: set invalid parent key
+        method: call set_config without parent_key: gpu
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        invalid_configs = ["Engine_config", "engine config"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config(config+".gpu_search_threshold", 1000)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_gpu_search_threshold_valid(self, connect, collection):
+        '''
+        target: set gpu_search_threshold
+        method: call set_config correctly
+        expected: status ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        relpy = connect.set_config("gpu.gpu_search_threshold", 2000)
+        config_value = connect.get_config("gpu.gpu_search_threshold")
+        assert config_value == '2000'
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_gpu_invalid_values(self, connect, collection):
+        '''
+        target: set gpu
+        method: call set_config with invalid child values
+        expected: status not ok
+        '''
+        for i in [-1, "1000\n", "1000\t", "1000.0", 1000.35]:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("gpu.use_blas_threshold", i)
+            if str(connect._cmd("mode")) == "GPU":
+                with pytest.raises(Exception) as e:
+                    relpy = connect.set_config("gpu.gpu_search_threshold", i)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def reset_configs(self, connect):
+        '''
+        reset configs so the tests are stable
+        '''
+        relpy = connect.set_config("gpu.cache_size", 1)
+        config_value = connect.get_config("gpu.cache_size")
+        assert config_value == '1'
+        
+        #follows can not be changed
+        #relpy = connect.set_config("gpu", "enable", "true")
+        #config_value = connect.get_config("gpu", "enable")
+        #assert config_value == "true"
+        #relpy = connect.set_config("gpu", "search_devices", "gpu0")
+        #config_value = connect.get_config("gpu", "search_devices")
+        #assert config_value == 'gpu0'
+        #relpy = connect.set_config("gpu", "build_index_devices", "gpu0")
+        #config_value = connect.get_config("gpu", "build_index_devices")
+        #assert config_value == 'gpu0'
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_gpu_enable_invalid_parent_key(self, connect, collection):
+        '''
+        target: get invalid parent key
+        method: call get_config without parent_key: gpu
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # Misspelled / legacy parent section names must all be rejected.
+        invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+            "gpu_resource"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config(config+".enable")
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_gpu_enable_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: enable
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # Near-miss child key spellings (and a value-looking token) must fail.
+        invalid_configs = ["Enab_le", "enab_le ", "disable", "true"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("gpu."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_gpu_enable_valid(self, connect, collection):
+        '''
+        target: get enable status
+        method: call get_config correctly
+        expected: status ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        config_value = connect.get_config("gpu.enable")
+        # The server reports booleans as the strings "true"/"false".
+        assert config_value == "true" or config_value == "false"
+
+    @pytest.mark.level(2)
+    def test_get_cache_size_invalid_parent_key(self, connect, collection):
+        '''
+        target: get invalid parent key
+        method: call get_config without parent_key: gpu
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # Misspelled / legacy parent section names must all be rejected.
+        invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+            "gpu_resource"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config(config+".cache_size")
+
+    @pytest.mark.level(2)
+    def test_get_cache_size_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: cache_size
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        invalid_configs = ["Cache_capacity", "cachecapacity"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("gpu."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_cache_size_valid(self, connect, collection):
+        '''
+        target: get cache_size
+        method: call get_config correctly
+        expected: status ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # NOTE(review): only verifies the call succeeds; the value itself
+        # is not asserted.
+        config_value = connect.get_config("gpu.cache_size")
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_search_devices_invalid_parent_key(self, connect, collection):
+        '''
+        target: get invalid parent key
+        method: call get_config without parent_key: gpu
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # Misspelled / legacy parent section names must all be rejected.
+        invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+            "gpu_resource"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config(config+".search_devices")
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_search_devices_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: search_devices
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # Legacy child key name must be rejected.
+        invalid_configs = ["Search_resources"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("gpu."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_search_devices_valid(self, connect, collection):
+        '''
+        target: get search_devices
+        method: call get_config correctly
+        expected: status ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # NOTE(review): value is only logged, not asserted.
+        config_value = connect.get_config("gpu.search_devices")
+        logging.getLogger().info(config_value)
+
+    @pytest.mark.level(2)
+    def test_get_build_index_devices_invalid_parent_key(self, connect, collection):
+        '''
+        target: get invalid parent key
+        method: call get_config without parent_key: gpu
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # Misspelled / legacy parent section names must all be rejected.
+        invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+            "gpu_resource"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config(config+".build_index_devices")
+
+    @pytest.mark.level(2)
+    def test_get_build_index_devices_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: build_index_devices
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # Legacy child key name must be rejected.
+        invalid_configs = ["Build_index_resources"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("gpu."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_build_index_devices_valid(self, connect, collection):
+        '''
+        target: get build_index_devices
+        method: call get_config correctly
+        expected: status ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        config_value = connect.get_config("gpu.build_index_devices")
+        logging.getLogger().info(config_value)
+        # A truthy (non-empty) value is expected.
+        assert config_value
+
+    """
+    ******************************************************************
+      The following cases are used to test `set_config` function
+    ******************************************************************
+    """
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_gpu_enable_invalid_parent_key(self, connect, collection):
+        '''
+        target: set invalid parent key
+        method: call set_config without parent_key: gpu
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # Misspelled / legacy parent section names must all be rejected.
+        invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+            "gpu_resource"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config(config+".enable", "true")
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_gpu_invalid_child_key(self, connect, collection):
+        '''
+        target: set invalid child key
+        method: call set_config with invalid child_key
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # NOTE(review): reuses the parent-key misspelling list as child keys;
+        # still invalid under "gpu.", but consider child-specific spellings.
+        invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+            "gpu_resource"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("gpu."+config, "true")
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_gpu_enable_invalid_values(self, connect, collection):
+        '''
+        target: set "enable" param
+        method: call set_config with invalid child values
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # Numeric values are not valid booleans for gpu.enable.
+        for i in [-1, -2, 100]:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("gpu.enable", i)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_gpu_enable_valid(self, connect, collection):
+        '''
+        target: set "enable" param
+        method: call set_config with well-formed boolean-like values
+        expected: exception raised for every value
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # NOTE(review): even well-formed values are expected to raise here,
+        # presumably because gpu.enable is read-only at runtime -- confirm.
+        valid_configs = ["off", "False", "0", "nO", "on", "True", 1, "yES"]
+        for config in valid_configs:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("gpu.enable", config)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_cache_size_invalid_parent_key(self, connect, collection):
+        '''
+        target: set invalid parent key
+        method: call set_config without parent_key: gpu
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # Misspelled / legacy parent section names must all be rejected.
+        invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+            "gpu_resource"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config(config+".cache_size", 2)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_cache_size_valid(self, connect, collection):
+        '''
+        target: set cache_size
+        method: call set_config correctly
+        expected: status ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        # NOTE(review): the new value is not read back with get_config here.
+        relpy = connect.set_config("gpu.cache_size", 2)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_cache_size_invalid_values(self, connect, collection):
+        '''
+        target: set cache_size
+        method: call set_config with invalid child values
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        self.reset_configs(connect)
+        for i in [-1, "1\n", "1\t"]:
+            logging.getLogger().info(i)
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("gpu", "cache_size", i)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_search_devices_invalid_parent_key(self, connect, collection):
+        '''
+        target: set invalid parent key
+        method: call set_config without parent_key: gpu
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+            "gpu_resource"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config(config, "search_devices", "gpu0")
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_search_devices_valid(self, connect, collection):
+        '''
+        target: set search_devices
+        method: call set_config correctly
+        expected: status ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        with pytest.raises(Exception) as e:
+            relpy = connect.set_config("gpu", "search_devices", "gpu0")
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_search_devices_invalid_values(self, connect, collection):
+        '''
+        target: set search_devices
+        method: call set_config with invalid child values
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        for i in [-1, "10", "gpu-1", "gpu0, gpu1", "gpu22,gpu44","gpu10000","gpu 0","-gpu0"]:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("gpu", "search_devices", i)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_build_index_devices_invalid_parent_key(self, connect, collection):
+        '''
+        target: set invalid parent key
+        method: call set_config without parent_key: gpu
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+            "gpu_resource"]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config(config, "build_index_devices", "gpu0")
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_build_index_devices_valid(self, connect, collection):
+        '''
+        target: set build_index_devices
+        method: call set_config correctly
+        expected: status ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        with pytest.raises(Exception) as e:
+            relpy = connect.set_config("gpu", "build_index_devices", "gpu0")
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_build_index_devices_invalid_values(self, connect, collection):
+        '''
+        target: set build_index_devices
+        method: call set_config with invalid child values
+        expected: status not ok
+        '''
+        if str(connect._cmd("mode")) == "CPU":
+            pytest.skip("Only support GPU mode")
+        for i in [-1, "10", "gpu-1", "gpu0, gpu1", "gpu22,gpu44","gpu10000","gpu 0","-gpu0"]:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("gpu", "build_index_devices", i)
+        self.reset_configs(connect)
+
+
+class TestNetworkConfig:
+    """
+    ******************************************************************
+      The following cases are used to test `get_config` function
+    ******************************************************************
+    """
+    # Network configs are not reachable over the HTTP handler; skip there.
+    @pytest.fixture(scope="function", autouse=True)
+    def skip_http_check(self, args):
+        if args["handler"] == "HTTP":
+            pytest.skip("skip in http mode")
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_address_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: address
+        expected: status not ok
+        '''
+        # Near-miss child key spellings must all be rejected.
+        invalid_configs = ["Address", "addresses", "address "]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("network."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_address_valid(self, connect, collection):
+        '''
+        target: get address
+        method: call get_config correctly
+        expected: status ok
+        '''
+        config_value = connect.get_config("network.bind.address")
+
+    @pytest.mark.level(2)
+    def test_get_port_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: port
+        expected: status not ok
+        '''
+        invalid_configs = ["Port", "PORT", "port "]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("network."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_port_valid(self, connect, collection):
+        '''
+        target: get port
+        method: call get_config correctly
+        expected: status ok
+        '''
+        # NOTE(review): queries "network.http.port", same as
+        # test_get_http_port_valid below -- possibly meant to query the bind
+        # port key instead; confirm against the server's config schema.
+        config_value = connect.get_config("network.http.port")
+        assert config_value
+
+    @pytest.mark.level(2)
+    def test_get_http_port_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: http.port
+        expected: status not ok
+        '''
+        invalid_configs = ["webport", "Web_port", "http port "]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("network."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_http_port_valid(self, connect, collection):
+        '''
+        target: get http.port
+        method: call get_config correctly
+        expected: status ok
+        '''
+        config_value = connect.get_config("network.http.port")
+        assert config_value
+
+    """
+    ******************************************************************
+      The following cases are used to test `set_config` function
+    ******************************************************************
+    """
+    # Helper producing every timezone string the server should accept
+    # (UTC-12 .. UTC+14); not a test case itself.
+    def gen_valid_timezones(self):
+        timezones = []
+        for i in range(0, 13):
+            timezones.append("UTC+" + str(i))
+            timezones.append("UTC-" + str(i))
+        timezones.extend(["UTC+13", "UTC+14"])
+        return timezones
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_network_invalid_child_key(self, connect, collection):
+        '''
+        target: set invalid child key
+        method: call set_config with invalid child_key
+        expected: status not ok
+        '''
+        with pytest.raises(Exception) as e:
+            relpy = connect.set_config("network.child_key", 19530)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_address_valid(self, connect, collection):
+        '''
+        target: set address
+        method: call set_config correctly
+        expected: status ok, set successfully
+        '''
+        relpy = connect.set_config("network.bind.address", '0.0.0.0')
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    def test_set_port_valid(self, connect, collection):
+        '''
+        target: set port
+        method: call set_config correctly
+        expected: status ok, set successfully
+        '''
+        # Ports are accepted as either int or numeric string.
+        for valid_port in [1025, 65534, 12345, "19530"]:
+            relpy = connect.set_config("network.http.port", valid_port)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    def test_set_port_invalid(self, connect, collection):
+        '''
+        target: set port
+        method: call set_config with port number out of range(1024, 65535)
+        expected: status not ok
+        '''
+        # Boundary values 1024/65535 are exclusive, hence invalid.
+        for invalid_port in [1024, 65535, "0", "True", "100000"]:
+            logging.getLogger().info(invalid_port)
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("network.http.port", invalid_port)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    def test_set_http_port_valid(self, connect, collection):
+        '''
+        target: set http.port
+        method: call set_config correctly
+        expected: status ok, set successfully
+        '''
+        for valid_http_port in [1025, 65534, "12345", 19121]:
+            relpy = connect.set_config("network.http.port", valid_http_port)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    def test_set_http_port_invalid(self, connect, collection):
+        '''
+        target: set http.port
+        method: call set_config with http.port number out of range(1024, 65535)
+        expected: status not ok
+        '''
+        for invalid_http_port in [1024, 65535, "0", "True", "1000000"]:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("network.http.port", invalid_http_port)
+
+class TestGeneralConfig:
+    """
+    ******************************************************************
+      The following cases are used to test `get_config` function
+    ******************************************************************
+    """
+    # General configs are not reachable over the HTTP handler; skip there.
+    @pytest.fixture(scope="function", autouse=True)
+    def skip_http_check(self, args):
+        if args["handler"] == "HTTP":
+            pytest.skip("skip in http mode")
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_meta_uri_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: meta_uri
+        expected: status not ok
+        '''
+        # Legacy/misspelled key names must all be rejected.
+        invalid_configs = ["backend_Url", "backend-url", "meta uri "]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("general."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_meta_uri_valid(self, connect, collection):
+        '''
+        target: get meta_uri
+        method: call get_config correctly
+        expected: status ok
+        '''
+        config_value = connect.get_config("general.meta_uri")
+        assert config_value
+
+    @pytest.mark.level(2)
+    def test_get_timezone_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: timezone
+        expected: status not ok
+        '''
+        invalid_configs = ["time", "time_zone "]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("general."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_timezone_valid(self, connect, collection):
+        '''
+        target: get timezone
+        method: call get_config correctly
+        expected: status ok
+        '''
+        config_value = connect.get_config("general.timezone")
+        # Timezones are reported in "UTC+/-N" form.
+        assert "UTC" in config_value
+
+    """
+    ******************************************************************
+      The following cases are used to test `set_config` function
+    ******************************************************************
+    """
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    def test_set_timezone_invalid(self, connect, collection):
+        '''
+        target: set timezone
+        method: call set_config with invalid timezone
+        expected: status not ok
+        '''
+        for invalid_timezone in ["utc++8", "UTC++8"]:
+            logging.getLogger().info(invalid_timezone)
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("general.timezone", invalid_timezone)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_general_invalid_child_key(self, connect, collection):
+        '''
+        target: set invalid child key
+        method: call set_config with invalid child_key
+        expected: status not ok
+        '''
+        with pytest.raises(Exception) as e:
+            relpy = connect.set_config("general.child_key", 1)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_meta_uri_valid(self, connect, collection):
+        '''
+        target: set meta_uri
+        method: call set_config correctly
+        expected: status ok, set successfully
+        '''
+        relpy = connect.set_config("general.meta_uri", 'sqlite://:@:/')
+
+class TestStorageConfig:
+    """
+    ******************************************************************
+      The following cases are used to test `get_config` function
+    ******************************************************************
+    """
+    # Storage configs are not reachable over the HTTP handler; skip there.
+    @pytest.fixture(scope="function", autouse=True)
+    def skip_http_check(self, args):
+        if args["handler"] == "HTTP":
+            pytest.skip("skip in http mode")
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_path_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: path
+        expected: status not ok
+        '''
+        # Legacy/misspelled key names must all be rejected.
+        invalid_configs = ["Primary_path", "primarypath", "pa_th "]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("storage."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_path_valid(self, connect, collection):
+        '''
+        target: get path
+        method: call get_config correctly
+        expected: status ok
+        '''
+        config_value = connect.get_config("storage.path")
+        assert config_value
+
+    @pytest.mark.level(2)
+    def test_get_auto_flush_interval_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: auto_flush_interval
+        expected: status not ok
+        '''
+        invalid_configs = ["autoFlushInterval", "auto_flush", "auto_flush interval "]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("storage."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_auto_flush_interval_valid(self, connect, collection):
+        '''
+        target: get auto_flush_interval
+        method: call get_config correctly
+        expected: status ok
+        '''
+        # NOTE(review): only verifies the call succeeds; value not asserted.
+        config_value = connect.get_config("storage.auto_flush_interval")
+
+    """
+    ******************************************************************
+      The following cases are used to test `set_config` function
+    ******************************************************************
+    """
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_storage_invalid_child_key(self, connect, collection):
+        '''
+        target: set invalid child key
+        method: call set_config with invalid child_key
+        expected: status not ok
+        '''
+        with pytest.raises(Exception) as e:
+            relpy = connect.set_config("storage.child_key", "")
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_path_valid(self, connect, collection):
+        '''
+        target: set path
+        method: call set_config correctly
+        expected: status ok, set successfully
+        '''
+        relpy = connect.set_config("storage.path", '/var/lib/milvus')
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    def test_set_auto_flush_interval_valid(self, connect, collection):
+        '''
+        target: set auto_flush_interval
+        method: call set_config correctly
+        expected: status ok, set successfully
+        '''
+        for valid_auto_flush_interval in [2, 1]:
+            logging.getLogger().info(valid_auto_flush_interval)
+            relpy = connect.set_config("storage.auto_flush_interval", valid_auto_flush_interval)
+            # get_config returns string values, hence the str() compare.
+            config_value = connect.get_config("storage.auto_flush_interval")
+            assert config_value == str(valid_auto_flush_interval)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    def test_set_auto_flush_interval_invalid(self, connect, collection):
+        '''
+        target: set auto_flush_interval
+        method: call set_config with invalid auto_flush_interval
+        expected: status not ok
+        '''
+        for invalid_auto_flush_interval in [-1, "1.5", "invalid", "1+2"]:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("storage.auto_flush_interval", invalid_auto_flush_interval)
+
+class TestMetricConfig:
+    """
+    ******************************************************************
+      The following cases are used to test `get_config` function
+    ******************************************************************
+    """
+    # Metric configs are not reachable over the HTTP handler; skip there.
+    @pytest.fixture(scope="function", autouse=True)
+    def skip_http_check(self, args):
+        if args["handler"] == "HTTP":
+            pytest.skip("skip in http mode")
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_enable_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: enable
+        expected: status not ok
+        '''
+        # Legacy/misspelled key names must all be rejected.
+        invalid_configs = ["enablemonitor", "Enable_monitor", "en able "]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("metric."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_enable_valid(self, connect, collection):
+        '''
+        target: get enable
+        method: call get_config correctly
+        expected: status ok
+        '''
+        config_value = connect.get_config("metric.enable")
+        assert config_value
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_address_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: address
+        expected: status not ok
+        '''
+        invalid_configs = ["Add ress", "addresses", "add ress "]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("metric."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_address_valid(self, connect, collection):
+        '''
+        target: get address
+        method: call get_config correctly
+        expected: status ok
+        '''
+        config_value = connect.get_config("metric.address")
+        assert config_value
+
+    @pytest.mark.level(2)
+    def test_get_port_invalid_child_key(self, connect, collection):
+        '''
+        target: get invalid child key
+        method: call get_config without child_key: port
+        expected: status not ok
+        '''
+        invalid_configs = ["Po_rt", "PO_RT", "po_rt "]
+        for config in invalid_configs:
+            with pytest.raises(Exception) as e:
+                config_value = connect.get_config("metric."+config)
+
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_get_port_valid(self, connect, collection):
+        '''
+        target: get port
+        method: call get_config correctly
+        expected: status ok
+        '''
+        config_value = connect.get_config("metric.port")
+        assert config_value
+
+    """
+    ******************************************************************
+      The following cases are used to test `set_config` function
+    ******************************************************************
+    """
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_metric_invalid_child_key(self, connect, collection):
+        '''
+        target: set invalid child key
+        method: call set_config with invalid child_key
+        expected: status not ok
+        '''
+        with pytest.raises(Exception) as e:
+            relpy = connect.set_config("metric.child_key", 19530)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    def test_set_enable_valid(self, connect, collection):
+        '''
+        target: set enable
+        method: call set_config correctly
+        expected: status ok, set successfully
+        '''
+        # Booleans are passed as the strings "true"/"false".
+        for valid_enable in ["false", "true"]:
+            relpy = connect.set_config("metric.enable", valid_enable)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    @pytest.mark.timeout(CONFIG_TIMEOUT)
+    def test_set_address_valid(self, connect, collection):
+        '''
+        target: set address
+        method: call set_config correctly
+        expected: status ok, set successfully
+        '''
+        relpy = connect.set_config("metric.address", '127.0.0.1')
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    def test_set_port_valid(self, connect, collection):
+        '''
+        target: set port
+        method: call set_config correctly
+        expected: status ok, set successfully
+        '''
+        for valid_port in [1025, 65534, "19530", "9091"]:
+            relpy = connect.set_config("metric.port", valid_port)
+
+    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+    def test_set_port_invalid(self, connect, collection):
+        '''
+        target: set port
+        method: call set_config with port number out of range(1024, 65535), or same as http.port number
+        expected: status not ok
+        '''
+        for invalid_port in [1024, 65535, "0", "True", "100000"]:
+            with pytest.raises(Exception) as e:
+                relpy = connect.set_config("metric.port", invalid_port)
+
+
class TestWALConfig:
    """
    ******************************************************************
      The following cases are used to test `get_config` function
    ******************************************************************
    """
    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        # The config get/set API is not exposed over the HTTP handler.
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

    @pytest.mark.timeout(CONFIG_TIMEOUT)
    def test_get_enable_invalid_child_key(self, connect, collection):
        '''
        target: get invalid child key
        method: call get_config with misspelled variants of child_key: enable
        expected: error raised
        '''
        invalid_configs = ["enabled", "Enab_le", "enable_"]
        for config in invalid_configs:
            with pytest.raises(Exception):
                connect.get_config("wal." + config)

    @pytest.mark.timeout(CONFIG_TIMEOUT)
    def test_get_enable_valid(self, connect, collection):
        '''
        target: get enable
        method: call get_config correctly
        expected: a truthy value is returned
        '''
        config_value = connect.get_config("wal.enable")
        assert config_value

    @pytest.mark.timeout(CONFIG_TIMEOUT)
    def test_get_recovery_error_ignore_invalid_child_key(self, connect, collection):
        '''
        target: get invalid child key
        method: call get_config with misspelled variants of child_key: recovery_error_ignore
        expected: error raised
        '''
        invalid_configs = ["recovery-error-ignore", "Recovery error_ignore", "recoveryxerror_ignore "]
        for config in invalid_configs:
            with pytest.raises(Exception):
                connect.get_config("wal." + config)

    @pytest.mark.timeout(CONFIG_TIMEOUT)
    def test_get_recovery_error_ignore_valid(self, connect, collection):
        '''
        target: get recovery_error_ignore
        method: call get_config correctly
        expected: a truthy value is returned
        '''
        config_value = connect.get_config("wal.recovery_error_ignore")
        assert config_value

    @pytest.mark.level(2)
    def test_get_buffer_size_invalid_child_key(self, connect, collection):
        '''
        target: get invalid child key
        method: call get_config with misspelled variants of child_key: buffer_size
        expected: error raised
        '''
        invalid_configs = ["buffersize", "Buffer size", "buffer size "]
        for config in invalid_configs:
            with pytest.raises(Exception):
                connect.get_config("wal." + config)

    @pytest.mark.timeout(CONFIG_TIMEOUT)
    def test_get_buffer_size_valid(self, connect, collection):
        '''
        target: get buffer_size
        method: call get_config correctly
        expected: a truthy value is returned
        '''
        config_value = connect.get_config("wal.buffer_size")
        assert config_value

    @pytest.mark.level(2)
    def test_get_wal_path_invalid_child_key(self, connect, collection):
        '''
        target: get invalid child key
        method: call get_config with misspelled variants of child_key: path
        expected: error raised
        '''
        invalid_configs = ["wal", "Wal_path", "wal_path "]
        for config in invalid_configs:
            with pytest.raises(Exception):
                connect.get_config("wal." + config)

    @pytest.mark.timeout(CONFIG_TIMEOUT)
    def test_get_wal_path_valid(self, connect, collection):
        '''
        target: get wal path
        method: call get_config correctly
        expected: a truthy value is returned
        '''
        config_value = connect.get_config("wal.path")
        assert config_value

    """
    ******************************************************************
      The following cases are used to test `set_config` function
    ******************************************************************
    """
    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
    @pytest.mark.timeout(CONFIG_TIMEOUT)
    def test_set_wal_invalid_child_key(self, connect, collection):
        '''
        target: set invalid child key
        method: call set_config with invalid child_key
        expected: error raised
        '''
        # Fixed: dropped the misspelled, unused local `relpy` that the
        # original bound the result to.
        with pytest.raises(Exception):
            connect.set_config("wal.child_key", 256)

    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
    def test_set_enable_valid(self, connect, collection):
        '''
        target: set enable
        method: call set_config correctly with both boolean string values
        expected: set successfully
        '''
        for valid_enable in ["false", "true"]:
            connect.set_config("wal.enable", valid_enable)

    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
    def test_set_recovery_error_ignore_valid(self, connect, collection):
        '''
        target: set recovery_error_ignore
        method: call set_config correctly with both boolean string values
        expected: set successfully
        '''
        for valid_recovery_error_ignore in ["false", "true"]:
            connect.set_config("wal.recovery_error_ignore", valid_recovery_error_ignore)

    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
    def test_set_buffer_size_valid_A(self, connect, collection):
        '''
        target: set buffer_size
        method: call set_config with several size strings
        expected: set successfully
        '''
        for valid_buffer_size in ["64MB", "128MB", "4096MB", "1000MB", "256MB"]:
            connect.set_config("wal.buffer_size", valid_buffer_size)

    @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
    @pytest.mark.timeout(CONFIG_TIMEOUT)
    def test_set_wal_path_valid(self, connect, collection, args):
        '''
        target: set wal path
        method: call set_config correctly
        expected: set successfully
        '''
        connect.set_config("wal.path", "/var/lib/milvus/wal")
+
diff --git a/tests/python_test/test_connect.py b/tests/python_test/test_connect.py
new file mode 100644
index 000000000..9d4dc5a1b
--- /dev/null
+++ b/tests/python_test/test_connect.py
@@ -0,0 +1,232 @@
+import pytest
+import pdb
+import threading
+from multiprocessing import Process
+import concurrent.futures
+from utils import *
+
+CONNECT_TIMEOUT = 12
+
+
class TestConnect:

    def local_ip(self, args):
        '''
        Return True when args["ip"] refers to the local machine
        (unset/empty, "localhost" or "127.0.0.1").
        '''
        return not args["ip"] or args["ip"] in ("localhost", "127.0.0.1")

    # TODO: remove
    def _test_disconnect(self, connect):
        '''
        target: test disconnect
        method: disconnect a connected client
        expected: using the client after close() fails
        '''
        res = connect.close()
        # Fixed: the original line read `res = connect.()`, a SyntaxError
        # that would have prevented this module from importing had the
        # test been enabled.
        with pytest.raises(Exception) as e:
            res = connect.close()

    # TODO: remove
    def _test_disconnect_repeatedly(self, dis_connect, args):
        '''
        target: test disconnect repeatedly
        method: disconnect a connected client, disconnect again
        expected: raise an error after disconnected
        '''
        with pytest.raises(Exception) as e:
            # Fixed: `connect` was an undefined name here; the fixture
            # parameter is `dis_connect`.
            dis_connect.close()

    def test_connect_correct_ip_port(self, args):
        '''
        target: test connect with correct ip and port value
        method: set correct ip and port
        expected: connected is True
        '''
        milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
        # assert milvus.connected()

    # TODO: Currently we test with remote IP, localhost testing need to add
    def _test_connect_ip_localhost(self, args):
        '''
        target: test connect with ip value: localhost
        method: set host localhost
        expected: connected is True
        '''
        milvus = get_milvus(args["ip"], args["port"], args["handler"])
        # milvus.connect(host='localhost', port=args["port"])
        # assert milvus.connected()

    @pytest.mark.timeout(CONNECT_TIMEOUT)
    def test_connect_wrong_ip_null(self, args):
        '''
        target: test connect with wrong ip value
        method: set host null
        expected: not use default ip, connection raises
        '''
        ip = ""
        with pytest.raises(Exception) as e:
            milvus = get_milvus(ip, args["port"], args["handler"])
            # assert not milvus.connected()

    def test_connect_uri(self, args):
        '''
        target: test connect with correct uri
        method: uri format and value are both correct
        expected: connected is True
        '''
        uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
        milvus = get_milvus(args["ip"], args["port"], uri=uri_value, handler=args["handler"])
        # assert milvus.connected()

    def test_connect_uri_null(self, args):
        '''
        target: test connect with null uri
        method: uri set null
        expected: a local server connects; a remote one raises
        '''
        uri_value = ""
        if self.local_ip(args):
            milvus = get_milvus(None, None, uri=uri_value, handler=args["handler"])
            # assert milvus.connected()
        else:
            with pytest.raises(Exception) as e:
                milvus = get_milvus(None, None, uri=uri_value, handler=args["handler"])
                # assert not milvus.connected()

    def test_connect_with_multiprocess(self, args):
        '''
        target: test uri connect with multiprocess
        method: set correct uri, test with multiprocessing connecting
        expected: all connections succeed
        '''
        # Fixed: removed the unused `processes` list.
        def connect():
            milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
            assert milvus

        with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
            future_results = {executor.submit(connect): i for i in range(100)}
            for future in concurrent.futures.as_completed(future_results):
                future.result()  # re-raises any worker failure

    def test_connect_repeatedly(self, args):
        '''
        target: test connect repeatedly
        method: connect again
        expected: status.code is 0, and status.message shows have connected already
        '''
        uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
        milvus = Milvus(uri=uri_value, handler=args["handler"])
        # milvus.connect(uri=uri_value, timeout=5)
        # milvus.connect(uri=uri_value, timeout=5)
        milvus = Milvus(uri=uri_value, handler=args["handler"])
        # assert milvus.connected()

    def _test_add_vector_and_disconnect_concurrently(self):
        '''
        Target: test disconnect in the middle of add vectors
        Method:
            a. use coroutine or multi-processing, to simulate network crashing
            b. data_set not too large in case disconnection happens while data is under preparation
            c. data_set not too small in case disconnection happens when data has already been transferred
            d. make sure disconnection happens when data is in-transport
        Expected: Failure, count_entities == 0
        '''
        pass

    def _test_search_vector_and_disconnect_concurrently(self):
        '''
        Target: Test disconnect in the middle of search vectors (with large nq and topk) multiple times, and search/add vectors still work
        Method:
            a. coroutine or multi-processing, to simulate network crashing
            b. connect, search and disconnect, repeating many times
            c. connect and search, add vectors
        Expected: Successfully searched back, successfully added
        '''
        pass

    def _test_thread_safe_with_one_connection_shared_in_multi_threads(self):
        '''
        Target: test 1 connection thread safe
        Method: 1 connection shared in multi-threads, all adding vectors, or other things
        Expected: Functional as one thread
        '''
        # Fixed: the original body used a non-standard 3-space indent.
        pass
+
+
class TestConnectIPInvalid(object):
    """
    Test connect server with invalid ip
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_ips()
    )
    def get_invalid_ip(self, request):
        # One malformed ip string per parametrized run.
        yield request.param

    @pytest.mark.level(2)
    @pytest.mark.timeout(CONNECT_TIMEOUT)
    def test_connect_with_invalid_ip(self, args, get_invalid_ip):
        """Connecting to a malformed ip must raise."""
        bad_ip = get_invalid_ip
        with pytest.raises(Exception):
            get_milvus(bad_ip, args["port"], args["handler"])
            # assert not milvus.connected()
+
+
class TestConnectPortInvalid(object):
    """
    Test connect server with invalid port
    """
    # Fixed: the class docstring said "invalid ip" although this class
    # exercises invalid ports.

    @pytest.fixture(
        scope="function",
        params=gen_invalid_ints()
    )
    def get_invalid_port(self, request):
        # One malformed port value per parametrized run.
        yield request.param

    @pytest.mark.level(2)
    @pytest.mark.timeout(CONNECT_TIMEOUT)
    def test_connect_with_invalid_port(self, args, get_invalid_port):
        '''
        target: test ip:port connect with invalid port value
        method: set port in gen_invalid_ints
        expected: connection attempt raises
        '''
        port = get_invalid_port
        with pytest.raises(Exception):
            get_milvus(args["ip"], port, args["handler"])
            # assert not milvus.connected()
+
+
class TestConnectURIInvalid(object):
    """
    Test connect server with invalid uri
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_uris()
    )
    def get_invalid_uri(self, request):
        # One malformed uri string per parametrized run.
        yield request.param

    @pytest.mark.level(2)
    @pytest.mark.timeout(CONNECT_TIMEOUT)
    def test_connect_with_invalid_uri(self, get_invalid_uri, args):
        """
        target: test uri connect with invalid uri value
        method: set uri in gen_invalid_uris
        expected: connection attempt raises
        """
        bad_uri = get_invalid_uri
        with pytest.raises(Exception):
            get_milvus(uri=bad_uri, handler=args["handler"])
            # assert not milvus.connected()
diff --git a/tests/python_test/test_flush.py b/tests/python_test/test_flush.py
new file mode 100644
index 000000000..a6d831ac9
--- /dev/null
+++ b/tests/python_test/test_flush.py
@@ -0,0 +1,353 @@
+import time
+import pdb
+import threading
+import logging
+from multiprocessing import Pool, Process
+import pytest
+from utils import *
+from constants import *
+
DELETE_TIMEOUT = 60  # seconds allowed for delete-related operations
# Single-vector, top-10, L2 query reused by the search-after-flush checks below.
default_single_query = {
    "bool": {
        "must": [
            {"vector": {default_float_vec_field_name: {"topk": 10, "query": gen_vectors(1, default_dim),
                                                       "metric_type": "L2", "params": {"nprobe": 10}}}}
        ]
    }
}
+
+
class TestFlushBase:
    """
    ******************************************************************
      The following cases are used to test `flush` function
    ******************************************************************
    """

    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        # In GPU mode only the ivf family is buildable here; skip anything else.
        if str(connect._cmd("mode")[1]) == "GPU":
            if request.param["index_type"] not in ivf():
                pytest.skip("Only support index_type: idmap/flat")
        return request.param

    @pytest.fixture(
        scope="function",
        params=gen_single_filter_fields()
    )
    def get_filter_field(self, request):
        # Yields one scalar (filter) field schema per parametrized run.
        yield request.param

    @pytest.fixture(
        scope="function",
        params=gen_single_vector_fields()
    )
    def get_vector_field(self, request):
        # Yields one vector field schema per parametrized run.
        yield request.param

    def test_flush_collection_not_existed(self, connect, collection):
        '''
        target: test flush, params collection_name not existed
        method: flush, with collection not existed
        expected: error raised
        '''
        collection_new = gen_unique_str("test_flush_1")
        with pytest.raises(Exception) as e:
            connect.flush([collection_new])

    def test_flush_empty_collection(self, connect, collection):
        '''
        method: flush collection with no vectors, then insert
        expected: no error raised
        '''
        connect.flush([collection])
        ids = connect.insert(collection, default_entities)
        assert len(ids) == default_nb
        # status = connect.delete_entity_by_id(collection, ids)
        # assert status.OK()
        # connect.flush([collection])
        res = connect.get_collection_stats(collection)
        # print(res)
        # assert res == default_nb
        # with pytest.raises(Exception) as e:
        #     connect.flush([collection])

    def test_add_partition_flush(self, connect, id_collection):
        '''
        method: add entities into partition in collection, flush several times
        expected: the length of ids matches and the collection row count doubles
        '''
        connect.create_partition(id_collection, default_tag)
        ids = [i for i in range(default_nb)]
        ids = connect.insert(id_collection, default_entities, ids)
        connect.flush([id_collection])
        res_count = connect.get_collection_stats(id_collection)
        assert res_count["row_count"] == default_nb
        # Second insert targets the named partition; ids may repeat.
        ids = connect.insert(id_collection, default_entities, ids, partition_tag=default_tag)
        assert len(ids) == default_nb
        connect.flush([id_collection])
        res_count = connect.get_collection_stats(id_collection)
        assert res_count["row_count"] == default_nb * 2

    def test_add_partitions_flush(self, connect, id_collection):
        '''
        method: add entities into two partitions of a collection, flush after each
        expected: row count reflects both inserts
        '''
        tag_new = gen_unique_str()
        connect.create_partition(id_collection, default_tag)
        connect.create_partition(id_collection, tag_new)
        ids = [i for i in range(default_nb)]
        ids = connect.insert(id_collection, default_entities, ids, partition_tag=default_tag)
        connect.flush([id_collection])
        ids = connect.insert(id_collection, default_entities, ids, partition_tag=tag_new)
        connect.flush([id_collection])
        res = connect.get_collection_stats(id_collection)
        assert res["row_count"] == 2 * default_nb

    def test_add_collections_flush(self, connect, id_collection):
        '''
        method: add entities into two collections, flush each
        expected: each collection reports its own row count
        '''
        collection_new = gen_unique_str()
        default_fields = gen_default_fields(False)
        connect.create_collection(collection_new, default_fields)
        connect.create_partition(id_collection, default_tag)
        connect.create_partition(collection_new, default_tag)
        ids = [i for i in range(default_nb)]
        # ids = connect.insert(id_collection, default_entities, ids, partition_tag=default_tag)
        # ids = connect.insert(collection_new, default_entities, ids, partition_tag=default_tag)
        connect.insert(id_collection, default_entities, ids, partition_tag=default_tag)
        connect.insert(collection_new, default_entities, ids, partition_tag=default_tag)
        connect.flush([id_collection])
        connect.flush([collection_new])
        res = connect.get_collection_stats(id_collection)
        assert res["row_count"] == default_nb
        res = connect.get_collection_stats(collection_new)
        assert res["row_count"] == default_nb

    def test_add_collections_fields_flush(self, connect, id_collection, get_filter_field, get_vector_field):
        '''
        method: create collection with different fields, and add entities into both collections, flush each
        expected: each collection reports its own row count
        '''
        nb_new = 5
        filter_field = get_filter_field
        vector_field = get_vector_field
        collection_new = gen_unique_str("test_flush")
        fields = {
            "fields": [filter_field, vector_field],
            "segment_row_limit": default_segment_row_limit,
            "auto_id": False
        }
        connect.create_collection(collection_new, fields)
        connect.create_partition(id_collection, default_tag)
        connect.create_partition(collection_new, default_tag)
        entities_new = gen_entities_by_fields(fields["fields"], nb_new, default_dim)
        ids = [i for i in range(default_nb)]
        ids_new = [i for i in range(nb_new)]
        # ids = connect.insert(id_collection, default_entities, ids, partition_tag=default_tag)
        # ids = connect.insert(collection_new, entities_new, ids_new, partition_tag=default_tag)
        connect.insert(id_collection, default_entities, ids, partition_tag=default_tag)
        connect.insert(collection_new, entities_new, ids_new, partition_tag=default_tag)
        connect.flush([id_collection])
        connect.flush([collection_new])
        res = connect.get_collection_stats(id_collection)
        assert res["row_count"] == default_nb
        res = connect.get_collection_stats(collection_new)
        assert res["row_count"] == nb_new

    def test_add_flush_multiable_times(self, connect, collection):
        '''
        method: add entities, flush several times
        expected: no error raised; repeated flushes are idempotent
        '''
        ids = connect.insert(collection, default_entities)
        for i in range(10):
            connect.flush([collection])
        res = connect.get_collection_stats(collection)
        assert res["row_count"] == len(ids)
        # query_vecs = [vectors[0], vectors[1], vectors[-1]]
        res = connect.search(collection, default_single_query)
        logging.getLogger().debug(res)
        assert res

    def test_add_flush_auto(self, connect, id_collection):
        '''
        method: add entities without an explicit flush
        expected: auto-flush makes the rows visible within the timeout
        '''
        ids = [i for i in range(default_nb)]
        ids = connect.insert(id_collection, default_entities, ids)
        timeout = 20
        start_time = time.time()
        # Poll the row count until the server's auto-flush lands or we time out.
        while (time.time() - start_time < timeout):
            time.sleep(1)
            res = connect.get_collection_stats(id_collection)
            if res["row_count"] == default_nb:
                break
        if time.time() - start_time > timeout:
            assert False

    @pytest.fixture(
        scope="function",
        params=[
            1,
            100
        ],
    )
    def same_ids(self, request):
        # Threshold below which ids are collapsed to 0 in the next test.
        yield request.param

    def test_add_flush_same_ids(self, connect, id_collection, same_ids):
        '''
        method: add entities, with some duplicated ids (count below/above 15)
        expected: row count still equals the number of inserted entities
        '''
        ids = [i for i in range(default_nb)]
        for i, item in enumerate(ids):
            if item <= same_ids:
                ids[i] = 0
        ids = connect.insert(id_collection, default_entities, ids)
        connect.flush([id_collection])
        res = connect.get_collection_stats(id_collection)
        assert res["row_count"] == default_nb

    def test_delete_flush_multiable_times(self, connect, collection):
        '''
        method: delete entities, flush several times
        expected: no error raised; search still works
        '''
        ids = connect.insert(collection, default_entities)
        status = connect.delete_entity_by_id(collection, [ids[-1]])
        assert status.OK()
        for i in range(10):
            connect.flush([collection])
        # query_vecs = [vectors[0], vectors[1], vectors[-1]]
        res = connect.search(collection, default_single_query)
        logging.getLogger().debug(res)
        assert res

    # TODO: unable to set config
    @pytest.mark.level(2)
    def _test_collection_count_during_flush(self, connect, collection, args):
        '''
        method: flush collection at background, call `get_collection_stats`
        expected: no timeout
        '''
        ids = []
        for i in range(5):
            tmp_ids = connect.insert(collection, default_entities)
            connect.flush([collection])
            ids.extend(tmp_ids)
        # NOTE(review): disable_flush presumably turns off server-side
        # auto-flush so the background flush below stays in flight — confirm.
        disable_flush(connect)
        status = connect.delete_entity_by_id(collection, ids)

        def flush():
            milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
            logging.error("start flush")
            milvus.flush([collection])
            logging.error("end flush")

        p = TestThread(target=flush, args=())
        p.start()
        time.sleep(0.2)
        logging.error("start count")
        res = connect.get_collection_stats(collection, timeout=10)
        p.join()
        res = connect.get_collection_stats(collection)
        assert res["row_count"] == 0

    @pytest.mark.level(2)
    def test_delete_flush_during_search(self, connect, collection, args):
        '''
        method: search at background, call `delete and flush`
        expected: no timeout
        '''
        ids = []
        loops = 5
        for i in range(loops):
            tmp_ids = connect.insert(collection, default_entities)
            connect.flush([collection])
            ids.extend(tmp_ids)
        nq = 10000
        query, query_vecs = gen_query_vectors(default_float_vec_field_name, default_entities, default_top_k, nq)
        time.sleep(0.1)
        future = connect.search(collection, query, _async=True)
        delete_ids = [ids[0], ids[-1]]
        status = connect.delete_entity_by_id(collection, delete_ids)
        connect.flush([collection])
        res = future.result()
        res_count = connect.get_collection_stats(collection, timeout=120)
        assert res_count["row_count"] == loops * default_nb - len(delete_ids)
+
+
class TestFlushAsync:
    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        # Async flush is exercised over gRPC only.
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

    """
    ******************************************************************
      The following cases are used to test `flush` function
    ******************************************************************
    """

    def check_status(self):
        # Callback handed to flush(_callback=...); only logs that it ran.
        logging.getLogger().info("In callback check status")

    def test_flush_empty_collection(self, connect, collection):
        '''
        method: async-flush a collection with no vectors
        expected: future resolves without error
        '''
        future = connect.flush([collection], _async=True)
        future.result()  # wait for completion; raises on failure

    def test_flush_async_long(self, connect, collection):
        # Async flush after a bulk insert must complete cleanly.
        # Fixed: dropped the unused `ids`/`status` locals.
        connect.insert(collection, default_entities)
        future = connect.flush([collection], _async=True)
        future.result()

    def test_flush_async_long_drop_collection(self, connect, collection):
        # Dropping the collection while an async flush is in flight must
        # not raise; the flush future is deliberately not awaited.
        for i in range(5):
            connect.insert(collection, default_entities)
        connect.flush([collection], _async=True)
        logging.getLogger().info("DROP")
        connect.drop_collection(collection)

    def test_flush_async(self, connect, collection):
        # Async flush with a completion callback.
        connect.insert(collection, default_entities)
        logging.getLogger().info("before")
        future = connect.flush([collection], _async=True, _callback=self.check_status)
        logging.getLogger().info("after")
        future.done()
        future.result()
+
+
class TestCollectionNameInvalid(object):
    """
    Test flushing with invalid collection names
    """
    # Fixed: the class docstring said "adding vectors" although this class
    # exercises flush.

    @pytest.fixture(
        scope="function",
        # params=gen_invalid_collection_names()
        params=gen_invalid_strs()
    )
    def get_invalid_collection_name(self, request):
        yield request.param

    @pytest.mark.level(2)
    def test_flush_with_invalid_collection_name(self, connect, get_invalid_collection_name):
        '''
        target: flush with an invalid collection name
        expected: error raised
        '''
        collection_name = get_invalid_collection_name
        # `not collection_name` already covers None; the original duplicated
        # the check with `is None or not ...`.
        if not collection_name:
            pytest.skip("while collection_name is None, then flush all collections")
        with pytest.raises(Exception) as e:
            connect.flush(collection_name)
diff --git a/tests/python_test/test_index.py b/tests/python_test/test_index.py
new file mode 100644
index 000000000..833457a1b
--- /dev/null
+++ b/tests/python_test/test_index.py
@@ -0,0 +1,832 @@
+import logging
+import time
+import pdb
+import threading
+from multiprocessing import Pool, Process
+import numpy
+import pytest
+import sklearn.preprocessing
+from utils import *
+from constants import *
+
+uid = "test_index"  # prefix used when generating unique collection names
+BUILD_TIMEOUT = 300  # seconds allowed for a single create_index call
+field_name = default_float_vec_field_name
+binary_field_name = default_binary_vec_field_name
+# NOTE(review): built at import time from default_entities; assumes the
+# constants module is fully loaded first — confirm if imports are reordered
+query, query_vecs = gen_query_vectors(field_name, default_entities, default_top_k, 1)
+default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
+
+
+class TestIndexBase:
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        logging.getLogger().info(request.param)
+        if str(connect._cmd("mode")) == "CPU":
+            if request.param["index_type"] in index_cpu_not_support():
+                pytest.skip("sq8h not support in CPU mode")
+        return request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=[
+            1,
+            10,
+            1111
+        ],
+    )
+    def get_nq(self, request):
+        yield request.param
+
+    """
+    ******************************************************************
+      The following cases are used to test `create_index` function
+    ******************************************************************
+    """
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection and add entities in it, create index
+        expected: return search success
+        '''
+        ids = connect.insert(collection, default_entities)
+        connect.create_index(collection, field_name, get_simple_index)
+        index = connect.describe_index(collection, field_name)
+        assert index == get_simple_index
+
+    def test_create_index_on_field_not_existed(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection and add entities in it, create index on field not existed
+        expected: error raised
+        '''
+        tmp_field_name = gen_unique_str()
+        ids = connect.insert(collection, default_entities)
+        with pytest.raises(Exception) as e:
+            connect.create_index(collection, tmp_field_name, get_simple_index)
+
+    @pytest.mark.level(2)
+    def test_create_index_on_field(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection and add entities in it, create index on other field
+        expected: error raised
+        '''
+        tmp_field_name = "int64"
+        ids = connect.insert(collection, default_entities)
+        with pytest.raises(Exception) as e:
+            connect.create_index(collection, tmp_field_name, get_simple_index)
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_no_vectors(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection and add entities in it, create index
+        expected: return search success
+        '''
+        connect.create_index(collection, field_name, get_simple_index)
+        index = connect.describe_index(collection, field_name)
+        assert index == get_simple_index
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_partition(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection, create partition, and add entities in it, create index
+        expected: return search success
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush([collection])
+        connect.create_index(collection, field_name, get_simple_index)
+        index = connect.describe_index(collection, field_name)
+        assert index == get_simple_index
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_partition_flush(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection, create partition, and add entities in it, create index
+        expected: return search success
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush()
+        connect.create_index(collection, field_name, get_simple_index)
+        index = connect.describe_index(collection, field_name)
+        assert index == get_simple_index
+
+    def test_create_index_without_connect(self, dis_connect, collection):
+        '''
+        target: test create index without connection
+        method: create collection and add entities in it, check if added successfully
+        expected: raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.create_index(collection, field_name, get_simple_index)
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_search_with_query_vectors(self, connect, collection, get_simple_index, get_nq):
+        '''
+        target: test create index interface, search with more query vectors
+        method: create collection and add entities in it, create index
+        expected: return search success
+        '''
+        ids = connect.insert(collection, default_entities)
+        connect.create_index(collection, field_name, get_simple_index)
+        logging.getLogger().info(connect.describe_index(collection, field_name))
+        nq = get_nq
+        index_type = get_simple_index["index_type"]
+        search_param = get_search_param(index_type)
+        query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq, search_params=search_param)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    @pytest.mark.level(2)
+    def test_create_index_multithread(self, connect, collection, args):
+        '''
+        target: test create index interface with multiprocess
+        method: create collection and add entities in it, create index
+        expected: return search success
+        '''
+        connect.insert(collection, default_entities)
+
+        def build(connect):
+            connect.create_index(collection, field_name, default_index)
+            index = connect.describe_index(collection, field_name)
+            assert index == default_index
+
+        threads_num = 8
+        threads = []
+        for i in range(threads_num):
+            m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
+            t = TestThread(target=build, args=(m,))
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+
+    def test_create_index_collection_not_existed(self, connect):
+        '''
+        target: test create index interface when collection name not existed
+        method: create collection and add entities in it, create index
+            , make sure the collection name not in index
+        expected: create index failed
+        '''
+        collection_name = gen_unique_str(uid)
+        with pytest.raises(Exception) as e:
+            connect.create_index(collection_name, field_name, default_index)
+
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_insert_flush(self, connect, collection, get_simple_index):
+        '''
+        target: test create index
+        method: create collection and create index, add entities in it
+        expected: create index ok, and count correct
+        '''
+        connect.create_index(collection, field_name, get_simple_index)
+        ids = connect.insert(collection, default_entities)
+        connect.flush([collection])
+        stats = connect.get_collection_stats(collection)
+        assert stats["row_count"] == default_nb
+        index = connect.describe_index(collection, field_name)
+        assert index == get_simple_index
+
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_same_index_repeatedly(self, connect, collection, get_simple_index):
+        '''
+        target: check if index can be created repeatedly, with the same create_index params
+        method: create index after index have been built
+        expected: return code success, and search ok
+        '''
+        connect.create_index(collection, field_name, get_simple_index)
+        connect.create_index(collection, field_name, get_simple_index)
+        index = connect.describe_index(collection, field_name)
+        assert index == get_simple_index
+
+    # TODO:
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_different_index_repeatedly(self, connect, collection):
+        '''
+        target: check if index can be created repeatedly, with the different create_index params
+        method: create another index with different index_params after index have been built
+        expected: return code 0, and describe index result equals with the second index params
+        '''
+        ids = connect.insert(collection, default_entities)
+        indexs = [default_index, {"metric_type":"L2", "index_type": "FLAT", "params":{"nlist": 1024}}]
+        for index in indexs:
+            connect.create_index(collection, field_name, index)
+        index = connect.describe_index(collection, field_name)
+        assert index == indexs[-1]
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_ip(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection and add entities in it, create index
+        expected: return search success
+        '''
+        ids = connect.insert(collection, default_entities)
+        get_simple_index["metric_type"] = "IP"
+        connect.create_index(collection, field_name, get_simple_index)
+        index = connect.describe_index(collection, field_name)
+        assert index == get_simple_index
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_no_vectors_ip(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection and add entities in it, create index
+        expected: return search success
+        '''
+        get_simple_index["metric_type"] = "IP"
+        connect.create_index(collection, field_name, get_simple_index)
+        index = connect.describe_index(collection, field_name)
+        assert index == get_simple_index
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_partition_ip(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection, create partition, and add entities in it, create index
+        expected: return search success
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush([collection])
+        get_simple_index["metric_type"] = "IP"
+        connect.create_index(collection, field_name, get_simple_index)
+        index = connect.describe_index(collection, field_name)
+        assert index == get_simple_index
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_partition_flush_ip(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection, create partition, and add entities in it, create index
+        expected: return search success
+        '''
+        connect.create_partition(collection, default_tag)
+        ids = connect.insert(collection, default_entities, partition_tag=default_tag)
+        connect.flush()
+        get_simple_index["metric_type"] = "IP"
+        connect.create_index(collection, field_name, get_simple_index)
+        index = connect.describe_index(collection, field_name)
+        assert index == get_simple_index
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_search_with_query_vectors_ip(self, connect, collection, get_simple_index, get_nq):
+        '''
+        target: test create index interface, search with more query vectors
+        method: create collection and add entities in it, create index
+        expected: return search success
+        '''
+        metric_type = "IP"
+        ids = connect.insert(collection, default_entities)
+        get_simple_index["metric_type"] = metric_type
+        connect.create_index(collection, field_name, get_simple_index)
+        logging.getLogger().info(connect.describe_index(collection))
+        nq = get_nq
+        index_type = get_simple_index["index_type"]
+        search_param = get_search_param(index_type)
+        query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq, metric_type=metric_type, search_params=search_param)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    @pytest.mark.level(2)
+    def test_create_index_multithread_ip(self, connect, collection, args):
+        '''
+        target: test create index interface with multiprocess
+        method: create collection and add entities in it, create index
+        expected: return search success
+        '''
+        connect.insert(collection, default_entities)
+
+        def build(connect):
+            default_index["metric_type"] = "IP"
+            connect.create_index(collection, field_name, default_index)
+            index = connect.describe_index(collection, field_name)
+            assert index == default_index
+
+        threads_num = 8
+        threads = []
+        for i in range(threads_num):
+            m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
+            t = TestThread(target=build, args=(m,))
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+
+    def test_create_index_collection_not_existed_ip(self, connect, collection):
+        '''
+        target: test create index interface when collection name not existed
+        method: create collection and add entities in it, create index
+            , make sure the collection name not in index
+        expected: return code not equals to 0, create index failed
+        '''
+        collection_name = gen_unique_str(uid)
+        default_index["metric_type"] = "IP"
+        with pytest.raises(Exception) as e:
+            connect.create_index(collection_name, field_name, default_index)
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_no_vectors_insert_ip(self, connect, collection):
+        '''
+        target: test create index interface when there is no vectors in collection, and does not affect the subsequent process
+        method: create collection and add no vectors in it, and then create index, add entities in it
+        expected: return code equals to 0
+        '''
+        default_index["metric_type"] = "IP"
+        connect.create_index(collection, field_name, default_index)
+        ids = connect.insert(collection, default_entities)
+        connect.flush([collection])
+        stats = connect.get_collection_stats(collection)
+        assert stats["row_count"] == default_nb
+        index = connect.describe_index(collection, field_name)
+        assert index == default_index
+
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_same_index_repeatedly_ip(self, connect, collection):
+        '''
+        target: check if index can be created repeatedly, with the same create_index params
+        method: create index after index have been built
+        expected: return code success, and search ok
+        '''
+        default_index["metric_type"] = "IP"
+        connect.create_index(collection, field_name, default_index)
+        connect.create_index(collection, field_name, default_index)
+        index = connect.describe_index(collection, field_name)
+        assert index == default_index
+
+    # TODO:
+    @pytest.mark.level(2)
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_different_index_repeatedly_ip(self, connect, collection):
+        '''
+        target: check if index can be created repeatedly, with the different create_index params
+        method: create another index with different index_params after index have been built
+        expected: return code 0, and describe index result equals with the second index params
+        '''
+        ids = connect.insert(collection, default_entities)
+        stats = connect.get_collection_stats(collection)
+        assert stats["row_count"] == default_nb
+        default_index["metric_type"] = "IP"
+        indexs = [default_index, {"index_type": "FLAT", "params": {"nlist": 1024}, "metric_type": "IP"}]
+        for index in indexs:
+            connect.create_index(collection, field_name, index)
+        index = connect.describe_index(collection, field_name)
+        assert index == indexs[-1]
+
+    """
+    ******************************************************************
+      The following cases are used to test `drop_index` function
+    ******************************************************************
+    """
+    def test_drop_index(self, connect, collection, get_simple_index):
+        '''
+        target: test drop index interface
+        method: create collection and add entities in it, create index, call drop index
+        expected: return code 0, and default index param
+        '''
+        # ids = connect.insert(collection, entities)
+        connect.create_index(collection, field_name, get_simple_index)
+        connect.drop_index(collection, field_name)
+        index = connect.describe_index(collection, field_name)
+        assert not index
+
+    @pytest.mark.level(2)
+    def test_drop_index_repeatedly(self, connect, collection, get_simple_index):
+        '''
+        target: test drop index repeatedly
+        method: create index, call drop index, and drop again
+        expected: return code 0
+        '''
+        connect.create_index(collection, field_name, get_simple_index)
+        connect.drop_index(collection, field_name)
+        connect.drop_index(collection, field_name)
+        index = connect.describe_index(collection, field_name)
+        assert not index
+
+    @pytest.mark.level(2)
+    def test_drop_index_without_connect(self, dis_connect, collection):
+        '''
+        target: test drop index without connection
+        method: drop index, and check if drop successfully
+        expected: raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.drop_index(collection, field_name)
+
+    def test_drop_index_collection_not_existed(self, connect):
+        '''
+        target: test drop index interface when collection name not existed
+        method: create collection and add entities in it, create index
+            , make sure the collection name not in index, and then drop it
+        expected: return code not equals to 0, drop index failed
+        '''
+        collection_name = gen_unique_str(uid)
+        with pytest.raises(Exception) as e:
+            connect.drop_index(collection_name, field_name)
+
+    def test_drop_index_collection_not_create(self, connect, collection):
+        '''
+        target: test drop index interface when index not created
+        method: create collection and add entities in it, create index
+        expected: return code not equals to 0, drop index failed
+        '''
+        # no create index
+        connect.drop_index(collection, field_name)
+
+    @pytest.mark.level(2)
+    def test_create_drop_index_repeatedly(self, connect, collection, get_simple_index):
+        '''
+        target: test create / drop index repeatedly, use the same index params
+        method: create index, drop index, four times
+        expected: return code 0
+        '''
+        for i in range(4):
+            connect.create_index(collection, field_name, get_simple_index)
+            connect.drop_index(collection, field_name)
+
+    def test_drop_index_ip(self, connect, collection, get_simple_index):
+        '''
+        target: test drop index interface
+        method: create collection and add entities in it, create index, call drop index
+        expected: return code 0, and default index param
+        '''
+        # ids = connect.insert(collection, entities)
+        get_simple_index["metric_type"] = "IP"
+        connect.create_index(collection, field_name, get_simple_index)
+        connect.drop_index(collection, field_name)
+        index = connect.describe_index(collection, field_name)
+        assert not index
+
+    @pytest.mark.level(2)
+    def test_drop_index_repeatedly_ip(self, connect, collection, get_simple_index):
+        '''
+        target: test drop index repeatedly
+        method: create index, call drop index, and drop again
+        expected: return code 0
+        '''
+        get_simple_index["metric_type"] = "IP"
+        connect.create_index(collection, field_name, get_simple_index)
+        connect.drop_index(collection, field_name)
+        connect.drop_index(collection, field_name)
+        index = connect.describe_index(collection, field_name)
+        assert not index
+
+    @pytest.mark.level(2)
+    def test_drop_index_without_connect_ip(self, dis_connect, collection):
+        '''
+        target: test drop index without connection
+        method: drop index, and check if drop successfully
+        expected: raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.drop_index(collection, field_name)
+
+    def test_drop_index_collection_not_create_ip(self, connect, collection):
+        '''
+        target: test drop index interface when index not created
+        method: create collection and add entities in it, create index
+        expected: return code not equals to 0, drop index failed
+        '''
+        # ids = connect.insert(collection, entities)
+        # no create index
+        connect.drop_index(collection, field_name)
+
+    @pytest.mark.level(2)
+    def test_create_drop_index_repeatedly_ip(self, connect, collection, get_simple_index):
+        '''
+        target: test create / drop index repeatedly, use the same index params
+        method: create index, drop index, four times
+        expected: return code 0
+        '''
+        get_simple_index["metric_type"] = "IP"
+        for i in range(4):
+            connect.create_index(collection, field_name, get_simple_index)
+            connect.drop_index(collection, field_name)
+
+
+class TestIndexBinary:
+    """Tests for create_index / describe_index / drop_index on a binary-vector collection."""
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        # GPU-only index types cannot be built when the server runs in CPU mode
+        if str(connect._cmd("mode")) == "CPU":
+            if request.param["index_type"] in index_cpu_not_support():
+                pytest.skip("sq8h not support in CPU mode")
+        return request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_binary_index()
+    )
+    def get_jaccard_index(self, request, connect):
+        # only binary-capable index types are usable with the JACCARD metric;
+        # anything else from gen_binary_index() is skipped
+        if request.param["index_type"] in binary_support():
+            request.param["metric_type"] = "JACCARD"
+            return request.param
+        else:
+            pytest.skip("Skip index")
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_binary_index()
+    )
+    def get_l2_index(self, request, connect):
+        # L2 on binary vectors is the *invalid* combination exercised by
+        # test_create_index_invalid_metric_type_binary below
+        request.param["metric_type"] = "L2"
+        return request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=[
+            1,
+            10,
+            1111
+        ],
+    )
+    def get_nq(self, request):
+        yield request.param
+
+    """
+    ******************************************************************
+      The following cases are used to test `create_index` function
+    ******************************************************************
+    """
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index(self, connect, binary_collection, get_jaccard_index):
+        '''
+        target: test create index interface
+        method: create collection and add entities in it, create index
+        expected: describe_index returns the params the index was built with
+        '''
+        ids = connect.insert(binary_collection, default_binary_entities)
+        connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
+        binary_index = connect.describe_index(binary_collection, binary_field_name)
+        assert binary_index == get_jaccard_index
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_partition(self, connect, binary_collection, get_jaccard_index):
+        '''
+        target: test create index interface
+        method: create collection, create partition, and add entities in it, create index
+        expected: describe_index returns the params the index was built with
+        '''
+        connect.create_partition(binary_collection, default_tag)
+        ids = connect.insert(binary_collection, default_binary_entities, partition_tag=default_tag)
+        connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
+        binary_index = connect.describe_index(binary_collection, binary_field_name)
+        assert binary_index == get_jaccard_index
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_search_with_query_vectors(self, connect, binary_collection, get_jaccard_index, get_nq):
+        '''
+        target: test create index interface, search with more query vectors
+        method: create collection and add entities in it, create index, then search
+        expected: search returns one result set per query vector
+        '''
+        nq = get_nq
+        ids = connect.insert(binary_collection, default_binary_entities)
+        connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
+        query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, nq, metric_type="JACCARD")
+        search_param = get_search_param(get_jaccard_index["index_type"], metric_type="JACCARD")
+        logging.getLogger().info(search_param)
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query, search_params=search_param)
+        assert len(res) == nq
+
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_invalid_metric_type_binary(self, connect, binary_collection, get_l2_index):
+        '''
+        target: test create index interface with invalid metric type
+        method: add entities into binary collection, flush, create index with L2 metric type
+        expected: create_index fails, except for BIN_FLAT which accepts L2
+        '''
+        # insert 6000 vectors
+        ids = connect.insert(binary_collection, default_binary_entities)
+        connect.flush([binary_collection])
+        # BIN_FLAT is the one binary index type that tolerates the L2 metric
+        if get_l2_index["index_type"] == "BIN_FLAT":
+            connect.create_index(binary_collection, binary_field_name, get_l2_index)
+            binary_index = connect.describe_index(binary_collection, binary_field_name)
+            assert binary_index == get_l2_index
+        else:
+            with pytest.raises(Exception) as e:
+                res = connect.create_index(binary_collection, binary_field_name, get_l2_index)
+
+    """
+    ******************************************************************
+      The following cases are used to test `describe_index` function
+    ***************************************************************
+    """
+    @pytest.mark.skip("repeat with test_create_index binary")
+    def test_get_index_info(self, connect, binary_collection, get_jaccard_index):
+        '''
+        target: test describe index interface
+        method: create collection and add entities in it, create index, call describe index
+        expected: index_type in collection stats matches the created index
+        '''
+        ids = connect.insert(binary_collection, default_binary_entities)
+        connect.flush([binary_collection])
+        connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
+        stats = connect.get_collection_stats(binary_collection)
+        assert stats["row_count"] == default_nb
+        # NOTE(review): assumes stats layout partitions -> segments -> files;
+        # only segment files that carry an index_type are checked
+        for partition in stats["partitions"]:
+            segments = partition["segments"]
+            if segments:
+                for segment in segments:
+                    for file in segment["files"]:
+                        if "index_type" in file:
+                            assert file["index_type"] == get_jaccard_index["index_type"]
+
+    @pytest.mark.skip("repeat with test_create_index_partition binary")
+    def test_get_index_info_partition(self, connect, binary_collection, get_jaccard_index):
+        '''
+        target: test describe index interface
+        method: create collection, create partition and add entities in it, create index, call describe index
+        expected: index_type in collection stats matches the created index
+        '''
+        connect.create_partition(binary_collection, default_tag)
+        ids = connect.insert(binary_collection, default_binary_entities, partition_tag=default_tag)
+        connect.flush([binary_collection])
+        connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
+        stats = connect.get_collection_stats(binary_collection)
+        logging.getLogger().info(stats)
+        assert stats["row_count"] == default_nb
+        # default partition plus the one created above
+        assert len(stats["partitions"]) == 2
+        for partition in stats["partitions"]:
+            segments = partition["segments"]
+            if segments:
+                for segment in segments:
+                    for file in segment["files"]:
+                        if "index_type" in file:
+                            assert file["index_type"] == get_jaccard_index["index_type"]
+
+    """
+    ******************************************************************
+      The following cases are used to test `drop_index` function
+    ******************************************************************
+    """
+    def test_drop_index(self, connect, binary_collection, get_jaccard_index):
+        '''
+        target: test drop index interface
+        method: create collection and add entities in it, create index, call drop index
+        expected: describe_index reports no index afterwards
+        '''
+        connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
+        stats = connect.get_collection_stats(binary_collection)
+        logging.getLogger().info(stats)
+        connect.drop_index(binary_collection, binary_field_name)
+        binary_index = connect.describe_index(binary_collection, binary_field_name)
+        assert not binary_index
+
+    def test_drop_index_partition(self, connect, binary_collection, get_jaccard_index):
+        '''
+        target: test drop index interface
+        method: create collection, create partition and add entities in it, create index on collection, call drop collection index
+        expected: describe_index reports no index afterwards
+        '''
+        connect.create_partition(binary_collection, default_tag)
+        ids = connect.insert(binary_collection, default_binary_entities, partition_tag=default_tag)
+        connect.flush([binary_collection])
+        connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
+        connect.drop_index(binary_collection, binary_field_name)
+        binary_index = connect.describe_index(binary_collection, binary_field_name)
+        assert not binary_index
+
+
class TestIndexInvalid(object):
    """
    Test create / describe / drop index interfaces with invalid collection names
    and invalid index parameters.
    """

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_collection_name(self, request):
        # Each param is one invalid collection-name string.
        yield request.param

    @pytest.mark.level(1)
    def test_create_index_with_invalid_collection_name(self, connect, get_collection_name):
        # Building an index on an invalid collection name must raise.
        with pytest.raises(Exception):
            connect.create_index(get_collection_name, field_name, default_index)

    @pytest.mark.level(1)
    def test_drop_index_with_invalid_collection_name(self, connect, get_collection_name):
        # Dropping an index on an invalid collection name must raise.
        with pytest.raises(Exception):
            connect.drop_index(get_collection_name)

    @pytest.fixture(scope="function", params=gen_invalid_index())
    def get_index(self, request):
        # Each param is one malformed index-parameter dict.
        yield request.param

    @pytest.mark.level(2)
    def test_create_index_with_invalid_index_params(self, connect, collection, get_index):
        # Malformed index parameters must be rejected by create_index.
        logging.getLogger().info(get_index)
        with pytest.raises(Exception):
            connect.create_index(collection, field_name, get_index)
+
+
class TestIndexAsync:
    """
    Asynchronous `create_index` cases (skipped under the HTTP handler,
    which has no future/callback support).
    """

    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

    @pytest.fixture(scope="function", params=gen_simple_index())
    def get_simple_index(self, request, connect):
        # Some index types are GPU-only; skip them when the server runs in CPU mode.
        if str(connect._cmd("mode")) == "CPU":
            if request.param["index_type"] in index_cpu_not_support():
                pytest.skip("sq8h not support in CPU mode")
        return request.param

    def check_result(self, res):
        # Callback handed to the async build; only logs what it receives.
        logger = logging.getLogger()
        logger.info("In callback check search result")
        logger.info(res)

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index(self, connect, collection, get_simple_index):
        """
        target: test async create index interface
        method: insert entities, start an async index build, wait on the future
        expected: the future resolves without raising
        """
        connect.insert(collection, default_entities)
        logging.getLogger().info("start index")
        index_future = connect.create_index(collection, field_name, get_simple_index, _async=True)
        logging.getLogger().info("before result")
        build_result = index_future.result()
        # TODO:
        logging.getLogger().info(build_result)

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_drop(self, connect, collection, get_simple_index):
        """
        target: test dropping a collection while an async index build is running
        method: insert entities, start an async build, drop the collection
                without waiting on the future
        expected: drop succeeds
        """
        connect.insert(collection, default_entities)
        logging.getLogger().info("start index")
        connect.create_index(collection, field_name, get_simple_index, _async=True)
        logging.getLogger().info("DROP")
        connect.drop_collection(collection)

    @pytest.mark.level(2)
    def test_create_index_with_invalid_collection_name(self, connect):
        """
        target: test async create index with an invalid collection name
        method: start an async build on name " ", then wait on the future
        expected: resolving the future raises
        """
        index_future = connect.create_index(" ", field_name, default_index, _async=True)
        with pytest.raises(Exception):
            index_future.result()

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_callback(self, connect, collection, get_simple_index):
        """
        target: test async create index with a completion callback
        method: insert entities, start an async build passing _callback,
                then wait on the future
        expected: the future resolves without raising
        """
        connect.insert(collection, default_entities)
        logging.getLogger().info("start index")
        index_future = connect.create_index(collection, field_name, get_simple_index, _async=True,
                                            _callback=self.check_result)
        logging.getLogger().info("before result")
        build_result = index_future.result()
        # TODO:
        logging.getLogger().info(build_result)
diff --git a/tests/python_test/test_mix.py b/tests/python_test/test_mix.py
new file mode 100644
index 000000000..2712b89d5
--- /dev/null
+++ b/tests/python_test/test_mix.py
@@ -0,0 +1,161 @@
+import pdb
+import copy
+import pytest
+import threading
+import datetime
+import logging
+from time import sleep
+from multiprocessing import Process
+import sklearn.preprocessing
+from utils import *
+
# Module-wide fixtures: 10k random vectors, L2-normalized so that inner-product
# and L2 searches over them are comparable.
index_file_size = 10
vectors = gen_vectors(10000, default_dim)
vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2')
vectors = vectors.tolist()
top_k = 1
nprobe = 1
epsilon = 0.001  # float comparison tolerance
nlist = 128      # IVF cluster count used when building indexes
# index_params = {'index_type': IndexType.IVFLAT, 'nlist': 16384}
+
+
class TestMixBase:
    """Mixed-workload cases.

    NOTE(review): both methods are disabled (leading-underscore names) and are
    written against the old status-returning SDK (`bulk_insert`, dict-based
    `create_collection`, `IndexType`/`MetricType` enums); kept for reference.
    """

    # disable
    def _test_search_during_createIndex(self, args):
        # Runs a search loop in one process while another process keeps
        # inserting, to exercise concurrent read/write on one collection.
        loops = 10000
        collection = gen_unique_str()
        query_vecs = [vectors[0], vectors[1]]
        uri = "tcp://%s:%s" % (args["ip"], args["port"])
        id_0 = 0; id_1 = 0
        milvus_instance = get_milvus(args["handler"])
        # milvus_instance.connect(uri=uri)
        milvus_instance.create_collection({'collection_name': collection,
             'dimension': default_dim,
             'index_file_size': index_file_size,
             'metric_type': "L2"})
        for i in range(10):
            status, ids = milvus_instance.bulk_insert(collection, vectors)
            # logging.getLogger().info(ids)
            if i == 0:
                # Remember the first two ids; the search loop asserts they
                # remain the top-1 hits for their own vectors.
                id_0 = ids[0]; id_1 = ids[1]
        # def create_index(milvus_instance):
        #     logging.getLogger().info("In create index")
        #     status = milvus_instance.create_index(collection, index_params)
        #     logging.getLogger().info(status)
        #     status, result = milvus_instance.get_index_info(collection)
        #     logging.getLogger().info(result)
        def insert(milvus_instance):
            # Single extra bulk insert executed in a child process.
            logging.getLogger().info("In add vectors")
            status, ids = milvus_instance.bulk_insert(collection, vectors)
            logging.getLogger().info(status)
        def search(milvus_instance):
            # Repeated top-k search executed in a child process.
            logging.getLogger().info("In search vectors")
            for i in range(loops):
                status, result = milvus_instance.search(collection, top_k, nprobe, query_vecs)
                logging.getLogger().info(status)
                assert result[0][0].id == id_0
                assert result[1][0].id == id_1
        # Each process gets its own client instance.
        milvus_instance = get_milvus(args["handler"])
        # milvus_instance.connect(uri=uri)
        p_search = Process(target=search, args=(milvus_instance, ))
        p_search.start()
        milvus_instance = get_milvus(args["handler"])
        # milvus_instance.connect(uri=uri)
        p_create = Process(target=insert, args=(milvus_instance, ))
        p_create.start()
        p_create.join()

    @pytest.mark.level(2)
    def _test_mix_multi_collections(self, connect):
        '''
        target: test functions with multiple collections of different metric_types and index_types
        method: create 60 collections which 30 are L2 and the other are IP, add vectors into them
                and test describe index and search
        expected: status ok
        '''
        nq = 10000
        collection_list = []
        idx = []
        index_param = {'nlist': nlist}

        # create collections and add vectors: first 30 use L2 ...
        for i in range(30):
            collection_name = gen_unique_str('test_mix_multi_collections')
            collection_list.append(collection_name)
            param = {'collection_name': collection_name,
                     'dimension': default_dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.L2}
            connect.create_collection(param)
            status, ids = connect.bulk_insert(collection_name=collection_name, records=vectors)
            # Record three reference ids per collection for the search check.
            idx.append(ids[0])
            idx.append(ids[10])
            idx.append(ids[20])
            assert status.OK()
        # ... next 30 use IP.
        for i in range(30):
            collection_name = gen_unique_str('test_mix_multi_collections')
            collection_list.append(collection_name)
            param = {'collection_name': collection_name,
                     'dimension': default_dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.IP}
            connect.create_collection(param)
            status, ids = connect.bulk_insert(collection_name=collection_name, records=vectors)
            assert status.OK()
            status = connect.flush([collection_name])
            assert status.OK()
            idx.append(ids[0])
            idx.append(ids[10])
            idx.append(ids[20])
            assert status.OK()
        # Build FLAT / IVFLAT / IVF_SQ8 on matching L2 and IP slices.
        for i in range(10):
            status = connect.create_index(collection_list[i], IndexType.FLAT, index_param)
            assert status.OK()
            status = connect.create_index(collection_list[30 + i], IndexType.FLAT, index_param)
            assert status.OK()
            status = connect.create_index(collection_list[10 + i], IndexType.IVFLAT, index_param)
            assert status.OK()
            status = connect.create_index(collection_list[40 + i], IndexType.IVFLAT, index_param)
            assert status.OK()
            status = connect.create_index(collection_list[20 + i], IndexType.IVF_SQ8, index_param)
            assert status.OK()
            status = connect.create_index(collection_list[50 + i], IndexType.IVF_SQ8, index_param)
            assert status.OK()

        # describe index: each slice must report the type built above.
        for i in range(10):
            status, result = connect.get_index_info(collection_list[i])
            assert result._index_type == IndexType.FLAT
            status, result = connect.get_index_info(collection_list[10 + i])
            assert result._index_type == IndexType.IVFLAT
            status, result = connect.get_index_info(collection_list[20 + i])
            assert result._index_type == IndexType.IVF_SQ8
            status, result = connect.get_index_info(collection_list[30 + i])
            assert result._index_type == IndexType.FLAT
            status, result = connect.get_index_info(collection_list[40 + i])
            assert result._index_type == IndexType.IVFLAT
            status, result = connect.get_index_info(collection_list[50 + i])
            assert result._index_type == IndexType.IVF_SQ8

        # search: the reference ids recorded at insert time must come back.
        query_vecs = [vectors[0], vectors[10], vectors[20]]
        for i in range(60):
            collection = collection_list[i]
            status, result = connect.search(collection, top_k, query_records=query_vecs, params={"nprobe": 1})
            assert status.OK()
            assert len(result) == len(query_vecs)
            logging.getLogger().info(i)
            for j in range(len(query_vecs)):
                assert len(result[j]) == top_k
            for j in range(len(query_vecs)):
                if not check_result(result[j], idx[3 * i + j]):
                    logging.getLogger().info(result[j]._id_list)
                    logging.getLogger().info(idx[3 * i + j])
                assert check_result(result[j], idx[3 * i + j])
+
def check_result(result, id):
    """Return True if *id* appears among the first (up to) five hits of *result*."""
    # Slicing caps the scan at five entries, matching the original
    # two-branch logic for both short and long result lists.
    return id in (hit.id for hit in result[:5])
diff --git a/tests/python_test/test_partition.py b/tests/python_test/test_partition.py
new file mode 100644
index 000000000..0f104b401
--- /dev/null
+++ b/tests/python_test/test_partition.py
@@ -0,0 +1,425 @@
+import time
+import random
+import pdb
+import threading
+import logging
+from multiprocessing import Pool, Process
+import pytest
+from utils import *
+from constants import *
+
+TIMEOUT = 120
+
+
class TestCreateBase:
    """
    Test `create_partition` interface
    """

    def test_create_partition_a(self, connect, collection):
        """
        target: test create partition
        method: call create_partition once
        expected: no exception
        """
        connect.create_partition(collection, default_tag)

    # TODO: enable
    @pytest.mark.level(2)
    @pytest.mark.timeout(600)
    def test_create_partition_limit(self, connect, collection, args):
        """
        target: test the per-collection partition quota
        method: create max_partition_num partitions from 8 threads, then one more
        expected: the final create_partition raises
        """
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")
        threads_num = 8
        per_thread = max_partition_num // threads_num

        def fill(client, count):
            # Each worker creates its share of uniquely-named partitions.
            for _ in range(count):
                client.create_partition(collection, gen_unique_str())

        workers = []
        for _ in range(threads_num):
            client = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
            worker = threading.Thread(target=fill, args=(client, per_thread))
            workers.append(worker)
            worker.start()
        for worker in workers:
            worker.join()
        # Quota is exhausted; one more partition must be rejected.
        with pytest.raises(Exception):
            connect.create_partition(collection, gen_unique_str())

    def test_create_partition_repeat(self, connect, collection):
        """
        target: test creating the same partition twice
        method: call create_partition twice with the same tag
        expected: second call raises; partition list is unchanged
        """
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception):
            connect.create_partition(collection, default_tag)
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])

    def test_create_partition_collection_not_existed(self, connect):
        """
        target: test create partition on a missing collection
        method: call create_partition with an unused collection name
        expected: exception raised
        """
        missing_collection = gen_unique_str()
        with pytest.raises(Exception):
            connect.create_partition(missing_collection, default_tag)

    def test_create_partition_tag_name_None(self, connect, collection):
        """
        target: test create partition with tag name None
        method: call create_partition with None tag
        expected: exception raised
        """
        with pytest.raises(Exception):
            connect.create_partition(collection, None)

    def test_create_different_partition_tags(self, connect, collection):
        """
        target: test creating two partitions with different tags
        method: call create_partition twice with distinct names
        expected: both partitions listed alongside '_default'
        """
        connect.create_partition(collection, default_tag)
        second_tag = gen_unique_str()
        connect.create_partition(collection, second_tag)
        assert compare_list_elements(connect.list_partitions(collection),
                                     [default_tag, second_tag, '_default'])

    def test_create_partition_insert_default(self, connect, id_collection):
        """
        target: test insert into the default partition after creating another
        method: create a partition, insert with explicit ids, no partition tag
        expected: all ids accepted
        """
        connect.create_partition(id_collection, default_tag)
        explicit_ids = list(range(default_nb))
        insert_ids = connect.insert(id_collection, default_entities, explicit_ids)
        assert len(insert_ids) == len(explicit_ids)

    def test_create_partition_insert_with_tag(self, connect, id_collection):
        """
        target: test insert into a freshly created partition
        method: create a partition, insert with explicit ids and its tag
        expected: all ids accepted
        """
        connect.create_partition(id_collection, default_tag)
        explicit_ids = list(range(default_nb))
        insert_ids = connect.insert(id_collection, default_entities, explicit_ids,
                                    partition_tag=default_tag)
        assert len(insert_ids) == len(explicit_ids)

    def test_create_partition_insert_with_tag_not_existed(self, connect, collection):
        """
        target: test insert with a partition tag that was never created
        method: create one partition, insert targeting a different tag
        expected: exception raised
        """
        connect.create_partition(collection, default_tag)
        explicit_ids = list(range(default_nb))
        with pytest.raises(Exception):
            connect.insert(collection, default_entities, explicit_ids, partition_tag="tag_new")

    @pytest.mark.skip("get_collection_stats")
    def test_create_partition_insert_same_tags(self, connect, id_collection):
        """
        target: test two inserts into the same partition
        method: insert two disjoint id batches with the same tag, then flush
        expected: row count equals both batches combined
        """
        connect.create_partition(id_collection, default_tag)
        first_batch = list(range(default_nb))
        connect.insert(id_collection, default_entities, first_batch, partition_tag=default_tag)
        second_batch = [i + default_nb for i in range(default_nb)]
        connect.insert(id_collection, default_entities, second_batch, partition_tag=default_tag)
        connect.flush([id_collection])
        stats = connect.get_collection_stats(id_collection)
        assert stats["row_count"] == default_nb * 2

    @pytest.mark.skip("get_collection_stats")
    @pytest.mark.level(2)
    def test_create_partition_insert_same_tags_two_collections(self, connect, collection):
        """
        target: test the same partition tag used in two different collections
        method: create the tag in both collections, insert into each, flush
        expected: each collection reports default_nb rows
        """
        connect.create_partition(collection, default_tag)
        collection_new = gen_unique_str()
        connect.create_collection(collection_new, default_fields)
        connect.create_partition(collection_new, default_tag)
        connect.insert(collection, default_entities, partition_tag=default_tag)
        connect.insert(collection_new, default_entities, partition_tag=default_tag)
        connect.flush([collection, collection_new])
        for name in (collection, collection_new):
            stats = connect.get_collection_stats(name)
            assert stats["row_count"] == default_nb
+
+
class TestShowBase:
    """
    Test `list_partitions` interface
    """

    def test_list_partitions(self, connect, collection):
        """
        target: test list partitions after creating one
        method: create a partition, then call list_partitions
        expected: the new tag and '_default' are both listed
        """
        connect.create_partition(collection, default_tag)
        partitions = connect.list_partitions(collection)
        assert compare_list_elements(partitions, [default_tag, '_default'])

    def test_list_partitions_no_partition(self, connect, collection):
        """
        target: test list partitions on a fresh collection
        method: call list_partitions without creating any partition
        expected: only '_default' is listed
        """
        partitions = connect.list_partitions(collection)
        assert compare_list_elements(partitions, ['_default'])

    def test_show_multi_partitions(self, connect, collection):
        """
        target: test list partitions with several partitions present
        method: create two partitions, then call list_partitions
        expected: both tags and '_default' are listed
        """
        second_tag = gen_unique_str()
        connect.create_partition(collection, default_tag)
        connect.create_partition(collection, second_tag)
        partitions = connect.list_partitions(collection)
        assert compare_list_elements(partitions, [default_tag, second_tag, '_default'])
+
+
class TestHasBase:
    """
    Test `has_partition` interface
    """

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_tag_name(self, request):
        # Each param is one invalid partition-tag string.
        yield request.param

    def test_has_partition_a(self, connect, collection):
        """
        target: test has_partition on an existing partition
        method: create a partition, then query it with has_partition
        expected: True
        """
        connect.create_partition(collection, default_tag)
        found = connect.has_partition(collection, default_tag)
        logging.getLogger().info(found)
        assert found

    def test_has_partition_multi_partitions(self, connect, collection):
        """
        target: test has_partition with several partitions present
        method: create three partitions, query each with has_partition
        expected: True for every tag
        """
        tags = [default_tag, "tag_new", "tag_new_new"]
        for tag in tags:
            connect.create_partition(collection, tag)
        for tag in tags:
            assert connect.has_partition(collection, tag)

    def test_has_partition_tag_not_existed(self, connect, collection):
        """
        target: test has_partition on a tag that was never created
        method: query has_partition without creating the partition
        expected: False
        """
        found = connect.has_partition(collection, default_tag)
        logging.getLogger().info(found)
        assert not found

    def test_has_partition_collection_not_existed(self, connect, collection):
        """
        target: test has_partition on a missing collection
        method: query has_partition against an unknown collection name
        expected: exception raised
        """
        with pytest.raises(Exception):
            connect.has_partition("not_existed_collection", default_tag)

    @pytest.mark.level(2)
    def test_has_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
        """
        target: test has_partition with an invalid tag name
        method: query has_partition with each invalid tag string
        expected: exception raised
        """
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception):
            connect.has_partition(collection, get_tag_name)
+
+
class TestDropBase:
    """
    Test `drop_partition` interface
    """

    def test_drop_partition_a(self, connect, collection):
        """
        target: test drop partition removes it from the partition list
        method: create a partition, drop it, list partitions before and after
        expected: tag present before the drop, absent after
        """
        connect.create_partition(collection, default_tag)
        assert default_tag in connect.list_partitions(collection)
        connect.drop_partition(collection, default_tag)
        assert default_tag not in connect.list_partitions(collection)

    def test_drop_partition_tag_not_existed(self, connect, collection):
        """
        target: test drop partition with a tag that was never created
        method: create one partition, drop a different tag
        expected: exception raised
        """
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception):
            connect.drop_partition(collection, "new_tag")

    def test_drop_partition_tag_not_existed_A(self, connect, collection):
        """
        target: test drop partition on a missing collection
        method: create a partition, drop it via an unknown collection name
        expected: exception raised
        """
        connect.create_partition(collection, default_tag)
        missing_collection = gen_unique_str()
        with pytest.raises(Exception):
            connect.drop_partition(missing_collection, default_tag)

    @pytest.mark.level(2)
    def test_drop_partition_repeatedly(self, connect, collection):
        """
        target: test dropping the same partition twice
        method: create and drop a partition, then drop it again
        expected: second drop raises; tag absent from the partition list
        """
        connect.create_partition(collection, default_tag)
        connect.drop_partition(collection, default_tag)
        time.sleep(2)
        with pytest.raises(Exception):
            connect.drop_partition(collection, default_tag)
        assert default_tag not in connect.list_partitions(collection)

    def test_drop_partition_create(self, connect, collection):
        """
        target: test recreating a partition after dropping it
        method: create, drop, then create the same tag again
        expected: tag is listed again after the re-create
        """
        connect.create_partition(collection, default_tag)
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
        connect.drop_partition(collection, default_tag)
        assert compare_list_elements(connect.list_partitions(collection), ['_default'])
        time.sleep(2)
        connect.create_partition(collection, default_tag)
        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
+
+
class TestNameInvalid(object):
    """
    Test partition interfaces with invalid collection / tag names
    """

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_tag_name(self, request):
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_collection_name(self, request):
        yield request.param

    @pytest.mark.level(2)
    def test_drop_partition_with_invalid_collection_name(self, connect, collection, get_collection_name):
        """
        target: test drop partition with an invalid collection name
        method: call drop_partition with each invalid name
        expected: exception raised
        """
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception):
            connect.drop_partition(get_collection_name, default_tag)

    @pytest.mark.level(2)
    def test_drop_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
        """
        target: test drop partition with an invalid tag name
        method: call drop_partition with each invalid tag
        expected: exception raised
        """
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception):
            connect.drop_partition(collection, get_tag_name)

    @pytest.mark.level(2)
    def test_list_partitions_with_invalid_collection_name(self, connect, collection, get_collection_name):
        """
        target: test list partitions with an invalid collection name
        method: call list_partitions with each invalid name
        expected: exception raised
        """
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception):
            connect.list_partitions(get_collection_name)
+
+
class TestNewCase(object):
    """
    Test that the reserved '_default' partition cannot be dropped
    """

    def test_drop_default_partition_a(self, connect, collection):
        """
        target: test dropping '_default' on a fresh collection
        method: call drop_partition with partition_tag='_default'
        expected: exception raised; '_default' still listed
        """
        with pytest.raises(Exception):
            connect.drop_partition(collection, partition_tag='_default')
        assert '_default' in connect.list_partitions(collection)

    def test_drop_default_partition_b(self, connect, collection):
        """
        target: test dropping '_default' when another partition exists
        method: create a partition, then try to drop '_default'
        expected: exception raised; '_default' still listed
        """
        connect.create_partition(collection, default_tag)
        with pytest.raises(Exception):
            connect.drop_partition(collection, partition_tag='_default')
        assert '_default' in connect.list_partitions(collection)
diff --git a/tests/python_test/test_ping.py b/tests/python_test/test_ping.py
new file mode 100644
index 000000000..46ae4c898
--- /dev/null
+++ b/tests/python_test/test_ping.py
@@ -0,0 +1,129 @@
+import logging
+import pytest
+
+__version__ = '0.11.1'
+
+
class TestPing:
    """Smoke tests for the server version/status/command endpoints."""

    def test_server_version(self, connect):
        """server_version() must report the expected milvus version."""
        version = connect.server_version()
        assert version == __version__

    def test_server_status(self, connect):
        """server_status() must return a truthy status."""
        status = connect.server_status()
        assert status

    def test_server_cmd_with_params_version(self, connect):
        """_cmd('version') must echo the server version."""
        reply = connect._cmd("version")
        logging.getLogger().info(reply)
        assert reply == __version__

    def test_server_cmd_with_params_others(self, connect):
        """An arbitrary (non-shell) command string must not crash the server."""
        # NOTE(review): the original made no assertion on the reply — the test
        # only checks the call completes; kept as-is.
        connect._cmd("rm -rf test")

    def test_connected(self, connect):
        # The fixture being usable is the whole assertion.
        assert connect
+
+
class TestPingWithTimeout:
    """Version/status/command endpoints exercised with explicit timeouts."""

    def test_server_version_legal_timeout(self, connect):
        '''
        target: test get the server version with legal timeout
        method: call the server_version method after connected with altering timeout
        expected: version should be the milvus version
        '''
        res = connect.server_version(20)
        assert res == __version__

    def test_server_version_negative_timeout(self, connect):
        '''
        target: test get the server version with negative timeout
        method: call the server_version method after connected with altering timeout
        expected: when timeout is illegal raises an error;
        '''
        with pytest.raises(Exception) as e:
            res = connect.server_version(-1)

    def test_server_cmd_with_params_version_with_legal_timeout(self, connect):
        '''
        target: test cmd: version and timeout
        method: cmd = "version" , timeout=10
        expected: when cmd = 'version', return version of server;
        '''
        cmd = "version"
        msg = connect._cmd(cmd, 10)
        logging.getLogger().info(msg)
        assert msg == __version__

    def test_server_cmd_with_params_version_with_illegal_timeout(self, connect):
        '''
        target: test cmd: version and timeout
        method: cmd = "version" , timeout=-1
        expected: when timeout is illegal raises an error;
        '''
        # NOTE(review): kept calling server_version(-1) as the original did —
        # a negative timeout must raise regardless of the endpoint used.
        with pytest.raises(Exception) as e:
            res = connect.server_version(-1)

    def test_server_cmd_with_params_others_with_illegal_timeout(self, connect):
        '''
        target: test cmd: lalala, timeout = -1
        method: cmd = "lalala", timeout = -1
        expected: when timeout is illegal raises an error;
        '''
        cmd = "rm -rf test"
        # Fix: the original defined `cmd` but never used it (it called
        # server_version(-1) instead); route the command through _cmd so this
        # test actually exercises the documented method with the bad timeout.
        with pytest.raises(Exception) as e:
            res = connect._cmd(cmd, -1)
+
+
class TestPingDisconnect:
    """Version/status endpoints must fail once the client is disconnected."""

    def test_server_version(self, dis_connect):
        '''
        target: test get the server version, after disconnect
        method: call the server_version method after connected
        expected: version should not be the pymilvus version
        '''
        with pytest.raises(Exception) as e:
            res = dis_connect.server_version()

    def test_server_status(self, dis_connect):
        '''
        target: test get the server status, after disconnect
        method: call the server_status method after connected
        expected: status returned should be not ok
        '''
        with pytest.raises(Exception) as e:
            res = dis_connect.server_status()

    @pytest.mark.level(2)
    def test_server_version_with_timeout(self, dis_connect):
        '''
        target: test get the server status with timeout settings after disconnect
        method: call the server_status method after connected
        expected: status returned should be not ok
        '''
        status = None
        # Fix: the original referenced the undefined name `connect` here, so
        # the test "passed" via NameError instead of exercising the
        # disconnected client.
        with pytest.raises(Exception) as e:
            res = dis_connect.server_status(100)
diff --git a/tests/python_test/utils.py b/tests/python_test/utils.py
new file mode 100644
index 000000000..7462e43a4
--- /dev/null
+++ b/tests/python_test/utils.py
@@ -0,0 +1,1001 @@
+import os
+import sys
+import random
+import pdb
+import string
+import struct
+import logging
+import threading
+import time
+import copy
+import numpy as np
+from sklearn import preprocessing
+from milvus import Milvus, DataType
+
# Connection / timing / sizing defaults shared by the whole test suite.
port = 19530
epsilon = 0.000001
namespace = "milvus"

default_flush_interval = 1
big_flush_interval = 1000
default_drop_interval = 3
default_dim = 128
default_nb = 1200
default_top_k = 10
max_top_k = 16384
max_partition_num = 256
default_segment_row_limit = 1000
default_server_segment_row_limit = 1024 * 512
default_float_vec_field_name = "float_vector"
default_binary_vec_field_name = "binary_vector"
default_partition_name = "_default"
default_tag = "1970_01_01"

# TODO:
# TODO: disable RHNSW_SQ/PQ in 0.11.0
# NOTE: all_index_types and default_index_params are PARALLEL lists — entry i
# of default_index_params holds the build params for all_index_types[i]; the
# two lists must stay the same length and order (see gen_simple_index).
all_index_types = [
    "FLAT",
    "IVF_FLAT",
    "IVF_SQ8",
    "IVF_SQ8_HYBRID",
    "IVF_PQ",
    "HNSW",
    # "NSG",
    "ANNOY",
    "RHNSW_PQ",
    "RHNSW_SQ",
    "BIN_FLAT",
    "BIN_IVF_FLAT"
]

default_index_params = [
    {"nlist": 128},
    {"nlist": 128},
    {"nlist": 128},
    {"nlist": 128},
    {"nlist": 128, "m": 16},
    {"M": 48, "efConstruction": 500},
    # {"search_length": 50, "out_degree": 40, "candidate_pool_size": 100, "knng": 50},
    {"n_trees": 50},
    {"M": 48, "efConstruction": 500, "PQM": 64},
    {"M": 48, "efConstruction": 500},
    {"nlist": 128},
    {"nlist": 128}
]
+
+
def index_cpu_not_support():
    """Index types that cannot be built on CPU-only deployments."""
    return ["IVF_SQ8_HYBRID"]
+
+
def binary_support():
    """Index types that accept binary vectors."""
    return ["BIN_FLAT", "BIN_IVF_FLAT"]
+
+
def delete_support():
    """Index types that support entity deletion."""
    return ["FLAT", "IVF_FLAT", "IVF_SQ8", "IVF_SQ8_HYBRID", "IVF_PQ"]
+
+
def ivf():
    """IVF-family index types (FLAT included for search-param purposes)."""
    return ["FLAT", "IVF_FLAT", "IVF_SQ8", "IVF_SQ8_HYBRID", "IVF_PQ"]
+
+
def skip_pq():
    """PQ-based index types skipped by some precision-sensitive tests."""
    return ["IVF_PQ", "RHNSW_PQ", "RHNSW_SQ"]
+
+
def binary_metrics():
    """Metric types valid for binary vectors."""
    return ["JACCARD", "HAMMING", "TANIMOTO", "SUBSTRUCTURE", "SUPERSTRUCTURE"]
+
+
def structure_metrics():
    """Asymmetric structure metrics (subset of binary_metrics)."""
    return ["SUBSTRUCTURE", "SUPERSTRUCTURE"]
+
+
def l2(x, y):
    """Euclidean (L2) distance between two float vectors."""
    diff = np.array(x) - np.array(y)
    return np.linalg.norm(diff)
+
+
def ip(x, y):
    """Inner-product similarity between two float vectors."""
    a = np.array(x)
    b = np.array(y)
    return np.inner(a, b)
+
+
def jaccard(x, y):
    """Jaccard distance between two binary (0/1) vectors.

    Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin `bool` is the documented replacement.
    """
    x = np.asarray(x, bool)
    y = np.asarray(y, bool)
    return 1 - np.double(np.bitwise_and(x, y).sum()) / np.double(np.bitwise_or(x, y).sum())
+
+
def hamming(x, y):
    """Hamming distance (count of differing bits) between two binary vectors.

    Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin `bool` is the documented replacement.
    """
    x = np.asarray(x, bool)
    y = np.asarray(y, bool)
    return np.bitwise_xor(x, y).sum()
+
+
def tanimoto(x, y):
    """Tanimoto distance (-log2 of the Jaccard similarity) of binary vectors.

    Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin `bool` is the documented replacement.
    """
    x = np.asarray(x, bool)
    y = np.asarray(y, bool)
    return -np.log2(np.double(np.bitwise_and(x, y).sum()) / np.double(np.bitwise_or(x, y).sum()))
+
+
def substructure(x, y):
    """Substructure distance: fraction of y's set bits not shared by x.

    Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin `bool` is the documented replacement.
    """
    x = np.asarray(x, bool)
    y = np.asarray(y, bool)
    return 1 - np.double(np.bitwise_and(x, y).sum()) / np.count_nonzero(y)
+
+
def superstructure(x, y):
    """Superstructure distance: fraction of x's set bits not shared by y.

    Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin `bool` is the documented replacement.
    """
    x = np.asarray(x, bool)
    y = np.asarray(y, bool)
    return 1 - np.double(np.bitwise_and(x, y).sum()) / np.count_nonzero(x)
+
+
def get_milvus(host, port, uri=None, handler=None, **kwargs):
    """Create a Milvus client, preferring `uri` over host/port when given."""
    handler = "GRPC" if handler is None else handler
    try_connect = kwargs.get("try_connect", True)
    if uri is None:
        return Milvus(host=host, port=port, handler=handler, try_connect=try_connect)
    return Milvus(uri=uri, handler=handler, try_connect=try_connect)
+
+
def reset_build_index_threshold(connect):
    """Restore the engine's build_index_threshold to its default (1024)."""
    connect.set_config("engine", "build_index_threshold", 1024)
+
+
def disable_flush(connect):
    """Effectively disable auto-flush by setting a very large interval."""
    connect.set_config("storage", "auto_flush_interval", big_flush_interval)
+
+
def enable_flush(connect):
    """Re-enable auto-flush (interval = 1) and verify the setting stuck."""
    connect.set_config("storage", "auto_flush_interval", default_flush_interval)
    applied = connect.get_config("storage", "auto_flush_interval")
    assert applied == str(default_flush_interval)
+
+
def gen_inaccuracy(num):
    """Scale `num` down by 255 to produce a small perturbation value."""
    return num / 255.0
+
+
def gen_vectors(num, dim, is_normal=True):
    # Generate `num` random float vectors of dimension `dim`, L2-normalized.
    # NOTE(review): `is_normal` is accepted but never used — output is always
    # normalized; confirm whether un-normalized output was ever intended.
    vectors = [[random.random() for _ in range(dim)] for _ in range(num)]
    vectors = preprocessing.normalize(vectors, axis=1, norm='l2')
    return vectors.tolist()
+
+
+# def gen_vectors(num, dim, seed=np.random.RandomState(1234), is_normal=False):
+#     xb = seed.rand(num, dim).astype("float32")
+#     xb = preprocessing.normalize(xb, axis=1, norm='l2')
+#     return xb.tolist()
+
+
def gen_binary_vectors(num, dim):
    """Return (raw 0/1 lists, packed-bytes vectors) for `num` binary vectors."""
    raw_vectors = []
    binary_vectors = []
    for _ in range(num):
        bits = [random.randint(0, 1) for _ in range(dim)]
        raw_vectors.append(bits)
        binary_vectors.append(bytes(np.packbits(bits, axis=-1).tolist()))
    return raw_vectors, binary_vectors
+
+
def gen_binary_sub_vectors(vectors, length):
    """Copy the 1-bits of the first `length` input vectors into fresh vectors
    (guaranteed substructures); return (raw 0/1 lists, packed bytes)."""
    raw_vectors = []
    binary_vectors = []
    dim = len(vectors[0])
    for idx in range(length):
        bits = [0] * dim
        for pos, bit in enumerate(vectors[idx]):
            if bit == 1:
                bits[pos] = 1
        raw_vectors.append(bits)
        binary_vectors.append(bytes(np.packbits(bits, axis=-1).tolist()))
    return raw_vectors, binary_vectors
+
+
def gen_binary_super_vectors(vectors, length):
    """Return `length` all-ones vectors as (raw 0/1 lists, packed bytes).

    An all-ones vector is a superstructure of every binary vector of the same
    dimensionality, so the inputs are only used for their dim.
    Fix: removed the unused local `cnt_1` (a leftover np.count_nonzero call)
    and the stale "???" comment.
    """
    raw_vectors = []
    binary_vectors = []
    dim = len(vectors[0])
    for _ in range(length):
        ones = [1] * dim
        raw_vectors.append(ones)
        binary_vectors.append(bytes(np.packbits(ones, axis=-1).tolist()))
    return raw_vectors, binary_vectors
+
+
def gen_int_attr(row_num):
    """Random integer attribute column with values in [0, 255]."""
    return [random.randint(0, 255) for _ in range(row_num)]
+
+
def gen_float_attr(row_num):
    """Random float attribute column with values in [0, 255]."""
    return [random.uniform(0, 255) for _ in range(row_num)]
+
+
def gen_unique_str(str_value=None):
    """Return `str_value` (default 'test') plus an 8-char random suffix."""
    suffix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
    base = "test" if str_value is None else str_value
    return base + "_" + suffix
+
+
def gen_single_filter_fields():
    """One scalar field spec per supported numeric DataType."""
    scalar_types = (DataType.INT32, DataType.INT64, DataType.FLOAT, DataType.DOUBLE)
    return [{"name": dt.name, "type": dt} for dt in DataType if dt in scalar_types]
+
+
def gen_single_vector_fields():
    """One vector field spec (default dim) per vector DataType."""
    return [
        {"name": dt.name, "type": dt, "params": {"dim": default_dim}}
        for dt in (DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR)
    ]
+
+
def gen_default_fields(auto_id=True):
    """Default collection schema: int64 + float scalars and a float vector."""
    return {
        "fields": [
            {"name": "int64", "type": DataType.INT64},
            {"name": "float", "type": DataType.FLOAT},
            {"name": default_float_vec_field_name, "type": DataType.FLOAT_VECTOR, "params": {"dim": default_dim}},
        ],
        "segment_row_limit": default_segment_row_limit,
        "auto_id": auto_id,
    }
+
+
def gen_binary_default_fields(auto_id=True):
    """Default collection schema with a binary vector field."""
    return {
        "fields": [
            {"name": "int64", "type": DataType.INT64},
            {"name": "float", "type": DataType.FLOAT},
            {"name": default_binary_vec_field_name, "type": DataType.BINARY_VECTOR, "params": {"dim": default_dim}},
        ],
        "segment_row_limit": default_segment_row_limit,
        "auto_id": auto_id,
    }
+
+
def gen_entities(nb, is_normal=False):
    """Column-oriented entities (typed field specs) for the default schema."""
    return [
        {"name": "int64", "type": DataType.INT64, "values": list(range(nb))},
        {"name": "float", "type": DataType.FLOAT, "values": [float(i) for i in range(nb)]},
        {"name": default_float_vec_field_name, "type": DataType.FLOAT_VECTOR,
         "values": gen_vectors(nb, default_dim, is_normal)},
    ]
+
+
def gen_entities_new(nb, is_normal=False):
    """Column-oriented entities (untyped, new-style) for the default schema."""
    return [
        {"name": "int64", "values": list(range(nb))},
        {"name": "float", "values": [float(i) for i in range(nb)]},
        {"name": default_float_vec_field_name, "values": gen_vectors(nb, default_dim, is_normal)},
    ]
+
+
def gen_entities_rows(nb, is_normal=False, _id=True):
    # Row-oriented entities for the default schema; one dict per row.
    # NOTE(review): when `_id` is False every row carries an explicit "_id"
    # key, and when True it does not — confirm this polarity against callers
    # (the parameter name suggests the opposite reading).
    vectors = gen_vectors(nb, default_dim, is_normal)
    entities = []
    if not _id:
        for i in range(nb):
            entity = {
                "_id": i,
                "int64": i,
                "float": float(i),
                default_float_vec_field_name: vectors[i]
            }
            entities.append(entity)
    else:
        for i in range(nb):
            entity = {
                "int64": i,
                "float": float(i),
                default_float_vec_field_name: vectors[i]
            }
            entities.append(entity)
    return entities
+
+
def gen_binary_entities(nb):
    """Column-oriented binary entities; returns (raw bit lists, field specs)."""
    raw_vectors, packed = gen_binary_vectors(nb, default_dim)
    entities = [
        {"name": "int64", "type": DataType.INT64, "values": list(range(nb))},
        {"name": "float", "type": DataType.FLOAT, "values": [float(i) for i in range(nb)]},
        {"name": default_binary_vec_field_name, "type": DataType.BINARY_VECTOR, "values": packed},
    ]
    return raw_vectors, entities
+
+
def gen_binary_entities_new(nb):
    """Column-oriented binary entities (untyped, new-style)."""
    raw_vectors, packed = gen_binary_vectors(nb, default_dim)
    entities = [
        {"name": "int64", "values": list(range(nb))},
        {"name": "float", "values": [float(i) for i in range(nb)]},
        {"name": default_binary_vec_field_name, "values": packed},
    ]
    return raw_vectors, entities
+
+
def gen_binary_entities_rows(nb, _id=True):
    # Row-oriented binary entities; one dict per row.
    # NOTE(review): same `_id` polarity question as gen_entities_rows —
    # `_id=False` adds an explicit "_id" key; confirm against callers.
    raw_vectors, vectors = gen_binary_vectors(nb, default_dim)
    entities = []
    if not _id:
        for i in range(nb):
            entity = {
                "_id": i,
                "int64": i,
                "float": float(i),
                default_binary_vec_field_name: vectors[i]
            }
            entities.append(entity)
    else:
        for i in range(nb):
            entity = {
                "int64": i,
                "float": float(i),
                default_binary_vec_field_name: vectors[i]
            }
            entities.append(entity)
    return raw_vectors, entities
+
+
def gen_entities_by_fields(fields, nb, dim):
    # Attach `nb` generated values to each field spec, keyed on its type.
    # NOTE(review): mutates the input `fields` dicts IN PLACE (field.update)
    # and returns the same objects — callers that reuse `fields` will see the
    # added "values" key; confirm this aliasing is intended.
    entities = []
    for field in fields:
        if field["type"] in [DataType.INT32, DataType.INT64]:
            field_value = [1 for i in range(nb)]
        elif field["type"] in [DataType.FLOAT, DataType.DOUBLE]:
            field_value = [3.0 for i in range(nb)]
        elif field["type"] == DataType.BINARY_VECTOR:
            # gen_binary_vectors returns (raw, packed); only the packed bytes
            # are used here.
            field_value = gen_binary_vectors(nb, dim)[1]
        elif field["type"] == DataType.FLOAT_VECTOR:
            field_value = gen_vectors(nb, dim)
        field.update({"values": field_value})
        entities.append(field)
    return entities
+
+
def assert_equal_entity(a, b):
    # TODO: placeholder — entity comparison is not implemented yet; currently
    # accepts any pair without checking.
    pass
+
+
def gen_query_vectors(field_name, entities, top_k, nq, search_params=None, rand_vector=False,
                      metric_type="L2", replace_vecs=None):
    """Build a default bool/must vector query; returns (query, query_vectors).

    Fix: the original used a mutable default (`search_params={"nprobe": 10}`),
    so every caller shared one dict and mutations leaked between calls; the
    default is now created fresh per call.
    """
    if search_params is None:
        search_params = {"nprobe": 10}
    if rand_vector is True:
        # Random queries sized to the schema's vector field (last entity).
        dimension = len(entities[-1]["values"][0])
        query_vectors = gen_vectors(nq, dimension)
    else:
        query_vectors = entities[-1]["values"][:nq]
    if replace_vecs:
        query_vectors = replace_vecs
    must_param = {"vector": {field_name: {"topk": top_k, "query": query_vectors, "params": search_params}}}
    must_param["vector"][field_name]["metric_type"] = metric_type
    query = {
        "bool": {
            "must": [must_param]
        }
    }
    return query, query_vectors
+
+
def update_query_expr(src_query, keep_old=True, expr=None):
    """Deep-copy `src_query`; optionally merge `expr` into its "bool" clause
    and optionally drop the original "must" clause."""
    new_query = copy.deepcopy(src_query)
    if expr is not None:
        new_query["bool"].update(expr)
    if keep_old is not True:
        new_query["bool"].pop("must")
    return new_query
+
+
def gen_default_vector_expr(default_query):
    """Extract the vector clause (first "must" entry) from a default query."""
    return default_query["bool"]["must"][0]
+
+
def gen_default_term_expr(keyword="term", field="int64", values=None):
    """Term expression; defaults to the first half of the int64 id range."""
    if values is None:
        values = list(range(default_nb // 2))
    return {keyword: {field: {"values": values}}}
+
+
def update_term_expr(src_term, terms):
    """Deep-copy `src_term` and merge each dict in `terms` into its "term"
    clause."""
    merged = copy.deepcopy(src_term)
    for extra in terms:
        merged["term"].update(extra)
    return merged
+
+
def gen_default_range_expr(keyword="range", field="int64", ranges=None):
    """Range expression; defaults to the open interval (1, default_nb/2)."""
    if ranges is None:
        ranges = {"GT": 1, "LT": default_nb // 2}
    return {keyword: {field: ranges}}
+
+
def update_range_expr(src_range, ranges):
    """Deep-copy `src_range` and merge each dict in `ranges` into its "range"
    clause.

    Fix: the original loop variable was named `range`, shadowing the builtin.
    """
    merged = copy.deepcopy(src_range)
    for extra in ranges:
        merged["range"].update(extra)
    return merged
+
+
def gen_invalid_range():
    """Malformed range expressions for negative tests.

    Fix: the original bound the result to a local named `range`, shadowing
    the builtin.
    """
    invalid_ranges = [
        {"range": 1},
        {"range": {}},
        {"range": []},
        {"range": {"range": {"int64": {"GT": 0, "LT": default_nb // 2}}}}
    ]
    return invalid_ranges
+
+
def gen_valid_ranges():
    """Well-formed GT/LT range bodies covering open and half-open bounds."""
    return [
        {"GT": 0, "LT": default_nb // 2},
        {"GT": default_nb // 2, "LT": default_nb * 2},
        {"GT": 0},
        {"LT": default_nb},
        {"GT": -1, "LT": default_top_k},
    ]
+
+
def gen_invalid_term():
    """Malformed term expressions for negative tests."""
    return [
        {"term": 1},
        {"term": []},
        {"term": {}},
        {"term": {"term": {"int64": {"values": list(range(default_nb // 2))}}}},
    ]
+
+
def add_field_default(default_fields, type=DataType.INT64, field_name=None):
    """Return a copy of `default_fields` with one extra scalar field appended.

    NOTE: the `type` parameter shadows the builtin; the name is kept for
    caller compatibility.
    """
    new_fields = copy.deepcopy(default_fields)
    name = gen_unique_str() if field_name is None else field_name
    new_fields["fields"].append({"name": name, "type": type})
    return new_fields
+
+
def add_field(entities, field_name=None):
    """Return a copy of `entities` with an extra int64 column appended."""
    row_count = len(entities[0]["values"])
    new_entities = copy.deepcopy(entities)
    name = gen_unique_str() if field_name is None else field_name
    new_entities.append({
        "name": name,
        "type": DataType.INT64,
        "values": list(range(row_count)),
    })
    return new_entities
+
+
def add_vector_field(entities, is_normal=False):
    # Append a randomly-named float-vector column to `entities` IN PLACE and
    # return the same list.
    # NOTE(review): this definition is shadowed by a later `add_vector_field`
    # (different signature) further down this module, so it is effectively
    # dead at import time — confirm which of the two callers actually need.
    nb = len(entities[0]["values"])
    vectors = gen_vectors(nb, default_dim, is_normal)
    field = {
        "name": gen_unique_str(),
        "type": DataType.FLOAT_VECTOR,
        "values": vectors
    }
    entities.append(field)
    return entities
+
+
+# def update_fields_metric_type(fields, metric_type):
+#     tmp_fields = copy.deepcopy(fields)
+#     if metric_type in ["L2", "IP"]:
+#         tmp_fields["fields"][-1]["type"] = DataType.FLOAT_VECTOR
+#     else:
+#         tmp_fields["fields"][-1]["type"] = DataType.BINARY_VECTOR
+#     tmp_fields["fields"][-1]["params"]["metric_type"] = metric_type
+#     return tmp_fields
+
+
def remove_field(entities):
    """Drop the first (scalar) field from `entities` in place."""
    entities.pop(0)
    return entities
+
+
def remove_vector_field(entities):
    """Drop the last (vector) field from `entities` in place."""
    entities.pop()
    return entities
+
+
def update_field_name(entities, old_name, new_name):
    """Deep-copy `entities`, renaming every field called `old_name`."""
    renamed = copy.deepcopy(entities)
    for field in renamed:
        if field["name"] == old_name:
            field["name"] = new_name
    return renamed
+
+
def update_field_type(entities, old_name, new_name):
    """Deep-copy `entities`, replacing the "type" of fields named `old_name`."""
    retyped = copy.deepcopy(entities)
    for field in retyped:
        if field["name"] == old_name:
            field["type"] = new_name
    return retyped
+
+
def update_field_value(entities, old_type, new_value):
    """Deep-copy `entities`, overwriting every value of fields whose "type"
    equals `old_type` with `new_value`."""
    rewritten = copy.deepcopy(entities)
    for field in rewritten:
        if field["type"] == old_type:
            field["values"] = [new_value] * len(field["values"])
    return rewritten
+
+
def update_field_name_row(entities, old_name, new_name):
    """Deep-copy row entities, renaming key `old_name` to `new_name` in every
    row; raises if any row lacks the key."""
    renamed = copy.deepcopy(entities)
    for row in renamed:
        if old_name not in row:
            raise Exception("Field %s not in field" % old_name)
        row[new_name] = row[old_name]
        row.pop(old_name)
    return renamed
+
+
def update_field_type_row(entities, old_name, new_name):
    # Deep-copy row entities; for every row containing key `old_name`, set a
    # "type" entry to `new_name`.
    # NOTE(review): row-style entities normally carry no "type" key, so this
    # ADDS one rather than changing the named field — confirm intent.
    tmp_entities = copy.deepcopy(entities)
    for item in tmp_entities:
        if old_name in item:
            item["type"] = new_name
    return tmp_entities
+
+
def add_vector_field(nb, dimension=default_dim):
    # Generate a random float-vector field spec but return ONLY its name.
    # NOTE(review): the generated `field` dict (including its values) is
    # discarded, and this definition shadows the earlier
    # add_vector_field(entities, is_normal) — confirm both are intentional.
    field_name = gen_unique_str()
    field = {
        "name": field_name,
        "type": DataType.FLOAT_VECTOR,
        "values": gen_vectors(nb, dimension)
    }
    return field_name
+
+
def gen_segment_row_limits():
    """Segment row limits exercised by the tests."""
    return [1024, 4096]
+
+
def gen_invalid_ips():
    """Malformed host/IP strings for negative connection tests."""
    return [
        "127.0.0",      # truncated dotted quad
        "12-s",
        " ",
        "12 s",
        "BB。A",         # full-width punctuation
        " siede ",
        "(mn)",
        "中文",
        "a".join("a" for _ in range(256)),  # overlong host name
    ]
+
+
def gen_invalid_uris():
    """Malformed URI strings for negative connection tests."""
    return [
        " ",
        "中文",
        # invalid ip component
        "tcp:// :19530",
        "tcp://127.0.0:19530",
        "tcp://\n:19530",
    ]
+
+
def gen_invalid_strs():
    """Values that are not valid identifier strings."""
    return [
        1,
        [1],
        None,
        "12-s",
        "12 s",
        "(mn)",
        "中文",
        "a".join("a" for i in range(256)),  # overlong identifier
    ]
+
+
def gen_invalid_field_types():
    """Values that are not valid field types."""
    return [
        "=c",
        None,
        "",
        "a".join("a" for i in range(256)),
    ]
+
+
def gen_invalid_metric_types():
    """Values that are not valid metric types."""
    return [
        1,
        "=c",
        0,
        None,
        "",
        "a".join("a" for i in range(256)),
    ]
+
+
+# TODO:
def gen_invalid_ints():
    """Values that are not valid positive integers."""
    return [
        None,
        [1, 2, 3],
        " ",
        "",
        -1,
        "String",
        "=c",
        "中文",
        "a".join("a" for i in range(256)),
    ]
+
+
def gen_invalid_params():
    """Values that are not valid index/search parameters."""
    return [
        9999999999,
        -1,
        [1, 2, 3],
        " ",
        "",
        "String",
        "中文",
    ]
+
+
def gen_invalid_vectors():
    """Values that are not valid query vectors."""
    return [
        "1*2",
        [],
        [1],
        [1, 2],
        [" "],
        ['a'],
        [None],
        None,
        (1, 2),
        {"a": 1},
        " ",
        "",
        "String",
        " siede ",
        "中文",
        "a".join("a" for i in range(256)),
    ]
+
+
def gen_invaild_search_params():
    # Build invalid search-parameter combinations for each index type.
    # NOTE(review): the function name misspells "invalid"; kept because
    # callers reference it by this name.
    invalid_search_key = 100
    search_params = []
    for index_type in all_index_types:
        if index_type == "FLAT":
            continue
        # Every non-FLAT type gets an unknown-key case.
        search_params.append({"index_type": index_type, "search_params": {"invalid_key": invalid_search_key}})
        # IVF family (delete_support doubles as the IVF list): bad nprobe.
        if index_type in delete_support():
            for nprobe in gen_invalid_params():
                ivf_search_params = {"index_type": index_type, "search_params": {"nprobe": nprobe}}
                search_params.append(ivf_search_params)
        # HNSW family: bad ef.
        elif index_type in ["HNSW", "RHNSW_PQ", "RHNSW_SQ"]:
            for ef in gen_invalid_params():
                hnsw_search_param = {"index_type": index_type, "search_params": {"ef": ef}}
                search_params.append(hnsw_search_param)
        # NSG: bad search_length (NSG is currently commented out of
        # all_index_types, so this branch is dormant).
        elif index_type == "NSG":
            for search_length in gen_invalid_params():
                nsg_search_param = {"index_type": index_type, "search_params": {"search_length": search_length}}
                search_params.append(nsg_search_param)
            search_params.append({"index_type": index_type, "search_params": {"invalid_key": 100}})
        # ANNOY: bad search_k — ints are skipped since any int is accepted.
        elif index_type == "ANNOY":
            for search_k in gen_invalid_params():
                if isinstance(search_k, int):
                    continue
                annoy_search_param = {"index_type": index_type, "search_params": {"search_k": search_k}}
                search_params.append(annoy_search_param)
    return search_params
+
+
def gen_invalid_index():
    """Build invalid index definitions for negative tests.

    Fix: the original M/efConstruction loops built HNSW, RHNSW_PQ and
    RHNSW_SQ variants but rebound the same local each time, so only the
    last (RHNSW_SQ) dict was ever appended; every variant is now kept.
    """
    index_params = []
    for index_type in gen_invalid_strs():
        index_params.append({"index_type": index_type, "params": {"nlist": 1024}})
    for nlist in gen_invalid_params():
        index_params.append({"index_type": "IVF_FLAT", "params": {"nlist": nlist}})
    for M in gen_invalid_params():
        for index_type in ("HNSW", "RHNSW_PQ", "RHNSW_SQ"):
            index_params.append({"index_type": index_type, "params": {"M": M, "efConstruction": 100}})
    for efConstruction in gen_invalid_params():
        for index_type in ("HNSW", "RHNSW_PQ", "RHNSW_SQ"):
            index_params.append({"index_type": index_type, "params": {"M": 16, "efConstruction": efConstruction}})
    for search_length in gen_invalid_params():
        index_params.append({"index_type": "NSG",
                             "params": {"search_length": search_length, "out_degree": 40,
                                        "candidate_pool_size": 50, "knng": 100}})
    for out_degree in gen_invalid_params():
        index_params.append({"index_type": "NSG",
                             "params": {"search_length": 100, "out_degree": out_degree,
                                        "candidate_pool_size": 50, "knng": 100}})
    for candidate_pool_size in gen_invalid_params():
        index_params.append({"index_type": "NSG",
                             "params": {"search_length": 100, "out_degree": 40,
                                        "candidate_pool_size": candidate_pool_size, "knng": 100}})
    # Unknown-key cases for each family.
    index_params.append({"index_type": "IVF_FLAT", "params": {"invalid_key": 1024}})
    index_params.append({"index_type": "HNSW", "params": {"invalid_key": 16, "efConstruction": 100}})
    index_params.append({"index_type": "RHNSW_PQ", "params": {"invalid_key": 16, "efConstruction": 100}})
    index_params.append({"index_type": "RHNSW_SQ", "params": {"invalid_key": 16, "efConstruction": 100}})
    index_params.append({"index_type": "NSG",
                         "params": {"invalid_key": 100, "out_degree": 40, "candidate_pool_size": 300,
                                    "knng": 100}})
    for invalid_n_trees in gen_invalid_params():
        index_params.append({"index_type": "ANNOY", "params": {"n_trees": invalid_n_trees}})

    return index_params
+
+
def gen_index():
    """Enumerate build-parameter combinations for every supported index type."""
    nlists = [1, 1024, 16384]
    pq_ms = [128, 64, 32, 16, 8, 4]
    Ms = [5, 24, 48]
    efConstructions = [100, 300, 500]
    search_lengths = [10, 100, 300]
    out_degrees = [5, 40, 300]
    candidate_pool_sizes = [50, 100, 300]
    knngs = [5, 100, 300]

    index_params = []
    for index_type in all_index_types:
        if index_type in ["FLAT", "BIN_FLAT", "BIN_IVF_FLAT"]:
            index_params.append({"index_type": index_type, "index_param": {"nlist": 1024}})
        elif index_type in ["IVF_FLAT", "IVF_SQ8", "IVF_SQ8_HYBRID"]:
            for nlist in nlists:
                index_params.append({"index_type": index_type, "index_param": {"nlist": nlist}})
        elif index_type == "IVF_PQ":
            for nlist in nlists:
                for m in pq_ms:
                    index_params.append({"index_type": index_type, "index_param": {"nlist": nlist, "m": m}})
        elif index_type in ["HNSW", "RHNSW_SQ", "RHNSW_PQ"]:
            for M in Ms:
                for efConstruction in efConstructions:
                    index_params.append(
                        {"index_type": index_type, "index_param": {"M": M, "efConstruction": efConstruction}})
        elif index_type == "NSG":
            for search_length in search_lengths:
                for out_degree in out_degrees:
                    for candidate_pool_size in candidate_pool_sizes:
                        for knng in knngs:
                            index_params.append(
                                {"index_type": index_type,
                                 "index_param": {"search_length": search_length, "out_degree": out_degree,
                                                 "candidate_pool_size": candidate_pool_size, "knng": knng}})

    return index_params
+
+
def gen_simple_index():
    """One default (non-binary) index spec per supported index type; relies
    on all_index_types and default_index_params being parallel lists."""
    index_params = []
    for index_type, params in zip(all_index_types, default_index_params):
        if index_type in binary_support():
            continue
        index_params.append({"index_type": index_type, "metric_type": "L2", "params": params})
    return index_params
+
+
def gen_binary_index():
    """One default index spec per binary-capable index type."""
    index_params = []
    for index_type, params in zip(all_index_types, default_index_params):
        if index_type in binary_support():
            index_params.append({"index_type": index_type, "params": params})
    return index_params
+
+
def get_search_param(index_type, metric_type="L2"):
    """Default search parameters for `index_type`; raises on unknown types."""
    search_params = {"metric_type": metric_type}
    if index_type in ivf() or index_type in binary_support():
        extra = {"nprobe": 64}
    elif index_type in ["HNSW", "RHNSW_SQ", "RHNSW_PQ"]:
        extra = {"ef": 64}
    elif index_type == "NSG":
        extra = {"search_length": 100}
    elif index_type == "ANNOY":
        extra = {"search_k": 1000}
    else:
        logging.getLogger().error("Invalid index_type.")
        raise Exception("Invalid index_type.")
    search_params.update(extra)
    return search_params
+
+
def assert_equal_vector(v1, v2):
    """Assert `v1` and `v2` have equal length and element-wise differences
    below `epsilon`.

    Fix: replaced the original `if len(v1) != len(v2): assert False` with a
    direct assert (same AssertionError, no dead-looking branch) and indexed
    iteration with zip.
    """
    assert len(v1) == len(v2)
    for a, b in zip(v1, v2):
        assert abs(a - b) < epsilon
+
+
def restart_server(helm_release_name):
    """Restart the Milvus server pod belonging to a Helm release.

    Finds the first pod in the module-level ``namespace`` whose name contains
    ``helm_release_name`` (excluding mysql pods), deletes it, then polls until
    a replacement pod (same release, different name) reports phase "Running"
    or ``timeout`` seconds elapse.

    NOTE(review): logging.error is used throughout for progress messages that
    are not errors — presumably to force visibility; confirm before changing.

    :param helm_release_name: Helm release name used to match pod names.
    :return: True on successful restart, False on delete failure or timeout.
    :raises Exception: if no matching pod is found.
    """
    res = True
    timeout = 120  # seconds to wait for the replacement pod to become Running
    # Imported lazily so modules that never restart pods don't need kubernetes.
    from kubernetes import client, config
    client.rest.logger.setLevel(logging.WARNING)

    # service_name = "%s.%s.svc.cluster.local" % (helm_release_name, namespace)
    config.load_kube_config()
    v1 = client.CoreV1Api()
    pod_name = None
    # config_map_names = v1.list_namespaced_config_map(namespace, pretty='true')
    # body = {"replicas": 0}
    # `namespace` is a module-level global defined elsewhere in this file.
    pods = v1.list_namespaced_pod(namespace)
    # Pick the first pod of this release, skipping the mysql companion pod.
    for i in pods.items:
        if i.metadata.name.find(helm_release_name) != -1 and i.metadata.name.find("mysql") == -1:
            pod_name = i.metadata.name
            break
            # v1.patch_namespaced_config_map(config_map_name, namespace, body, pretty='true')
    # status_res = v1.read_namespaced_service_status(helm_release_name, namespace, pretty='true')
    logging.getLogger().debug("Pod name: %s" % pod_name)
    if pod_name is not None:
        try:
            v1.delete_namespaced_pod(pod_name, namespace)
        except Exception as e:
            logging.error(str(e))
            logging.error("Exception when calling CoreV1Api->delete_namespaced_pod")
            res = False
            return res
        logging.error("Sleep 10s after pod deleted")
        time.sleep(10)
        # check if restart successfully
        pods = v1.list_namespaced_pod(namespace)
        for i in pods.items:
            pod_name_tmp = i.metadata.name
            logging.error(pod_name_tmp)
            if pod_name_tmp == pod_name:
                # The deleted pod may still be listed while terminating.
                continue
            elif pod_name_tmp.find(helm_release_name) == -1 or pod_name_tmp.find("mysql") != -1:
                # Not part of this release, or the mysql companion pod.
                continue
            else:
                status_res = v1.read_namespaced_pod_status(pod_name_tmp, namespace, pretty='true')
                logging.error(status_res.status.phase)
                start_time = time.time()
                ready_break = False
                # Poll the replacement pod until it is Running or we time out.
                while time.time() - start_time <= timeout:
                    logging.error(time.time())
                    status_res = v1.read_namespaced_pod_status(pod_name_tmp, namespace, pretty='true')
                    if status_res.status.phase == "Running":
                        logging.error("Already running")
                        ready_break = True
                        # Extra settle time after the pod reports Running.
                        time.sleep(10)
                        break
                    else:
                        time.sleep(1)
                if time.time() - start_time > timeout:
                    logging.error("Restart pod: %s timeout" % pod_name_tmp)
                    res = False
                    return res
                if ready_break:
                    break
    else:
        raise Exception("Pod: %s not found" % pod_name)
    # The variables below feed the commented-out log-read block kept for
    # debugging; they are currently unused.
    follow = True
    pretty = True
    previous = True  # bool | Return previous terminated container logs. Defaults to false. (optional)
    since_seconds = 56  # int | A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified. (optional)
    timestamps = True  # bool | If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false. (optional)
    container = "milvus"
    # start_time = time.time()
    # while time.time() - start_time <= timeout:
    #     try:
    #         api_response = v1.read_namespaced_pod_log(pod_name_tmp, namespace, container=container, follow=follow,
    #                                                 pretty=pretty, previous=previous, since_seconds=since_seconds,
    #                                                 timestamps=timestamps)
    #         logging.error(api_response)
    #         return res
    #     except Exception as e:
    #         logging.error("Exception when calling CoreV1Api->read_namespaced_pod_log: %s\n" % e)
    #         # waiting for server start
    #         time.sleep(5)
    #         # res = False
    #         # return res
    # if time.time() - start_time > timeout:
    #     logging.error("Restart pod: %s timeout" % pod_name_tmp)
    #     res = False
    return res
+
+
def compare_list_elements(_first, _second):
    """Compare two lists as unordered multisets.

    Bug fix: the previous implementation only checked membership of each
    element of ``_first`` in ``_second``, so equal-length lists with
    different duplicate counts (e.g. [1, 1, 2] vs [1, 2, 2]) compared equal.
    Elements are matched by equality (not hashing), so unhashable elements
    such as vectors (lists) are supported.

    :param _first: first list to compare.
    :param _second: second list to compare.
    :return: True if both are lists containing the same elements with the
             same multiplicities, in any order; False otherwise.
    """
    if not isinstance(_first, list) or not isinstance(_second, list) or len(_first) != len(_second):
        return False

    # Remove one matching occurrence per element; leftover mismatch -> False.
    remaining = list(_second)
    for ele in _first:
        try:
            remaining.remove(ele)
        except ValueError:
            return False

    return True
+
+
class TestThread(threading.Thread):
    """Thread that captures any exception raised by its target and re-raises
    it in the thread calling join(), so failures inside worker threads
    propagate to the test instead of being swallowed."""

    def __init__(self, target, args=()):
        threading.Thread.__init__(self, target=target, args=args)
        # Initialized here so join() is safe even if run() never executed.
        self.exc = None

    def run(self):
        self.exc = None
        try:
            super(TestThread, self).run()
        except BaseException as e:
            # Capture for re-raise in the joining thread.
            self.exc = e

    def join(self, timeout=None):
        # Bug fix: accept the standard Thread.join(timeout=...) argument
        # instead of silently breaking the Thread interface.
        super(TestThread, self).join(timeout)
        if self.exc:
            raise self.exc
-- 
GitLab