diff --git a/.jenkins/modules/Regression/PythonRegression.groovy b/.jenkins/modules/Regression/PythonRegression.groovy
index 5b3f4589e0860afc134a3e15d50e01aa0e83cbaa..d3fa37147c0c52521cbd4226a1e51e84c7426226 100644
--- a/.jenkins/modules/Regression/PythonRegression.groovy
+++ b/.jenkins/modules/Regression/PythonRegression.groovy
@@ -54,7 +54,7 @@ timeout(time: 150, unit: 'MINUTES') {
                     echo "This is Cron Job!"
                     sh "pytest --tags=0331 --ip ${env.HELM_RELEASE_NAME}-milvus-ha.${env.HELM_RELEASE_NAMESPACE}.svc.cluster.local"
                 } else {
-                    sh "pytest --tags=0331+l1 --ip ${env.HELM_RELEASE_NAME}-milvus-ha.${env.HELM_RELEASE_NAMESPACE}.svc.cluster.local"
+                    sh "pytest --tags=0331+l1 -n 2 --ip ${env.HELM_RELEASE_NAME}-milvus-ha.${env.HELM_RELEASE_NAMESPACE}.svc.cluster.local"
                 }
             }
         } catch (exc) {
diff --git a/cmd/singlenode/main.go b/cmd/singlenode/main.go
index a6f2e76c9ea63118a84e0dbb5c9ba0264ac7bfb5..95e9e4cade591dcae12ba45d0ea3f292079fe653 100644
--- a/cmd/singlenode/main.go
+++ b/cmd/singlenode/main.go
@@ -29,7 +29,12 @@ func initLogCfg() log.Config {
 	logCfg.File.MaxSize = 300
 	logCfg.File.MaxBackups = 20
 	logCfg.File.MaxDays = 10
-	logCfg.File.Filename = "/tmp/milvus/singlenode.log"
+	ciFileDir := "/milvus-distributed/logs/"
+	if _, err := os.Stat(ciFileDir); err == nil {
+		logCfg.File.Filename = ciFileDir + "singlenode.log"
+	} else {
+		logCfg.File.Filename = "/tmp/milvus/singlenode.log"
+	}
 	return logCfg
 }
 
diff --git a/go.mod b/go.mod
index 765b12e9a53d1e04e0fb8291484f2c79a7e0f050..c132aee3e891bf8f82620cee9462e17f9320c096 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.15
 
 require (
 	github.com/HdrHistogram/hdrhistogram-go v1.0.1 // indirect
-	github.com/apache/pulsar-client-go v0.1.1
+	github.com/apache/pulsar-client-go v0.3.0
 	github.com/apache/thrift/lib/go/thrift v0.0.0-20210120171102-e27e82c46ba4
 	github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect
 	github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
@@ -14,7 +14,7 @@ require (
 	github.com/gogo/protobuf v1.3.1
 	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
 	github.com/golang/mock v1.3.1
-	github.com/golang/protobuf v1.3.2
+	github.com/golang/protobuf v1.4.2
 	github.com/google/btree v1.0.0
 	github.com/jarcoal/httpmock v1.0.8
 	github.com/klauspost/compress v1.10.11 // indirect
diff --git a/go.sum b/go.sum
index 102b8309b2236071ba6da583567881c568db1168..bfced19b1d018801dbd09b54b972ef48fec01985 100644
--- a/go.sum
+++ b/go.sum
@@ -10,6 +10,8 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/99designs/keyring v1.1.5 h1:wLv7QyzYpFIyMSwOADq1CLTF9KbjbBfcnfmOGJ64aO4=
+github.com/99designs/keyring v1.1.5/go.mod h1:7hsVvt2qXgtadGevGJ4ujg+u8m6SpJ5TpHqTozIPqf0=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -17,9 +19,15 @@ github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bE
 github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/apache/pulsar-client-go v0.1.1 h1:v/kU+2ZCC6yFIcbZrFtWa9/nvVzVr18L+xYJUvZSxEQ=
 github.com/apache/pulsar-client-go v0.1.1/go.mod h1:mlxC65KL1BLhGO2bnT9zWMttVzR2czVPb27D477YpyU=
+github.com/apache/pulsar-client-go v0.3.0 h1:rNhJ/ENwoEfZPHHwUHNxPBTNqNQE2LQEm7DXu043giM=
+github.com/apache/pulsar-client-go v0.3.0/go.mod h1:9eSgOadVhCfb2DfWtS1SCYaYIMk9VDOZztr4u3FO8cQ=
+github.com/apache/pulsar-client-go/oauth2 v0.0.0-20200715083626-b9f8c5cedefb h1:E1P0FudxDdj2RhbveZC9i3PwukLCA/4XQSkBS/dw6/I=
+github.com/apache/pulsar-client-go/oauth2 v0.0.0-20200715083626-b9f8c5cedefb/go.mod h1:0UtvvETGDdvXNDCHa8ZQpxl+w3HbdFtfYZvDHLgWGTY=
 github.com/apache/thrift v0.14.1 h1:Yh8v0hpCj63p5edXOLaqTJW0IJ1p+eMW6+YSOqw1d6s=
 github.com/apache/thrift/lib/go/thrift v0.0.0-20210120171102-e27e82c46ba4 h1:orNYqmQGnSjgOauLWjHEp9/qIDT98xv/0Aa4Zet3/Y8=
 github.com/apache/thrift/lib/go/thrift v0.0.0-20210120171102-e27e82c46ba4/go.mod h1:V/LzksIyqd3KZuQ2SunvReTG/UkArhII1dAWY5U1sCE=
@@ -35,6 +43,8 @@ github.com/beefsack/go-rate v0.0.0-20180408011153-efa7637bb9b6/go.mod h1:6YNgTHL
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q=
@@ -42,6 +52,8 @@ github.com/boynton/repl v0.0.0-20170116235056-348863958e3e/go.mod h1:Crc/GCZ3NXD
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
@@ -58,6 +70,10 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbp
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/danieljoos/wincred v1.0.2 h1:zf4bhty2iLuwgjgpraD2E9UbvO+fe54XXGJbOwe23fU=
+github.com/danieljoos/wincred v1.0.2/go.mod h1:SnuYRW9lp1oJrZX/dXJqr0cPK5gYXqx3EJbmjhLdK9U=
+github.com/datadog/zstd v1.4.6-0.20200617134701-89f69fb7df32 h1:QWqadCIHYA5zja4b6h9uGQn93u1vL+G/aewImumdg/M=
+github.com/datadog/zstd v1.4.6-0.20200617134701-89f69fb7df32/go.mod h1:inRp+etsHuvVqMPNTXaFlpf/Tj7wqviBtdJoPVrPEFQ=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -68,6 +84,8 @@ github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4w
 github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dvsekhvalnov/jose2go v0.0.0-20180829124132-7f401d37b68a h1:mq+R6XEM6lJX5VlLyZIrUSP8tSuJp82xTK89hvBwJbU=
+github.com/dvsekhvalnov/jose2go v0.0.0-20180829124132-7f401d37b68a/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0=
@@ -81,17 +99,22 @@ github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d8
 github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-basic/ipv4 v1.0.0 h1:gjyFAa1USC1hhXTkPOwBWDPfMcUaIM+tvo1XzV9EZxs=
 github.com/go-basic/ipv4 v1.0.0/go.mod h1:etLBnaxbidQfuqE6wgZQfs38nEWNmzALkxDZe4xY8Dg=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
+github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@@ -119,6 +142,8 @@ github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
 github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -155,6 +180,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
 github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
+github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU=
+github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
 github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -176,6 +203,7 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
 github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/jarcoal/httpmock v1.0.8 h1:8kI16SoO6LQKgPE7PvQuV+YuD/inwHd7fOOe2zMbo4k=
 github.com/jarcoal/httpmock v1.0.8/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik=
@@ -194,6 +222,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d h1:Z+RDyXzjKE0i2sTjZ/b1uxiGtPhFy34Ou/Tk0qwN0kM=
+github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d/go.mod h1:JJNrCn9otv/2QP4D7SMJBgaleKpOf66PnW6F5WGNRIc=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@@ -216,6 +246,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/linkedin/goavro/v2 v2.9.8 h1:jN50elxBsGBDGVDEKqUlDuU1cFwJ11K/yrJCBMe/7Wg=
+github.com/linkedin/goavro/v2 v2.9.8/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
 github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -247,11 +279,19 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs=
+github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/opentracing-contrib/go-grpc v0.0.0-20200813121455-4a6760c71486 h1:K35HCWaOTJIPW6cDHK4yj3QfRY/NhE0pBbfoc0M2NMQ=
 github.com/opentracing-contrib/go-grpc v0.0.0-20200813121455-4a6760c71486/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -276,18 +316,26 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/protocolbuffers/protobuf v3.15.3+incompatible h1:5WExaSYHEGvU73sVHvqe+3/APOOyCVg/pDCeAlfpCrw=
 github.com/protocolbuffers/protobuf v3.15.4+incompatible h1:Blv4dGFGqHXX+r5Tqoc1ziXPMDElqZ+/ryYcE4bddN4=
@@ -301,6 +349,7 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
@@ -406,6 +455,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -417,11 +467,13 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
@@ -429,13 +481,18 @@ golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwY
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -447,10 +504,17 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -535,6 +599,7 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8X
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
 gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -542,6 +607,7 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXL
 gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
 gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
 gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/internal/core/src/query/SearchOnSealed.cpp b/internal/core/src/query/SearchOnSealed.cpp
index caf304d8d07a3b0e760e8bad2a5ed9fb0300f95f..e2eceb9064e8cf19bd9f9b8008ce1ca39cca8c6a 100644
--- a/internal/core/src/query/SearchOnSealed.cpp
+++ b/internal/core/src/query/SearchOnSealed.cpp
@@ -29,16 +29,24 @@ AssembleNegBitset(const BitsetSimple& bitset_simple) {
     for (auto& bitset : bitset_simple) {
         N += bitset.size();
     }
+
     aligned_vector<uint8_t> result(upper_align(upper_div(N, 8), 64));
 
-    auto acc_byte_count = 0;
-    for (auto& bitset : bitset_simple) {
-        auto size = bitset.size();
-        Assert(size % 8 == 0);
-        auto byte_count = size / 8;
+    if (bitset_simple.size() == 1) {
+        auto& bitset = bitset_simple[0];
+        auto byte_count = upper_div(bitset.size(), 8);
         auto src_ptr = boost_ext::get_data(bitset);
-        memcpy(result.data() + acc_byte_count, src_ptr, byte_count);
-        acc_byte_count += byte_count;
+        memcpy(result.data(), src_ptr, byte_count);
+    } else {
+        auto acc_byte_count = 0;
+        for (auto& bitset : bitset_simple) {
+            auto size = bitset.size();
+            Assert(size % 8 == 0);
+            auto byte_count = size / 8;
+            auto src_ptr = boost_ext::get_data(bitset);
+            memcpy(result.data() + acc_byte_count, src_ptr, byte_count);
+            acc_byte_count += byte_count;
+        }
     }
 
     // revert the bitset
diff --git a/internal/core/src/query/visitors/ExecExprVisitor.cpp b/internal/core/src/query/visitors/ExecExprVisitor.cpp
index 058854af6a5aea731842bb1597d50a459460a1ea..b89a095a02b55a5f474b9d8bf3abab24c3b4a84c 100644
--- a/internal/core/src/query/visitors/ExecExprVisitor.cpp
+++ b/internal/core/src/query/visitors/ExecExprVisitor.cpp
@@ -206,7 +206,14 @@ ExecExprVisitor::ExecRangeVisitorDispatcher(RangeExpr& expr_raw) -> RetType {
         T val1, val2;
         std::tie(op1, val1) = conditions[0];
         std::tie(op2, val2) = conditions[1];
-        Assert(val1 <= val2);
+        // TODO: disable check?
+        if (val1 > val2) {
+            // Empty
+            auto size_per_chunk = segment_.size_per_chunk();
+            auto num_chunk = upper_div(row_count_, size_per_chunk);
+            RetType ret(num_chunk, boost::dynamic_bitset<>(size_per_chunk));
+            return ret;
+        }
         auto ops = std::make_tuple(op1, op2);
         if (false) {
         } else if (ops == std::make_tuple(OpType::GreaterThan, OpType::LessThan)) {
diff --git a/internal/core/src/segcore/segment_c.cpp b/internal/core/src/segcore/segment_c.cpp
index 685f4159418cb8f783c2d05951df7be9780a0d04..38a88a5bc7aa954ac375340bc007aa08c49df0a3 100644
--- a/internal/core/src/segcore/segment_c.cpp
+++ b/internal/core/src/segcore/segment_c.cpp
@@ -260,6 +260,43 @@ UpdateSealedSegmentIndex(CSegmentInterface c_segment, CLoadIndexInfo c_load_inde
     }
 }
 
+CStatus
+DropFieldData(CSegmentInterface c_segment, int64_t field_id) {
+    try {
+        auto segment_interface = reinterpret_cast<milvus::segcore::SegmentInterface*>(c_segment);
+        auto segment = dynamic_cast<milvus::segcore::SegmentSealed*>(segment_interface);
+        AssertInfo(segment != nullptr, "segment conversion failed");
+        segment->DropFieldData(milvus::FieldId(field_id));
+        auto status = CStatus();
+        status.error_code = Success;
+        status.error_msg = "";
+        return status;
+    } catch (std::exception& e) {
+        auto status = CStatus();
+        status.error_code = UnexpectedException;
+        status.error_msg = strdup(e.what());
+        return status;
+    }
+}
+
+CStatus
+DropSealedSegmentIndex(CSegmentInterface c_segment, int64_t field_id) {
+    auto status = CStatus();
+    try {
+        auto segment_interface = reinterpret_cast<milvus::segcore::SegmentInterface*>(c_segment);
+        auto segment = dynamic_cast<milvus::segcore::SegmentSealed*>(segment_interface);
+        AssertInfo(segment != nullptr, "segment conversion failed");
+        segment->DropIndex(milvus::FieldId(field_id));
+        status.error_code = Success;
+        status.error_msg = "";
+        return status;
+    } catch (std::exception& e) {
+        status.error_code = UnexpectedException;
+        status.error_msg = strdup(e.what());
+        return status;
+    }
+}
+
 //////////////////////////////    deprecated interfaces    //////////////////////////////
 CStatus
 UpdateSegmentIndex(CSegmentInterface c_segment, CLoadIndexInfo c_load_index_info) {
diff --git a/internal/core/src/segcore/segment_c.h b/internal/core/src/segcore/segment_c.h
index 1d0ea55669c33d83eebea4569cf2d7900ebf55be..8d8416e95ab0dcdcebbbbff9dcaa6fbc3ce20af0 100644
--- a/internal/core/src/segcore/segment_c.h
+++ b/internal/core/src/segcore/segment_c.h
@@ -80,13 +80,19 @@ Delete(CSegmentInterface c_segment,
 int64_t
 PreDelete(CSegmentInterface c_segment, int64_t size);
 
-//////////////////////////////    interfaces for growing segment    //////////////////////////////
+//////////////////////////////    interfaces for sealed segment    //////////////////////////////
 CStatus
 LoadFieldData(CSegmentInterface c_segment, CLoadFieldDataInfo load_field_data_info);
 
 CStatus
 UpdateSealedSegmentIndex(CSegmentInterface c_segment, CLoadIndexInfo c_load_index_info);
 
+CStatus
+DropFieldData(CSegmentInterface c_segment, int64_t field_id);
+
+CStatus
+DropSealedSegmentIndex(CSegmentInterface c_segment, int64_t field_id);
+
 //////////////////////////////    deprecated interfaces    //////////////////////////////
 CStatus
 UpdateSegmentIndex(CSegmentInterface c_segment, CLoadIndexInfo c_load_index_info);
diff --git a/internal/core/unittest/test_query.cpp b/internal/core/unittest/test_query.cpp
index 8b1317e8acbe44121db256e8c337d13bad870571..8acb99956d03b8e011cf43d1ff69ff66c98a1f5d 100644
--- a/internal/core/unittest/test_query.cpp
+++ b/internal/core/unittest/test_query.cpp
@@ -223,6 +223,57 @@ TEST(Query, ExecWithPredicateLoader) {
     ASSERT_EQ(json.dump(2), ref.dump(2));
 }
 
+TEST(Query, ExecWithPredicateSmallN) {
+    using namespace milvus::query;
+    using namespace milvus::segcore;
+    auto schema = std::make_shared<Schema>();
+    schema->AddDebugField("fakevec", DataType::VECTOR_FLOAT, 7, MetricType::METRIC_L2);
+    schema->AddDebugField("age", DataType::FLOAT);
+    std::string dsl = R"({
+        "bool": {
+            "must": [
+            {
+                "range": {
+                    "age": {
+                        "GE": -1,
+                        "LT": 1
+                    }
+                }
+            },
+            {
+                "vector": {
+                    "fakevec": {
+                        "metric_type": "L2",
+                        "params": {
+                            "nprobe": 10
+                        },
+                        "query": "$0",
+                        "topk": 5
+                    }
+                }
+            }
+            ]
+        }
+    })";
+    int64_t N = 177;
+    auto dataset = DataGen(schema, N);
+    auto segment = CreateGrowingSegment(schema);
+    segment->PreInsert(N);
+    segment->Insert(0, N, dataset.row_ids_.data(), dataset.timestamps_.data(), dataset.raw_);
+
+    auto plan = CreatePlan(*schema, dsl);
+    auto num_queries = 5;
+    auto ph_group_raw = CreatePlaceholderGroup(num_queries, 7, 1024);
+    auto ph_group = ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
+    Timestamp time = 1000000;
+    std::vector<const PlaceholderGroup*> ph_group_arr = {ph_group.get()};
+    auto qr = segment->Search(plan.get(), ph_group_arr.data(), &time, 1);
+    int topk = 5;
+
+    Json json = QueryResultToJson(qr);
+    std::cout << json.dump(2);
+}
+
 TEST(Query, ExecWithPredicate) {
     using namespace milvus::query;
     using namespace milvus::segcore;
diff --git a/internal/datanode/collection_replica.go b/internal/datanode/collection_replica.go
index 604a4152e476523449ed66a113203265a24653f0..86a66a5741ab136c5ef529018b3c0c975961629f 100644
--- a/internal/datanode/collection_replica.go
+++ b/internal/datanode/collection_replica.go
@@ -46,6 +46,7 @@ type Segment struct {
 	endTime       Timestamp // not using
 	startPosition *internalpb.MsgPosition
 	endPosition   *internalpb.MsgPosition // not using
+	channelName   string
 }
 
 type CollectionSegmentReplica struct {
@@ -99,6 +100,7 @@ func (replica *CollectionSegmentReplica) addSegment(
 		createTime:    0,
 		startPosition: position,
 		endPosition:   new(internalpb.MsgPosition),
+		channelName:   channelName,
 	}
 
 	seg.isNew.Store(true)
diff --git a/internal/datanode/data_sync_service.go b/internal/datanode/data_sync_service.go
index b35383cca30a085453951e4a72c6104b3a276866..44703ec309f614ca7d5549db79b8ef6a2fff3938 100644
--- a/internal/datanode/data_sync_service.go
+++ b/internal/datanode/data_sync_service.go
@@ -73,7 +73,7 @@ func (dsService *dataSyncService) initNodes() {
 		}
 		return nil
 	}
-	err := retry.Retry(200, time.Millisecond*200, connectEtcdFn)
+	err := retry.Retry(100000, time.Millisecond*200, connectEtcdFn)
 	if err != nil {
 		panic(err)
 	}
diff --git a/internal/datanode/flow_graph_insert_buffer_node.go b/internal/datanode/flow_graph_insert_buffer_node.go
index 74a18e1c30edad537d3fbc6d3c35541222c2f0ea..d8485008d4a7b4c6e7fbb55fc690edb8707a0d47 100644
--- a/internal/datanode/flow_graph_insert_buffer_node.go
+++ b/internal/datanode/flow_graph_insert_buffer_node.go
@@ -117,7 +117,21 @@ func (ibNode *insertBufferNode) Operate(ctx context.Context, in []Msg) ([]Msg, c
 			case iMsg.startPositions == nil || len(iMsg.startPositions) <= 0:
 				log.Error("insert Msg StartPosition empty")
 			default:
-				ibNode.replica.setStartPosition(currentSegID, iMsg.startPositions[0])
+				segment, err := ibNode.replica.getSegmentByID(currentSegID)
+				if err != nil {
+					log.Error("get segment wrong", zap.Error(err))
+				}
+				var startPosition *internalpb.MsgPosition = nil
+				for _, pos := range iMsg.startPositions {
+					if pos.ChannelName == segment.channelName {
+						startPosition = pos
+					}
+				}
+				if startPosition == nil {
+					log.Error("get position wrong", zap.Error(err))
+				} else {
+					ibNode.replica.setStartPosition(currentSegID, startPosition)
+				}
 			}
 		}
 
@@ -418,7 +432,20 @@ func (ibNode *insertBufferNode) Operate(ctx context.Context, in []Msg) ([]Msg, c
 		case iMsg.endPositions == nil || len(iMsg.endPositions) <= 0:
 			log.Error("insert Msg EndPosition empty")
 		default:
-			ibNode.replica.setEndPosition(currentSegID, iMsg.endPositions[0])
+			segment, err := ibNode.replica.getSegmentByID(currentSegID)
+			if err != nil {
+				log.Error("get segment wrong", zap.Error(err))
+			}
+			var endPosition *internalpb.MsgPosition = nil
+			for _, pos := range iMsg.endPositions {
+				if pos.ChannelName == segment.channelName {
+					endPosition = pos
+				}
+			}
+			if endPosition == nil {
+				log.Error("get position wrong", zap.Error(err))
+			}
+			ibNode.replica.setEndPosition(currentSegID, endPosition)
 		}
 
 		// 1.4 if full
diff --git a/internal/dataservice/meta.go b/internal/dataservice/meta.go
index bb37cd01a72c43d9e96d40f8eea76be6433c6eef..71a8c91b312808668e90a495daee5bf64d4a72dd 100644
--- a/internal/dataservice/meta.go
+++ b/internal/dataservice/meta.go
@@ -169,7 +169,6 @@ func (meta *meta) AddSegment(segmentInfo *datapb.SegmentInfo) error {
 func (meta *meta) UpdateSegment(segmentInfo *datapb.SegmentInfo) error {
 	meta.ddLock.Lock()
 	defer meta.ddLock.Unlock()
-
 	meta.segID2Info[segmentInfo.SegmentID] = segmentInfo
 	if err := meta.saveSegmentInfo(segmentInfo); err != nil {
 		_ = meta.reloadFromKV()
@@ -252,7 +251,7 @@ func (meta *meta) FlushSegment(segID UniqueID, timetick Timestamp) error {
 	}
 
 	segInfo.FlushedTime = timetick
-
+	segInfo.State = commonpb.SegmentState_Flushed
 	err := meta.saveSegmentInfo(segInfo)
 	if err != nil {
 		_ = meta.reloadFromKV()
diff --git a/internal/dataservice/server.go b/internal/dataservice/server.go
index c6f49c1550223480f4fa16ed38d7df7cf1204465..f9e7ab146faa7515d3b67ba081b66d046acaafde 100644
--- a/internal/dataservice/server.go
+++ b/internal/dataservice/server.go
@@ -150,7 +150,7 @@ func (s *Server) initMeta() error {
 		}
 		return nil
 	}
-	err := retry.Retry(200, time.Millisecond*200, connectEtcdFn)
+	err := retry.Retry(100000, time.Millisecond*200, connectEtcdFn)
 	if err != nil {
 		return err
 	}
@@ -343,18 +343,12 @@ func (s *Server) startSegmentFlushChannel(ctx context.Context) {
 				continue
 			}
 			realMsg := msg.(*msgstream.FlushCompletedMsg)
-
-			segmentInfo, err := s.meta.GetSegment(realMsg.SegmentID)
+			err := s.meta.FlushSegment(realMsg.SegmentID, realMsg.BeginTimestamp)
+			log.Debug("dataservice flushed segment", zap.Any("segmentID", realMsg.SegmentID), zap.Error(err))
 			if err != nil {
 				log.Error("get segment from meta error", zap.Int64("segmentID", realMsg.SegmentID), zap.Error(err))
 				continue
 			}
-			segmentInfo.FlushedTime = realMsg.BeginTimestamp
-			segmentInfo.State = commonpb.SegmentState_Flushed
-			if err = s.meta.UpdateSegment(segmentInfo); err != nil {
-				log.Error("update segment error", zap.Error(err))
-				continue
-			}
 		}
 	}
 }
diff --git a/internal/dataservice/stats_handler.go b/internal/dataservice/stats_handler.go
index 35e0e330dce1c94f29eb6314af563a7088a7554b..c1a105110945d3dd870a80bfd914ffcd261ee2a7 100644
--- a/internal/dataservice/stats_handler.go
+++ b/internal/dataservice/stats_handler.go
@@ -1,7 +1,9 @@
 package dataservice
 
 import (
+	"github.com/zilliztech/milvus-distributed/internal/log"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
+	"go.uber.org/zap"
 )
 
 type statsHandler struct {
@@ -32,6 +34,6 @@ func (handler *statsHandler) HandleSegmentStat(segStats *internalpb.SegmentStati
 	segMeta.SealedTime = segStats.EndTime
 	segMeta.NumRows = segStats.NumRows
 	segMeta.MemSize = segStats.MemorySize
-
+	log.Debug("stats_handler update segment", zap.Any("segmentID", segMeta.SegmentID), zap.Any("State", segMeta.State))
 	return handler.meta.UpdateSegment(segMeta)
 }
diff --git a/internal/distributed/datanode/client/client.go b/internal/distributed/datanode/client/client.go
index 7ee418c66e1324656c7df17c4603c6034036a0a0..94ce59a9312f932c7cbb8b3426778f91a10d6efd 100644
--- a/internal/distributed/datanode/client/client.go
+++ b/internal/distributed/datanode/client/client.go
@@ -48,7 +48,7 @@ func (c *Client) Init() error {
 		return nil
 	}
 
-	err := retry.Retry(100, time.Millisecond*200, connectGrpcFunc)
+	err := retry.Retry(100000, time.Millisecond*200, connectGrpcFunc)
 	if err != nil {
 		return err
 	}
diff --git a/internal/distributed/datanode/service.go b/internal/distributed/datanode/service.go
index 3660b767f9ad8f2458a429cbbb9094caa08b748b..35f56a39c529f63d3822f89f00a78716b1a040f6 100644
--- a/internal/distributed/datanode/service.go
+++ b/internal/distributed/datanode/service.go
@@ -182,7 +182,7 @@ func (s *Server) init() error {
 	if err = masterClient.Start(); err != nil {
 		panic(err)
 	}
-	err = funcutil.WaitForComponentHealthy(ctx, masterClient, "MasterService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentHealthy(ctx, masterClient, "MasterService", 1000000, time.Millisecond*200)
 
 	if err != nil {
 		panic(err)
@@ -202,7 +202,7 @@ func (s *Server) init() error {
 	if err = dataService.Start(); err != nil {
 		panic(err)
 	}
-	err = funcutil.WaitForComponentInitOrHealthy(ctx, dataService, "DataService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentInitOrHealthy(ctx, dataService, "DataService", 1000000, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
diff --git a/internal/distributed/dataservice/client/client.go b/internal/distributed/dataservice/client/client.go
index a96dad4c122af8c4f87e2174eaa2eaf7bb26773b..95f26c4fd981aa677b036e4296721eedb84be36b 100644
--- a/internal/distributed/dataservice/client/client.go
+++ b/internal/distributed/dataservice/client/client.go
@@ -47,7 +47,7 @@ func (c *Client) Init() error {
 		return nil
 	}
 
-	err := retry.Retry(100, time.Millisecond*200, connectGrpcFunc)
+	err := retry.Retry(100000, time.Millisecond*200, connectGrpcFunc)
 	if err != nil {
 		return err
 	}
diff --git a/internal/distributed/dataservice/service.go b/internal/distributed/dataservice/service.go
index f28d5379719bfbe854b6ba8d36991b4122d1256b..d3c7aad1354bdc143dc60115837bff5c11441b3a 100644
--- a/internal/distributed/dataservice/service.go
+++ b/internal/distributed/dataservice/service.go
@@ -104,7 +104,7 @@ func (s *Server) init() error {
 	s.dataService.UpdateStateCode(internalpb.StateCode_Initializing)
 
 	ctx := context.Background()
-	err = funcutil.WaitForComponentInitOrHealthy(ctx, client, "MasterService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentInitOrHealthy(ctx, client, "MasterService", 1000000, time.Millisecond*200)
 
 	if err != nil {
 		panic(err)
diff --git a/internal/distributed/indexnode/client/client.go b/internal/distributed/indexnode/client/client.go
index 06c06c5d6fe539faa37b4adceeee1f4166160eb6..7ef3a9fb59223cd7a122f3dfc71353dc9a1295ea 100644
--- a/internal/distributed/indexnode/client/client.go
+++ b/internal/distributed/indexnode/client/client.go
@@ -45,7 +45,7 @@ func (c *Client) Init() error {
 		c.grpcClient = indexpb.NewIndexNodeClient(conn)
 		return nil
 	}
-	err := retry.Retry(10, time.Millisecond*200, connectGrpcFunc)
+	err := retry.Retry(100000, time.Millisecond*200, connectGrpcFunc)
 	if err != nil {
 		return err
 	}
diff --git a/internal/distributed/indexnode/paramtable.go b/internal/distributed/indexnode/paramtable.go
index 462ed371ebf750fc3d6b801da8821c1007e6da8c..b92dbc29446c71943277c1d4daa656e3a7854d72 100644
--- a/internal/distributed/indexnode/paramtable.go
+++ b/internal/distributed/indexnode/paramtable.go
@@ -1,9 +1,6 @@
 package grpcindexnode
 
 import (
-	"net"
-	"os"
-	"strconv"
 	"sync"
 
 	"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
@@ -35,11 +32,6 @@ func (pt *ParamTable) LoadFromArgs() {
 }
 
 func (pt *ParamTable) LoadFromEnv() {
-	indexServiceAddress := os.Getenv("INDEX_SERVICE_ADDRESS")
-	if indexServiceAddress != "" {
-		pt.IndexServerAddress = indexServiceAddress
-	}
-
 	Params.IP = funcutil.GetLocalIP()
 }
 
@@ -50,28 +42,11 @@ func (pt *ParamTable) initParams() {
 
 // todo remove and use load from env
 func (pt *ParamTable) initIndexServerAddress() {
-	addr, err := pt.Load("indexService.address")
-	if err != nil {
-		panic(err)
-	}
-
-	hostName, _ := net.LookupHost(addr)
-	if len(hostName) <= 0 {
-		if ip := net.ParseIP(addr); ip == nil {
-			panic("invalid ip indexServer.address")
-		}
-	}
-
-	port, err := pt.Load("indexService.port")
-	if err != nil {
-		panic(err)
-	}
-	_, err = strconv.Atoi(port)
+	ret, err := pt.Load("IndexServiceAddress")
 	if err != nil {
 		panic(err)
 	}
-
-	pt.IndexServerAddress = addr + ":" + port
+	pt.IndexServerAddress = ret
 }
 
 func (pt *ParamTable) initPort() {
diff --git a/internal/distributed/indexservice/client/client.go b/internal/distributed/indexservice/client/client.go
index cd0fab43e94084c4c2f366872f8eea7975cbaa4d..58f3419b0e1c8e2824d63ee09cbd9bbfe1a81fe7 100644
--- a/internal/distributed/indexservice/client/client.go
+++ b/internal/distributed/indexservice/client/client.go
@@ -50,7 +50,7 @@ func (c *Client) Init() error {
 		c.grpcClient = indexpb.NewIndexServiceClient(conn)
 		return nil
 	}
-	err := retry.Retry(10, time.Millisecond*200, connectGrpcFunc)
+	err := retry.Retry(100000, time.Millisecond*200, connectGrpcFunc)
 	if err != nil {
 		return err
 	}
diff --git a/internal/distributed/indexservice/paramtable.go b/internal/distributed/indexservice/paramtable.go
index 1e2fb057bb40a96a4b6324f49d70bfc2b1a5f3be..d30955855cae8ddd1bc5dfaa7c0196161287c1cc 100644
--- a/internal/distributed/indexservice/paramtable.go
+++ b/internal/distributed/indexservice/paramtable.go
@@ -1,8 +1,6 @@
 package grpcindexservice
 
 import (
-	"net"
-	"strconv"
 	"sync"
 
 	"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
@@ -35,25 +33,9 @@ func (pt *ParamTable) initServicePort() {
 }
 
 func (pt *ParamTable) initServiceAddress() {
-	addr, err := pt.Load("indexService.address")
+	ret, err := pt.Load("IndexServiceAddress")
 	if err != nil {
 		panic(err)
 	}
-
-	hostName, _ := net.LookupHost(addr)
-	if len(hostName) <= 0 {
-		if ip := net.ParseIP(addr); ip == nil {
-			panic("invalid ip proxyService.address")
-		}
-	}
-
-	port, err := pt.Load("indexService.port")
-	if err != nil {
-		panic(err)
-	}
-	_, err = strconv.Atoi(port)
-	if err != nil {
-		panic(err)
-	}
-	pt.ServiceAddress = addr + ":" + port
+	pt.ServiceAddress = ret
 }
diff --git a/internal/distributed/masterservice/client/client.go b/internal/distributed/masterservice/client/client.go
index 0140f40a5429468404b5191b09e717b53de2f4a9..9824a26435b198cd3f654ceb7a549a8b5677271a 100644
--- a/internal/distributed/masterservice/client/client.go
+++ b/internal/distributed/masterservice/client/client.go
@@ -32,7 +32,7 @@ func NewClient(addr string, timeout time.Duration) (*GrpcClient, error) {
 		addr:        addr,
 		timeout:     timeout,
 		grpcTimeout: time.Second * 5,
-		retry:       3,
+		retry:       300,
 	}, nil
 }
 
diff --git a/internal/distributed/masterservice/masterservice_test.go b/internal/distributed/masterservice/masterservice_test.go
index 2bd47e3e92cab763eda6e04364ce399e1b921d1f..49fd4a8b8c9fa63d9c78b11a2f4b2f0b47aea5f1 100644
--- a/internal/distributed/masterservice/masterservice_test.go
+++ b/internal/distributed/masterservice/masterservice_test.go
@@ -357,6 +357,7 @@ func TestGrpcService(t *testing.T) {
 		assert.Nil(t, err)
 		assert.Equal(t, partMeta.PartitionName, "testPartition")
 
+		assert.Equal(t, 1, len(collectionMetaCache))
 	})
 
 	t.Run("has partition", func(t *testing.T) {
@@ -600,6 +601,7 @@ func TestGrpcService(t *testing.T) {
 		partMeta, err := core.MetaTable.GetPartitionByID(collMeta.PartitionIDs[0])
 		assert.Nil(t, err)
 		assert.Equal(t, partMeta.PartitionName, cms.Params.DefaultPartitionName)
+		assert.Equal(t, 2, len(collectionMetaCache))
 	})
 
 	t.Run("drop collection", func(t *testing.T) {
@@ -620,7 +622,7 @@ func TestGrpcService(t *testing.T) {
 		assert.Equal(t, dropCollectionArray[0].Base.MsgType, commonpb.MsgType_DropCollection)
 		assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_Success)
 		assert.Equal(t, dropCollectionArray[0].CollectionName, "testColl")
-		assert.Equal(t, len(collectionMetaCache), 1)
+		assert.Equal(t, len(collectionMetaCache), 3)
 		assert.Equal(t, collectionMetaCache[0], "testColl")
 
 		req = &milvuspb.DropCollectionRequest{
diff --git a/internal/distributed/masterservice/server.go b/internal/distributed/masterservice/server.go
index 4a01be5168ac1be0417c11685a960175acf1c63f..c453323094e21286fb988f23875e95e73ccf4dbe 100644
--- a/internal/distributed/masterservice/server.go
+++ b/internal/distributed/masterservice/server.go
@@ -123,7 +123,7 @@ func (s *Server) init() error {
 			panic(err)
 		}
 
-		err := funcutil.WaitForComponentInitOrHealthy(ctx, proxyService, "ProxyService", 100, 200*time.Millisecond)
+		err := funcutil.WaitForComponentInitOrHealthy(ctx, proxyService, "ProxyService", 1000000, 200*time.Millisecond)
 		if err != nil {
 			panic(err)
 		}
@@ -141,7 +141,7 @@ func (s *Server) init() error {
 		if err := dataService.Start(); err != nil {
 			panic(err)
 		}
-		err := funcutil.WaitForComponentInitOrHealthy(ctx, dataService, "DataService", 100, 200*time.Millisecond)
+		err := funcutil.WaitForComponentInitOrHealthy(ctx, dataService, "DataService", 1000000, 200*time.Millisecond)
 		if err != nil {
 			panic(err)
 		}
diff --git a/internal/distributed/proxynode/client/client.go b/internal/distributed/proxynode/client/client.go
index 69cd5d6ad988dfdcd29c092e8c1a6d4ee684333b..08dc15045e6a38a83261ef3492d26daac75fdbfd 100644
--- a/internal/distributed/proxynode/client/client.go
+++ b/internal/distributed/proxynode/client/client.go
@@ -44,7 +44,7 @@ func (c *Client) Init() error {
 		c.grpcClient = proxypb.NewProxyNodeServiceClient(conn)
 		return nil
 	}
-	err := retry.Retry(10, time.Millisecond*200, connectGrpcFunc)
+	err := retry.Retry(100000, time.Millisecond*200, connectGrpcFunc)
 	if err != nil {
 		return err
 	}
diff --git a/internal/distributed/proxynode/paramtable.go b/internal/distributed/proxynode/paramtable.go
index 7e7a8ff065ee1061adbb025a75ec214080d117d9..593bf9c4c80150430321a34bf704b119972a48cb 100644
--- a/internal/distributed/proxynode/paramtable.go
+++ b/internal/distributed/proxynode/paramtable.go
@@ -1,9 +1,6 @@
 package grpcproxynode
 
 import (
-	"net"
-	"os"
-	"strconv"
 	"sync"
 
 	"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
@@ -42,32 +39,6 @@ func (pt *ParamTable) LoadFromArgs() {
 }
 
 func (pt *ParamTable) LoadFromEnv() {
-
-	masterAddress := os.Getenv("MASTER_ADDRESS")
-	if masterAddress != "" {
-		pt.MasterAddress = masterAddress
-	}
-
-	proxyServiceAddress := os.Getenv("PROXY_SERVICE_ADDRESS")
-	if proxyServiceAddress != "" {
-		pt.ProxyServiceAddress = proxyServiceAddress
-	}
-
-	indexServiceAddress := os.Getenv("INDEX_SERVICE_ADDRESS")
-	if indexServiceAddress != "" {
-		pt.IndexServerAddress = indexServiceAddress
-	}
-
-	queryServiceAddress := os.Getenv("QUERY_SERVICE_ADDRESS")
-	if queryServiceAddress != "" {
-		pt.QueryServiceAddress = queryServiceAddress
-	}
-
-	dataServiceAddress := os.Getenv("DATA_SERVICE_ADDRESS")
-	if dataServiceAddress != "" {
-		pt.DataServiceAddress = dataServiceAddress
-	}
-
 	Params.IP = funcutil.GetLocalIP()
 }
 
@@ -86,96 +57,47 @@ func (pt *ParamTable) initPoxyServicePort() {
 }
 
 func (pt *ParamTable) initProxyServiceAddress() {
-	addr, err := pt.Load("proxyService.address")
+	ret, err := pt.Load("_PROXY_SERVICE_ADDRESS")
 	if err != nil {
 		panic(err)
 	}
-
-	hostName, _ := net.LookupHost(addr)
-	if len(hostName) <= 0 {
-		if ip := net.ParseIP(addr); ip == nil {
-			panic("invalid ip proxyService.address")
-		}
-	}
-
-	port, err := pt.Load("proxyService.port")
-	if err != nil {
-		panic(err)
-	}
-	_, err = strconv.Atoi(port)
-	if err != nil {
-		panic(err)
-	}
-	pt.ProxyServiceAddress = addr + ":" + port
+	pt.ProxyServiceAddress = ret
 }
 
 // todo remove and use load from env
 func (pt *ParamTable) initIndexServerAddress() {
-	addr, err := pt.Load("indexService.address")
+	ret, err := pt.Load("IndexServiceAddress")
 	if err != nil {
 		panic(err)
 	}
-
-	hostName, _ := net.LookupHost(addr)
-	if len(hostName) <= 0 {
-		if ip := net.ParseIP(addr); ip == nil {
-			panic("invalid ip indexService.address")
-		}
-	}
-
-	port, err := pt.Load("indexService.port")
-	if err != nil {
-		panic(err)
-	}
-	_, err = strconv.Atoi(port)
-	if err != nil {
-		panic(err)
-	}
-
-	pt.IndexServerAddress = addr + ":" + port
+	pt.IndexServerAddress = ret
 }
 
 // todo remove and use load from env
 func (pt *ParamTable) initMasterAddress() {
-
-	masterHost, err := pt.Load("master.address")
+	ret, err := pt.Load("_MasterAddress")
 	if err != nil {
 		panic(err)
 	}
-	port, err := pt.Load("master.port")
-	if err != nil {
-		panic(err)
-	}
-	pt.MasterAddress = masterHost + ":" + port
-
+	pt.MasterAddress = ret
 }
 
 // todo remove and use load from env
 func (pt *ParamTable) initDataServiceAddress() {
-	addr, err := pt.Load("dataService.address")
-	if err != nil {
-		panic(err)
-	}
-
-	port, err := pt.Load("dataService.port")
+	ret, err := pt.Load("_DataServiceAddress")
 	if err != nil {
 		panic(err)
 	}
-	pt.DataServiceAddress = addr + ":" + port
+	pt.DataServiceAddress = ret
 }
 
 // todo remove and use load from env
 func (pt *ParamTable) initQueryServiceAddress() {
-	addr, err := pt.Load("queryService.address")
-	if err != nil {
-		panic(err)
-	}
-
-	port, err := pt.Load("queryService.port")
+	ret, err := pt.Load("_QueryServiceAddress")
 	if err != nil {
 		panic(err)
 	}
-	pt.QueryServiceAddress = addr + ":" + port
+	pt.QueryServiceAddress = ret
 }
 
 func (pt *ParamTable) initPort() {
diff --git a/internal/distributed/proxynode/service.go b/internal/distributed/proxynode/service.go
index c975f548e4fac22c081c31bea30c39b7cd6af899..f9fad82eff17724146e2fd2fdd2699dc081412ee 100644
--- a/internal/distributed/proxynode/service.go
+++ b/internal/distributed/proxynode/service.go
@@ -185,7 +185,7 @@ func (s *Server) init() error {
 	if err != nil {
 		return err
 	}
-	err = funcutil.WaitForComponentHealthy(ctx, s.masterServiceClient, "MasterService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentHealthy(ctx, s.masterServiceClient, "MasterService", 1000000, time.Millisecond*200)
 
 	if err != nil {
 		panic(err)
diff --git a/internal/distributed/proxyservice/client/client.go b/internal/distributed/proxyservice/client/client.go
index c679e287c7c4db848420aa71ac245768baa96bc1..27804f4e80fe43fcdf0ba72efd15c54f73ebb169 100644
--- a/internal/distributed/proxyservice/client/client.go
+++ b/internal/distributed/proxyservice/client/client.go
@@ -44,7 +44,7 @@ func (c *Client) Init() error {
 		c.proxyServiceClient = proxypb.NewProxyServiceClient(conn)
 		return nil
 	}
-	err := retry.Retry(10, time.Millisecond*200, connectGrpcFunc)
+	err := retry.Retry(100000, time.Millisecond*200, connectGrpcFunc)
 	if err != nil {
 		return err
 	}
diff --git a/internal/distributed/proxyservice/paramtable.go b/internal/distributed/proxyservice/paramtable.go
index 5ca4e61b490bcfe057553261d21aca4445c47dcd..9102b74a469cad200db5aa6124e2540d611e64b8 100644
--- a/internal/distributed/proxyservice/paramtable.go
+++ b/internal/distributed/proxyservice/paramtable.go
@@ -1,8 +1,6 @@
 package grpcproxyservice
 
 import (
-	"net"
-	"strconv"
 	"sync"
 
 	"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
@@ -35,25 +33,9 @@ func (pt *ParamTable) initServicePort() {
 }
 
 func (pt *ParamTable) initServiceAddress() {
-	addr, err := pt.Load("proxyService.address")
+	ret, err := pt.Load("_PROXY_SERVICE_ADDRESS")
 	if err != nil {
 		panic(err)
 	}
-
-	hostName, _ := net.LookupHost(addr)
-	if len(hostName) <= 0 {
-		if ip := net.ParseIP(addr); ip == nil {
-			panic("invalid ip proxyService.address")
-		}
-	}
-
-	port, err := pt.Load("proxyService.port")
-	if err != nil {
-		panic(err)
-	}
-	_, err = strconv.Atoi(port)
-	if err != nil {
-		panic(err)
-	}
-	pt.ServiceAddress = addr + ":" + port
+	pt.ServiceAddress = ret
 }
diff --git a/internal/distributed/querynode/client/client.go b/internal/distributed/querynode/client/client.go
index 6fb86ad5d418783bea37583c57fd47c8aac544e5..802edcffce43fa9867261195f048bafca1781798 100644
--- a/internal/distributed/querynode/client/client.go
+++ b/internal/distributed/querynode/client/client.go
@@ -37,7 +37,7 @@ func (c *Client) Init() error {
 	ctx, cancel := context.WithTimeout(context.Background(), RPCConnectionTimeout)
 	defer cancel()
 	var err error
-	for i := 0; i < Retry; i++ {
+	for i := 0; i < Retry*100; i++ {
 		if c.conn, err = grpc.DialContext(ctx, c.addr, grpc.WithInsecure(), grpc.WithBlock(),
 			grpc.WithUnaryInterceptor(
 				otgrpc.OpenTracingClientInterceptor(tracer)),
diff --git a/internal/distributed/querynode/service.go b/internal/distributed/querynode/service.go
index 63b3482f4e41df86e97936118e13f93b141af3d7..ad98185730c72618939b6178336fb3fff8e01e13 100644
--- a/internal/distributed/querynode/service.go
+++ b/internal/distributed/querynode/service.go
@@ -111,7 +111,7 @@ func (s *Server) init() error {
 		panic(err)
 	}
 
-	err = funcutil.WaitForComponentInitOrHealthy(ctx, queryService, "QueryService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentInitOrHealthy(ctx, queryService, "QueryService", 1000000, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
@@ -139,7 +139,7 @@ func (s *Server) init() error {
 		panic(err)
 	}
 
-	err = funcutil.WaitForComponentHealthy(ctx, masterService, "MasterService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentHealthy(ctx, masterService, "MasterService", 1000000, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
@@ -160,7 +160,7 @@ func (s *Server) init() error {
 		panic(err)
 	}
 	// wait indexservice healthy
-	err = funcutil.WaitForComponentHealthy(ctx, indexService, "IndexService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentHealthy(ctx, indexService, "IndexService", 1000000, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
@@ -180,7 +180,7 @@ func (s *Server) init() error {
 	if err = dataService.Start(); err != nil {
 		panic(err)
 	}
-	err = funcutil.WaitForComponentInitOrHealthy(ctx, dataService, "DataService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentInitOrHealthy(ctx, dataService, "DataService", 1000000, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
diff --git a/internal/distributed/queryservice/client/client.go b/internal/distributed/queryservice/client/client.go
index ae4372f41ee1d99f68798b903a2a855323537b8b..cd6af22741d738a6b52182a8b676508d90d11d80 100644
--- a/internal/distributed/queryservice/client/client.go
+++ b/internal/distributed/queryservice/client/client.go
@@ -32,7 +32,7 @@ func NewClient(address string, timeout time.Duration) (*Client, error) {
 		conn:       nil,
 		addr:       address,
 		timeout:    timeout,
-		retry:      3,
+		retry:      300,
 	}, nil
 }
 
diff --git a/internal/distributed/queryservice/service.go b/internal/distributed/queryservice/service.go
index 8322a0ab57c1b42aaf4cd1d1db69f90ee957fcd2..0d205424d851f7d8df078165eb03db42ac592490 100644
--- a/internal/distributed/queryservice/service.go
+++ b/internal/distributed/queryservice/service.go
@@ -101,7 +101,7 @@ func (s *Server) init() error {
 		panic(err)
 	}
 	// wait for master init or healthy
-	err = funcutil.WaitForComponentInitOrHealthy(ctx, masterService, "MasterService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentInitOrHealthy(ctx, masterService, "MasterService", 1000000, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
@@ -121,7 +121,7 @@ func (s *Server) init() error {
 	if err = dataService.Start(); err != nil {
 		panic(err)
 	}
-	err = funcutil.WaitForComponentInitOrHealthy(ctx, dataService, "DataService", 100, time.Millisecond*200)
+	err = funcutil.WaitForComponentInitOrHealthy(ctx, dataService, "DataService", 1000000, time.Millisecond*200)
 	if err != nil {
 		panic(err)
 	}
diff --git a/internal/indexnode/indexnode.go b/internal/indexnode/indexnode.go
index e18c14ccbf29bdd59e1d1e6e989d23e2677b7bb3..624d49243bc6cdda90a7325bfb145ff6c5e5ae0b 100644
--- a/internal/indexnode/indexnode.go
+++ b/internal/indexnode/indexnode.go
@@ -67,7 +67,7 @@ func NewIndexNode(ctx context.Context) (*IndexNode, error) {
 
 func (i *IndexNode) Init() error {
 	ctx := context.Background()
-	err := funcutil.WaitForComponentHealthy(ctx, i.serviceClient, "IndexService", 100, time.Millisecond*200)
+	err := funcutil.WaitForComponentHealthy(ctx, i.serviceClient, "IndexService", 1000000, time.Millisecond*200)
 
 	if err != nil {
 		return err
diff --git a/internal/indexservice/indexservice.go b/internal/indexservice/indexservice.go
index 4b4bd72afd0a3d7f6436aff2d5690eeb7b6d7961..e3fc36811aeeb9bf8bc02a7702b68197638a3197 100644
--- a/internal/indexservice/indexservice.go
+++ b/internal/indexservice/indexservice.go
@@ -86,7 +86,7 @@ func (i *IndexService) Init() error {
 		i.metaTable = metakv
 		return nil
 	}
-	err := retry.Retry(200, time.Millisecond*200, connectEtcdFn)
+	err := retry.Retry(100000, time.Millisecond*200, connectEtcdFn)
 	if err != nil {
 		return err
 	}
diff --git a/internal/indexservice/paramtable.go b/internal/indexservice/paramtable.go
index 520f72777134879e863afcc1c38dc35a9bf17e5b..a0ca05ccd458f01085b396316d57d970e0abbf67 100644
--- a/internal/indexservice/paramtable.go
+++ b/internal/indexservice/paramtable.go
@@ -1,7 +1,6 @@
 package indexservice
 
 import (
-	"net"
 	"path"
 	"strconv"
 	"sync"
@@ -38,8 +37,6 @@ func (pt *ParamTable) Init() {
 	once.Do(func() {
 		pt.BaseTable.Init()
 		pt.initLogCfg()
-		pt.initAddress()
-		pt.initPort()
 		pt.initEtcdAddress()
 		pt.initMasterAddress()
 		pt.initMetaRootPath()
@@ -52,35 +49,6 @@ func (pt *ParamTable) Init() {
 	})
 }
 
-func (pt *ParamTable) initAddress() {
-	addr, err := pt.Load("indexService.address")
-	if err != nil {
-		panic(err)
-	}
-
-	hostName, _ := net.LookupHost(addr)
-	if len(hostName) <= 0 {
-		if ip := net.ParseIP(addr); ip == nil {
-			panic("invalid ip indexServer.address")
-		}
-	}
-
-	port, err := pt.Load("indexService.port")
-	if err != nil {
-		panic(err)
-	}
-	_, err = strconv.Atoi(port)
-	if err != nil {
-		panic(err)
-	}
-
-	pt.Address = addr + ":" + port
-}
-
-func (pt *ParamTable) initPort() {
-	pt.Port = pt.ParseInt("indexService.port")
-}
-
 func (pt *ParamTable) initEtcdAddress() {
 	addr, err := pt.Load("_EtcdAddress")
 	if err != nil {
diff --git a/internal/kv/minio/minio_kv.go b/internal/kv/minio/minio_kv.go
index 0d6c9ff80979550b60cea0ba4bdc2f4468d233ea..53cee1e86c62f7cf65bd4251aaa8e89dec2e2e95 100644
--- a/internal/kv/minio/minio_kv.go
+++ b/internal/kv/minio/minio_kv.go
@@ -43,7 +43,7 @@ func NewMinIOKV(ctx context.Context, option *Option) (*MinIOKV, error) {
 		return nil
 	}
 
-	err := retry.Retry(200, time.Millisecond*200, connectMinIOFn)
+	err := retry.Retry(100000, time.Millisecond*200, connectMinIOFn)
 	if err != nil {
 		return nil, err
 	}
diff --git a/internal/masterservice/master_service.go b/internal/masterservice/master_service.go
index 5cd20b4f97a85c3621c3573c89b2494381e1bdec..756a97bdbf7a26b96d9a413d9116139d60516abc 100644
--- a/internal/masterservice/master_service.go
+++ b/internal/masterservice/master_service.go
@@ -765,7 +765,7 @@ func (c *Core) Init() error {
 			c.kvBase = etcdkv.NewEtcdKV(c.etcdCli, Params.KvRootPath)
 			return nil
 		}
-		err := retry.Retry(200, time.Millisecond*200, connectEtcdFn)
+		err := retry.Retry(100000, time.Millisecond*200, connectEtcdFn)
 		if err != nil {
 			return
 		}
@@ -1001,7 +1001,7 @@ func (c *Core) DescribeCollection(ctx context.Context, in *milvuspb.DescribeColl
 	c.ddReqQueue <- t
 	err := t.WaitToFinish()
 	if err != nil {
-		log.Debug("DescribeCollection Failed", zap.String("name", in.CollectionName))
+		log.Debug("DescribeCollection Failed", zap.String("name", in.CollectionName), zap.Error(err))
 		return &milvuspb.DescribeCollectionResponse{
 			Status: &commonpb.Status{
 				ErrorCode: commonpb.ErrorCode_UnexpectedError,
diff --git a/internal/masterservice/master_service_test.go b/internal/masterservice/master_service_test.go
index 29bb068aae151000b3db4ef4fd80ce175e764e09..acc7e9e5b079083eb4fe2187c47c2a2850d567f6 100644
--- a/internal/masterservice/master_service_test.go
+++ b/internal/masterservice/master_service_test.go
@@ -508,6 +508,9 @@ func TestMasterService(t *testing.T) {
 		assert.True(t, ok)
 		assert.Equal(t, partMsg.CollectionID, collMeta.ID)
 		assert.Equal(t, partMsg.PartitionID, partMeta.PartitionID)
+
+		assert.Equal(t, 1, len(pm.GetCollArray()))
+		assert.Equal(t, "testColl", pm.GetCollArray()[0])
 	})
 
 	t.Run("has partition", func(t *testing.T) {
@@ -893,6 +896,9 @@ func TestMasterService(t *testing.T) {
 		assert.True(t, ok)
 		assert.Equal(t, dmsg.CollectionID, collMeta.ID)
 		assert.Equal(t, dmsg.PartitionID, dropPartID)
+
+		assert.Equal(t, 2, len(pm.GetCollArray()))
+		assert.Equal(t, "testColl", pm.GetCollArray()[1])
 	})
 
 	t.Run("drop collection", func(t *testing.T) {
@@ -919,8 +925,8 @@ func TestMasterService(t *testing.T) {
 		assert.True(t, ok)
 		assert.Equal(t, dmsg.CollectionID, collMeta.ID)
 		collArray := pm.GetCollArray()
-		assert.Equal(t, len(collArray), 1)
-		assert.Equal(t, collArray[0], "testColl")
+		assert.Equal(t, len(collArray), 3)
+		assert.Equal(t, collArray[2], "testColl")
 
 		time.Sleep(time.Millisecond * 100)
 		qm.mutex.Lock()
@@ -944,8 +950,8 @@ func TestMasterService(t *testing.T) {
 		time.Sleep(time.Second)
 		assert.Zero(t, len(ddStream.Chan()))
 		collArray = pm.GetCollArray()
-		assert.Equal(t, len(collArray), 1)
-		assert.Equal(t, collArray[0], "testColl")
+		assert.Equal(t, len(collArray), 3)
+		assert.Equal(t, collArray[2], "testColl")
 	})
 
 	err = core.Stop()
diff --git a/internal/masterservice/task.go b/internal/masterservice/task.go
index 44abf6926b67703982f5723615045d8ee6a85918..441f24f99512e5e4c1de268dc076300c753cb8c8 100644
--- a/internal/masterservice/task.go
+++ b/internal/masterservice/task.go
@@ -419,6 +419,9 @@ func (t *CreatePartitionReqTask) Execute(ctx context.Context) error {
 		return err
 	}
 
+	// error doesn't matter here
+	_ = t.core.InvalidateCollectionMetaCache(ctx, t.Req.Base.Timestamp, t.Req.DbName, t.Req.CollectionName)
+
 	return nil
 }
 
@@ -467,6 +470,9 @@ func (t *DropPartitionReqTask) Execute(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
+
+	// error doesn't matter here
+	_ = t.core.InvalidateCollectionMetaCache(ctx, t.Req.Base.Timestamp, t.Req.DbName, t.Req.CollectionName)
 	return nil
 }
 
diff --git a/internal/msgstream/pulsarms/pulsar_msgstream.go b/internal/msgstream/pulsarms/pulsar_msgstream.go
index d3e36c553dc6a05f6db388a369a9b13728fc8bed..d391542f8a1c2daa619a0b15bb39b6ffd7db75a7 100644
--- a/internal/msgstream/pulsarms/pulsar_msgstream.go
+++ b/internal/msgstream/pulsarms/pulsar_msgstream.go
@@ -2,17 +2,18 @@ package pulsarms
 
 import (
 	"context"
+	"errors"
 	"path/filepath"
 	"reflect"
 	"strconv"
 	"sync"
 	"time"
 
-	"errors"
-
 	"github.com/apache/pulsar-client-go/pulsar"
 	"github.com/golang/protobuf/proto"
 	"github.com/opentracing/opentracing-go"
+	"go.uber.org/zap"
+
 	"github.com/zilliztech/milvus-distributed/internal/log"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
@@ -20,7 +21,6 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
 	"github.com/zilliztech/milvus-distributed/internal/util/trace"
 	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
-	"go.uber.org/zap"
 )
 
 type TsMsg = msgstream.TsMsg
@@ -41,8 +41,9 @@ type UnmarshalDispatcher = msgstream.UnmarshalDispatcher
 type PulsarMsgStream struct {
 	ctx              context.Context
 	client           pulsar.Client
-	producers        []Producer
-	consumers        []Consumer
+	producers        map[string]Producer
+	producerChannels []string
+	consumers        map[string]Consumer
 	consumerChannels []string
 	repackFunc       RepackFunc
 	unmarshal        UnmarshalDispatcher
@@ -50,6 +51,7 @@ type PulsarMsgStream struct {
 	wait             *sync.WaitGroup
 	streamCancel     func()
 	pulsarBufSize    int64
+	producerLock     *sync.Mutex
 	consumerLock     *sync.Mutex
 	consumerReflects []reflect.SelectCase
 
@@ -63,8 +65,9 @@ func newPulsarMsgStream(ctx context.Context,
 	unmarshal UnmarshalDispatcher) (*PulsarMsgStream, error) {
 
 	streamCtx, streamCancel := context.WithCancel(ctx)
-	producers := make([]Producer, 0)
-	consumers := make([]Consumer, 0)
+	producers := make(map[string]Producer)
+	consumers := make(map[string]Consumer)
+	producerChannels := make([]string, 0)
 	consumerChannels := make([]string, 0)
 	consumerReflects := make([]reflect.SelectCase, 0)
 	receiveBuf := make(chan *MsgPack, receiveBufSize)
@@ -85,6 +88,7 @@ func newPulsarMsgStream(ctx context.Context,
 		ctx:              streamCtx,
 		client:           client,
 		producers:        producers,
+		producerChannels: producerChannels,
 		consumers:        consumers,
 		consumerChannels: consumerChannels,
 		unmarshal:        unmarshal,
@@ -92,6 +96,7 @@ func newPulsarMsgStream(ctx context.Context,
 		receiveBuf:       receiveBuf,
 		streamCancel:     streamCancel,
 		consumerReflects: consumerReflects,
+		producerLock:     &sync.Mutex{},
 		consumerLock:     &sync.Mutex{},
 		wait:             &sync.WaitGroup{},
 		scMap:            &sync.Map{},
@@ -101,22 +106,24 @@ func newPulsarMsgStream(ctx context.Context,
 }
 
 func (ms *PulsarMsgStream) AsProducer(channels []string) {
-	for i := 0; i < len(channels); i++ {
+	for _, channel := range channels {
 		fn := func() error {
-			pp, err := ms.client.CreateProducer(pulsar.ProducerOptions{Topic: channels[i]})
+			pp, err := ms.client.CreateProducer(pulsar.ProducerOptions{Topic: channel})
 			if err != nil {
 				return err
 			}
 			if pp == nil {
 				return errors.New("pulsar is not ready, producer is nil")
 			}
-
-			ms.producers = append(ms.producers, pp)
+			ms.producerLock.Lock()
+			ms.producers[channel] = pp
+			ms.producerChannels = append(ms.producerChannels, channel)
+			ms.producerLock.Unlock()
 			return nil
 		}
 		err := util.Retry(20, time.Millisecond*200, fn)
 		if err != nil {
-			errMsg := "Failed to create producer " + channels[i] + ", error = " + err.Error()
+			errMsg := "Failed to create producer " + channel + ", error = " + err.Error()
 			panic(errMsg)
 		}
 	}
@@ -124,14 +131,17 @@ func (ms *PulsarMsgStream) AsProducer(channels []string) {
 
 func (ms *PulsarMsgStream) AsConsumer(channels []string,
 	subName string) {
-	for i := 0; i < len(channels); i++ {
+	for _, channel := range channels {
+		if _, ok := ms.consumers[channel]; ok {
+			continue
+		}
 		fn := func() error {
 			receiveChannel := make(chan pulsar.ConsumerMessage, ms.pulsarBufSize)
 			pc, err := ms.client.Subscribe(pulsar.ConsumerOptions{
-				Topic:                       channels[i],
+				Topic:                       channel,
 				SubscriptionName:            subName,
 				Type:                        pulsar.KeyShared,
-				SubscriptionInitialPosition: pulsar.SubscriptionPositionLatest,
+				SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
 				MessageChannel:              receiveChannel,
 			})
 			if err != nil {
@@ -141,8 +151,8 @@ func (ms *PulsarMsgStream) AsConsumer(channels []string,
 				return errors.New("pulsar is not ready, consumer is nil")
 			}
 
-			ms.consumers = append(ms.consumers, pc)
-			ms.consumerChannels = append(ms.consumerChannels, channels[i])
+			ms.consumers[channel] = pc
+			ms.consumerChannels = append(ms.consumerChannels, channel)
 			ms.consumerReflects = append(ms.consumerReflects, reflect.SelectCase{
 				Dir:  reflect.SelectRecv,
 				Chan: reflect.ValueOf(pc.Chan()),
@@ -153,7 +163,7 @@ func (ms *PulsarMsgStream) AsConsumer(channels []string,
 		}
 		err := util.Retry(20, time.Millisecond*200, fn)
 		if err != nil {
-			errMsg := "Failed to create consumer " + channels[i] + ", error = " + err.Error()
+			errMsg := "Failed to create consumer " + channel + ", error = " + err.Error()
 			panic(errMsg)
 		}
 	}
@@ -233,6 +243,7 @@ func (ms *PulsarMsgStream) Produce(ctx context.Context, msgPack *MsgPack) error
 		return err
 	}
 	for k, v := range result {
+		channel := ms.producerChannels[k]
 		for i := 0; i < len(v.Msgs); i++ {
 			mb, err := v.Msgs[i].Marshal(v.Msgs[i])
 			if err != nil {
@@ -249,7 +260,7 @@ func (ms *PulsarMsgStream) Produce(ctx context.Context, msgPack *MsgPack) error
 			sp, spanCtx := trace.MsgSpanFromCtx(ctx, v.Msgs[i])
 			trace.InjectContextToPulsarMsgProperties(sp.Context(), msg.Properties)
 
-			if _, err := ms.producers[k].Send(
+			if _, err := ms.producers[channel].Send(
 				spanCtx,
 				msg,
 			); err != nil {
@@ -264,7 +275,6 @@ func (ms *PulsarMsgStream) Produce(ctx context.Context, msgPack *MsgPack) error
 }
 
 func (ms *PulsarMsgStream) Broadcast(ctx context.Context, msgPack *MsgPack) error {
-	producerLen := len(ms.producers)
 	for _, v := range msgPack.Msgs {
 		mb, err := v.Marshal(v)
 		if err != nil {
@@ -281,8 +291,9 @@ func (ms *PulsarMsgStream) Broadcast(ctx context.Context, msgPack *MsgPack) erro
 		sp, spanCtx := trace.MsgSpanFromCtx(ctx, v)
 		trace.InjectContextToPulsarMsgProperties(sp.Context(), msg.Properties)
 
-		for i := 0; i < producerLen; i++ {
-			if _, err := ms.producers[i].Send(
+		ms.producerLock.Lock()
+		for _, producer := range ms.producers {
+			if _, err := producer.Send(
 				spanCtx,
 				msg,
 			); err != nil {
@@ -291,6 +302,7 @@ func (ms *PulsarMsgStream) Broadcast(ctx context.Context, msgPack *MsgPack) erro
 				return err
 			}
 		}
+		ms.producerLock.Unlock()
 		sp.Finish()
 	}
 	return nil
@@ -319,7 +331,7 @@ func (ms *PulsarMsgStream) Consume() (*MsgPack, context.Context) {
 			sp.Finish()
 			return cm, ctx
 		case <-ms.ctx.Done():
-			log.Debug("context closed")
+			//log.Debug("context closed")
 			return nil, nil
 		}
 	}
@@ -469,18 +481,17 @@ func (ms *PulsarMsgStream) Chan() <-chan *MsgPack {
 }
 
 func (ms *PulsarMsgStream) Seek(mp *internalpb.MsgPosition) error {
-	for index, channel := range ms.consumerChannels {
-		if channel == mp.ChannelName {
-			messageID, err := typeutil.StringToPulsarMsgID(mp.MsgID)
-			if err != nil {
-				return err
-			}
-			err = ms.consumers[index].Seek(messageID)
-			if err != nil {
-				return err
-			}
-			return nil
+	if _, ok := ms.consumers[mp.ChannelName]; ok {
+		consumer := ms.consumers[mp.ChannelName]
+		messageID, err := typeutil.StringToPulsarMsgID(mp.MsgID)
+		if err != nil {
+			return err
 		}
+		err = consumer.Seek(messageID)
+		if err != nil {
+			return err
+		}
+		return nil
 	}
 
 	return errors.New("msgStream seek fail")
@@ -488,11 +499,12 @@ func (ms *PulsarMsgStream) Seek(mp *internalpb.MsgPosition) error {
 
 type PulsarTtMsgStream struct {
 	PulsarMsgStream
-	unsolvedBuf   map[Consumer][]TsMsg
-	msgPositions  map[Consumer]*internalpb.MsgPosition
-	unsolvedMutex *sync.Mutex
-	lastTimeStamp Timestamp
-	syncConsumer  chan int
+	unsolvedBuf     map[Consumer][]TsMsg
+	msgPositions    map[Consumer]*internalpb.MsgPosition
+	unsolvedMutex   *sync.Mutex
+	lastTimeStamp   Timestamp
+	syncConsumer    chan int
+	stopConsumeChan map[Consumer]chan bool
 }
 
 func newPulsarTtMsgStream(ctx context.Context,
@@ -505,6 +517,7 @@ func newPulsarTtMsgStream(ctx context.Context,
 		return nil, err
 	}
 	unsolvedBuf := make(map[Consumer][]TsMsg)
+	stopChannel := make(map[Consumer]chan bool)
 	msgPositions := make(map[Consumer]*internalpb.MsgPosition)
 	syncConsumer := make(chan int, 1)
 
@@ -514,19 +527,39 @@ func newPulsarTtMsgStream(ctx context.Context,
 		msgPositions:    msgPositions,
 		unsolvedMutex:   &sync.Mutex{},
 		syncConsumer:    syncConsumer,
+		stopConsumeChan: stopChannel,
 	}, nil
 }
 
+func (ms *PulsarTtMsgStream) addConsumer(consumer Consumer, channel string) {
+	if len(ms.consumers) == 0 {
+		ms.syncConsumer <- 1
+	}
+	ms.consumers[channel] = consumer
+	ms.unsolvedBuf[consumer] = make([]TsMsg, 0)
+	ms.consumerChannels = append(ms.consumerChannels, channel)
+	ms.msgPositions[consumer] = &internalpb.MsgPosition{
+		ChannelName: channel,
+		MsgID:       "",
+		Timestamp:   ms.lastTimeStamp,
+	}
+	stopConsumeChan := make(chan bool)
+	ms.stopConsumeChan[consumer] = stopConsumeChan
+}
+
 func (ms *PulsarTtMsgStream) AsConsumer(channels []string,
 	subName string) {
-	for i := 0; i < len(channels); i++ {
+	for _, channel := range channels {
+		if _, ok := ms.consumers[channel]; ok {
+			continue
+		}
 		fn := func() error {
 			receiveChannel := make(chan pulsar.ConsumerMessage, ms.pulsarBufSize)
 			pc, err := ms.client.Subscribe(pulsar.ConsumerOptions{
-				Topic:                       channels[i],
+				Topic:                       channel,
 				SubscriptionName:            subName,
 				Type:                        pulsar.KeyShared,
-				SubscriptionInitialPosition: pulsar.SubscriptionPositionLatest,
+				SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
 				MessageChannel:              receiveChannel,
 			})
 			if err != nil {
@@ -537,23 +570,13 @@ func (ms *PulsarTtMsgStream) AsConsumer(channels []string,
 			}
 
 			ms.consumerLock.Lock()
-			if len(ms.consumers) == 0 {
-				ms.syncConsumer <- 1
-			}
-			ms.consumers = append(ms.consumers, pc)
-			ms.unsolvedBuf[pc] = make([]TsMsg, 0)
-			ms.msgPositions[pc] = &internalpb.MsgPosition{
-				ChannelName: channels[i],
-				MsgID:       "",
-				Timestamp:   ms.lastTimeStamp,
-			}
-			ms.consumerChannels = append(ms.consumerChannels, channels[i])
+			ms.addConsumer(pc, channel)
 			ms.consumerLock.Unlock()
 			return nil
 		}
 		err := util.Retry(10, time.Millisecond*200, fn)
 		if err != nil {
-			errMsg := "Failed to create consumer " + channels[i] + ", error = " + err.Error()
+			errMsg := "Failed to create consumer " + channel + ", error = " + err.Error()
 			panic(errMsg)
 		}
 	}
@@ -728,79 +751,87 @@ func (ms *PulsarTtMsgStream) findTimeTick(consumer Consumer,
 				return
 			}
 			sp.Finish()
+		case <-ms.stopConsumeChan[consumer]:
+			return
 		}
 	}
 }
 
 func (ms *PulsarTtMsgStream) Seek(mp *internalpb.MsgPosition) error {
+	if len(mp.MsgID) == 0 {
+		return errors.New("when msgID's length equal to 0, please use AsConsumer interface")
+	}
 	var consumer Consumer
-	var messageID MessageID
-	for index, channel := range ms.consumerChannels {
-		if filepath.Base(channel) == filepath.Base(mp.ChannelName) {
-			consumer = ms.consumers[index]
-			if len(mp.MsgID) == 0 {
-				// TODO:: collection should has separate channels; otherwise will consume redundant msg
-				messageID = pulsar.EarliestMessageID()
-				break
-			}
-			seekMsgID, err := typeutil.StringToPulsarMsgID(mp.MsgID)
-			if err != nil {
-				return err
-			}
-			messageID = seekMsgID
-			break
-		}
+	var err error
+	var hasWatched bool
+	seekChannel := mp.ChannelName
+	subName := mp.MsgGroup
+	ms.consumerLock.Lock()
+	defer ms.consumerLock.Unlock()
+	consumer, hasWatched = ms.consumers[seekChannel]
+
+	if hasWatched {
+		return errors.New("the channel should has been subscribed")
+	}
+
+	receiveChannel := make(chan pulsar.ConsumerMessage, ms.pulsarBufSize)
+	consumer, err = ms.client.Subscribe(pulsar.ConsumerOptions{
+		Topic:                       seekChannel,
+		SubscriptionName:            subName,
+		Type:                        pulsar.KeyShared,
+		SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
+		MessageChannel:              receiveChannel,
+	})
+	if err != nil {
+		return err
+	}
+	if consumer == nil {
+		return errors.New("pulsar is not ready, consumer is nil")
+	}
+	seekMsgID, err := typeutil.StringToPulsarMsgID(mp.MsgID)
+	if err != nil {
+		return err
 	}
+	consumer.Seek(seekMsgID)
+	ms.addConsumer(consumer, seekChannel)
 
-	if consumer != nil {
-		err := (consumer).Seek(messageID)
-		if err != nil {
-			return err
-		}
-		if messageID == nil {
+	if len(consumer.Chan()) == 0 {
+		return nil
+	}
+	for {
+		select {
+		case <-ms.ctx.Done():
 			return nil
-		}
-
-		ms.unsolvedMutex.Lock()
-		ms.unsolvedBuf[consumer] = make([]TsMsg, 0)
-		for {
-			select {
-			case <-ms.ctx.Done():
-				return nil
-			case pulsarMsg, ok := <-consumer.Chan():
-				if !ok {
-					return errors.New("consumer closed")
-				}
-				consumer.Ack(pulsarMsg)
+		case pulsarMsg, ok := <-consumer.Chan():
+			if !ok {
+				return errors.New("consumer closed")
+			}
+			consumer.Ack(pulsarMsg)
 
-				headerMsg := commonpb.MsgHeader{}
-				err := proto.Unmarshal(pulsarMsg.Payload(), &headerMsg)
-				if err != nil {
-					log.Error("Failed to unmarshal message header", zap.Error(err))
-				}
-				tsMsg, err := ms.unmarshal.Unmarshal(pulsarMsg.Payload(), headerMsg.Base.MsgType)
-				if err != nil {
-					log.Error("Failed to unmarshal tsMsg", zap.Error(err))
-				}
-				if tsMsg.Type() == commonpb.MsgType_TimeTick {
-					if tsMsg.BeginTs() >= mp.Timestamp {
-						ms.unsolvedMutex.Unlock()
-						return nil
-					}
-					continue
-				}
-				if tsMsg.BeginTs() > mp.Timestamp {
-					tsMsg.SetPosition(&msgstream.MsgPosition{
-						ChannelName: filepath.Base(pulsarMsg.Topic()),
-						MsgID:       typeutil.PulsarMsgIDToString(pulsarMsg.ID()),
-					})
-					ms.unsolvedBuf[consumer] = append(ms.unsolvedBuf[consumer], tsMsg)
+			headerMsg := commonpb.MsgHeader{}
+			err := proto.Unmarshal(pulsarMsg.Payload(), &headerMsg)
+			if err != nil {
+				log.Error("Failed to unmarshal message header", zap.Error(err))
+			}
+			tsMsg, err := ms.unmarshal.Unmarshal(pulsarMsg.Payload(), headerMsg.Base.MsgType)
+			if err != nil {
+				log.Error("Failed to unmarshal tsMsg", zap.Error(err))
+			}
+			if tsMsg.Type() == commonpb.MsgType_TimeTick {
+				if tsMsg.BeginTs() >= mp.Timestamp {
+					return nil
 				}
+				continue
+			}
+			if tsMsg.BeginTs() > mp.Timestamp {
+				tsMsg.SetPosition(&msgstream.MsgPosition{
+					ChannelName: filepath.Base(pulsarMsg.Topic()),
+					MsgID:       typeutil.PulsarMsgIDToString(pulsarMsg.ID()),
+				})
+				ms.unsolvedBuf[consumer] = append(ms.unsolvedBuf[consumer], tsMsg)
 			}
 		}
 	}
-
-	return errors.New("msgStream seek fail")
 }
 
 func checkTimeTickMsg(msg map[Consumer]Timestamp,
@@ -839,10 +870,8 @@ func checkTimeTickMsg(msg map[Consumer]Timestamp,
 type InMemMsgStream struct {
 	buffer chan *MsgPack
 }
-
 func (ms *InMemMsgStream) Start() {}
 func (ms *InMemMsgStream) Close() {}
-
 func (ms *InMemMsgStream) ProduceOne(msg TsMsg) error {
 	msgPack := MsgPack{}
 	msgPack.BeginTs = msg.BeginTs()
@@ -851,23 +880,19 @@ func (ms *InMemMsgStream) ProduceOne(msg TsMsg) error {
 	buffer <- &msgPack
 	return nil
 }
-
 func (ms *InMemMsgStream) Produce(msgPack *MsgPack) error {
 	buffer <- msgPack
 	return nil
 }
-
 func (ms *InMemMsgStream) Broadcast(msgPack *MsgPack) error {
 	return ms.Produce(msgPack)
 }
-
 func (ms *InMemMsgStream) Consume() *MsgPack {
 	select {
 	case msgPack := <-ms.buffer:
 		return msgPack
 	}
 }
-
 func (ms *InMemMsgStream) Chan() <- chan *MsgPack {
 	return buffer
 }
diff --git a/internal/msgstream/rmqms/rmq_msgstream.go b/internal/msgstream/rmqms/rmq_msgstream.go
index 8e4babdc4362eb3d4667abe63698266c38a5ef8b..49c3d6bbb1aa75ec936230e2df008633545cc3cc 100644
--- a/internal/msgstream/rmqms/rmq_msgstream.go
+++ b/internal/msgstream/rmqms/rmq_msgstream.go
@@ -9,14 +9,15 @@ import (
 	"sync"
 	"time"
 
+	"go.uber.org/zap"
+
 	"github.com/gogo/protobuf/proto"
 	"github.com/zilliztech/milvus-distributed/internal/log"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
+	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
 	client "github.com/zilliztech/milvus-distributed/internal/util/rocksmq/client/rocksmq"
-
-	"go.uber.org/zap"
 )
 
 type TsMsg = msgstream.TsMsg
@@ -35,8 +36,9 @@ type Consumer = client.Consumer
 type RmqMsgStream struct {
 	ctx              context.Context
 	client           client.Client
-	producers        []Producer
-	consumers        []Consumer
+	producers        map[string]Producer
+	producerChannels []string
+	consumers        map[string]Consumer
 	consumerChannels []string
 	unmarshal        msgstream.UnmarshalDispatcher
 	repackFunc       msgstream.RepackFunc
@@ -45,6 +47,7 @@ type RmqMsgStream struct {
 	wait             *sync.WaitGroup
 	streamCancel     func()
 	rmqBufSize       int64
+	producerLock     *sync.Mutex
 	consumerLock     *sync.Mutex
 	consumerReflects []reflect.SelectCase
 
@@ -55,10 +58,11 @@ func newRmqMsgStream(ctx context.Context, receiveBufSize int64, rmqBufSize int64
 	unmarshal msgstream.UnmarshalDispatcher) (*RmqMsgStream, error) {
 
 	streamCtx, streamCancel := context.WithCancel(ctx)
-	producers := make([]Producer, 0)
-	consumers := make([]Consumer, 0)
-	consumerChannels := make([]string, 0)
+	producers := make(map[string]Producer)
+	producerChannels := make([]string, 0)
 	consumerReflects := make([]reflect.SelectCase, 0)
+	consumers := make(map[string]Consumer)
+	consumerChannels := make([]string, 0)
 	receiveBuf := make(chan *MsgPack, receiveBufSize)
 
 	var clientOpts client.ClientOptions
@@ -73,12 +77,14 @@ func newRmqMsgStream(ctx context.Context, receiveBufSize int64, rmqBufSize int64
 		ctx:              streamCtx,
 		client:           client,
 		producers:        producers,
+		producerChannels: producerChannels,
 		consumers:        consumers,
 		consumerChannels: consumerChannels,
 		unmarshal:        unmarshal,
 		receiveBuf:       receiveBuf,
 		streamCancel:     streamCancel,
 		consumerReflects: consumerReflects,
+		producerLock:     &sync.Mutex{},
 		consumerLock:     &sync.Mutex{},
 		wait:             &sync.WaitGroup{},
 		scMap:            &sync.Map{},
@@ -92,6 +98,8 @@ func (rms *RmqMsgStream) Start() {
 
 func (rms *RmqMsgStream) Close() {
 	rms.streamCancel()
+	rms.wait.Wait()
+
 	if rms.client != nil {
 		rms.client.Close()
 	}
@@ -105,7 +113,10 @@ func (rms *RmqMsgStream) AsProducer(channels []string) {
 	for _, channel := range channels {
 		pp, err := rms.client.CreateProducer(client.ProducerOptions{Topic: channel})
 		if err == nil {
-			rms.producers = append(rms.producers, pp)
+			rms.producerLock.Lock()
+			rms.producers[channel] = pp
+			rms.producerChannels = append(rms.producerChannels, channel)
+			rms.producerLock.Unlock()
 		} else {
 			errMsg := "Failed to create producer " + channel + ", error = " + err.Error()
 			panic(errMsg)
@@ -114,11 +125,14 @@ func (rms *RmqMsgStream) AsProducer(channels []string) {
 }
 
 func (rms *RmqMsgStream) AsConsumer(channels []string, groupName string) {
-	for i := 0; i < len(channels); i++ {
+	for _, channel := range channels {
+		if _, ok := rms.consumers[channel]; ok {
+			continue
+		}
 		fn := func() error {
 			receiveChannel := make(chan client.ConsumerMessage, rms.rmqBufSize)
 			pc, err := rms.client.Subscribe(client.ConsumerOptions{
-				Topic:            channels[i],
+				Topic:            channel,
 				SubscriptionName: groupName,
 				MessageChannel:   receiveChannel,
 			})
@@ -129,8 +143,8 @@ func (rms *RmqMsgStream) AsConsumer(channels []string, groupName string) {
 				return errors.New("RocksMQ is not ready, consumer is nil")
 			}
 
-			rms.consumers = append(rms.consumers, pc)
-			rms.consumerChannels = append(rms.consumerChannels, channels[i])
+			rms.consumers[channel] = pc
+			rms.consumerChannels = append(rms.consumerChannels, channel)
 			rms.consumerReflects = append(rms.consumerReflects, reflect.SelectCase{
 				Dir:  reflect.SelectRecv,
 				Chan: reflect.ValueOf(pc.Chan()),
@@ -141,7 +155,7 @@ func (rms *RmqMsgStream) AsConsumer(channels []string, groupName string) {
 		}
 		err := util.Retry(20, time.Millisecond*200, fn)
 		if err != nil {
-			errMsg := "Failed to create consumer " + channels[i] + ", error = " + err.Error()
+			errMsg := "Failed to create consumer " + channel + ", error = " + err.Error()
 			panic(errMsg)
 		}
 	}
@@ -194,6 +208,7 @@ func (rms *RmqMsgStream) Produce(ctx context.Context, pack *msgstream.MsgPack) e
 		return err
 	}
 	for k, v := range result {
+		channel := rms.producerChannels[k]
 		for i := 0; i < len(v.Msgs); i++ {
 			mb, err := v.Msgs[i].Marshal(v.Msgs[i])
 			if err != nil {
@@ -205,7 +220,7 @@ func (rms *RmqMsgStream) Produce(ctx context.Context, pack *msgstream.MsgPack) e
 				return err
 			}
 			msg := &client.ProducerMessage{Payload: m}
-			if err := rms.producers[k].Send(msg); err != nil {
+			if err := rms.producers[channel].Send(msg); err != nil {
 				return err
 			}
 		}
@@ -214,7 +229,6 @@ func (rms *RmqMsgStream) Produce(ctx context.Context, pack *msgstream.MsgPack) e
 }
 
 func (rms *RmqMsgStream) Broadcast(ctx context.Context, msgPack *MsgPack) error {
-	producerLen := len(rms.producers)
 	for _, v := range msgPack.Msgs {
 		mb, err := v.Marshal(v)
 		if err != nil {
@@ -228,13 +242,15 @@ func (rms *RmqMsgStream) Broadcast(ctx context.Context, msgPack *MsgPack) error
 
 		msg := &client.ProducerMessage{Payload: m}
 
-		for i := 0; i < producerLen; i++ {
-			if err := rms.producers[i].Send(
+		rms.producerLock.Lock()
+		for _, producer := range rms.producers {
+			if err := producer.Send(
 				msg,
 			); err != nil {
 				return err
 			}
 		}
+		rms.producerLock.Unlock()
 	}
 	return nil
 }
@@ -249,7 +265,7 @@ func (rms *RmqMsgStream) Consume() (*msgstream.MsgPack, context.Context) {
 			}
 			return cm, nil
 		case <-rms.ctx.Done():
-			log.Debug("context closed")
+			//log.Debug("context closed")
 			return nil, nil
 		}
 	}
@@ -298,19 +314,17 @@ func (rms *RmqMsgStream) Chan() <-chan *msgstream.MsgPack {
 }
 
 func (rms *RmqMsgStream) Seek(mp *msgstream.MsgPosition) error {
-	for index, channel := range rms.consumerChannels {
-		if channel == mp.ChannelName {
-			msgID, err := strconv.ParseInt(mp.MsgID, 10, 64)
-			if err != nil {
-				return err
-			}
-			messageID := UniqueID(msgID)
-			err = rms.consumers[index].Seek(messageID)
-			if err != nil {
-				return err
-			}
-			return nil
+	if _, ok := rms.consumers[mp.ChannelName]; ok {
+		consumer := rms.consumers[mp.ChannelName]
+		msgID, err := strconv.ParseInt(mp.MsgID, 10, 64)
+		if err != nil {
+			return err
+		}
+		err = consumer.Seek(msgID)
+		if err != nil {
+			return err
 		}
+		return nil
 	}
 
 	return errors.New("msgStream seek fail")
@@ -319,6 +333,7 @@ func (rms *RmqMsgStream) Seek(mp *msgstream.MsgPosition) error {
 type RmqTtMsgStream struct {
 	RmqMsgStream
 	unsolvedBuf   map[Consumer][]TsMsg
+	msgPositions  map[Consumer]*internalpb.MsgPosition
 	unsolvedMutex *sync.Mutex
 	lastTimeStamp Timestamp
 	syncConsumer  chan int
@@ -330,24 +345,44 @@ func newRmqTtMsgStream(ctx context.Context, receiveBufSize int64, rmqBufSize int
 	if err != nil {
 		return nil, err
 	}
+
 	unsolvedBuf := make(map[Consumer][]TsMsg)
 	syncConsumer := make(chan int, 1)
+	msgPositions := make(map[Consumer]*internalpb.MsgPosition)
 
 	return &RmqTtMsgStream{
 		RmqMsgStream:  *rmqMsgStream,
 		unsolvedBuf:   unsolvedBuf,
+		msgPositions:  msgPositions,
 		unsolvedMutex: &sync.Mutex{},
 		syncConsumer:  syncConsumer,
 	}, nil
 }
 
+func (rtms *RmqTtMsgStream) addConsumer(consumer Consumer, channel string) {
+	if len(rtms.consumers) == 0 {
+		rtms.syncConsumer <- 1
+	}
+	rtms.consumers[channel] = consumer
+	rtms.unsolvedBuf[consumer] = make([]TsMsg, 0)
+	rtms.msgPositions[consumer] = &internalpb.MsgPosition{
+		ChannelName: channel,
+		MsgID:       "",
+		Timestamp:   rtms.lastTimeStamp,
+	}
+	rtms.consumerChannels = append(rtms.consumerChannels, channel)
+}
+
 func (rtms *RmqTtMsgStream) AsConsumer(channels []string,
 	groupName string) {
-	for i := 0; i < len(channels); i++ {
+	for _, channel := range channels {
+		if _, ok := rtms.consumers[channel]; ok {
+			continue
+		}
 		fn := func() error {
 			receiveChannel := make(chan client.ConsumerMessage, rtms.rmqBufSize)
 			pc, err := rtms.client.Subscribe(client.ConsumerOptions{
-				Topic:            channels[i],
+				Topic:            channel,
 				SubscriptionName: groupName,
 				MessageChannel:   receiveChannel,
 			})
@@ -355,22 +390,17 @@ func (rtms *RmqTtMsgStream) AsConsumer(channels []string,
 				return err
 			}
 			if pc == nil {
-				return errors.New("pulsar is not ready, consumer is nil")
+				return errors.New("RocksMQ is not ready, consumer is nil")
 			}
 
 			rtms.consumerLock.Lock()
-			if len(rtms.consumers) == 0 {
-				rtms.syncConsumer <- 1
-			}
-			rtms.consumers = append(rtms.consumers, pc)
-			rtms.unsolvedBuf[pc] = make([]TsMsg, 0)
-			rtms.consumerChannels = append(rtms.consumerChannels, channels[i])
+			rtms.addConsumer(pc, channel)
 			rtms.consumerLock.Unlock()
 			return nil
 		}
 		err := util.Retry(10, time.Millisecond*200, fn)
 		if err != nil {
-			errMsg := "Failed to create consumer " + channels[i] + ", error = " + err.Error()
+			errMsg := "Failed to create consumer " + channel + ", error = " + err.Error()
 			panic(errMsg)
 		}
 	}
@@ -427,7 +457,8 @@ func (rtms *RmqTtMsgStream) bufMsgPackToChannel() {
 				continue
 			}
 			timeTickBuf := make([]TsMsg, 0)
-			msgPositions := make([]*msgstream.MsgPosition, 0)
+			startMsgPosition := make([]*internalpb.MsgPosition, 0)
+			endMsgPositions := make([]*internalpb.MsgPosition, 0)
 			rtms.unsolvedMutex.Lock()
 			for consumer, msgs := range rtms.unsolvedBuf {
 				if len(msgs) == 0 {
@@ -448,19 +479,24 @@ func (rtms *RmqTtMsgStream) bufMsgPackToChannel() {
 				}
 				rtms.unsolvedBuf[consumer] = tempBuffer
 
+				startMsgPosition = append(startMsgPosition, rtms.msgPositions[consumer])
+				var newPos *internalpb.MsgPosition
 				if len(tempBuffer) > 0 {
-					msgPositions = append(msgPositions, &msgstream.MsgPosition{
+					newPos = &internalpb.MsgPosition{
 						ChannelName: tempBuffer[0].Position().ChannelName,
 						MsgID:       tempBuffer[0].Position().MsgID,
 						Timestamp:   timeStamp,
-					})
+					}
+					endMsgPositions = append(endMsgPositions, newPos)
 				} else {
-					msgPositions = append(msgPositions, &msgstream.MsgPosition{
+					newPos = &internalpb.MsgPosition{
 						ChannelName: timeTickMsg.Position().ChannelName,
 						MsgID:       timeTickMsg.Position().MsgID,
 						Timestamp:   timeStamp,
-					})
+					}
+					endMsgPositions = append(endMsgPositions, newPos)
 				}
+				rtms.msgPositions[consumer] = newPos
 			}
 			rtms.unsolvedMutex.Unlock()
 
@@ -468,7 +504,8 @@ func (rtms *RmqTtMsgStream) bufMsgPackToChannel() {
 				BeginTs:        rtms.lastTimeStamp,
 				EndTs:          timeStamp,
 				Msgs:           timeTickBuf,
-				StartPositions: msgPositions,
+				StartPositions: startMsgPosition,
+				EndPositions:   endMsgPositions,
 			}
 
 			rtms.receiveBuf <- &msgPack
@@ -524,73 +561,78 @@ func (rtms *RmqTtMsgStream) findTimeTick(consumer Consumer,
 }
 
 func (rtms *RmqTtMsgStream) Seek(mp *msgstream.MsgPosition) error {
+	if len(mp.MsgID) == 0 {
+		return errors.New("when msgID's length equal to 0, please use AsConsumer interface")
+	}
 	var consumer Consumer
-	var messageID UniqueID
-	for index, channel := range rtms.consumerChannels {
-		if filepath.Base(channel) == filepath.Base(mp.ChannelName) {
-			consumer = rtms.consumers[index]
-			if len(mp.MsgID) == 0 {
-				messageID = -1
-				break
-			}
-			seekMsgID, err := strconv.ParseInt(mp.MsgID, 10, 64)
-			if err != nil {
-				return err
-			}
-			messageID = seekMsgID
-			break
-		}
+	var err error
+	var hasWatched bool
+	seekChannel := mp.ChannelName
+	subName := mp.MsgGroup
+	rtms.consumerLock.Lock()
+	defer rtms.consumerLock.Unlock()
+	consumer, hasWatched = rtms.consumers[seekChannel]
+
+	if hasWatched {
+		return errors.New("the channel should has been subscribed")
 	}
 
-	if consumer != nil {
-		err := (consumer).Seek(messageID)
-		if err != nil {
-			return err
-		}
-		//TODO: Is this right?
-		if messageID == 0 {
-			return nil
-		}
+	receiveChannel := make(chan client.ConsumerMessage, rtms.rmqBufSize)
+	consumer, err = rtms.client.Subscribe(client.ConsumerOptions{
+		Topic:            seekChannel,
+		SubscriptionName: subName,
+		MessageChannel:   receiveChannel,
+	})
+	if err != nil {
+		return err
+	}
+	if consumer == nil {
+		return errors.New("RocksMQ is not ready, consumer is nil")
+	}
+	seekMsgID, err := strconv.ParseInt(mp.MsgID, 10, 64)
+	if err != nil {
+		return err
+	}
+	consumer.Seek(seekMsgID)
+	rtms.addConsumer(consumer, seekChannel)
 
-		rtms.unsolvedMutex.Lock()
-		rtms.unsolvedBuf[consumer] = make([]TsMsg, 0)
-		for {
-			select {
-			case <-rtms.ctx.Done():
-				return nil
-			case rmqMsg, ok := <-consumer.Chan():
-				if !ok {
-					return errors.New("consumer closed")
-				}
+	if len(consumer.Chan()) == 0 {
+		return nil
+	}
 
-				headerMsg := commonpb.MsgHeader{}
-				err := proto.Unmarshal(rmqMsg.Payload, &headerMsg)
-				if err != nil {
-					log.Error("Failed to unmarshal message header", zap.Error(err))
-				}
-				tsMsg, err := rtms.unmarshal.Unmarshal(rmqMsg.Payload, headerMsg.Base.MsgType)
-				if err != nil {
-					log.Error("Failed to unmarshal tsMsg", zap.Error(err))
-				}
-				if tsMsg.Type() == commonpb.MsgType_TimeTick {
-					if tsMsg.BeginTs() >= mp.Timestamp {
-						rtms.unsolvedMutex.Unlock()
-						return nil
-					}
-					continue
-				}
-				if tsMsg.BeginTs() > mp.Timestamp {
-					tsMsg.SetPosition(&msgstream.MsgPosition{
-						ChannelName: filepath.Base(consumer.Topic()),
-						MsgID:       strconv.Itoa(int(rmqMsg.MsgID)),
-					})
-					rtms.unsolvedBuf[consumer] = append(rtms.unsolvedBuf[consumer], tsMsg)
+	for {
+		select {
+		case <-rtms.ctx.Done():
+			return nil
+		case rmqMsg, ok := <-consumer.Chan():
+			if !ok {
+				return errors.New("consumer closed")
+			}
+
+			headerMsg := commonpb.MsgHeader{}
+			err := proto.Unmarshal(rmqMsg.Payload, &headerMsg)
+			if err != nil {
+				log.Error("Failed to unmarshal message header", zap.Error(err))
+			}
+			tsMsg, err := rtms.unmarshal.Unmarshal(rmqMsg.Payload, headerMsg.Base.MsgType)
+			if err != nil {
+				log.Error("Failed to unmarshal tsMsg", zap.Error(err))
+			}
+			if tsMsg.Type() == commonpb.MsgType_TimeTick {
+				if tsMsg.BeginTs() >= mp.Timestamp {
+					return nil
 				}
+				continue
+			}
+			if tsMsg.BeginTs() > mp.Timestamp {
+				tsMsg.SetPosition(&msgstream.MsgPosition{
+					ChannelName: filepath.Base(consumer.Topic()),
+					MsgID:       strconv.Itoa(int(rmqMsg.MsgID)),
+				})
+				rtms.unsolvedBuf[consumer] = append(rtms.unsolvedBuf[consumer], tsMsg)
 			}
 		}
 	}
-
-	return errors.New("msgStream seek fail")
 }
 
 func checkTimeTickMsg(msg map[Consumer]Timestamp,
diff --git a/internal/proto/internal.proto b/internal/proto/internal.proto
index 0b7f3fab341bc11b479a0df73fea62f7bd65dbc9..8e8a656345a26e34161d861744c77962a642ef78 100644
--- a/internal/proto/internal.proto
+++ b/internal/proto/internal.proto
@@ -209,5 +209,6 @@ message QueryNodeStats {
 message MsgPosition {
   string channel_name = 1;
   string msgID = 2;
-  uint64 timestamp = 3;
+  string msgGroup = 3;
+  uint64 timestamp = 4;
 }
diff --git a/internal/proto/internalpb/internal.pb.go b/internal/proto/internalpb/internal.pb.go
index 072bc6b87d757dabd9becb20b085fa021d3592c7..869a7f7657df99d5770482182e06c7f6727f610d 100644
--- a/internal/proto/internalpb/internal.pb.go
+++ b/internal/proto/internalpb/internal.pb.go
@@ -1802,7 +1802,8 @@ func (m *QueryNodeStats) GetFieldStats() []*FieldStats {
 type MsgPosition struct {
 	ChannelName          string   `protobuf:"bytes,1,opt,name=channel_name,json=channelName,proto3" json:"channel_name,omitempty"`
 	MsgID                string   `protobuf:"bytes,2,opt,name=msgID,proto3" json:"msgID,omitempty"`
-	Timestamp            uint64   `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	MsgGroup             string   `protobuf:"bytes,3,opt,name=msgGroup,proto3" json:"msgGroup,omitempty"`
+	Timestamp            uint64   `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
 	XXX_sizecache        int32    `json:"-"`
@@ -1847,6 +1848,13 @@ func (m *MsgPosition) GetMsgID() string {
 	return ""
 }
 
+func (m *MsgPosition) GetMsgGroup() string {
+	if m != nil {
+		return m.MsgGroup
+	}
+	return ""
+}
+
 func (m *MsgPosition) GetTimestamp() uint64 {
 	if m != nil {
 		return m.Timestamp
@@ -1890,101 +1898,102 @@ func init() {
 func init() { proto.RegisterFile("internal.proto", fileDescriptor_41f4a519b878ee3b) }
 
 var fileDescriptor_41f4a519b878ee3b = []byte{
-	// 1524 bytes of a gzipped FileDescriptorProto
+	// 1539 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcd, 0x6f, 0x1b, 0x45,
-	0x14, 0x67, 0x6d, 0x27, 0xb6, 0xdf, 0x3a, 0xa9, 0xbb, 0xfd, 0xda, 0xd0, 0x94, 0xba, 0xcb, 0x57,
-	0xa0, 0x22, 0xa9, 0x52, 0x84, 0x10, 0x97, 0xb6, 0x89, 0x69, 0xb0, 0xda, 0x44, 0x61, 0x9d, 0x56,
-	0x82, 0xcb, 0x6a, 0xbc, 0x3b, 0xb1, 0xa7, 0xdd, 0x0f, 0x77, 0x66, 0xb6, 0xa9, 0x73, 0xe6, 0x86,
-	0xe0, 0x80, 0xc4, 0x3f, 0xc0, 0x1f, 0xc0, 0x99, 0x13, 0x20, 0x4e, 0x48, 0xdc, 0x91, 0x90, 0xf8,
-	0x4b, 0x38, 0xa1, 0xf9, 0xd8, 0xf5, 0x47, 0x9d, 0x34, 0x35, 0x54, 0x08, 0xc1, 0xcd, 0xf3, 0x9b,
-	0xb7, 0x6f, 0xe6, 0xf7, 0x7b, 0xef, 0xcd, 0x9b, 0x31, 0x2c, 0x92, 0x98, 0x63, 0x1a, 0xa3, 0x70,
-	0xb5, 0x4f, 0x13, 0x9e, 0x58, 0xe7, 0x22, 0x12, 0x3e, 0x4e, 0x99, 0x1a, 0xad, 0x66, 0x93, 0x2f,
-	0xd7, 0xfc, 0x24, 0x8a, 0x92, 0x58, 0xc1, 0xce, 0xf7, 0x06, 0x2c, 0x6c, 0x26, 0x51, 0x3f, 0x89,
-	0x71, 0xcc, 0x5b, 0xf1, 0x7e, 0x62, 0x9d, 0x87, 0xf9, 0x38, 0x09, 0x70, 0xab, 0x69, 0x1b, 0x0d,
-	0x63, 0xa5, 0xe8, 0xea, 0x91, 0x65, 0x41, 0x89, 0x26, 0x21, 0xb6, 0x0b, 0x0d, 0x63, 0xa5, 0xea,
-	0xca, 0xdf, 0xd6, 0x0d, 0x00, 0xc6, 0x11, 0xc7, 0x9e, 0x9f, 0x04, 0xd8, 0x2e, 0x36, 0x8c, 0x95,
-	0xc5, 0xf5, 0xc6, 0xea, 0xd4, 0x75, 0x57, 0xdb, 0xc2, 0x70, 0x33, 0x09, 0xb0, 0x5b, 0x65, 0xd9,
-	0x4f, 0xeb, 0x26, 0x00, 0x7e, 0xc2, 0x29, 0xf2, 0x48, 0xbc, 0x9f, 0xd8, 0xa5, 0x46, 0x71, 0xc5,
-	0x5c, 0xbf, 0x32, 0xee, 0x40, 0x6f, 0xf7, 0x0e, 0x1e, 0xdc, 0x47, 0x61, 0x8a, 0x77, 0x11, 0xa1,
-	0x6e, 0x55, 0x7e, 0x24, 0xb6, 0xeb, 0xfc, 0x66, 0xc0, 0xa9, 0x9c, 0x80, 0x5c, 0x83, 0x59, 0x1f,
-	0xc0, 0x9c, 0x5c, 0x42, 0x32, 0x30, 0xd7, 0x5f, 0x3b, 0x62, 0x47, 0x63, 0xbc, 0x5d, 0xf5, 0x89,
-	0x75, 0x0f, 0xce, 0xb0, 0xb4, 0xe3, 0x67, 0x53, 0x9e, 0x44, 0x99, 0x5d, 0x90, 0x5b, 0x3b, 0x99,
-	0x27, 0x6b, 0xd4, 0x81, 0xde, 0xd2, 0x75, 0x98, 0x17, 0x9e, 0x52, 0x26, 0x55, 0x32, 0xd7, 0x2f,
-	0x4e, 0x25, 0xd9, 0x96, 0x26, 0xae, 0x36, 0x75, 0x2e, 0xc2, 0xd2, 0x16, 0xe6, 0x13, 0xec, 0x5c,
-	0xfc, 0x28, 0xc5, 0x8c, 0xeb, 0xc9, 0x3d, 0x12, 0xe1, 0x3d, 0xe2, 0x3f, 0xdc, 0xec, 0xa1, 0x38,
-	0xc6, 0x61, 0x36, 0x79, 0x09, 0x2e, 0x6e, 0x61, 0xf9, 0x01, 0x61, 0x9c, 0xf8, 0x6c, 0x62, 0xfa,
-	0x1c, 0x9c, 0xd9, 0xc2, 0xbc, 0x19, 0x4c, 0xc0, 0xf7, 0xa1, 0xb2, 0x23, 0x82, 0x2d, 0xd2, 0xe0,
-	0x3d, 0x28, 0xa3, 0x20, 0xa0, 0x98, 0x31, 0xad, 0xe2, 0xf2, 0xd4, 0x1d, 0xdf, 0x52, 0x36, 0x6e,
-	0x66, 0x3c, 0x2d, 0x4d, 0x9c, 0x07, 0x00, 0xad, 0x98, 0xf0, 0x5d, 0x44, 0x51, 0xc4, 0x8e, 0x4c,
-	0xb0, 0x26, 0xd4, 0x18, 0x47, 0x94, 0x7b, 0x7d, 0x69, 0xa7, 0x25, 0x3f, 0x41, 0x36, 0x98, 0xf2,
-	0x33, 0xe5, 0xdd, 0xf9, 0x04, 0xa0, 0xcd, 0x29, 0x89, 0xbb, 0x77, 0x09, 0xe3, 0x62, 0xad, 0xc7,
-	0xc2, 0x4e, 0x90, 0x28, 0xae, 0x54, 0x5d, 0x3d, 0x1a, 0x09, 0x47, 0xe1, 0xe4, 0xe1, 0xb8, 0x01,
-	0x66, 0x26, 0xf7, 0x36, 0xeb, 0x5a, 0xd7, 0xa0, 0xd4, 0x41, 0x0c, 0x1f, 0x2b, 0xcf, 0x36, 0xeb,
-	0x6e, 0x20, 0x86, 0x5d, 0x69, 0xe9, 0xfc, 0x6e, 0xc0, 0x85, 0x4d, 0x8a, 0x65, 0xf2, 0x87, 0x21,
-	0xf6, 0x39, 0x49, 0x62, 0xad, 0xfd, 0xf3, 0x7b, 0xb3, 0x2e, 0x40, 0x39, 0xe8, 0x78, 0x31, 0x8a,
-	0x32, 0xb1, 0xe7, 0x83, 0xce, 0x0e, 0x8a, 0xb0, 0xf5, 0x06, 0x2c, 0xfa, 0xb9, 0x7f, 0x81, 0xc8,
-	0x9c, 0xab, 0xba, 0x13, 0xa8, 0x08, 0x55, 0xd0, 0x69, 0x35, 0xed, 0x92, 0x0c, 0x83, 0xfc, 0x6d,
-	0x39, 0x50, 0x1b, 0x5a, 0xb5, 0x9a, 0xf6, 0x9c, 0x9c, 0x1b, 0xc3, 0x84, 0xa8, 0xcc, 0xef, 0xe1,
-	0x08, 0xd9, 0xf3, 0x0d, 0x63, 0xa5, 0xe6, 0xea, 0x91, 0xf3, 0x93, 0x01, 0xe7, 0x9a, 0x34, 0xe9,
-	0xff, 0x9b, 0xc9, 0x39, 0x5f, 0x14, 0xe0, 0xbc, 0x8a, 0xd1, 0x2e, 0xa2, 0x9c, 0xbc, 0x20, 0x16,
-	0x6f, 0xc2, 0xa9, 0xe1, 0xaa, 0xca, 0x60, 0x3a, 0x8d, 0xd7, 0x61, 0xb1, 0x9f, 0xed, 0x43, 0xd9,
-	0x95, 0xa4, 0xdd, 0x42, 0x8e, 0x8e, 0xb1, 0x9d, 0x3b, 0x86, 0xed, 0xfc, 0x94, 0x50, 0x36, 0xc0,
-	0xcc, 0x1d, 0xb5, 0x9a, 0x76, 0x59, 0x9a, 0x8c, 0x42, 0xce, 0xe7, 0x05, 0x38, 0x2b, 0x82, 0xfa,
-	0xbf, 0x1a, 0x42, 0x8d, 0x1f, 0x0a, 0x60, 0xa9, 0xec, 0x68, 0xc5, 0x01, 0x7e, 0xf2, 0x4f, 0x6a,
-	0x71, 0x09, 0x60, 0x9f, 0xe0, 0x30, 0x18, 0xd5, 0xa1, 0x2a, 0x91, 0xbf, 0xa4, 0x81, 0x0d, 0x65,
-	0xe9, 0x24, 0xe7, 0x9f, 0x0d, 0xc5, 0xf9, 0xac, 0x7a, 0xb5, 0x3e, 0x9f, 0x2b, 0x27, 0x3e, 0x9f,
-	0xe5, 0x67, 0xfa, 0x7c, 0xfe, 0xb6, 0x08, 0x0b, 0xad, 0x98, 0x61, 0xca, 0xff, 0xcb, 0x89, 0x64,
-	0x2d, 0x43, 0x95, 0xe1, 0x6e, 0x24, 0xae, 0x0c, 0x4d, 0xbb, 0x22, 0xe7, 0x87, 0x80, 0x98, 0xf5,
-	0x55, 0x6b, 0x6e, 0x35, 0xed, 0xaa, 0x0a, 0x6d, 0x0e, 0x58, 0xaf, 0x00, 0x70, 0x12, 0x61, 0xc6,
-	0x51, 0xd4, 0x67, 0x36, 0x34, 0x8a, 0x2b, 0x25, 0x77, 0x04, 0x11, 0xe7, 0x33, 0x4d, 0x0e, 0x5a,
-	0x4d, 0x66, 0x9b, 0x8d, 0xa2, 0x68, 0xb0, 0x6a, 0x64, 0xbd, 0x0b, 0x15, 0x9a, 0x1c, 0x78, 0x01,
-	0xe2, 0xc8, 0xae, 0xc9, 0xe0, 0x2d, 0x4d, 0x15, 0x7b, 0x23, 0x4c, 0x3a, 0x6e, 0x99, 0x26, 0x07,
-	0x4d, 0xc4, 0x91, 0xf3, 0x5d, 0x01, 0x16, 0xda, 0x18, 0x51, 0xbf, 0x37, 0x7b, 0xc0, 0xde, 0x82,
-	0x3a, 0xc5, 0x2c, 0x0d, 0xb9, 0x37, 0xa4, 0xa5, 0x22, 0x77, 0x4a, 0xe1, 0x9b, 0x39, 0xb9, 0x4c,
-	0xf2, 0xe2, 0x31, 0x92, 0x97, 0xa6, 0x48, 0xee, 0x40, 0x6d, 0x44, 0x5f, 0x66, 0xcf, 0x49, 0xea,
-	0x63, 0x98, 0x55, 0x87, 0x62, 0xc0, 0x42, 0x19, 0xb1, 0xaa, 0x2b, 0x7e, 0x5a, 0x57, 0xe1, 0x74,
-	0x3f, 0x44, 0x3e, 0xee, 0x25, 0x61, 0x80, 0xa9, 0xd7, 0xa5, 0x49, 0xda, 0x97, 0xe1, 0xaa, 0xb9,
-	0xf5, 0x91, 0x89, 0x2d, 0x81, 0x5b, 0x6b, 0x30, 0xf7, 0x28, 0xc5, 0x74, 0x20, 0xe3, 0x75, 0xac,
-	0x78, 0xca, 0xce, 0xf9, 0xd5, 0x18, 0x4a, 0x27, 0x58, 0xb2, 0x19, 0xa4, 0x9b, 0xe5, 0xa6, 0x32,
-	0x55, 0xef, 0xe2, 0x74, 0xbd, 0x2f, 0x83, 0x19, 0x61, 0x4e, 0x89, 0xef, 0xf1, 0x41, 0x3f, 0x2b,
-	0x03, 0x50, 0xd0, 0xde, 0xa0, 0x2f, 0x6b, 0xa0, 0x47, 0xb8, 0x12, 0xb4, 0xe6, 0xca, 0xdf, 0xce,
-	0x2f, 0x06, 0x2c, 0x34, 0x71, 0x88, 0x39, 0x9e, 0x3d, 0x27, 0xa6, 0xd4, 0x6a, 0x61, 0x6a, 0xad,
-	0x8e, 0x15, 0x43, 0xf1, 0xf8, 0x62, 0x28, 0x3d, 0x55, 0x0c, 0x57, 0xa0, 0xd6, 0xa7, 0x24, 0x42,
-	0x74, 0xe0, 0x3d, 0xc4, 0x83, 0x2c, 0x2f, 0x4c, 0x8d, 0xdd, 0xc1, 0x03, 0xe6, 0x7c, 0x63, 0x40,
-	0xe5, 0x76, 0x98, 0xb2, 0xde, 0x4c, 0xb7, 0xba, 0xf1, 0x52, 0x2e, 0x4c, 0x96, 0xf2, 0x64, 0xee,
-	0x16, 0x9f, 0x91, 0xbb, 0x7b, 0xa8, 0xab, 0x83, 0x30, 0x86, 0x39, 0x7f, 0x18, 0x50, 0xbd, 0x9b,
-	0xa0, 0x40, 0xf6, 0x9d, 0xbf, 0x7d, 0x97, 0xcb, 0x30, 0x6c, 0x1d, 0x99, 0xc6, 0xc3, 0x5e, 0x32,
-	0xd2, 0x13, 0x4a, 0xe3, 0x3d, 0xe1, 0x32, 0x98, 0x44, 0x6c, 0xc8, 0xeb, 0x23, 0xde, 0x53, 0xe2,
-	0x56, 0x5d, 0x90, 0xd0, 0xae, 0x40, 0x44, 0xd3, 0xc8, 0x0c, 0x64, 0xd3, 0x98, 0x3f, 0x71, 0xd3,
-	0xd0, 0x4e, 0x64, 0xd3, 0xf8, 0xb1, 0x00, 0x76, 0x5b, 0x6d, 0x76, 0xf8, 0xa6, 0xb9, 0xd7, 0x0f,
-	0xe4, 0xd3, 0x6a, 0x19, 0xaa, 0xed, 0x9c, 0x99, 0x7a, 0x52, 0x0c, 0x01, 0x91, 0x1f, 0xdb, 0x38,
-	0x4a, 0xe8, 0xa0, 0x4d, 0x0e, 0xb1, 0x26, 0x3e, 0x82, 0x08, 0x6e, 0x3b, 0x69, 0xe4, 0x26, 0x07,
-	0x4c, 0x87, 0x26, 0x1b, 0x0a, 0x6e, 0xbe, 0x6c, 0xf5, 0x9e, 0x48, 0x27, 0xc9, 0xbc, 0xe4, 0x82,
-	0x82, 0xc4, 0x3b, 0xc0, 0x5a, 0x82, 0x0a, 0x8e, 0x03, 0x35, 0x3b, 0x27, 0x67, 0xcb, 0x38, 0x0e,
-	0xe4, 0x54, 0x0b, 0x16, 0xf5, 0x5b, 0x26, 0x61, 0x32, 0x84, 0xf2, 0xd0, 0x31, 0xd7, 0x9d, 0x23,
-	0x1e, 0x90, 0xdb, 0xac, 0xbb, 0xab, 0x2d, 0xdd, 0x05, 0xf5, 0x9c, 0xd1, 0x43, 0xeb, 0x43, 0xa8,
-	0x89, 0x55, 0x72, 0x47, 0xe5, 0x13, 0x3b, 0x32, 0x71, 0x1c, 0x64, 0x03, 0xe7, 0x2b, 0x03, 0x4e,
-	0x3f, 0x25, 0xe1, 0x0c, 0x79, 0x74, 0x07, 0x2a, 0x6d, 0xdc, 0x15, 0x2e, 0xb2, 0x17, 0xda, 0xda,
-	0x51, 0x0f, 0xfe, 0x23, 0x02, 0xe6, 0xe6, 0x0e, 0x9c, 0x07, 0x79, 0x58, 0x65, 0xfd, 0x89, 0x97,
-	0xae, 0x38, 0x54, 0x82, 0x17, 0x50, 0x88, 0xce, 0x67, 0x86, 0x78, 0x85, 0x06, 0xf8, 0x89, 0x5c,
-	0xfa, 0xa9, 0xc4, 0x34, 0x66, 0x49, 0x4c, 0xeb, 0x1a, 0x9c, 0x8d, 0xd3, 0xc8, 0xa3, 0x38, 0x44,
-	0x1c, 0x07, 0x9e, 0x5e, 0x8d, 0xe9, 0xd5, 0xad, 0x38, 0x8d, 0x5c, 0x35, 0xa5, 0x69, 0x32, 0xe7,
-	0x4b, 0x03, 0xe0, 0xb6, 0xa8, 0x1e, 0xb5, 0x8d, 0xc9, 0xe3, 0xc1, 0x38, 0xfe, 0x4a, 0x56, 0x18,
-	0x2f, 0xbf, 0x8d, 0xac, 0xfc, 0x98, 0x8c, 0x47, 0x71, 0x1a, 0x87, 0x3c, 0x1e, 0x43, 0xf2, 0xba,
-	0x42, 0x55, 0x0c, 0xbe, 0x36, 0xa0, 0x36, 0x12, 0x2a, 0x36, 0x2e, 0xa3, 0x31, 0x79, 0x52, 0xc8,
-	0x7e, 0x21, 0xaa, 0xc7, 0x63, 0x23, 0x05, 0x15, 0x0d, 0x0b, 0x6a, 0x09, 0x2a, 0x52, 0x92, 0x91,
-	0x8a, 0x8a, 0x75, 0x45, 0x5d, 0x85, 0xd3, 0x14, 0xfb, 0x38, 0xe6, 0xe1, 0xc0, 0x8b, 0x92, 0x80,
-	0xec, 0x13, 0x1c, 0xc8, 0xba, 0xaa, 0xb8, 0xf5, 0x6c, 0x62, 0x5b, 0xe3, 0xce, 0xcf, 0x06, 0x2c,
-	0x7e, 0x2c, 0xda, 0xe8, 0x4e, 0x12, 0x60, 0xb5, 0xb3, 0xe7, 0x4f, 0x89, 0x9b, 0x92, 0x8b, 0x96,
-	0x47, 0xa5, 0xeb, 0xab, 0xcf, 0x4e, 0x57, 0xe6, 0x56, 0x98, 0x4e, 0x51, 0x21, 0xb1, 0xba, 0x66,
-	0x9f, 0x44, 0xe2, 0x61, 0x60, 0x5d, 0x75, 0x39, 0x57, 0x12, 0x07, 0x60, 0x8e, 0xd4, 0xa5, 0x68,
-	0x49, 0xba, 0x7f, 0xa9, 0xb6, 0x67, 0xc8, 0xf3, 0xd6, 0xd4, 0x98, 0x3c, 0x71, 0xcf, 0xc2, 0x5c,
-	0xc4, 0xba, 0xf9, 0x2d, 0x49, 0x0d, 0x44, 0x64, 0xf2, 0xce, 0x26, 0xb5, 0x2d, 0xb9, 0x43, 0xe0,
-	0xed, 0xf7, 0xa1, 0x9a, 0xff, 0xc7, 0x66, 0xd5, 0xa1, 0xd6, 0x8a, 0x09, 0x27, 0x28, 0x24, 0x87,
-	0x24, 0xee, 0xd6, 0x5f, 0xb2, 0x4c, 0x28, 0x7f, 0x84, 0x51, 0xc8, 0x7b, 0x83, 0xba, 0x61, 0xd5,
-	0xa0, 0x72, 0xab, 0x13, 0x27, 0x34, 0x42, 0x61, 0xbd, 0xb0, 0xd1, 0xfc, 0x74, 0xa3, 0x4b, 0x78,
-	0x2f, 0xed, 0x08, 0x11, 0xd7, 0x0e, 0x49, 0x18, 0x92, 0x43, 0x8e, 0xfd, 0xde, 0x9a, 0x62, 0xf9,
-	0x4e, 0x40, 0x18, 0xa7, 0xa4, 0x93, 0x72, 0x1c, 0xac, 0x65, 0x5c, 0xd7, 0x24, 0xf5, 0x7c, 0xd8,
-	0xef, 0x74, 0xe6, 0x25, 0x72, 0xfd, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x64, 0x4c, 0x6c, 0x9f,
-	0x88, 0x14, 0x00, 0x00,
+	0x1b, 0x7f, 0xd7, 0x76, 0x62, 0xfb, 0x59, 0x27, 0x75, 0xb7, 0x5f, 0x9b, 0x36, 0x7d, 0xeb, 0xee,
+	0xfb, 0x02, 0x81, 0x8a, 0xa4, 0x4a, 0x11, 0x42, 0x5c, 0xda, 0x26, 0xa6, 0xc1, 0x6a, 0x13, 0x85,
+	0x75, 0x5a, 0x09, 0x2e, 0xab, 0xf1, 0xee, 0xc4, 0x9e, 0x76, 0x3f, 0xdc, 0x99, 0xd9, 0xa6, 0xce,
+	0x89, 0x03, 0x37, 0x04, 0x07, 0x24, 0xfe, 0x01, 0xfe, 0x00, 0xce, 0x9c, 0x00, 0x71, 0x42, 0xe2,
+	0x8e, 0x84, 0xc4, 0x5f, 0xc2, 0x09, 0xcd, 0xc7, 0xae, 0x3f, 0xea, 0xa4, 0xa9, 0xa1, 0x42, 0x08,
+	0x6e, 0x9e, 0xdf, 0xf3, 0xec, 0x33, 0xf3, 0xfc, 0x9e, 0xaf, 0x19, 0xc3, 0x22, 0x89, 0x39, 0xa6,
+	0x31, 0x0a, 0x57, 0xfb, 0x34, 0xe1, 0x89, 0x75, 0x2e, 0x22, 0xe1, 0x93, 0x94, 0xa9, 0xd5, 0x6a,
+	0x26, 0xbc, 0x58, 0xf3, 0x93, 0x28, 0x4a, 0x62, 0x05, 0x3b, 0xdf, 0x1a, 0xb0, 0xb0, 0x99, 0x44,
+	0xfd, 0x24, 0xc6, 0x31, 0x6f, 0xc5, 0xfb, 0x89, 0x75, 0x1e, 0xe6, 0xe3, 0x24, 0xc0, 0xad, 0xa6,
+	0x6d, 0x34, 0x8c, 0x95, 0xa2, 0xab, 0x57, 0x96, 0x05, 0x25, 0x9a, 0x84, 0xd8, 0x2e, 0x34, 0x8c,
+	0x95, 0xaa, 0x2b, 0x7f, 0x5b, 0x37, 0x01, 0x18, 0x47, 0x1c, 0x7b, 0x7e, 0x12, 0x60, 0xbb, 0xd8,
+	0x30, 0x56, 0x16, 0xd7, 0x1b, 0xab, 0x53, 0xf7, 0x5d, 0x6d, 0x0b, 0xc5, 0xcd, 0x24, 0xc0, 0x6e,
+	0x95, 0x65, 0x3f, 0xad, 0x5b, 0x00, 0xf8, 0x29, 0xa7, 0xc8, 0x23, 0xf1, 0x7e, 0x62, 0x97, 0x1a,
+	0xc5, 0x15, 0x73, 0xfd, 0xea, 0xb8, 0x01, 0x7d, 0xdc, 0xbb, 0x78, 0xf0, 0x00, 0x85, 0x29, 0xde,
+	0x45, 0x84, 0xba, 0x55, 0xf9, 0x91, 0x38, 0xae, 0xf3, 0x8b, 0x01, 0xa7, 0x72, 0x07, 0xe4, 0x1e,
+	0xcc, 0x7a, 0x17, 0xe6, 0xe4, 0x16, 0xd2, 0x03, 0x73, 0xfd, 0xff, 0x47, 0x9c, 0x68, 0xcc, 0x6f,
+	0x57, 0x7d, 0x62, 0xdd, 0x87, 0x33, 0x2c, 0xed, 0xf8, 0x99, 0xc8, 0x93, 0x28, 0xb3, 0x0b, 0xf2,
+	0x68, 0x27, 0xb3, 0x64, 0x8d, 0x1a, 0xd0, 0x47, 0xba, 0x01, 0xf3, 0xc2, 0x52, 0xca, 0x24, 0x4b,
+	0xe6, 0xfa, 0xa5, 0xa9, 0x4e, 0xb6, 0xa5, 0x8a, 0xab, 0x55, 0x9d, 0x4b, 0xb0, 0xb4, 0x85, 0xf9,
+	0x84, 0x77, 0x2e, 0x7e, 0x9c, 0x62, 0xc6, 0xb5, 0x70, 0x8f, 0x44, 0x78, 0x8f, 0xf8, 0x8f, 0x36,
+	0x7b, 0x28, 0x8e, 0x71, 0x98, 0x09, 0x2f, 0xc3, 0xa5, 0x2d, 0x2c, 0x3f, 0x20, 0x8c, 0x13, 0x9f,
+	0x4d, 0x88, 0xcf, 0xc1, 0x99, 0x2d, 0xcc, 0x9b, 0xc1, 0x04, 0xfc, 0x00, 0x2a, 0x3b, 0x22, 0xd8,
+	0x22, 0x0d, 0xde, 0x86, 0x32, 0x0a, 0x02, 0x8a, 0x19, 0xd3, 0x2c, 0x2e, 0x4f, 0x3d, 0xf1, 0x6d,
+	0xa5, 0xe3, 0x66, 0xca, 0xd3, 0xd2, 0xc4, 0x79, 0x08, 0xd0, 0x8a, 0x09, 0xdf, 0x45, 0x14, 0x45,
+	0xec, 0xc8, 0x04, 0x6b, 0x42, 0x8d, 0x71, 0x44, 0xb9, 0xd7, 0x97, 0x7a, 0x9a, 0xf2, 0x13, 0x64,
+	0x83, 0x29, 0x3f, 0x53, 0xd6, 0x9d, 0x0f, 0x01, 0xda, 0x9c, 0x92, 0xb8, 0x7b, 0x8f, 0x30, 0x2e,
+	0xf6, 0x7a, 0x22, 0xf4, 0x84, 0x13, 0xc5, 0x95, 0xaa, 0xab, 0x57, 0x23, 0xe1, 0x28, 0x9c, 0x3c,
+	0x1c, 0x37, 0xc1, 0xcc, 0xe8, 0xde, 0x66, 0x5d, 0xeb, 0x3a, 0x94, 0x3a, 0x88, 0xe1, 0x63, 0xe9,
+	0xd9, 0x66, 0xdd, 0x0d, 0xc4, 0xb0, 0x2b, 0x35, 0x9d, 0x5f, 0x0d, 0xb8, 0xb0, 0x49, 0xb1, 0x4c,
+	0xfe, 0x30, 0xc4, 0x3e, 0x27, 0x49, 0xac, 0xb9, 0x7f, 0x71, 0x6b, 0xd6, 0x05, 0x28, 0x07, 0x1d,
+	0x2f, 0x46, 0x51, 0x46, 0xf6, 0x7c, 0xd0, 0xd9, 0x41, 0x11, 0xb6, 0x5e, 0x85, 0x45, 0x3f, 0xb7,
+	0x2f, 0x10, 0x99, 0x73, 0x55, 0x77, 0x02, 0x15, 0xa1, 0x0a, 0x3a, 0xad, 0xa6, 0x5d, 0x92, 0x61,
+	0x90, 0xbf, 0x2d, 0x07, 0x6a, 0x43, 0xad, 0x56, 0xd3, 0x9e, 0x93, 0xb2, 0x31, 0x4c, 0x90, 0xca,
+	0xfc, 0x1e, 0x8e, 0x90, 0x3d, 0xdf, 0x30, 0x56, 0x6a, 0xae, 0x5e, 0x39, 0x3f, 0x18, 0x70, 0xae,
+	0x49, 0x93, 0xfe, 0xdf, 0xd9, 0x39, 0xe7, 0xb3, 0x02, 0x9c, 0x57, 0x31, 0xda, 0x45, 0x94, 0x93,
+	0x97, 0xe4, 0xc5, 0x6b, 0x70, 0x6a, 0xb8, 0xab, 0x52, 0x98, 0xee, 0xc6, 0x2b, 0xb0, 0xd8, 0xcf,
+	0xce, 0xa1, 0xf4, 0x4a, 0x52, 0x6f, 0x21, 0x47, 0xc7, 0xbc, 0x9d, 0x3b, 0xc6, 0xdb, 0xf9, 0x29,
+	0xa1, 0x6c, 0x80, 0x99, 0x1b, 0x6a, 0x35, 0xed, 0xb2, 0x54, 0x19, 0x85, 0x9c, 0x4f, 0x0b, 0x70,
+	0x56, 0x04, 0xf5, 0x5f, 0x36, 0x04, 0x1b, 0xdf, 0x15, 0xc0, 0x52, 0xd9, 0xd1, 0x8a, 0x03, 0xfc,
+	0xf4, 0xaf, 0xe4, 0xe2, 0x32, 0xc0, 0x3e, 0xc1, 0x61, 0x30, 0xca, 0x43, 0x55, 0x22, 0x7f, 0x88,
+	0x03, 0x1b, 0xca, 0xd2, 0x48, 0xee, 0x7f, 0xb6, 0x14, 0xfd, 0x59, 0xcd, 0x6a, 0xdd, 0x9f, 0x2b,
+	0x27, 0xee, 0xcf, 0xf2, 0x33, 0xdd, 0x9f, 0xbf, 0x2e, 0xc2, 0x42, 0x2b, 0x66, 0x98, 0xf2, 0x7f,
+	0x72, 0x22, 0x59, 0xcb, 0x50, 0x65, 0xb8, 0x1b, 0x89, 0x2b, 0x43, 0xd3, 0xae, 0x48, 0xf9, 0x10,
+	0x10, 0x52, 0x5f, 0x8d, 0xe6, 0x56, 0xd3, 0xae, 0xaa, 0xd0, 0xe6, 0x80, 0xf5, 0x5f, 0x00, 0x4e,
+	0x22, 0xcc, 0x38, 0x8a, 0xfa, 0xcc, 0x86, 0x46, 0x71, 0xa5, 0xe4, 0x8e, 0x20, 0xa2, 0x3f, 0xd3,
+	0xe4, 0xa0, 0xd5, 0x64, 0xb6, 0xd9, 0x28, 0x8a, 0x01, 0xab, 0x56, 0xd6, 0x5b, 0x50, 0xa1, 0xc9,
+	0x81, 0x17, 0x20, 0x8e, 0xec, 0x9a, 0x0c, 0xde, 0xd2, 0x54, 0xb2, 0x37, 0xc2, 0xa4, 0xe3, 0x96,
+	0x69, 0x72, 0xd0, 0x44, 0x1c, 0x39, 0xdf, 0x14, 0x60, 0xa1, 0x8d, 0x11, 0xf5, 0x7b, 0xb3, 0x07,
+	0xec, 0x75, 0xa8, 0x53, 0xcc, 0xd2, 0x90, 0x7b, 0x43, 0xb7, 0x54, 0xe4, 0x4e, 0x29, 0x7c, 0x33,
+	0x77, 0x2e, 0xa3, 0xbc, 0x78, 0x0c, 0xe5, 0xa5, 0x29, 0x94, 0x3b, 0x50, 0x1b, 0xe1, 0x97, 0xd9,
+	0x73, 0xd2, 0xf5, 0x31, 0xcc, 0xaa, 0x43, 0x31, 0x60, 0xa1, 0x8c, 0x58, 0xd5, 0x15, 0x3f, 0xad,
+	0x6b, 0x70, 0xba, 0x1f, 0x22, 0x1f, 0xf7, 0x92, 0x30, 0xc0, 0xd4, 0xeb, 0xd2, 0x24, 0xed, 0xcb,
+	0x70, 0xd5, 0xdc, 0xfa, 0x88, 0x60, 0x4b, 0xe0, 0xd6, 0x1a, 0xcc, 0x3d, 0x4e, 0x31, 0x1d, 0xc8,
+	0x78, 0x1d, 0x4b, 0x9e, 0xd2, 0x73, 0x7e, 0x36, 0x86, 0xd4, 0x09, 0x2f, 0xd9, 0x0c, 0xd4, 0xcd,
+	0x72, 0x53, 0x99, 0xca, 0x77, 0x71, 0x3a, 0xdf, 0x57, 0xc0, 0x8c, 0x30, 0xa7, 0xc4, 0xf7, 0xf8,
+	0xa0, 0x9f, 0x95, 0x01, 0x28, 0x68, 0x6f, 0xd0, 0x97, 0x35, 0xd0, 0x23, 0x5c, 0x11, 0x5a, 0x73,
+	0xe5, 0x6f, 0xe7, 0x27, 0x03, 0x16, 0x9a, 0x38, 0xc4, 0x1c, 0xcf, 0x9e, 0x13, 0x53, 0x6a, 0xb5,
+	0x30, 0xb5, 0x56, 0xc7, 0x8a, 0xa1, 0x78, 0x7c, 0x31, 0x94, 0x9e, 0x29, 0x86, 0xab, 0x50, 0xeb,
+	0x53, 0x12, 0x21, 0x3a, 0xf0, 0x1e, 0xe1, 0x41, 0x96, 0x17, 0xa6, 0xc6, 0xee, 0xe2, 0x01, 0x73,
+	0xbe, 0x32, 0xa0, 0x72, 0x27, 0x4c, 0x59, 0x6f, 0xa6, 0x5b, 0xdd, 0x78, 0x29, 0x17, 0x26, 0x4b,
+	0x79, 0x32, 0x77, 0x8b, 0xcf, 0xc9, 0xdd, 0x3d, 0xd4, 0xd5, 0x41, 0x18, 0xc3, 0x9c, 0xdf, 0x0c,
+	0xa8, 0xde, 0x4b, 0x50, 0x20, 0xe7, 0xce, 0x9f, 0x7e, 0xca, 0x65, 0x18, 0x8e, 0x8e, 0x8c, 0xe3,
+	0xe1, 0x2c, 0x19, 0x99, 0x09, 0xa5, 0xf1, 0x99, 0x70, 0x05, 0x4c, 0x22, 0x0e, 0xe4, 0xf5, 0x11,
+	0xef, 0x29, 0x72, 0xab, 0x2e, 0x48, 0x68, 0x57, 0x20, 0x62, 0x68, 0x64, 0x0a, 0x72, 0x68, 0xcc,
+	0x9f, 0x78, 0x68, 0x68, 0x23, 0x72, 0x68, 0x7c, 0x5f, 0x00, 0xbb, 0xad, 0x0e, 0x3b, 0x7c, 0xd3,
+	0xdc, 0xef, 0x07, 0xf2, 0x69, 0xb5, 0x0c, 0xd5, 0x76, 0xee, 0x99, 0x7a, 0x52, 0x0c, 0x01, 0x91,
+	0x1f, 0xdb, 0x38, 0x4a, 0xe8, 0xa0, 0x4d, 0x0e, 0xb1, 0x76, 0x7c, 0x04, 0x11, 0xbe, 0xed, 0xa4,
+	0x91, 0x9b, 0x1c, 0x30, 0x1d, 0x9a, 0x6c, 0x29, 0x7c, 0xf3, 0xe5, 0xa8, 0xf7, 0x44, 0x3a, 0x49,
+	0xcf, 0x4b, 0x2e, 0x28, 0x48, 0xbc, 0x03, 0xac, 0x25, 0xa8, 0xe0, 0x38, 0x50, 0xd2, 0x39, 0x29,
+	0x2d, 0xe3, 0x38, 0x90, 0xa2, 0x16, 0x2c, 0xea, 0xb7, 0x4c, 0xc2, 0x64, 0x08, 0x65, 0xd3, 0x31,
+	0xd7, 0x9d, 0x23, 0x1e, 0x90, 0xdb, 0xac, 0xbb, 0xab, 0x35, 0xdd, 0x05, 0xf5, 0x9c, 0xd1, 0x4b,
+	0xeb, 0x3d, 0xa8, 0x89, 0x5d, 0x72, 0x43, 0xe5, 0x13, 0x1b, 0x32, 0x71, 0x1c, 0x64, 0x0b, 0xe7,
+	0x0b, 0x03, 0x4e, 0x3f, 0x43, 0xe1, 0x0c, 0x79, 0x74, 0x17, 0x2a, 0x6d, 0xdc, 0x15, 0x26, 0xb2,
+	0x17, 0xda, 0xda, 0x51, 0x0f, 0xfe, 0x23, 0x02, 0xe6, 0xe6, 0x06, 0x9c, 0x87, 0x79, 0x58, 0x65,
+	0xfd, 0x89, 0x97, 0xae, 0x68, 0x2a, 0xc1, 0x4b, 0x28, 0x44, 0xe7, 0x13, 0x43, 0xbc, 0x42, 0x03,
+	0xfc, 0x54, 0x6e, 0xfd, 0x4c, 0x62, 0x1a, 0xb3, 0x24, 0xa6, 0x75, 0x1d, 0xce, 0xc6, 0x69, 0xe4,
+	0x51, 0x1c, 0x22, 0x8e, 0x03, 0x4f, 0xef, 0xc6, 0xf4, 0xee, 0x56, 0x9c, 0x46, 0xae, 0x12, 0x69,
+	0x37, 0x99, 0xf3, 0xb9, 0x01, 0x70, 0x47, 0x54, 0x8f, 0x3a, 0xc6, 0x64, 0x7b, 0x30, 0x8e, 0xbf,
+	0x92, 0x15, 0xc6, 0xcb, 0x6f, 0x23, 0x2b, 0x3f, 0x26, 0xe3, 0x51, 0x9c, 0xe6, 0x43, 0x1e, 0x8f,
+	0xa1, 0xf3, 0xba, 0x42, 0x55, 0x0c, 0xbe, 0x34, 0xa0, 0x36, 0x12, 0x2a, 0x36, 0x4e, 0xa3, 0x31,
+	0xd9, 0x29, 0xe4, 0xbc, 0x10, 0xd5, 0xe3, 0xb1, 0x91, 0x82, 0x8a, 0x86, 0x05, 0xb5, 0x04, 0x15,
+	0x49, 0xc9, 0x48, 0x45, 0xc5, 0xba, 0xa2, 0xae, 0xc1, 0x69, 0x8a, 0x7d, 0x1c, 0xf3, 0x70, 0xe0,
+	0x45, 0x49, 0x40, 0xf6, 0x09, 0x0e, 0x64, 0x5d, 0x55, 0xdc, 0x7a, 0x26, 0xd8, 0xd6, 0xb8, 0xf3,
+	0xa3, 0x01, 0x8b, 0x1f, 0x88, 0x31, 0xba, 0x93, 0x04, 0x58, 0x9d, 0xec, 0xc5, 0x53, 0xe2, 0x96,
+	0xf4, 0x45, 0xd3, 0xa3, 0xd2, 0xf5, 0x7f, 0xcf, 0x4f, 0x57, 0xe6, 0x56, 0x98, 0x4e, 0x51, 0x41,
+	0xb1, 0xba, 0x66, 0x9f, 0x84, 0xe2, 0x61, 0x60, 0x5d, 0x75, 0x39, 0x57, 0x14, 0x7f, 0x6c, 0x80,
+	0x39, 0x52, 0x98, 0x62, 0x26, 0xe9, 0x01, 0xa6, 0xe6, 0x9e, 0x21, 0x1b, 0xae, 0xa9, 0x31, 0xd9,
+	0x72, 0xcf, 0xc2, 0x5c, 0xc4, 0xba, 0xf9, 0x35, 0x49, 0x2d, 0xac, 0x8b, 0x50, 0x89, 0x58, 0x57,
+	0xde, 0x46, 0x74, 0x97, 0xce, 0xd7, 0x22, 0x6c, 0xf9, 0xd8, 0xd3, 0xcd, 0x6a, 0x08, 0xbc, 0xf1,
+	0x0e, 0x54, 0xf3, 0x3f, 0xe0, 0xac, 0x3a, 0xd4, 0x5a, 0x31, 0xe1, 0x04, 0x85, 0xe4, 0x90, 0xc4,
+	0xdd, 0xfa, 0x7f, 0x2c, 0x13, 0xca, 0xef, 0x63, 0x14, 0xf2, 0xde, 0xa0, 0x6e, 0x58, 0x35, 0xa8,
+	0xdc, 0xee, 0xc4, 0x09, 0x8d, 0x50, 0x58, 0x2f, 0x6c, 0x34, 0x3f, 0xda, 0xe8, 0x12, 0xde, 0x4b,
+	0x3b, 0x82, 0xe1, 0xb5, 0x43, 0x12, 0x86, 0xe4, 0x90, 0x63, 0xbf, 0xb7, 0xa6, 0x28, 0x78, 0x33,
+	0x20, 0x8c, 0x53, 0xd2, 0x49, 0x39, 0x0e, 0xd6, 0x32, 0x22, 0xd6, 0x24, 0x2f, 0xf9, 0xb2, 0xdf,
+	0xe9, 0xcc, 0x4b, 0xe4, 0xc6, 0xef, 0x01, 0x00, 0x00, 0xff, 0xff, 0x54, 0x28, 0x8f, 0xa8, 0xa5,
+	0x14, 0x00, 0x00,
 }
diff --git a/internal/proxynode/meta_cache.go b/internal/proxynode/meta_cache.go
index 0b35d3949a506a099af7cf83487ba88b69c9ca44..53c7bca9a37ff4cf80d75f5c792342e72fcda0ee 100644
--- a/internal/proxynode/meta_cache.go
+++ b/internal/proxynode/meta_cache.go
@@ -6,6 +6,9 @@ import (
 	"fmt"
 	"sync"
 
+	"github.com/zilliztech/milvus-distributed/internal/log"
+	"go.uber.org/zap"
+
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
@@ -16,6 +19,7 @@ import (
 type Cache interface {
 	GetCollectionID(ctx context.Context, collectionName string) (typeutil.UniqueID, error)
 	GetPartitionID(ctx context.Context, collectionName string, partitionName string) (typeutil.UniqueID, error)
+	GetPartitions(ctx context.Context, collectionName string) (map[string]typeutil.UniqueID, error)
 	GetCollectionSchema(ctx context.Context, collectionName string) (*schemapb.CollectionSchema, error)
 	RemoveCollection(ctx context.Context, collectionName string)
 	RemovePartition(ctx context.Context, collectionName string, partitionName string)
@@ -52,83 +56,143 @@ func NewMetaCache(client types.MasterService) (*MetaCache, error) {
 	}, nil
 }
 
-func (m *MetaCache) readCollectionID(ctx context.Context, collectionName string) (typeutil.UniqueID, error) {
+func (m *MetaCache) GetCollectionID(ctx context.Context, collectionName string) (typeutil.UniqueID, error) {
 	m.mu.RLock()
-	defer m.mu.RUnlock()
-
 	collInfo, ok := m.collInfo[collectionName]
+
 	if !ok {
-		return 0, fmt.Errorf("can't find collection name:%s", collectionName)
+		m.mu.RUnlock()
+		coll, err := m.describeCollection(ctx, collectionName)
+		if err != nil {
+			return 0, err
+		}
+		m.mu.Lock()
+		defer m.mu.Unlock()
+		m.updateCollection(coll, collectionName)
+		collInfo = m.collInfo[collectionName]
+		return collInfo.collID, nil
 	}
+	defer m.mu.RUnlock()
+
 	return collInfo.collID, nil
 }
 
-func (m *MetaCache) readCollectionSchema(ctx context.Context, collectionName string) (*schemapb.CollectionSchema, error) {
+func (m *MetaCache) GetCollectionSchema(ctx context.Context, collectionName string) (*schemapb.CollectionSchema, error) {
 	m.mu.RLock()
-	defer m.mu.RUnlock()
-
 	collInfo, ok := m.collInfo[collectionName]
+
 	if !ok {
-		return nil, fmt.Errorf("can't find collection name:%s", collectionName)
+		m.mu.RUnlock()
+		coll, err := m.describeCollection(ctx, collectionName)
+		if err != nil {
+			return nil, err
+		}
+		m.mu.Lock()
+		defer m.mu.Unlock()
+		m.updateCollection(coll, collectionName)
+		collInfo = m.collInfo[collectionName]
+		return collInfo.schema, nil
 	}
+	defer m.mu.RUnlock()
+
 	return collInfo.schema, nil
 }
 
-func (m *MetaCache) readPartitionID(ctx context.Context, collectionName string, partitionName string) (typeutil.UniqueID, error) {
+func (m *MetaCache) updateCollection(coll *milvuspb.DescribeCollectionResponse, collectionName string) {
+	_, ok := m.collInfo[collectionName]
+	if !ok {
+		m.collInfo[collectionName] = &collectionInfo{}
+	}
+	m.collInfo[collectionName].schema = coll.Schema
+	m.collInfo[collectionName].collID = coll.CollectionID
+}
+
+func (m *MetaCache) GetPartitionID(ctx context.Context, collectionName string, partitionName string) (typeutil.UniqueID, error) {
+	_, err := m.GetCollectionID(ctx, collectionName)
+	if err != nil {
+		return 0, err
+	}
+
 	m.mu.RLock()
-	defer m.mu.RUnlock()
 
 	collInfo, ok := m.collInfo[collectionName]
 	if !ok {
+		m.mu.RUnlock()
 		return 0, fmt.Errorf("can't find collection name:%s", collectionName)
 	}
 
 	partitionID, ok := collInfo.partInfo[partitionName]
+	m.mu.RUnlock()
+
 	if !ok {
-		return 0, fmt.Errorf("can't find partition name:%s", partitionName)
+		partitions, err := m.showPartitions(ctx, collectionName)
+		if err != nil {
+			return 0, err
+		}
+
+		m.mu.Lock()
+		defer m.mu.Unlock()
+		log.Debug("proxynode", zap.Any("GetPartitionID:partitions before update", partitions), zap.Any("collectionName", collectionName))
+		m.updatePartitions(partitions, collectionName)
+		log.Debug("proxynode", zap.Any("GetPartitionID:partitions after update", partitions), zap.Any("collectionName", collectionName))
+
+		partInfo := m.collInfo[collectionName].partInfo
+		_, ok := partInfo[partitionName]
+		if !ok {
+			return 0, fmt.Errorf("partitionID of partitionName:%s can not be find", partitionName)
+		}
+		return partInfo[partitionName], nil
 	}
 	return partitionID, nil
 }
 
-func (m *MetaCache) GetCollectionID(ctx context.Context, collectionName string) (typeutil.UniqueID, error) {
-	collID, err := m.readCollectionID(ctx, collectionName)
-	if err == nil {
-		return collID, nil
+func (m *MetaCache) GetPartitions(ctx context.Context, collectionName string) (map[string]typeutil.UniqueID, error) {
+	_, err := m.GetCollectionID(ctx, collectionName)
+	if err != nil {
+		return nil, err
 	}
-	m.mu.Lock()
-	defer m.mu.Unlock()
 
-	req := &milvuspb.DescribeCollectionRequest{
-		Base: &commonpb.MsgBase{
-			MsgType: commonpb.MsgType_DescribeCollection,
-		},
-		CollectionName: collectionName,
-	}
-	coll, err := m.client.DescribeCollection(ctx, req)
-	if err != nil {
-		return 0, err
+	m.mu.RLock()
+
+	collInfo, ok := m.collInfo[collectionName]
+	if !ok {
+		m.mu.RUnlock()
+		return nil, fmt.Errorf("can't find collection name:%s", collectionName)
 	}
-	if coll.Status.ErrorCode != commonpb.ErrorCode_Success {
-		return 0, errors.New(coll.Status.Reason)
+
+	if collInfo.partInfo == nil || len(collInfo.partInfo) == 0 {
+		m.mu.RUnlock()
+
+		partitions, err := m.showPartitions(ctx, collectionName)
+		if err != nil {
+			return nil, err
+		}
+
+		m.mu.Lock()
+		defer m.mu.Unlock()
+
+		m.updatePartitions(partitions, collectionName)
+
+		ret := make(map[string]typeutil.UniqueID)
+		partInfo := m.collInfo[collectionName].partInfo
+		for k, v := range partInfo {
+			ret[k] = v
+		}
+		return ret, nil
+
 	}
+	defer m.mu.RUnlock()
 
-	_, ok := m.collInfo[collectionName]
-	if !ok {
-		m.collInfo[collectionName] = &collectionInfo{}
+	ret := make(map[string]typeutil.UniqueID)
+	partInfo := m.collInfo[collectionName].partInfo
+	for k, v := range partInfo {
+		ret[k] = v
 	}
-	m.collInfo[collectionName].schema = coll.Schema
-	m.collInfo[collectionName].collID = coll.CollectionID
 
-	return m.collInfo[collectionName].collID, nil
+	return ret, nil
 }
-func (m *MetaCache) GetCollectionSchema(ctx context.Context, collectionName string) (*schemapb.CollectionSchema, error) {
-	collSchema, err := m.readCollectionSchema(ctx, collectionName)
-	if err == nil {
-		return collSchema, nil
-	}
-	m.mu.Lock()
-	defer m.mu.Unlock()
 
+func (m *MetaCache) describeCollection(ctx context.Context, collectionName string) (*milvuspb.DescribeCollectionResponse, error) {
 	req := &milvuspb.DescribeCollectionRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_DescribeCollection,
@@ -142,45 +206,34 @@ func (m *MetaCache) GetCollectionSchema(ctx context.Context, collectionName stri
 	if coll.Status.ErrorCode != commonpb.ErrorCode_Success {
 		return nil, errors.New(coll.Status.Reason)
 	}
-
-	_, ok := m.collInfo[collectionName]
-	if !ok {
-		m.collInfo[collectionName] = &collectionInfo{}
-	}
-	m.collInfo[collectionName].schema = coll.Schema
-	m.collInfo[collectionName].collID = coll.CollectionID
-
-	return m.collInfo[collectionName].schema, nil
+	return coll, nil
 }
 
-func (m *MetaCache) GetPartitionID(ctx context.Context, collectionName string, partitionName string) (typeutil.UniqueID, error) {
-	partitionID, err := m.readPartitionID(ctx, collectionName, partitionName)
-	if err == nil {
-		return partitionID, nil
-	}
-
+func (m *MetaCache) showPartitions(ctx context.Context, collectionName string) (*milvuspb.ShowPartitionsResponse, error) {
 	req := &milvuspb.ShowPartitionsRequest{
 		Base: &commonpb.MsgBase{
 			MsgType: commonpb.MsgType_ShowPartitions,
 		},
 		CollectionName: collectionName,
 	}
+
 	partitions, err := m.client.ShowPartitions(ctx, req)
 	if err != nil {
-		return 0, err
+		return nil, err
 	}
 	if partitions.Status.ErrorCode != commonpb.ErrorCode_Success {
-		return 0, fmt.Errorf("%s", partitions.Status.Reason)
+		return nil, fmt.Errorf("%s", partitions.Status.Reason)
 	}
 
-	m.mu.Lock()
-	defer m.mu.Unlock()
-
 	if len(partitions.PartitionIDs) != len(partitions.PartitionNames) {
-		return 0, fmt.Errorf("partition ids len: %d doesn't equal Partition name len %d",
+		return nil, fmt.Errorf("partition ids len: %d doesn't equal Partition name len %d",
 			len(partitions.PartitionIDs), len(partitions.PartitionNames))
 	}
 
+	return partitions, nil
+}
+
+func (m *MetaCache) updatePartitions(partitions *milvuspb.ShowPartitionsResponse, collectionName string) {
 	_, ok := m.collInfo[collectionName]
 	if !ok {
 		m.collInfo[collectionName] = &collectionInfo{
@@ -198,12 +251,7 @@ func (m *MetaCache) GetPartitionID(ctx context.Context, collectionName string, p
 			partInfo[partitions.PartitionNames[i]] = partitions.PartitionIDs[i]
 		}
 	}
-	_, ok = partInfo[partitionName]
-	if !ok {
-		return 0, fmt.Errorf("partitionID of partitionName:%s can not be find", partitionName)
-	}
-
-	return partInfo[partitionName], nil
+	m.collInfo[collectionName].partInfo = partInfo
 }
 
 func (m *MetaCache) RemoveCollection(ctx context.Context, collectionName string) {
diff --git a/internal/proxynode/proxy_node.go b/internal/proxynode/proxy_node.go
index fa28f3be836092e2cacc5768d409bde74a0b0b00..c32280075fd219bce15548e43d487f927daa2c16 100644
--- a/internal/proxynode/proxy_node.go
+++ b/internal/proxynode/proxy_node.go
@@ -74,7 +74,7 @@ func (node *ProxyNode) Init() error {
 	// todo wait for proxyservice state changed to Healthy
 	ctx := context.Background()
 
-	err := funcutil.WaitForComponentHealthy(ctx, node.proxyService, "ProxyService", 100, time.Millisecond*200)
+	err := funcutil.WaitForComponentHealthy(ctx, node.proxyService, "ProxyService", 1000000, time.Millisecond*200)
 	if err != nil {
 		return err
 	}
@@ -102,7 +102,7 @@ func (node *ProxyNode) Init() error {
 
 	// wait for dataservice state changed to Healthy
 	if node.dataService != nil {
-		err := funcutil.WaitForComponentHealthy(ctx, node.dataService, "DataService", 100, time.Millisecond*200)
+		err := funcutil.WaitForComponentHealthy(ctx, node.dataService, "DataService", 1000000, time.Millisecond*200)
 		if err != nil {
 			return err
 		}
@@ -110,7 +110,7 @@ func (node *ProxyNode) Init() error {
 
 	// wait for queryService state changed to Healthy
 	if node.queryService != nil {
-		err := funcutil.WaitForComponentHealthy(ctx, node.queryService, "QueryService", 100, time.Millisecond*200)
+		err := funcutil.WaitForComponentHealthy(ctx, node.queryService, "QueryService", 1000000, time.Millisecond*200)
 		if err != nil {
 			return err
 		}
@@ -118,7 +118,7 @@ func (node *ProxyNode) Init() error {
 
 	// wait for indexservice state changed to Healthy
 	if node.indexService != nil {
-		err := funcutil.WaitForComponentHealthy(ctx, node.indexService, "IndexService", 100, time.Millisecond*200)
+		err := funcutil.WaitForComponentHealthy(ctx, node.indexService, "IndexService", 1000000, time.Millisecond*200)
 		if err != nil {
 			return err
 		}
diff --git a/internal/proxynode/task.go b/internal/proxynode/task.go
index 864347b9d799691a10f407ffbab46054128045f1..adf2e623a91e7183fd9b86905938598e3cd204d0 100644
--- a/internal/proxynode/task.go
+++ b/internal/proxynode/task.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"math"
+	"regexp"
 	"strconv"
 
 	"go.uber.org/zap"
@@ -552,13 +553,35 @@ func (st *SearchTask) PreExecute(ctx context.Context) error {
 	}
 	st.CollectionID = collectionID
 	st.PartitionIDs = make([]UniqueID, 0)
+
+	partitionsMap, err := globalMetaCache.GetPartitions(ctx, collectionName)
+	if err != nil {
+		return err
+	}
+
+	partitionsRecord := make(map[UniqueID]bool)
 	for _, partitionName := range st.query.PartitionNames {
-		partitionID, err := globalMetaCache.GetPartitionID(ctx, collectionName, partitionName)
+		pattern := fmt.Sprintf("^%s$", partitionName)
+		re, err := regexp.Compile(pattern)
 		if err != nil {
-			continue
+			return errors.New("invalid partition names")
+		}
+		found := false
+		for name, pID := range partitionsMap {
+			if re.MatchString(name) {
+				if _, exist := partitionsRecord[pID]; !exist {
+					st.PartitionIDs = append(st.PartitionIDs, pID)
+					partitionsRecord[pID] = true
+				}
+				found = true
+			}
+		}
+		if !found {
+			errMsg := fmt.Sprintf("PartitonName: %s not found", partitionName)
+			return errors.New(errMsg)
 		}
-		st.PartitionIDs = append(st.PartitionIDs, partitionID)
 	}
+
 	st.Dsl = st.query.Dsl
 	st.PlaceholderGroup = st.query.PlaceholderGroup
 
diff --git a/internal/querynode/collection_replica.go b/internal/querynode/collection_replica.go
index 48f910f95c6410e0b5f5dd75fe82199cb472afd3..7e89d6a11f57d19445c85d156cc5f8b0c1b45264 100644
--- a/internal/querynode/collection_replica.go
+++ b/internal/querynode/collection_replica.go
@@ -68,21 +68,31 @@ type ReplicaInterface interface {
 	getSegmentsToLoadBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID)
 	getSegmentStatistics() []*internalpb.SegmentStats
 
+	// excluded segments
+	initExcludedSegments(collectionID UniqueID)
+	removeExcludedSegments(collectionID UniqueID)
+	addExcludedSegments(collectionID UniqueID, segmentIDs []UniqueID) error
+	getExcludedSegments(collectionID UniqueID) ([]UniqueID, error)
+
 	getEnabledSegmentsBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID)
 	getSegmentsBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID)
 	replaceGrowingSegmentBySealedSegment(segment *Segment) error
 
-	getTSafe() tSafer
+	getTSafe(collectionID UniqueID) tSafer
+	addTSafe(collectionID UniqueID)
+	removeTSafe(collectionID UniqueID)
 	freeAll()
 }
 
 type collectionReplica struct {
-	tSafe tSafer
+	tSafes map[UniqueID]tSafer // map[collectionID]tSafer
 
 	mu          sync.RWMutex // guards all
 	collections map[UniqueID]*Collection
 	partitions  map[UniqueID]*Partition
 	segments    map[UniqueID]*Segment
+
+	excludedSegments map[UniqueID][]UniqueID // map[collectionID]segmentIDs
 }
 
 //----------------------------------------------------------------------------------------------------- collection
@@ -101,7 +111,7 @@ func (colReplica *collectionReplica) addCollection(collectionID UniqueID, schema
 	defer colReplica.mu.Unlock()
 
 	if ok := colReplica.hasCollectionPrivate(collectionID); ok {
-		return fmt.Errorf("collection has been existed, id %d", collectionID)
+		return errors.New("collection has been loaded, id %d" + strconv.FormatInt(collectionID, 10))
 	}
 
 	var newCollection = newCollection(collectionID, schema)
@@ -143,7 +153,7 @@ func (colReplica *collectionReplica) getCollectionByID(collectionID UniqueID) (*
 func (colReplica *collectionReplica) getCollectionByIDPrivate(collectionID UniqueID) (*Collection, error) {
 	collection, ok := colReplica.collections[collectionID]
 	if !ok {
-		return nil, fmt.Errorf("cannot find collection, id = %d", collectionID)
+		return nil, errors.New("collection hasn't been loaded or has been released, collection id = %d" + strconv.FormatInt(collectionID, 10))
 	}
 
 	return collection, nil
@@ -195,7 +205,7 @@ func (colReplica *collectionReplica) getVecFieldIDsByCollectionID(collectionID U
 	}
 
 	if len(vecFields) <= 0 {
-		return nil, fmt.Errorf("no vector field in collection %d", collectionID)
+		return nil, errors.New("no vector field in collection %d" + strconv.FormatInt(collectionID, 10))
 	}
 
 	return vecFields, nil
@@ -228,7 +238,7 @@ func (colReplica *collectionReplica) getFieldsByCollectionIDPrivate(collectionID
 	}
 
 	if len(collection.Schema().Fields) <= 0 {
-		return nil, fmt.Errorf("no field in collection %d", collectionID)
+		return nil, errors.New("no field in collection %d" + strconv.FormatInt(collectionID, 10))
 	}
 
 	return collection.Schema().Fields, nil
@@ -291,7 +301,7 @@ func (colReplica *collectionReplica) getPartitionByID(partitionID UniqueID) (*Pa
 func (colReplica *collectionReplica) getPartitionByIDPrivate(partitionID UniqueID) (*Partition, error) {
 	partition, ok := colReplica.partitions[partitionID]
 	if !ok {
-		return nil, fmt.Errorf("cannot find partition, id = %d", partitionID)
+		return nil, errors.New("partition hasn't been loaded or has been released, partition id = %d" + strconv.FormatInt(partitionID, 10))
 	}
 
 	return partition, nil
@@ -426,7 +436,7 @@ func (colReplica *collectionReplica) getSegmentByID(segmentID UniqueID) (*Segmen
 func (colReplica *collectionReplica) getSegmentByIDPrivate(segmentID UniqueID) (*Segment, error) {
 	segment, ok := colReplica.segments[segmentID]
 	if !ok {
-		return nil, errors.New("cannot find segment, id = " + strconv.FormatInt(segmentID, 10))
+		return nil, errors.New("cannot find segment in query node, id = " + strconv.FormatInt(segmentID, 10))
 	}
 
 	return segment, nil
@@ -529,7 +539,7 @@ func (colReplica *collectionReplica) getSegmentsBySegmentType(segType segmentTyp
 func (colReplica *collectionReplica) replaceGrowingSegmentBySealedSegment(segment *Segment) error {
 	colReplica.mu.Lock()
 	defer colReplica.mu.Unlock()
-	if segment.segmentType != segmentTypeSealed && segment.segmentType != segTypeIndexing {
+	if segment.segmentType != segmentTypeSealed && segment.segmentType != segmentTypeIndexing {
 		return errors.New("unexpected segment type")
 	}
 	targetSegment, err := colReplica.getSegmentByIDPrivate(segment.ID())
@@ -573,40 +583,79 @@ func (colReplica *collectionReplica) setSegmentEnableLoadBinLog(segmentID Unique
 	return nil
 }
 
-//-----------------------------------------------------------------------------------------------------
-func (colReplica *collectionReplica) getTSafe() tSafer {
-	return colReplica.tSafe
+func (colReplica *collectionReplica) initExcludedSegments(collectionID UniqueID) {
+	colReplica.mu.Lock()
+	defer colReplica.mu.Unlock()
+
+	colReplica.excludedSegments[collectionID] = make([]UniqueID, 0)
 }
 
-func (colReplica *collectionReplica) freeAll() {
+func (colReplica *collectionReplica) removeExcludedSegments(collectionID UniqueID) {
 	colReplica.mu.Lock()
 	defer colReplica.mu.Unlock()
 
-	for id := range colReplica.collections {
-		_ = colReplica.removeCollectionPrivate(id)
+	delete(colReplica.excludedSegments, collectionID)
+}
+
+func (colReplica *collectionReplica) addExcludedSegments(collectionID UniqueID, segmentIDs []UniqueID) error {
+	colReplica.mu.Lock()
+	defer colReplica.mu.Unlock()
+
+	if _, ok := colReplica.excludedSegments[collectionID]; !ok {
+		return errors.New("addExcludedSegments failed, cannot found collection, id =" + fmt.Sprintln(collectionID))
 	}
 
-	colReplica.collections = make(map[UniqueID]*Collection)
-	colReplica.partitions = make(map[UniqueID]*Partition)
-	colReplica.segments = make(map[UniqueID]*Segment)
+	colReplica.excludedSegments[collectionID] = append(colReplica.excludedSegments[collectionID], segmentIDs...)
+	return nil
 }
 
-func newCollectionReplica() ReplicaInterface {
-	collections := make(map[int64]*Collection)
-	partitions := make(map[int64]*Partition)
-	segments := make(map[int64]*Segment)
+func (colReplica *collectionReplica) getExcludedSegments(collectionID UniqueID) ([]UniqueID, error) {
+	colReplica.mu.RLock()
+	defer colReplica.mu.RUnlock()
 
-	tSafe := newTSafe()
+	if _, ok := colReplica.excludedSegments[collectionID]; !ok {
+		return nil, errors.New("getExcludedSegments failed, cannot found collection, id =" + fmt.Sprintln(collectionID))
+	}
 
-	var replica ReplicaInterface = &collectionReplica{
-		collections: collections,
-		partitions:  partitions,
-		segments:    segments,
+	return colReplica.excludedSegments[collectionID], nil
+}
+
+//-----------------------------------------------------------------------------------------------------
+func (colReplica *collectionReplica) getTSafe(collectionID UniqueID) tSafer {
+	colReplica.mu.RLock()
+	defer colReplica.mu.RUnlock()
+	return colReplica.getTSafePrivate(collectionID)
+}
 
-		tSafe: tSafe,
+func (colReplica *collectionReplica) getTSafePrivate(collectionID UniqueID) tSafer {
+	return colReplica.tSafes[collectionID]
+}
+
+func (colReplica *collectionReplica) addTSafe(collectionID UniqueID) {
+	colReplica.mu.Lock()
+	defer colReplica.mu.Unlock()
+	colReplica.tSafes[collectionID] = newTSafe()
+}
+
+func (colReplica *collectionReplica) removeTSafe(collectionID UniqueID) {
+	colReplica.mu.Lock()
+	defer colReplica.mu.Unlock()
+	ts := colReplica.getTSafePrivate(collectionID)
+	ts.close()
+	delete(colReplica.tSafes, collectionID)
+}
+
+func (colReplica *collectionReplica) freeAll() {
+	colReplica.mu.Lock()
+	defer colReplica.mu.Unlock()
+
+	for id := range colReplica.collections {
+		_ = colReplica.removeCollectionPrivate(id)
 	}
 
-	return replica
+	colReplica.collections = make(map[UniqueID]*Collection)
+	colReplica.partitions = make(map[UniqueID]*Partition)
+	colReplica.segments = make(map[UniqueID]*Segment)
 }
 
 func (colReplica *collectionReplica) getSegmentsToLoadBySegmentType(segType segmentType) ([]UniqueID, []UniqueID, []UniqueID) {
@@ -634,3 +683,22 @@ func (colReplica *collectionReplica) getSegmentsToLoadBySegmentType(segType segm
 
 	return targetCollectionIDs, targetPartitionIDs, targetSegmentIDs
 }
+
+func newCollectionReplica() ReplicaInterface {
+	collections := make(map[UniqueID]*Collection)
+	partitions := make(map[UniqueID]*Partition)
+	segments := make(map[UniqueID]*Segment)
+	excludedSegments := make(map[UniqueID][]UniqueID)
+
+	var replica ReplicaInterface = &collectionReplica{
+		collections: collections,
+		partitions:  partitions,
+		segments:    segments,
+
+		excludedSegments: excludedSegments,
+
+		tSafes: make(map[UniqueID]tSafer),
+	}
+
+	return replica
+}
diff --git a/internal/querynode/data_sync_service.go b/internal/querynode/data_sync_service.go
index efb5feca6eca4903454b242ca08fa3bd5768fb39..d3f5128797df69a6b2c888d868fc645f399c1450 100644
--- a/internal/querynode/data_sync_service.go
+++ b/internal/querynode/data_sync_service.go
@@ -12,8 +12,11 @@ import (
 )
 
 type dataSyncService struct {
-	ctx context.Context
-	fg  *flowgraph.TimeTickedFlowGraph
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	collectionID UniqueID
+	fg           *flowgraph.TimeTickedFlowGraph
 
 	dmStream  msgstream.MsgStream
 	msFactory msgstream.Factory
@@ -21,12 +24,16 @@ type dataSyncService struct {
 	replica ReplicaInterface
 }
 
-func newDataSyncService(ctx context.Context, replica ReplicaInterface, factory msgstream.Factory) *dataSyncService {
+func newDataSyncService(ctx context.Context, replica ReplicaInterface, factory msgstream.Factory, collectionID UniqueID) *dataSyncService {
+	ctx1, cancel := context.WithCancel(ctx)
+
 	service := &dataSyncService{
-		ctx:       ctx,
-		fg:        nil,
-		replica:   replica,
-		msFactory: factory,
+		ctx:          ctx1,
+		cancel:       cancel,
+		collectionID: collectionID,
+		fg:           nil,
+		replica:      replica,
+		msFactory:    factory,
 	}
 
 	service.initNodes()
@@ -38,6 +45,7 @@ func (dsService *dataSyncService) start() {
 }
 
 func (dsService *dataSyncService) close() {
+	dsService.cancel()
 	if dsService.fg != nil {
 		dsService.fg.Close()
 	}
@@ -50,10 +58,10 @@ func (dsService *dataSyncService) initNodes() {
 
 	var dmStreamNode node = dsService.newDmInputNode(dsService.ctx)
 
-	var filterDmNode node = newFilteredDmNode(dsService.replica)
+	var filterDmNode node = newFilteredDmNode(dsService.replica, dsService.collectionID)
 
-	var insertNode node = newInsertNode(dsService.replica)
-	var serviceTimeNode node = newServiceTimeNode(dsService.ctx, dsService.replica, dsService.msFactory)
+	var insertNode node = newInsertNode(dsService.replica, dsService.collectionID)
+	var serviceTimeNode node = newServiceTimeNode(dsService.ctx, dsService.replica, dsService.msFactory, dsService.collectionID)
 
 	dsService.fg.AddNode(dmStreamNode)
 
diff --git a/internal/querynode/data_sync_service_test.go b/internal/querynode/data_sync_service_test.go
index 9a09d13ba6578a62ad9d947ee799c93212b50e73..fcd932796ff48175406c8bed789d1159b34108d5 100644
--- a/internal/querynode/data_sync_service_test.go
+++ b/internal/querynode/data_sync_service_test.go
@@ -18,6 +18,8 @@ import (
 func TestDataSyncService_Start(t *testing.T) {
 	ctx := context.Background()
 
+	collectionID := UniqueID(0)
+
 	node := newQueryNodeMock()
 	initTestMeta(t, node, 0, 0)
 	// test data generate
@@ -64,7 +66,7 @@ func TestDataSyncService_Start(t *testing.T) {
 					Timestamp: uint64(i + 1000),
 					SourceID:  0,
 				},
-				CollectionID: UniqueID(0),
+				CollectionID: collectionID,
 				PartitionID:  defaultPartitionID,
 				SegmentID:    int64(0),
 				ChannelID:    "0",
@@ -132,8 +134,8 @@ func TestDataSyncService_Start(t *testing.T) {
 	assert.NoError(t, err)
 
 	// dataSync
-	node.dataSyncService = newDataSyncService(node.queryNodeLoopCtx, node.replica, msFactory)
-	go node.dataSyncService.start()
+	node.dataSyncServices[collectionID] = newDataSyncService(node.queryNodeLoopCtx, node.replica, msFactory, collectionID)
+	go node.dataSyncServices[collectionID].start()
 
 	<-node.queryNodeLoopCtx.Done()
 	node.Stop()
diff --git a/internal/querynode/flow_graph_filter_dm_node.go b/internal/querynode/flow_graph_filter_dm_node.go
index 31008629f774573fff69492d2ecf4c3f1ce1ff58..e2e705ef87f8f14c13a03aa51d24ad03971da184 100644
--- a/internal/querynode/flow_graph_filter_dm_node.go
+++ b/internal/querynode/flow_graph_filter_dm_node.go
@@ -2,6 +2,7 @@ package querynode
 
 import (
 	"context"
+	"fmt"
 
 	"go.uber.org/zap"
 
@@ -12,7 +13,8 @@ import (
 
 type filterDmNode struct {
 	baseNode
-	replica ReplicaInterface
+	collectionID UniqueID
+	replica      ReplicaInterface
 }
 
 func (fdmNode *filterDmNode) Name() string {
@@ -33,6 +35,10 @@ func (fdmNode *filterDmNode) Operate(ctx context.Context, in []Msg) ([]Msg, cont
 		// TODO: add error handling
 	}
 
+	if msgStreamMsg == nil {
+		return []Msg{}, ctx
+	}
+
 	var iMsg = insertMsg{
 		insertMessages: make([]*msgstream.InsertMsg, 0),
 		timeRange: TimeRange{
@@ -60,14 +66,31 @@ func (fdmNode *filterDmNode) Operate(ctx context.Context, in []Msg) ([]Msg, cont
 }
 
 func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg) *msgstream.InsertMsg {
-	// TODO: open this check
-	// check if partition dm enable
-	enableCollection := fdmNode.replica.hasCollection(msg.CollectionID)
-	enablePartition := fdmNode.replica.hasPartition(msg.PartitionID)
-	if !enableCollection || !enablePartition {
+	// check if collection and partition exist
+	collection := fdmNode.replica.hasCollection(msg.CollectionID)
+	partition := fdmNode.replica.hasPartition(msg.PartitionID)
+	if !collection || !partition {
 		return nil
 	}
 
+	// check if the collection from message is target collection
+	if msg.CollectionID != fdmNode.collectionID {
+		return nil
+	}
+
+	// check if the segment is in excluded segments
+	excludedSegments, err := fdmNode.replica.getExcludedSegments(fdmNode.collectionID)
+	log.Debug("excluded segments", zap.String("segmentIDs", fmt.Sprintln(excludedSegments)))
+	if err != nil {
+		log.Error(err.Error())
+		return nil
+	}
+	for _, id := range excludedSegments {
+		if msg.SegmentID == id {
+			return nil
+		}
+	}
+
 	// TODO: If the last record is drop type, all insert requests are invalid.
 	//if !records[len(records)-1].createOrDrop {
 	//	return nil
@@ -80,27 +103,14 @@ func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg
 		return nil
 	}
 
-	tmpTimestamps := make([]Timestamp, 0)
-	tmpRowIDs := make([]int64, 0)
-	tmpRowData := make([]*commonpb.Blob, 0)
-
-	for i, t := range msg.Timestamps {
-		tmpTimestamps = append(tmpTimestamps, t)
-		tmpRowIDs = append(tmpRowIDs, msg.RowIDs[i])
-		tmpRowData = append(tmpRowData, msg.RowData[i])
-	}
-
-	if len(tmpRowIDs) <= 0 {
+	if len(msg.Timestamps) <= 0 {
 		return nil
 	}
 
-	msg.Timestamps = tmpTimestamps
-	msg.RowIDs = tmpRowIDs
-	msg.RowData = tmpRowData
 	return msg
 }
 
-func newFilteredDmNode(replica ReplicaInterface) *filterDmNode {
+func newFilteredDmNode(replica ReplicaInterface, collectionID UniqueID) *filterDmNode {
 	maxQueueLength := Params.FlowGraphMaxQueueLength
 	maxParallelism := Params.FlowGraphMaxParallelism
 
@@ -109,7 +119,8 @@ func newFilteredDmNode(replica ReplicaInterface) *filterDmNode {
 	baseNode.SetMaxParallelism(maxParallelism)
 
 	return &filterDmNode{
-		baseNode: baseNode,
-		replica:  replica,
+		baseNode:     baseNode,
+		collectionID: collectionID,
+		replica:      replica,
 	}
 }
diff --git a/internal/querynode/flow_graph_insert_node.go b/internal/querynode/flow_graph_insert_node.go
index cd49e78d392ac3df2d6e358fac9bb3a43e8a1430..91c4e08a82a7bd64b23aecc1a4ad16ba784c9b6e 100644
--- a/internal/querynode/flow_graph_insert_node.go
+++ b/internal/querynode/flow_graph_insert_node.go
@@ -12,7 +12,8 @@ import (
 
 type insertNode struct {
 	baseNode
-	replica ReplicaInterface
+	collectionID UniqueID
+	replica      ReplicaInterface
 }
 
 type InsertData struct {
@@ -48,6 +49,10 @@ func (iNode *insertNode) Operate(ctx context.Context, in []Msg) ([]Msg, context.
 		insertOffset:     make(map[int64]int64),
 	}
 
+	if iMsg == nil {
+		return []Msg{}, ctx
+	}
+
 	// 1. hash insertMessages to insertData
 	for _, task := range iMsg.insertMessages {
 		// check if segment exists, if not, create this segment
@@ -119,6 +124,11 @@ func (iNode *insertNode) insert(insertData *InsertData, segmentID int64, wg *syn
 		return
 	}
 
+	if targetSegment.segmentType != segmentTypeGrowing {
+		wg.Done()
+		return
+	}
+
 	ids := insertData.insertIDs[segmentID]
 	timestamps := insertData.insertTimestamps[segmentID]
 	records := insertData.insertRecords[segmentID]
@@ -132,11 +142,13 @@ func (iNode *insertNode) insert(insertData *InsertData, segmentID int64, wg *syn
 		return
 	}
 
-	log.Debug("Do insert done", zap.Int("len", len(insertData.insertIDs[segmentID])))
+	log.Debug("Do insert done", zap.Int("len", len(insertData.insertIDs[segmentID])),
+		zap.Int64("segmentID", segmentID),
+		zap.Int64("collectionID", iNode.collectionID))
 	wg.Done()
 }
 
-func newInsertNode(replica ReplicaInterface) *insertNode {
+func newInsertNode(replica ReplicaInterface, collectionID UniqueID) *insertNode {
 	maxQueueLength := Params.FlowGraphMaxQueueLength
 	maxParallelism := Params.FlowGraphMaxParallelism
 
@@ -145,7 +157,8 @@ func newInsertNode(replica ReplicaInterface) *insertNode {
 	baseNode.SetMaxParallelism(maxParallelism)
 
 	return &insertNode{
-		baseNode: baseNode,
-		replica:  replica,
+		baseNode:     baseNode,
+		collectionID: collectionID,
+		replica:      replica,
 	}
 }
diff --git a/internal/querynode/flow_graph_service_time_node.go b/internal/querynode/flow_graph_service_time_node.go
index 7b1ebbdf2ee4793fcf2aff12217ceb891e01ebd3..0e676e714bf17e08ffea68d5990538a72adacd0b 100644
--- a/internal/querynode/flow_graph_service_time_node.go
+++ b/internal/querynode/flow_graph_service_time_node.go
@@ -13,6 +13,7 @@ import (
 
 type serviceTimeNode struct {
 	baseNode
+	collectionID      UniqueID
 	replica           ReplicaInterface
 	timeTickMsgStream msgstream.MsgStream
 }
@@ -35,9 +36,18 @@ func (stNode *serviceTimeNode) Operate(ctx context.Context, in []Msg) ([]Msg, co
 		// TODO: add error handling
 	}
 
+	if serviceTimeMsg == nil {
+		return []Msg{}, ctx
+	}
+
 	// update service time
-	stNode.replica.getTSafe().set(serviceTimeMsg.timeRange.timestampMax)
-	//log.Debug("update tSafe to:", getPhysicalTime(serviceTimeMsg.timeRange.timestampMax))
+	ts := stNode.replica.getTSafe(stNode.collectionID)
+	if ts != nil {
+		ts.set(serviceTimeMsg.timeRange.timestampMax)
+		log.Debug("update tSafe:",
+			zap.Int64("tSafe", int64(serviceTimeMsg.timeRange.timestampMax)),
+			zap.Int64("collectionID", stNode.collectionID))
+	}
 
 	if err := stNode.sendTimeTick(serviceTimeMsg.timeRange.timestampMax); err != nil {
 		log.Error("Error: send time tick into pulsar channel failed", zap.Error(err))
@@ -71,7 +81,7 @@ func (stNode *serviceTimeNode) sendTimeTick(ts Timestamp) error {
 	return stNode.timeTickMsgStream.Produce(context.TODO(), &msgPack)
 }
 
-func newServiceTimeNode(ctx context.Context, replica ReplicaInterface, factory msgstream.Factory) *serviceTimeNode {
+func newServiceTimeNode(ctx context.Context, replica ReplicaInterface, factory msgstream.Factory, collectionID UniqueID) *serviceTimeNode {
 	maxQueueLength := Params.FlowGraphMaxQueueLength
 	maxParallelism := Params.FlowGraphMaxParallelism
 
@@ -85,6 +95,7 @@ func newServiceTimeNode(ctx context.Context, replica ReplicaInterface, factory m
 
 	return &serviceTimeNode{
 		baseNode:          baseNode,
+		collectionID:      collectionID,
 		replica:           replica,
 		timeTickMsgStream: timeTimeMsgStream,
 	}
diff --git a/internal/querynode/index_loader.go b/internal/querynode/index_loader.go
index fce7d3591c734270fb18fc5fedc8a074a66791d4..3076a71b8c65651035b7d5e483e8d225380f850d 100644
--- a/internal/querynode/index_loader.go
+++ b/internal/querynode/index_loader.go
@@ -111,7 +111,12 @@ func (loader *indexLoader) execute(l *loadIndex) error {
 	if err != nil {
 		return err
 	}
-	// 3. update segment index stats
+	// 3. drop vector field data if index loaded successfully
+	err = loader.dropVectorFieldData(l.segmentID, l.fieldID)
+	if err != nil {
+		return err
+	}
+	// 4. update segment index stats
 	err = loader.updateSegmentIndexStats(indexParams, indexName, indexID, l)
 	if err != nil {
 		return err
@@ -277,6 +282,14 @@ func (loader *indexLoader) updateSegmentIndex(indexParams indexParam, bytesIndex
 	return segment.updateSegmentIndex(loadIndexInfo)
 }
 
+func (loader *indexLoader) dropVectorFieldData(segmentID UniqueID, vecFieldID int64) error {
+	segment, err := loader.replica.getSegmentByID(segmentID)
+	if err != nil {
+		return err
+	}
+	return segment.dropFieldData(vecFieldID)
+}
+
 func (loader *indexLoader) sendQueryNodeStats() error {
 	resultFieldsStats := make([]*internalpb.FieldStats, 0)
 	for fieldStatsKey, indexStats := range loader.fieldIndexes {
diff --git a/internal/querynode/load_service.go b/internal/querynode/load_service.go
index d21a62a0bd40af33e80d0f62ef55f1ce53e4d451..9bdd010f4f38d721662b08f9c496a25b551ee3b6 100644
--- a/internal/querynode/load_service.go
+++ b/internal/querynode/load_service.go
@@ -2,19 +2,16 @@ package querynode
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sync"
 	"time"
 
-	"github.com/zilliztech/milvus-distributed/internal/types"
-
-	"errors"
-
 	"go.uber.org/zap"
 
 	"github.com/zilliztech/milvus-distributed/internal/log"
-	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
+	"github.com/zilliztech/milvus-distributed/internal/types"
 )
 
 const loadingCheckInterval = 3
@@ -76,7 +73,7 @@ func (s *loadService) loadSegmentActively(wg *sync.WaitGroup) {
 }
 
 // load segment passively
-func (s *loadService) loadSegment(collectionID UniqueID, partitionID UniqueID, segmentIDs []UniqueID, fieldIDs []int64) error {
+func (s *loadService) loadSegmentPassively(collectionID UniqueID, partitionID UniqueID, segmentIDs []UniqueID, fieldIDs []int64) error {
 	// TODO: interim solution
 	if len(fieldIDs) == 0 {
 		var err error
@@ -168,10 +165,10 @@ func (s *loadService) loadSegmentInternal(collectionID UniqueID, partitionID Uni
 	return nil
 }
 
-func newLoadService(ctx context.Context, masterService types.MasterService, dataService types.DataService, indexService types.IndexService, replica ReplicaInterface, dmStream msgstream.MsgStream) *loadService {
+func newLoadService(ctx context.Context, masterService types.MasterService, dataService types.DataService, indexService types.IndexService, replica ReplicaInterface) *loadService {
 	ctx1, cancel := context.WithCancel(ctx)
 
-	segLoader := newSegmentLoader(ctx1, masterService, indexService, dataService, replica, dmStream)
+	segLoader := newSegmentLoader(ctx1, masterService, indexService, dataService, replica)
 
 	return &loadService{
 		ctx:    ctx1,
diff --git a/internal/querynode/load_service_test.go b/internal/querynode/load_service_test.go
index 649565382528fbceddf1006b7988a012a1eb046b..f22123ad8f495986298a8af151616819b719ad11 100644
--- a/internal/querynode/load_service_test.go
+++ b/internal/querynode/load_service_test.go
@@ -1129,7 +1129,7 @@ func TestSegmentLoad_Search_Vector(t *testing.T) {
 	defer node.Stop()
 
 	ctx := node.queryNodeLoopCtx
-	node.loadService = newLoadService(ctx, nil, nil, nil, node.replica, nil)
+	node.loadService = newLoadService(ctx, nil, nil, nil, node.replica)
 
 	initTestMeta(t, node, collectionID, 0)
 
diff --git a/internal/querynode/meta_service.go b/internal/querynode/meta_service.go
index 4d06ce01c86f47b27218466c62c7dc38985127ed..95805f5cab73b35c4bd14b1e9b0caae4caf7d13a 100644
--- a/internal/querynode/meta_service.go
+++ b/internal/querynode/meta_service.go
@@ -46,7 +46,7 @@ func newMetaService(ctx context.Context, replica ReplicaInterface) *metaService
 		}
 		return nil
 	}
-	err = retry.Retry(200, time.Millisecond*200, connectEtcdFn)
+	err = retry.Retry(100000, time.Millisecond*200, connectEtcdFn)
 	if err != nil {
 		panic(err)
 	}
diff --git a/internal/querynode/query_node.go b/internal/querynode/query_node.go
index a74f159dc55571718f57498d225405768cde79dd..1f9a8413dbb8383cad315484023e4a10d4dc967e 100644
--- a/internal/querynode/query_node.go
+++ b/internal/querynode/query_node.go
@@ -14,17 +14,14 @@ import "C"
 
 import (
 	"context"
+	"errors"
 	"fmt"
-	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 	"math/rand"
+	"strconv"
 	"strings"
 	"sync/atomic"
 	"time"
 
-	"github.com/zilliztech/milvus-distributed/internal/types"
-
-	"errors"
-
 	"go.uber.org/zap"
 
 	"github.com/zilliztech/milvus-distributed/internal/log"
@@ -33,7 +30,9 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/msgstream/rmqms"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
+	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 	queryPb "github.com/zilliztech/milvus-distributed/internal/proto/querypb"
+	"github.com/zilliztech/milvus-distributed/internal/types"
 	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
 )
 
@@ -47,11 +46,11 @@ type QueryNode struct {
 	replica ReplicaInterface
 
 	// internal services
-	dataSyncService *dataSyncService
-	metaService     *metaService
-	searchService   *searchService
-	loadService     *loadService
-	statsService    *statsService
+	dataSyncServices map[UniqueID]*dataSyncService
+	metaService      *metaService
+	searchService    *searchService
+	loadService      *loadService
+	statsService     *statsService
 
 	// clients
 	masterService types.MasterService
@@ -70,10 +69,10 @@ func NewQueryNode(ctx context.Context, queryNodeID UniqueID, factory msgstream.F
 		queryNodeLoopCancel: cancel,
 		QueryNodeID:         queryNodeID,
 
-		dataSyncService: nil,
-		metaService:     nil,
-		searchService:   nil,
-		statsService:    nil,
+		dataSyncServices: make(map[UniqueID]*dataSyncService),
+		metaService:      nil,
+		searchService:    nil,
+		statsService:     nil,
 
 		msFactory: factory,
 	}
@@ -89,10 +88,10 @@ func NewQueryNodeWithoutID(ctx context.Context, factory msgstream.Factory) *Quer
 		queryNodeLoopCtx:    ctx1,
 		queryNodeLoopCancel: cancel,
 
-		dataSyncService: nil,
-		metaService:     nil,
-		searchService:   nil,
-		statsService:    nil,
+		dataSyncServices: make(map[UniqueID]*dataSyncService),
+		metaService:      nil,
+		searchService:    nil,
+		statsService:     nil,
 
 		msFactory: factory,
 	}
@@ -167,15 +166,13 @@ func (node *QueryNode) Start() error {
 	}
 
 	// init services and manager
-	node.dataSyncService = newDataSyncService(node.queryNodeLoopCtx, node.replica, node.msFactory)
 	node.searchService = newSearchService(node.queryNodeLoopCtx, node.replica, node.msFactory)
 	//node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
 
-	node.loadService = newLoadService(node.queryNodeLoopCtx, node.masterService, node.dataService, node.indexService, node.replica, node.dataSyncService.dmStream)
+	node.loadService = newLoadService(node.queryNodeLoopCtx, node.masterService, node.dataService, node.indexService, node.replica)
 	node.statsService = newStatsService(node.queryNodeLoopCtx, node.replica, node.loadService.segLoader.indexLoader.fieldStatsChan, node.msFactory)
 
 	// start services
-	go node.dataSyncService.start()
 	go node.searchService.start()
 	//go node.metaService.start()
 	go node.loadService.start()
@@ -192,8 +189,10 @@ func (node *QueryNode) Stop() error {
 	node.replica.freeAll()
 
 	// close services
-	if node.dataSyncService != nil {
-		node.dataSyncService.close()
+	for _, dsService := range node.dataSyncServices {
+		if dsService != nil {
+			dsService.close()
+		}
 	}
 	if node.searchService != nil {
 		node.searchService.close()
@@ -366,17 +365,20 @@ func (node *QueryNode) RemoveQueryChannel(ctx context.Context, in *queryPb.Remov
 }
 
 func (node *QueryNode) WatchDmChannels(ctx context.Context, in *queryPb.WatchDmChannelsRequest) (*commonpb.Status, error) {
-	if node.dataSyncService == nil || node.dataSyncService.dmStream == nil {
-		errMsg := "null data sync service or null data manipulation stream"
+	log.Debug("starting WatchDmChannels ...", zap.String("ChannelIDs", fmt.Sprintln(in.ChannelIDs)))
+	collectionID := in.CollectionID
+	service, ok := node.dataSyncServices[collectionID]
+	if !ok || service.dmStream == nil {
+		errMsg := "null data sync service or null data manipulation stream, collectionID = " + fmt.Sprintln(collectionID)
 		status := &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UnexpectedError,
 			Reason:    errMsg,
 		}
-
+		log.Error(errMsg)
 		return status, errors.New(errMsg)
 	}
 
-	switch t := node.dataSyncService.dmStream.(type) {
+	switch t := service.dmStream.(type) {
 	case *pulsarms.PulsarTtMsgStream:
 	case *rmqms.RmqTtMsgStream:
 	default:
@@ -386,19 +388,61 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, in *queryPb.WatchDmC
 			ErrorCode: commonpb.ErrorCode_UnexpectedError,
 			Reason:    errMsg,
 		}
-
+		log.Error(errMsg)
 		return status, errors.New(errMsg)
 	}
 
+	getUniqueSubName := func() string {
+		prefixName := Params.MsgChannelSubName
+		return prefixName + "-" + strconv.FormatInt(collectionID, 10)
+	}
+
 	// add request channel
 	consumeChannels := in.ChannelIDs
-	consumeSubName := Params.MsgChannelSubName
-	node.dataSyncService.dmStream.AsConsumer(consumeChannels, consumeSubName)
+	toSeekInfo := make([]*internalpb.MsgPosition, 0)
+	toDirSubChannels := make([]string, 0)
+
+	consumeSubName := getUniqueSubName()
+
+	for _, info := range in.Infos {
+		if len(info.Pos.MsgID) == 0 {
+			toDirSubChannels = append(toDirSubChannels, info.ChannelID)
+			continue
+		}
+		info.Pos.MsgGroup = consumeSubName
+		toSeekInfo = append(toSeekInfo, info.Pos)
+
+		log.Debug("prevent inserting segments", zap.String("segmentIDs", fmt.Sprintln(info.ExcludedSegments)))
+		err := node.replica.addExcludedSegments(collectionID, info.ExcludedSegments)
+		if err != nil {
+			status := &commonpb.Status{
+				ErrorCode: commonpb.ErrorCode_UnexpectedError,
+				Reason:    err.Error(),
+			}
+			log.Error(err.Error())
+			return status, err
+		}
+	}
+
+	service.dmStream.AsConsumer(toDirSubChannels, consumeSubName)
+	for _, pos := range toSeekInfo {
+		err := service.dmStream.Seek(pos)
+		if err != nil {
+			errMsg := "msgStream seek error :" + err.Error()
+			status := &commonpb.Status{
+				ErrorCode: commonpb.ErrorCode_UnexpectedError,
+				Reason:    errMsg,
+			}
+			log.Error(errMsg)
+			return status, errors.New(errMsg)
+		}
+	}
 	log.Debug("querynode AsConsumer: " + strings.Join(consumeChannels, ", ") + " : " + consumeSubName)
 
 	status := &commonpb.Status{
 		ErrorCode: commonpb.ErrorCode_Success,
 	}
+	log.Debug("WatchDmChannels done", zap.String("ChannelIDs", fmt.Sprintln(in.ChannelIDs)))
 	return status, nil
 }
 
@@ -418,12 +462,18 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *queryPb.LoadSegment
 	hasCollection := node.replica.hasCollection(collectionID)
 	hasPartition := node.replica.hasPartition(partitionID)
 	if !hasCollection {
+		// loading init
 		err := node.replica.addCollection(collectionID, schema)
 		if err != nil {
 			status.ErrorCode = commonpb.ErrorCode_UnexpectedError
 			status.Reason = err.Error()
 			return status, err
 		}
+		node.replica.initExcludedSegments(collectionID)
+		node.dataSyncServices[collectionID] = newDataSyncService(node.queryNodeLoopCtx, node.replica, node.msFactory, collectionID)
+		go node.dataSyncServices[collectionID].start()
+		node.replica.addTSafe(collectionID)
+		node.searchService.register(collectionID)
 	}
 	if !hasPartition {
 		err := node.replica.addPartition(collectionID, partitionID)
@@ -444,48 +494,28 @@ func (node *QueryNode) LoadSegments(ctx context.Context, in *queryPb.LoadSegment
 		return status, nil
 	}
 
-	if len(in.SegmentIDs) != len(in.SegmentStates) {
-		err := errors.New("len(segmentIDs) should equal to len(segmentStates)")
-		status.ErrorCode = commonpb.ErrorCode_UnexpectedError
-		status.Reason = err.Error()
-		return status, err
-	}
-
-	// segments are ordered before LoadSegments calling
-	//var position *internalpb.MsgPosition = nil
-	for i, state := range in.SegmentStates {
-		//thisPosition := state.StartPosition
-		if state.State <= commonpb.SegmentState_Growing {
-			//if position == nil {
-			//	position = &internalpb2.MsgPosition{
-			//		ChannelName: thisPosition.ChannelName,
-			//	}
-			//}
-			segmentIDs = segmentIDs[:i]
-			break
-		}
-		//position = state.StartPosition
-	}
-
-	//err = node.dataSyncService.seekSegment(position)
-	//if err != nil {
-	//	status := &commonpb.Status{
-	//		ErrorCode: commonpb.ErrorCode_UnexpectedError,
-	//		Reason:    err.Error(),
-	//	}
-	//	return status, err
-	//}
-
-	err = node.loadService.loadSegment(collectionID, partitionID, segmentIDs, fieldIDs)
+	err = node.loadService.loadSegmentPassively(collectionID, partitionID, segmentIDs, fieldIDs)
 	if err != nil {
 		status.ErrorCode = commonpb.ErrorCode_UnexpectedError
 		status.Reason = err.Error()
 		return status, err
 	}
+
+	log.Debug("LoadSegments done", zap.String("segmentIDs", fmt.Sprintln(in.SegmentIDs)))
 	return status, nil
 }
 
 func (node *QueryNode) ReleaseCollection(ctx context.Context, in *queryPb.ReleaseCollectionRequest) (*commonpb.Status, error) {
+	if _, ok := node.dataSyncServices[in.CollectionID]; ok {
+		node.dataSyncServices[in.CollectionID].close()
+		delete(node.dataSyncServices, in.CollectionID)
+		node.searchService.tSafeMutex.Lock()
+		delete(node.searchService.tSafeWatcher, in.CollectionID)
+		node.searchService.tSafeMutex.Unlock()
+		node.replica.removeTSafe(in.CollectionID)
+		node.replica.removeExcludedSegments(in.CollectionID)
+	}
+
 	err := node.replica.removeCollection(in.CollectionID)
 	if err != nil {
 		status := &commonpb.Status{
@@ -495,6 +525,7 @@ func (node *QueryNode) ReleaseCollection(ctx context.Context, in *queryPb.Releas
 		return status, err
 	}
 
+	log.Debug("ReleaseCollection done", zap.Int64("collectionID", in.CollectionID))
 	return &commonpb.Status{
 		ErrorCode: commonpb.ErrorCode_Success,
 	}, nil
diff --git a/internal/querynode/search_service.go b/internal/querynode/search_service.go
index f3ccc40b495d3f6d3cb42de3c0204b13ef400d44..c1173a449976e3a1014ae4fa5ce120793eaad90f 100644
--- a/internal/querynode/search_service.go
+++ b/internal/querynode/search_service.go
@@ -4,12 +4,12 @@ import "C"
 import (
 	"context"
 	"errors"
+	"fmt"
+	"go.uber.org/zap"
 	"strconv"
 	"strings"
 	"sync"
 
-	"go.uber.org/zap"
-
 	"github.com/golang/protobuf/proto"
 	"github.com/zilliztech/milvus-distributed/internal/log"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
@@ -24,13 +24,14 @@ type searchService struct {
 	cancel context.CancelFunc
 
 	replica      ReplicaInterface
-	tSafeWatcher *tSafeWatcher
+	tSafeMutex   *sync.Mutex
+	tSafeWatcher map[UniqueID]*tSafeWatcher
 
 	serviceableTimeMutex sync.Mutex // guards serviceableTime
-	serviceableTime      Timestamp
+	serviceableTime      map[UniqueID]Timestamp
 
-	msgBuffer             chan msgstream.TsMsg
-	unsolvedMsg           []msgstream.TsMsg
+	msgBuffer             chan *msgstream.SearchMsg
+	unsolvedMsg           []*msgstream.SearchMsg
 	searchMsgStream       msgstream.MsgStream
 	searchResultMsgStream msgstream.MsgStream
 	queryNodeID           UniqueID
@@ -54,17 +55,18 @@ func newSearchService(ctx context.Context, replica ReplicaInterface, factory msg
 	log.Debug("querynode AsProducer: " + strings.Join(producerChannels, ", "))
 
 	searchServiceCtx, searchServiceCancel := context.WithCancel(ctx)
-	msgBuffer := make(chan msgstream.TsMsg, receiveBufSize)
-	unsolvedMsg := make([]msgstream.TsMsg, 0)
+	msgBuffer := make(chan *msgstream.SearchMsg, receiveBufSize)
+	unsolvedMsg := make([]*msgstream.SearchMsg, 0)
 	return &searchService{
 		ctx:             searchServiceCtx,
 		cancel:          searchServiceCancel,
-		serviceableTime: Timestamp(0),
+		serviceableTime: make(map[UniqueID]Timestamp),
 		msgBuffer:       msgBuffer,
 		unsolvedMsg:     unsolvedMsg,
 
 		replica:      replica,
-		tSafeWatcher: newTSafeWatcher(),
+		tSafeMutex:   &sync.Mutex{},
+		tSafeWatcher: make(map[UniqueID]*tSafeWatcher),
 
 		searchMsgStream:       searchStream,
 		searchResultMsgStream: searchResultStream,
@@ -75,7 +77,6 @@ func newSearchService(ctx context.Context, replica ReplicaInterface, factory msg
 func (ss *searchService) start() {
 	ss.searchMsgStream.Start()
 	ss.searchResultMsgStream.Start()
-	ss.register()
 	ss.wait.Add(2)
 	go ss.receiveSearchMsg()
 	go ss.doUnsolvedMsgSearch()
@@ -92,32 +93,63 @@ func (ss *searchService) close() {
 	ss.cancel()
 }
 
-func (ss *searchService) register() {
-	tSafe := ss.replica.getTSafe()
-	tSafe.registerTSafeWatcher(ss.tSafeWatcher)
+func (ss *searchService) register(collectionID UniqueID) {
+	tSafe := ss.replica.getTSafe(collectionID)
+	ss.tSafeMutex.Lock()
+	ss.tSafeWatcher[collectionID] = newTSafeWatcher()
+	ss.tSafeMutex.Unlock()
+	tSafe.registerTSafeWatcher(ss.tSafeWatcher[collectionID])
 }
 
-func (ss *searchService) waitNewTSafe() Timestamp {
+func (ss *searchService) waitNewTSafe(collectionID UniqueID) (Timestamp, error) {
 	// block until dataSyncService updating tSafe
-	ss.tSafeWatcher.hasUpdate()
-	timestamp := ss.replica.getTSafe().get()
-	return timestamp
+	ss.tSafeWatcher[collectionID].hasUpdate()
+	ts := ss.replica.getTSafe(collectionID)
+	if ts != nil {
+		return ts.get(), nil
+	}
+	return 0, errors.New("tSafe closed, collectionID =" + fmt.Sprintln(collectionID))
 }
 
-func (ss *searchService) getServiceableTime() Timestamp {
+func (ss *searchService) getServiceableTime(collectionID UniqueID) Timestamp {
 	ss.serviceableTimeMutex.Lock()
 	defer ss.serviceableTimeMutex.Unlock()
-	return ss.serviceableTime
+	//t, ok := ss.serviceableTime[collectionID]
+	//if !ok {
+	//	return 0, errors.New("cannot found")
+	//}
+	return ss.serviceableTime[collectionID]
 }
 
-func (ss *searchService) setServiceableTime(t Timestamp) {
+func (ss *searchService) setServiceableTime(collectionID UniqueID, t Timestamp) {
 	ss.serviceableTimeMutex.Lock()
 	// hard code gracefultime to 1 second
 	// TODO: use config to set gracefultime
-	ss.serviceableTime = t + 1000*1000*1000
+	ss.serviceableTime[collectionID] = t + 1000*1000*1000
 	ss.serviceableTimeMutex.Unlock()
 }
 
+func (ss *searchService) collectionCheck(collectionID UniqueID) error {
+	// check if collection exists
+	if _, ok := ss.tSafeWatcher[collectionID]; !ok {
+		err := errors.New("no collection found, collectionID = " + strconv.FormatInt(collectionID, 10))
+		log.Error(err.Error())
+		return err
+	}
+	return nil
+}
+
+func (ss *searchService) emptySearch(searchMsg *msgstream.SearchMsg) {
+	err := ss.search(searchMsg)
+	if err != nil {
+		log.Error(err.Error())
+		err2 := ss.publishFailedSearchResult(searchMsg, err.Error())
+		if err2 != nil {
+			log.Error("publish FailedSearchResult failed", zap.Error(err2))
+		}
+	}
+}
+
 func (ss *searchService) receiveSearchMsg() {
 	defer ss.wait.Done()
 	for {
@@ -129,26 +161,34 @@ func (ss *searchService) receiveSearchMsg() {
 			if msgPack == nil || len(msgPack.Msgs) <= 0 {
 				continue
 			}
-			searchMsg := make([]msgstream.TsMsg, 0)
-			serverTime := ss.getServiceableTime()
-			for i, msg := range msgPack.Msgs {
-				if msg.BeginTs() > serverTime {
-					ss.msgBuffer <- msg
+			searchNum := 0
+			for _, msg := range msgPack.Msgs {
+				sm, ok := msg.(*msgstream.SearchMsg)
+				if !ok {
 					continue
 				}
-				searchMsg = append(searchMsg, msgPack.Msgs[i])
-			}
-			for _, msg := range searchMsg {
-				err := ss.search(msg)
+				err := ss.collectionCheck(sm.CollectionID)
+				if err != nil {
+					ss.emptySearch(sm)
+					searchNum++
+					continue
+				}
+				serviceTime := ss.getServiceableTime(sm.CollectionID)
+				if msg.BeginTs() > serviceTime {
+					ss.msgBuffer <- sm
+					continue
+				}
+				err = ss.search(sm)
 				if err != nil {
 					log.Error(err.Error())
-					err2 := ss.publishFailedSearchResult(msg, err.Error())
+					err2 := ss.publishFailedSearchResult(sm, err.Error())
 					if err2 != nil {
 						log.Error("publish FailedSearchResult failed", zap.Error(err2))
 					}
 				}
+				searchNum++
 			}
-			log.Debug("ReceiveSearchMsg, do search done", zap.Int("num of searchMsg", len(searchMsg)))
+			log.Debug("ReceiveSearchMsg, do search done", zap.Int("num of searchMsg", searchNum))
 		}
 	}
 }
@@ -160,18 +200,36 @@ func (ss *searchService) doUnsolvedMsgSearch() {
 		case <-ss.ctx.Done():
 			return
 		default:
-			serviceTime := ss.waitNewTSafe()
-			ss.setServiceableTime(serviceTime)
-			searchMsg := make([]msgstream.TsMsg, 0)
-			tempMsg := make([]msgstream.TsMsg, 0)
+			searchMsg := make([]*msgstream.SearchMsg, 0)
+			tempMsg := make([]*msgstream.SearchMsg, 0)
 			tempMsg = append(tempMsg, ss.unsolvedMsg...)
 			ss.unsolvedMsg = ss.unsolvedMsg[:0]
-			for _, msg := range tempMsg {
-				if msg.EndTs() <= serviceTime {
-					searchMsg = append(searchMsg, msg)
+
+			serviceTimeTmpTable := make(map[UniqueID]Timestamp)
+
+			searchNum := 0
+			for _, sm := range tempMsg {
+				err := ss.collectionCheck(sm.CollectionID)
+				if err != nil {
+					ss.emptySearch(sm)
+					searchNum++
+					continue
+				}
+				_, ok := serviceTimeTmpTable[sm.CollectionID]
+				if !ok {
+					serviceTime, err := ss.waitNewTSafe(sm.CollectionID)
+					if err != nil {
+						// TODO: emptySearch or continue, note: collection has been released
+						continue
+					}
+					ss.setServiceableTime(sm.CollectionID, serviceTime)
+					serviceTimeTmpTable[sm.CollectionID] = serviceTime
+				}
+				if sm.EndTs() <= serviceTimeTmpTable[sm.CollectionID] {
+					searchMsg = append(searchMsg, sm)
 					continue
 				}
-				ss.unsolvedMsg = append(ss.unsolvedMsg, msg)
+				ss.unsolvedMsg = append(ss.unsolvedMsg, sm)
 			}
 
 			for {
@@ -179,40 +237,52 @@ func (ss *searchService) doUnsolvedMsgSearch() {
 				if msgBufferLength <= 0 {
 					break
 				}
-				msg := <-ss.msgBuffer
-				if msg.EndTs() <= serviceTime {
-					searchMsg = append(searchMsg, msg)
+				sm := <-ss.msgBuffer
+				err := ss.collectionCheck(sm.CollectionID)
+				if err != nil {
+					ss.emptySearch(sm)
+					searchNum++
+					continue
+				}
+				_, ok := serviceTimeTmpTable[sm.CollectionID]
+				if !ok {
+					serviceTime, err := ss.waitNewTSafe(sm.CollectionID)
+					if err != nil {
+						// TODO: emptySearch or continue, note: collection has been released
+						continue
+					}
+					ss.setServiceableTime(sm.CollectionID, serviceTime)
+					serviceTimeTmpTable[sm.CollectionID] = serviceTime
+				}
+				if sm.EndTs() <= serviceTimeTmpTable[sm.CollectionID] {
+					searchMsg = append(searchMsg, sm)
 					continue
 				}
-				ss.unsolvedMsg = append(ss.unsolvedMsg, msg)
+				ss.unsolvedMsg = append(ss.unsolvedMsg, sm)
 			}
 
 			if len(searchMsg) <= 0 {
 				continue
 			}
-			for _, msg := range searchMsg {
-				err := ss.search(msg)
+			for _, sm := range searchMsg {
+				err := ss.search(sm)
 				if err != nil {
 					log.Error(err.Error())
-					err2 := ss.publishFailedSearchResult(msg, err.Error())
+					err2 := ss.publishFailedSearchResult(sm, err.Error())
 					if err2 != nil {
 						log.Error("publish FailedSearchResult failed", zap.Error(err2))
 					}
 				}
+				searchNum++
 			}
-			log.Debug("doUnsolvedMsgSearch, do search done", zap.Int("num of searchMsg", len(searchMsg)))
+			log.Debug("doUnsolvedMsgSearch, do search done", zap.Int("num of searchMsg", searchNum))
 		}
 	}
 }
 
 // TODO:: cache map[dsl]plan
 // TODO: reBatched search requests
-func (ss *searchService) search(msg msgstream.TsMsg) error {
-	searchMsg, ok := msg.(*msgstream.SearchMsg)
-	if !ok {
-		return errors.New("invalid request type = " + string(msg.Type()))
-	}
-
+func (ss *searchService) search(searchMsg *msgstream.SearchMsg) error {
 	searchTimestamp := searchMsg.Base.Timestamp
 	var queryBlob = searchMsg.Query.Value
 	query := milvuspb.SearchRequest{}
@@ -250,21 +320,17 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
 	partitionIDsInQuery := searchMsg.PartitionIDs
 	if len(partitionIDsInQuery) == 0 {
 		if len(partitionIDsInCol) == 0 {
-			return errors.New("can't find any partition in this collection on query node")
+			return errors.New("none of this collection's partition has been loaded")
 		}
 		searchPartitionIDs = partitionIDsInCol
 	} else {
-		findPartition := false
 		for _, id := range partitionIDsInQuery {
-			_, err := ss.replica.getPartitionByID(id)
-			if err == nil {
-				searchPartitionIDs = append(searchPartitionIDs, id)
-				findPartition = true
+			_, err2 := ss.replica.getPartitionByID(id)
+			if err2 != nil {
+				return err2
 			}
 		}
-		if !findPartition {
-			return errors.New("partition to be searched not exist in query node")
-		}
+		searchPartitionIDs = partitionIDsInQuery
 	}
 
 	for _, partitionID := range searchPartitionIDs {
@@ -380,14 +446,15 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
 		}
 
 		// For debugging, please don't delete.
+		//fmt.Println("==================== search result ======================")
 		//for i := 0; i < len(hits); i++ {
 		//	testHits := milvuspb.Hits{}
 		//	err := proto.Unmarshal(hits[i], &testHits)
 		//	if err != nil {
 		//		panic(err)
 		//	}
-		//	log.Debug(testHits.IDs)
-		//	log.Debug(testHits.Scores)
+		//	fmt.Println(testHits.IDs)
+		//	fmt.Println(testHits.Scores)
 		//}
 		err = ss.publishSearchResult(searchResultMsg)
 		if err != nil {
@@ -412,16 +479,12 @@ func (ss *searchService) publishSearchResult(msg msgstream.TsMsg) error {
 	return err
 }
 
-func (ss *searchService) publishFailedSearchResult(msg msgstream.TsMsg, errMsg string) error {
+func (ss *searchService) publishFailedSearchResult(searchMsg *msgstream.SearchMsg, errMsg string) error {
 	// span, ctx := opentracing.StartSpanFromContext(msg.GetMsgContext(), "receive search msg")
 	// defer span.Finish()
 	// msg.SetMsgContext(ctx)
 	//log.Debug("Public fail SearchResult!")
 	msgPack := msgstream.MsgPack{}
-	searchMsg, ok := msg.(*msgstream.SearchMsg)
-	if !ok {
-		return errors.New("invalid request type = " + string(msg.Type()))
-	}
 
 	resultChannelInt, _ := strconv.ParseInt(searchMsg.ResultChannelID, 10, 64)
 	searchResultMsg := &msgstream.SearchResultMsg{
diff --git a/internal/querynode/search_service_test.go b/internal/querynode/search_service_test.go
index 95e8c555ef225e173ffee99780562a66a6b3bdbf..b2fb474c87c4a198082337db28d7639835ef5c6f 100644
--- a/internal/querynode/search_service_test.go
+++ b/internal/querynode/search_service_test.go
@@ -21,6 +21,8 @@ import (
 func TestSearch_Search(t *testing.T) {
 	ctx := context.Background()
 
+	collectionID := UniqueID(0)
+
 	node := newQueryNodeMock()
 	initTestMeta(t, node, 0, 0)
 
@@ -111,6 +113,8 @@ func TestSearch_Search(t *testing.T) {
 
 	node.searchService = newSearchService(node.queryNodeLoopCtx, node.replica, msFactory)
 	go node.searchService.start()
+	node.replica.addTSafe(collectionID)
+	node.searchService.register(collectionID)
 
 	// start insert
 	timeRange := TimeRange{
@@ -143,7 +147,7 @@ func TestSearch_Search(t *testing.T) {
 					Timestamp: uint64(10 + 1000),
 					SourceID:  0,
 				},
-				CollectionID: UniqueID(0),
+				CollectionID: collectionID,
 				PartitionID:  defaultPartitionID,
 				SegmentID:    int64(0),
 				ChannelID:    "0",
@@ -209,8 +213,8 @@ func TestSearch_Search(t *testing.T) {
 	assert.NoError(t, err)
 
 	// dataSync
-	node.dataSyncService = newDataSyncService(node.queryNodeLoopCtx, node.replica, msFactory)
-	go node.dataSyncService.start()
+	node.dataSyncServices[collectionID] = newDataSyncService(node.queryNodeLoopCtx, node.replica, msFactory, collectionID)
+	go node.dataSyncServices[collectionID].start()
 
 	time.Sleep(1 * time.Second)
 
@@ -219,6 +223,9 @@ func TestSearch_Search(t *testing.T) {
 
 func TestSearch_SearchMultiSegments(t *testing.T) {
 	ctx := context.Background()
+
+	collectionID := UniqueID(0)
+
 	pulsarURL := Params.PulsarAddress
 	const receiveBufSize = 1024
 
@@ -309,6 +316,8 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
 
 	node.searchService = newSearchService(node.queryNodeLoopCtx, node.replica, msFactory)
 	go node.searchService.start()
+	node.replica.addTSafe(collectionID)
+	node.searchService.register(collectionID)
 
 	// start insert
 	timeRange := TimeRange{
@@ -345,7 +354,7 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
 					Timestamp: uint64(i + 1000),
 					SourceID:  0,
 				},
-				CollectionID: UniqueID(0),
+				CollectionID: collectionID,
 				PartitionID:  defaultPartitionID,
 				SegmentID:    int64(segmentID),
 				ChannelID:    "0",
@@ -411,8 +420,8 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
 	assert.NoError(t, err)
 
 	// dataSync
-	node.dataSyncService = newDataSyncService(node.queryNodeLoopCtx, node.replica, msFactory)
-	go node.dataSyncService.start()
+	node.dataSyncServices[collectionID] = newDataSyncService(node.queryNodeLoopCtx, node.replica, msFactory, collectionID)
+	go node.dataSyncServices[collectionID].start()
 
 	time.Sleep(1 * time.Second)
 
diff --git a/internal/querynode/segment.go b/internal/querynode/segment.go
index 5c5246deb31741735e3265cd53df94f17c5c3b28..9f89cf83b37cf6c6c7b2b1bf5b0d82cba1cb0282 100644
--- a/internal/querynode/segment.go
+++ b/internal/querynode/segment.go
@@ -12,6 +12,7 @@ package querynode
 */
 import "C"
 import (
+	"fmt"
 	"strconv"
 	"sync"
 	"unsafe"
@@ -31,7 +32,7 @@ const (
 	segmentTypeInvalid segmentType = iota
 	segmentTypeGrowing
 	segmentTypeSealed
-	segTypeIndexing
+	segmentTypeIndexing
 )
 
 type indexParam = map[string]string
@@ -268,34 +269,6 @@ func (s *Segment) fillTargetEntry(plan *Plan,
 	return nil
 }
 
-// segment, err := loadService.replica.getSegmentByID(segmentID)
-func (s *Segment) updateSegmentIndex(loadIndexInfo *LoadIndexInfo) error {
-	if s.segmentPtr == nil {
-		return errors.New("null seg core pointer")
-	}
-	var status C.CStatus
-
-	if s.segmentType == segmentTypeGrowing {
-		status = C.UpdateSegmentIndex(s.segmentPtr, loadIndexInfo.cLoadIndexInfo)
-	} else if s.segmentType == segmentTypeSealed {
-		status = C.UpdateSealedSegmentIndex(s.segmentPtr, loadIndexInfo.cLoadIndexInfo)
-	} else {
-		return errors.New("illegal segment type")
-	}
-
-	errorCode := status.error_code
-
-	if errorCode != 0 {
-		errorMsg := C.GoString(status.error_msg)
-		defer C.free(unsafe.Pointer(status.error_msg))
-		return errors.New("updateSegmentIndex failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
-	}
-
-	s.setType(segTypeIndexing)
-
-	return nil
-}
-
 func (s *Segment) setIndexParam(fieldID int64, indexParamKv []*commonpb.KeyValuePair) error {
 	s.paramMutex.Lock()
 	defer s.paramMutex.Unlock()
@@ -461,7 +434,8 @@ func (s *Segment) segmentLoadFieldData(fieldID int64, rowCount int, data interfa
 		return errors.New("null seg core pointer")
 	}
 	if s.segmentType != segmentTypeSealed {
-		return errors.New("illegal segment type when loading field data")
+		errMsg := fmt.Sprintln("segmentLoadFieldData failed, illegal segment type ", s.segmentType, "segmentID = ", s.ID())
+		return errors.New(errMsg)
 	}
 
 	// data interface check
@@ -536,7 +510,86 @@ func (s *Segment) segmentLoadFieldData(fieldID int64, rowCount int, data interfa
 		return errors.New("LoadFieldData failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
 	}
 
-	log.Debug("load field done", zap.Int64("fieldID", fieldID), zap.Int("row count", rowCount))
+	log.Debug("load field done",
+		zap.Int64("fieldID", fieldID),
+		zap.Int("row count", rowCount),
+		zap.Int64("segmentID", s.ID()))
+
+	return nil
+}
+
+func (s *Segment) dropFieldData(fieldID int64) error {
+	/*
+		CStatus
+		DropFieldData(CSegmentInterface c_segment, int64_t field_id);
+	*/
+	if s.segmentPtr == nil {
+		return errors.New("null seg core pointer")
+	}
+	if s.segmentType != segmentTypeIndexing {
+		errMsg := fmt.Sprintln("dropFieldData failed, illegal segment type ", s.segmentType, "segmentID = ", s.ID())
+		return errors.New(errMsg)
+	}
+
+	var status = C.DropFieldData(s.segmentPtr, C.long(fieldID))
+	errorCode := status.error_code
+	if errorCode != 0 {
+		errorMsg := C.GoString(status.error_msg)
+		defer C.free(unsafe.Pointer(status.error_msg))
+		return errors.New("dropFieldData failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
+	}
+
+	log.Debug("dropFieldData done", zap.Int64("fieldID", fieldID), zap.Int64("segmentID", s.ID()))
+
+	return nil
+}
+
+func (s *Segment) updateSegmentIndex(loadIndexInfo *LoadIndexInfo) error {
+	if s.segmentPtr == nil {
+		return errors.New("null seg core pointer")
+	}
+
+	if s.segmentType != segmentTypeSealed {
+		errMsg := fmt.Sprintln("updateSegmentIndex failed, illegal segment type ", s.segmentType, "segmentID = ", s.ID())
+		return errors.New(errMsg)
+	}
+
+	status := C.UpdateSealedSegmentIndex(s.segmentPtr, loadIndexInfo.cLoadIndexInfo)
+	errorCode := status.error_code
+	if errorCode != 0 {
+		errorMsg := C.GoString(status.error_msg)
+		defer C.free(unsafe.Pointer(status.error_msg))
+		return errors.New("updateSegmentIndex failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
+	}
+
+	s.setType(segmentTypeIndexing)
+	log.Debug("updateSegmentIndex done", zap.Int64("segmentID", s.ID()))
+
+	return nil
+}
+
+func (s *Segment) dropSegmentIndex(fieldID int64) error {
+	/*
+		CStatus
+		DropSealedSegmentIndex(CSegmentInterface c_segment, int64_t field_id);
+	*/
+	if s.segmentPtr == nil {
+		return errors.New("null seg core pointer")
+	}
+	if s.segmentType != segmentTypeIndexing {
+		errMsg := fmt.Sprintln("dropFieldData failed, illegal segment type ", s.segmentType, "segmentID = ", s.ID())
+		return errors.New(errMsg)
+	}
+
+	var status = C.DropSealedSegmentIndex(s.segmentPtr, C.long(fieldID))
+	errorCode := status.error_code
+	if errorCode != 0 {
+		errorMsg := C.GoString(status.error_msg)
+		defer C.free(unsafe.Pointer(status.error_msg))
+		return errors.New("dropSegmentIndex failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
+	}
+
+	log.Debug("dropSegmentIndex done", zap.Int64("fieldID", fieldID), zap.Int64("segmentID", s.ID()))
 
 	return nil
 }
diff --git a/internal/querynode/segment_loader.go b/internal/querynode/segment_loader.go
index 45bc79567d9a49ebfa33ea4c8f573eec5b1a3b2d..91498c4c3303ef0128f6a6296aa6a8d00c92498d 100644
--- a/internal/querynode/segment_loader.go
+++ b/internal/querynode/segment_loader.go
@@ -2,31 +2,25 @@ package querynode
 
 import (
 	"context"
-	"strconv"
-
-	"github.com/zilliztech/milvus-distributed/internal/types"
-
 	"errors"
+	"strconv"
 
 	"github.com/zilliztech/milvus-distributed/internal/kv"
 	minioKV "github.com/zilliztech/milvus-distributed/internal/kv/minio"
-	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
 	"github.com/zilliztech/milvus-distributed/internal/storage"
+	"github.com/zilliztech/milvus-distributed/internal/types"
 )
 
 // segmentLoader is only responsible for loading the field data from binlog
 type segmentLoader struct {
 	replica ReplicaInterface
 
-	dmStream msgstream.MsgStream
-
 	dataService types.DataService
 
-	kv     kv.Base // minio kv
-	iCodec *storage.InsertCodec
+	kv kv.Base // minio kv
 
 	indexLoader *indexLoader
 }
@@ -117,6 +111,8 @@ func (loader *segmentLoader) checkTargetFields(paths []*internalpb.StringList, s
 }
 
 func (loader *segmentLoader) loadSegmentFieldsData(segment *Segment, targetFields map[int64]*internalpb.StringList) error {
+	iCodec := storage.InsertCodec{}
+	defer iCodec.Close()
 	for id, p := range targetFields {
 		if id == timestampFieldID {
 			// seg core doesn't need timestamp field
@@ -136,7 +132,7 @@ func (loader *segmentLoader) loadSegmentFieldsData(segment *Segment, targetField
 				Value: []byte(binLog),
 			})
 		}
-		_, _, insertData, err := loader.iCodec.Deserialize(blobs)
+		_, _, insertData, err := iCodec.Deserialize(blobs)
 		if err != nil {
 			// TODO: return or continue
 			return err
@@ -193,7 +189,7 @@ func (loader *segmentLoader) loadSegmentFieldsData(segment *Segment, targetField
 	return nil
 }
 
-func newSegmentLoader(ctx context.Context, masterService types.MasterService, indexService types.IndexService, dataService types.DataService, replica ReplicaInterface, dmStream msgstream.MsgStream) *segmentLoader {
+func newSegmentLoader(ctx context.Context, masterService types.MasterService, indexService types.IndexService, dataService types.DataService, replica ReplicaInterface) *segmentLoader {
 	option := &minioKV.Option{
 		Address:           Params.MinioEndPoint,
 		AccessKeyID:       Params.MinioAccessKeyID,
@@ -212,12 +208,9 @@ func newSegmentLoader(ctx context.Context, masterService types.MasterService, in
 	return &segmentLoader{
 		replica: replica,
 
-		dmStream: dmStream,
-
 		dataService: dataService,
 
-		kv:     client,
-		iCodec: &storage.InsertCodec{},
+		kv: client,
 
 		indexLoader: iLoader,
 	}
diff --git a/internal/querynode/tsafe.go b/internal/querynode/tsafe.go
index 103e5200d36a522a451dfafc08b9c360b27cb332..e8e0ee537dcafca481c60082042eed41a868eb0f 100644
--- a/internal/querynode/tsafe.go
+++ b/internal/querynode/tsafe.go
@@ -28,6 +28,7 @@ type tSafer interface {
 	get() Timestamp
 	set(t Timestamp)
 	registerTSafeWatcher(t *tSafeWatcher)
+	close()
 }
 
 type tSafe struct {
@@ -64,3 +65,12 @@ func (ts *tSafe) set(t Timestamp) {
 		watcher.notify()
 	}
 }
+
+func (ts *tSafe) close() {
+	ts.tSafeMu.Lock()
+	defer ts.tSafeMu.Unlock()
+
+	for _, watcher := range ts.watcherList {
+		close(watcher.notifyChan)
+	}
+}
diff --git a/internal/queryservice/meta_replica.go b/internal/queryservice/meta_replica.go
index 987cd1ef9df2da0963c6d3c535ade1c5f657b2dc..b70211cb9d47fc277f7cbd6a53d315f7310ecc3c 100644
--- a/internal/queryservice/meta_replica.go
+++ b/internal/queryservice/meta_replica.go
@@ -1,10 +1,9 @@
 package queryservice
 
 import (
-	"strconv"
-
 	"errors"
 
+	internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/querypb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
 )
@@ -21,8 +20,9 @@ type Replica interface {
 	getPartitionStates(dbID UniqueID, collectionID UniqueID, partitionIDs []UniqueID) ([]*querypb.PartitionStates, error)
 	releaseCollection(dbID UniqueID, collectionID UniqueID) error
 	releasePartition(dbID UniqueID, collectionID UniqueID, partitionID UniqueID) error
-	addDmChannels(dbID UniqueID, collectionID UniqueID, channels2NodeID map[string]int64) error
-	getAssignedNodeIDByChannelName(dbID UniqueID, collectionID UniqueID, channel string) (int64, error)
+	addDmChannel(dbID UniqueID, collectionID UniqueID, channel string, watchedStartPos *internalPb.MsgPosition) error
+	addExcludeSegmentIDs(dbID UniqueID, collectionID UniqueID, excludeSegments []UniqueID) error
+	//getAssignedNodeIDByChannelName(dbID UniqueID, collectionID UniqueID, channel string) (int64, error)
 }
 
 type segment struct {
@@ -36,10 +36,12 @@ type partition struct {
 }
 
 type collection struct {
-	id              UniqueID
-	partitions      map[UniqueID]*partition
-	dmChannels2Node map[string]int64
-	schema          *schemapb.CollectionSchema
+	id                UniqueID
+	partitions        map[UniqueID]*partition
+	dmChannels        []string
+	dmChannels2Pos    map[string]*internalPb.MsgPosition
+	excludeSegmentIds []UniqueID
+	schema            *schemapb.CollectionSchema
 }
 
 type metaReplica struct {
@@ -62,12 +64,16 @@ func (mp *metaReplica) addCollection(dbID UniqueID, collectionID UniqueID, schem
 	//TODO:: assert dbID = 0 exist
 	if _, ok := mp.db2collections[dbID]; ok {
 		partitions := make(map[UniqueID]*partition)
-		channels := make(map[string]int64)
+		channels := make([]string, 0)
+		startPos := make(map[string]*internalPb.MsgPosition)
+		excludeSegmentIDs := make([]UniqueID, 0)
 		newCollection := &collection{
-			id:              collectionID,
-			partitions:      partitions,
-			schema:          schema,
-			dmChannels2Node: channels,
+			id:                collectionID,
+			partitions:        partitions,
+			schema:            schema,
+			dmChannels:        channels,
+			dmChannels2Pos:    startPos,
+			excludeSegmentIds: excludeSegmentIDs,
 		}
 		mp.db2collections[dbID] = append(mp.db2collections[dbID], newCollection)
 		return nil
@@ -216,8 +222,7 @@ func (mp *metaReplica) releaseCollection(dbID UniqueID, collectionID UniqueID) e
 		}
 	}
 
-	errorStr := "releaseCollection: can't find dbID or collectionID " + strconv.FormatInt(collectionID, 10)
-	return errors.New(errorStr)
+	return nil
 }
 
 func (mp *metaReplica) releasePartition(dbID UniqueID, collectionID UniqueID, partitionID UniqueID) error {
@@ -232,17 +237,15 @@ func (mp *metaReplica) releasePartition(dbID UniqueID, collectionID UniqueID, pa
 		}
 	}
 
-	errorStr := "releasePartition: can't find dbID or collectionID or partitionID " + strconv.FormatInt(partitionID, 10)
-	return errors.New(errorStr)
+	return nil
 }
 
-func (mp *metaReplica) addDmChannels(dbID UniqueID, collectionID UniqueID, channels2NodeID map[string]int64) error {
+func (mp *metaReplica) addDmChannel(dbID UniqueID, collectionID UniqueID, channel string, watchedStartPos *internalPb.MsgPosition) error {
 	if collections, ok := mp.db2collections[dbID]; ok {
 		for _, collection := range collections {
 			if collectionID == collection.id {
-				for channel, id := range channels2NodeID {
-					collection.dmChannels2Node[channel] = id
-				}
+				collection.dmChannels = append(collection.dmChannels, channel)
+				collection.dmChannels2Pos[channel] = watchedStartPos
 				return nil
 			}
 		}
@@ -250,16 +253,14 @@ func (mp *metaReplica) addDmChannels(dbID UniqueID, collectionID UniqueID, chann
 	return errors.New("addDmChannels: can't find dbID or collectionID")
 }
 
-func (mp *metaReplica) getAssignedNodeIDByChannelName(dbID UniqueID, collectionID UniqueID, channel string) (int64, error) {
+func (mp *metaReplica) addExcludeSegmentIDs(dbID UniqueID, collectionID UniqueID, excludeSegments []UniqueID) error {
 	if collections, ok := mp.db2collections[dbID]; ok {
 		for _, collection := range collections {
 			if collectionID == collection.id {
-				if id, ok := collection.dmChannels2Node[channel]; ok {
-					return id, nil
-				}
+				collection.excludeSegmentIds = append(collection.excludeSegmentIds, excludeSegments...)
+				return nil
 			}
 		}
 	}
-
-	return 0, errors.New("getAssignedNodeIDByChannelName: can't find dbID or collectionID")
+	return errors.New("addExcludeSegmentIDs: can't find dbID or collectionID")
 }
diff --git a/internal/queryservice/querynode.go b/internal/queryservice/querynode.go
index 790962e45ac5964042f2d16f86514ca6b3b49d45..fd3302f2c5b7faf9c445be83f6ce9f6a6aeb1f1e 100644
--- a/internal/queryservice/querynode.go
+++ b/internal/queryservice/querynode.go
@@ -10,9 +10,9 @@ import (
 )
 
 type queryNodeInfo struct {
-	client         types.QueryNode
-	segments       []UniqueID
-	dmChannelNames []string
+	client       types.QueryNode
+	segments     map[UniqueID][]UniqueID
+	channels2Col map[UniqueID][]string
 }
 
 func (qn *queryNodeInfo) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) {
@@ -31,8 +31,20 @@ func (qn *queryNodeInfo) WatchDmChannels(ctx context.Context, in *querypb.WatchD
 	return qn.client.WatchDmChannels(ctx, in)
 }
 
-func (qn *queryNodeInfo) AddDmChannels(channels []string) {
-	qn.dmChannelNames = append(qn.dmChannelNames, channels...)
+func (qn *queryNodeInfo) AddDmChannels(channels []string, collectionID UniqueID) {
+	if _, ok := qn.channels2Col[collectionID]; !ok {
+		chs := make([]string, 0)
+		qn.channels2Col[collectionID] = chs
+	}
+	qn.channels2Col[collectionID] = append(qn.channels2Col[collectionID], channels...)
+}
+
+func (qn *queryNodeInfo) AddSegments(segmentIDs []UniqueID, collectionID UniqueID) {
+	if _, ok := qn.segments[collectionID]; !ok {
+		seg := make([]UniqueID, 0)
+		qn.segments[collectionID] = seg
+	}
+	qn.segments[collectionID] = append(qn.segments[collectionID], segmentIDs...)
 }
 
 func (qn *queryNodeInfo) AddQueryChannel(ctx context.Context, in *querypb.AddQueryChannelRequest) (*commonpb.Status, error) {
@@ -40,7 +52,13 @@ func (qn *queryNodeInfo) AddQueryChannel(ctx context.Context, in *querypb.AddQue
 }
 
 func (qn *queryNodeInfo) ReleaseCollection(ctx context.Context, in *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
-	return qn.client.ReleaseCollection(ctx, in)
+	status, err := qn.client.ReleaseCollection(ctx, in)
+	if err != nil {
+		return status, err
+	}
+	delete(qn.segments, in.CollectionID)
+	delete(qn.channels2Col, in.CollectionID)
+	return status, nil
 }
 
 func (qn *queryNodeInfo) ReleasePartitions(ctx context.Context, in *querypb.ReleasePartitionsRequest) (*commonpb.Status, error) {
@@ -48,11 +66,11 @@ func (qn *queryNodeInfo) ReleasePartitions(ctx context.Context, in *querypb.Rele
 }
 
 func newQueryNodeInfo(client types.QueryNode) *queryNodeInfo {
-	segments := make([]UniqueID, 0)
-	dmChannelNames := make([]string, 0)
+	segments := make(map[UniqueID][]UniqueID)
+	channels := make(map[UniqueID][]string)
 	return &queryNodeInfo{
-		client:         client,
-		segments:       segments,
-		dmChannelNames: dmChannelNames,
+		client:       client,
+		segments:     segments,
+		channels2Col: channels,
 	}
 }
diff --git a/internal/queryservice/queryservice.go b/internal/queryservice/queryservice.go
index bd9c7b3be09f7cabc124a5edc129d8193c49d522..868ca8c1b280075bf893931ff70a62d40a19a982 100644
--- a/internal/queryservice/queryservice.go
+++ b/internal/queryservice/queryservice.go
@@ -14,17 +14,17 @@ import (
 
 	"github.com/opentracing/opentracing-go"
 	"github.com/uber/jaeger-client-go/config"
-	nodeclient "github.com/zilliztech/milvus-distributed/internal/distributed/querynode/client"
-	"github.com/zilliztech/milvus-distributed/internal/log"
-	"github.com/zilliztech/milvus-distributed/internal/types"
 	"go.uber.org/zap"
 
+	nodeclient "github.com/zilliztech/milvus-distributed/internal/distributed/querynode/client"
+	"github.com/zilliztech/milvus-distributed/internal/log"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/querypb"
+	"github.com/zilliztech/milvus-distributed/internal/types"
 	"github.com/zilliztech/milvus-distributed/internal/util/retry"
 )
 
@@ -232,11 +232,6 @@ func (qs *QueryService) LoadCollection(ctx context.Context, req *querypb.LoadCol
 		if err != nil {
 			return fn(err), err
 		}
-
-		err = qs.watchDmChannels(dbID, collectionID)
-		if err != nil {
-			return fn(err), err
-		}
 	}
 
 	// get partitionIDs
@@ -297,9 +292,15 @@ func (qs *QueryService) LoadCollection(ctx context.Context, req *querypb.LoadCol
 		log.Error("LoadCollectionRequest failed", zap.String("role", Params.RoleName), zap.Int64("msgID", req.Base.MsgID), zap.Error(err))
 		return status, fmt.Errorf("load partitions: %s", err)
 	}
+
+	err = qs.watchDmChannels(dbID, collectionID)
+	if err != nil {
+		log.Error("LoadCollectionRequest failed", zap.String("role", Params.RoleName), zap.Int64("msgID", req.Base.MsgID), zap.Error(err))
+		return fn(err), err
+	}
+
 	log.Debug("LoadCollectionRequest completed", zap.String("role", Params.RoleName), zap.Int64("msgID", req.Base.MsgID))
 	return status, nil
-
 }
 
 func (qs *QueryService) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
@@ -330,7 +331,7 @@ func (qs *QueryService) ReleaseCollection(ctx context.Context, req *querypb.Rele
 		}, err
 	}
 
-	log.Debug("release collection end")
+	log.Debug("release collection end", zap.Int64("collectionID", collectionID))
 	//TODO:: queryNode cancel subscribe dmChannels
 	return &commonpb.Status{
 		ErrorCode: commonpb.ErrorCode_Success,
@@ -388,16 +389,14 @@ func (qs *QueryService) LoadPartitions(ctx context.Context, req *querypb.LoadPar
 		return fn(err), err
 	}
 
+	watchNeeded := false
 	_, err := qs.replica.getCollectionByID(dbID, collectionID)
 	if err != nil {
 		err = qs.replica.addCollection(dbID, collectionID, schema)
 		if err != nil {
 			return fn(err), err
 		}
-		err = qs.watchDmChannels(dbID, collectionID)
-		if err != nil {
-			return fn(err), err
-		}
+		watchNeeded = true
 	}
 
 	for _, partitionID := range partitionIDs {
@@ -446,7 +445,7 @@ func (qs *QueryService) LoadPartitions(ctx context.Context, req *querypb.LoadPar
 			return fn(err), err
 		}
 		for _, state := range resp.States {
-			log.Error("segment ", zap.String("state.SegmentID", fmt.Sprintln(state.SegmentID)), zap.String("state", fmt.Sprintln(state.StartPosition)))
+			log.Debug("segment ", zap.String("state.SegmentID", fmt.Sprintln(state.SegmentID)), zap.String("state", fmt.Sprintln(state.StartPosition)))
 			segmentID := state.SegmentID
 			segmentStates[segmentID] = state
 			channelName := state.StartPosition.ChannelName
@@ -459,38 +458,79 @@ func (qs *QueryService) LoadPartitions(ctx context.Context, req *querypb.LoadPar
 			}
 		}
 
+		excludeSegment := make([]UniqueID, 0)
+		for id, state := range segmentStates {
+			if state.State > commonpb.SegmentState_Growing {
+				excludeSegment = append(excludeSegment, id)
+			}
+		}
 		for channel, segmentIDs := range channel2segs {
 			sort.Slice(segmentIDs, func(i, j int) bool {
 				return segmentStates[segmentIDs[i]].StartPosition.Timestamp < segmentStates[segmentIDs[j]].StartPosition.Timestamp
 			})
-
-			states := make([]*datapb.SegmentStateInfo, 0)
-			for _, id := range segmentIDs {
-				states = append(states, segmentStates[id])
+			toLoadSegmentIDs := make([]UniqueID, 0)
+			var watchedStartPos *internalpb.MsgPosition = nil
+			var startPosition *internalpb.MsgPosition = nil
+			for index, id := range segmentIDs {
+				if segmentStates[id].State <= commonpb.SegmentState_Growing {
+					if index > 0 {
+						pos := segmentStates[id].StartPosition
+						if len(pos.MsgID) == 0 {
+							watchedStartPos = startPosition
+							break
+						}
+					}
+					watchedStartPos = segmentStates[id].StartPosition
+					break
+				}
+				toLoadSegmentIDs = append(toLoadSegmentIDs, id)
+				watchedStartPos = segmentStates[id].EndPosition
+				startPosition = segmentStates[id].StartPosition
 			}
-			loadSegmentRequest := &querypb.LoadSegmentsRequest{
-				CollectionID:  collectionID,
-				PartitionID:   partitionID,
-				SegmentIDs:    segmentIDs,
-				SegmentStates: states,
-				Schema:        schema,
+			if watchedStartPos == nil {
+				watchedStartPos = &internalpb.MsgPosition{
+					ChannelName: channel,
+				}
 			}
-			nodeID, err := qs.replica.getAssignedNodeIDByChannelName(dbID, collectionID, channel)
+
+			err = qs.replica.addDmChannel(dbID, collectionID, channel, watchedStartPos)
 			if err != nil {
 				return fn(err), err
 			}
-			queryNode := qs.queryNodes[nodeID]
-			//TODO:: seek when loadSegment may cause more msgs consumed
-			//TODO:: all query node should load partition's msg
-			status, err := queryNode.LoadSegments(ctx, loadSegmentRequest)
+			err = qs.replica.addExcludeSegmentIDs(dbID, collectionID, toLoadSegmentIDs)
 			if err != nil {
-				return status, err
+				return fn(err), err
+			}
+
+			segment2Node := qs.shuffleSegmentsToQueryNode(toLoadSegmentIDs)
+			for nodeID, assignedSegmentIDs := range segment2Node {
+				loadSegmentRequest := &querypb.LoadSegmentsRequest{
+					CollectionID: collectionID,
+					PartitionID:  partitionID,
+					SegmentIDs:   assignedSegmentIDs,
+					Schema:       schema,
+				}
+
+				queryNode := qs.queryNodes[nodeID]
+				status, err := queryNode.LoadSegments(ctx, loadSegmentRequest)
+				if err != nil {
+					return status, err
+				}
+				queryNode.AddSegments(assignedSegmentIDs, collectionID)
 			}
 		}
 
 		qs.replica.updatePartitionState(dbID, collectionID, partitionID, querypb.PartitionState_InMemory)
 	}
 
+	if watchNeeded {
+		err = qs.watchDmChannels(dbID, collectionID)
+		if err != nil {
+			log.Debug("LoadPartitionRequest completed", zap.Int64("msgID", req.Base.MsgID), zap.Int64s("partitionIDs", partitionIDs), zap.Error(err))
+			return fn(err), err
+		}
+	}
+
 	log.Debug("LoadPartitionRequest completed", zap.Int64("msgID", req.Base.MsgID), zap.Int64s("partitionIDs", partitionIDs))
 	return &commonpb.Status{
 		ErrorCode: commonpb.ErrorCode_Success,
@@ -529,7 +569,7 @@ func (qs *QueryService) ReleasePartitions(ctx context.Context, req *querypb.Rele
 		}
 	}
 
-	log.Debug("start release partitions end")
+	log.Debug("start release partitions end", zap.String("partitionIDs", fmt.Sprintln(partitionIDs)))
 	//TODO:: queryNode cancel subscribe dmChannels
 	return &commonpb.Status{
 		ErrorCode: commonpb.ErrorCode_Success,
@@ -683,93 +723,159 @@ func (qs *QueryService) watchDmChannels(dbID UniqueID, collectionID UniqueID) er
 	}
 
 	dmChannels := resp.Values
-	watchedChannels2NodeID := make(map[string]int64)
-	unWatchedChannels := make([]string, 0)
+	channelsWithoutPos := make([]string, 0)
 	for _, channel := range dmChannels {
 		findChannel := false
-		for nodeID, node := range qs.queryNodes {
-			watchedChannels := node.dmChannelNames
-			for _, watchedChannel := range watchedChannels {
-				if channel == watchedChannel {
-					findChannel = true
-					watchedChannels2NodeID[channel] = nodeID
-					break
-				}
+		ChannelsWithPos := collection.dmChannels
+		for _, ch := range ChannelsWithPos {
+			if channel == ch {
+				findChannel = true
+				break
 			}
 		}
 		if !findChannel {
-			unWatchedChannels = append(unWatchedChannels, channel)
+			channelsWithoutPos = append(channelsWithoutPos, channel)
 		}
 	}
-	channels2NodeID := qs.shuffleChannelsToQueryNode(unWatchedChannels)
-	err = qs.replica.addDmChannels(dbID, collection.id, channels2NodeID)
-	if err != nil {
-		return err
-	}
-	err = qs.replica.addDmChannels(dbID, collection.id, watchedChannels2NodeID)
-	if err != nil {
-		return err
-	}
-	node2channels := make(map[int64][]string)
-	for channel, nodeID := range channels2NodeID {
-		if _, ok := node2channels[nodeID]; ok {
-			node2channels[nodeID] = append(node2channels[nodeID], channel)
-		} else {
-			channels := make([]string, 0)
-			channels = append(channels, channel)
-			node2channels[nodeID] = channels
+	for _, ch := range channelsWithoutPos {
+		pos := &internalpb.MsgPosition{
+			ChannelName: ch,
+		}
+		err = qs.replica.addDmChannel(dbID, collectionID, ch, pos)
+		if err != nil {
+			return err
 		}
 	}
 
-	for nodeID, channels := range node2channels {
+	channels2NodeID := qs.shuffleChannelsToQueryNode(dmChannels)
+	for nodeID, channels := range channels2NodeID {
 		node := qs.queryNodes[nodeID]
+		watchDmChannelsInfo := make([]*querypb.WatchDmChannelInfo, 0)
+		for _, ch := range channels {
+			info := &querypb.WatchDmChannelInfo{
+				ChannelID:        ch,
+				Pos:              collection.dmChannels2Pos[ch],
+				ExcludedSegments: collection.excludeSegmentIds,
+			}
+			watchDmChannelsInfo = append(watchDmChannelsInfo, info)
+		}
 		request := &querypb.WatchDmChannelsRequest{
-			ChannelIDs: channels,
+			CollectionID: collectionID,
+			ChannelIDs:   channels,
+			Infos:        watchDmChannelsInfo,
 		}
 		_, err := node.WatchDmChannels(ctx, request)
 		if err != nil {
 			return err
 		}
+		node.AddDmChannels(channels, collectionID)
 		log.Debug("query node ", zap.String("nodeID", strconv.FormatInt(nodeID, 10)), zap.String("watch channels", fmt.Sprintln(channels)))
-		node.AddDmChannels(channels)
 	}
 
 	return nil
 }
 
-func (qs *QueryService) shuffleChannelsToQueryNode(dmChannels []string) map[string]int64 {
-	maxNumDMChannel := 0
-	res := make(map[string]int64)
-	if len(dmChannels) == 0 {
-		return res
-	}
-	node2lens := make(map[int64]int)
-	for id, node := range qs.queryNodes {
-		node2lens[id] = len(node.dmChannelNames)
+func (qs *QueryService) shuffleChannelsToQueryNode(dmChannels []string) map[int64][]string {
+	maxNumChannels := 0
+	for _, node := range qs.queryNodes {
+		numChannels := 0
+		for _, chs := range node.channels2Col {
+			numChannels += len(chs)
+		}
+		if numChannels > maxNumChannels {
+			maxNumChannels = numChannels
+		}
 	}
+	res := make(map[int64][]string)
 	offset := 0
+	loopAll := false
 	for {
 		lastOffset := offset
-		for id, len := range node2lens {
-			if len >= maxNumDMChannel {
-				maxNumDMChannel = len
-			} else {
-				res[dmChannels[offset]] = id
-				node2lens[id]++
+		if !loopAll {
+			for id, node := range qs.queryNodes {
+				if len(node.segments) >= maxNumChannels {
+					continue
+				}
+				if _, ok := res[id]; !ok {
+					res[id] = make([]string, 0)
+				}
+				res[id] = append(res[id], dmChannels[offset])
+				offset++
+				if offset == len(dmChannels) {
+					return res
+				}
+			}
+		} else {
+			for id := range qs.queryNodes {
+				if _, ok := res[id]; !ok {
+					res[id] = make([]string, 0)
+				}
+				res[id] = append(res[id], dmChannels[offset])
 				offset++
+				if offset == len(dmChannels) {
+					return res
+				}
 			}
 		}
 		if lastOffset == offset {
-			for id := range node2lens {
-				res[dmChannels[offset]] = id
-				node2lens[id]++
+			loopAll = true
+		}
+	}
+}
+
+func (qs *QueryService) shuffleSegmentsToQueryNode(segmentIDs []UniqueID) map[int64][]UniqueID {
+	maxNumSegments := 0
+	for _, node := range qs.queryNodes {
+		numSegments := 0
+		for _, ids := range node.segments {
+			numSegments += len(ids)
+		}
+		if numSegments > maxNumSegments {
+			maxNumSegments = numSegments
+		}
+	}
+	res := make(map[int64][]UniqueID)
+	for nodeID := range qs.queryNodes {
+		segments := make([]UniqueID, 0)
+		res[nodeID] = segments
+	}
+
+	if len(segmentIDs) == 0 {
+		return res
+	}
+
+	offset := 0
+	loopAll := false
+	for {
+		lastOffset := offset
+		if !loopAll {
+			for id, node := range qs.queryNodes {
+				if len(node.segments) >= maxNumSegments {
+					continue
+				}
+				if _, ok := res[id]; !ok {
+					res[id] = make([]UniqueID, 0)
+				}
+				res[id] = append(res[id], segmentIDs[offset])
 				offset++
-				break
+				if offset == len(segmentIDs) {
+					return res
+				}
+			}
+		} else {
+			for id := range qs.queryNodes {
+				if _, ok := res[id]; !ok {
+					res[id] = make([]UniqueID, 0)
+				}
+				res[id] = append(res[id], segmentIDs[offset])
+				offset++
+				if offset == len(segmentIDs) {
+					return res
+				}
 			}
 		}
-		if offset == len(dmChannels) {
-			break
+		if lastOffset == offset {
+			loopAll = true
 		}
 	}
-	return res
 }
diff --git a/internal/util/flowgraph/flow_graph.go b/internal/util/flowgraph/flow_graph.go
index edae1d777af75329f6664ab4276db12d130c9d2d..70b92d00aaee79aa567781cefdee44985a1d8736 100644
--- a/internal/util/flowgraph/flow_graph.go
+++ b/internal/util/flowgraph/flow_graph.go
@@ -77,6 +77,7 @@ func (fg *TimeTickedFlowGraph) Close() {
 			}
 			(*inStream.inStream).Close()
 		}
+		// v.Close()
 	}
 }
 
diff --git a/internal/util/flowgraph/input_node.go b/internal/util/flowgraph/input_node.go
index ecffa86f235014f46b3c2652908ad72093f51250..bb9b38eaac4b88a343339fd59409430e844f5696 100644
--- a/internal/util/flowgraph/input_node.go
+++ b/internal/util/flowgraph/input_node.go
@@ -2,11 +2,9 @@ package flowgraph
 
 import (
 	"context"
-	"log"
-
-	"errors"
 
 	"github.com/opentracing/opentracing-go"
+
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/util/trace"
 )
@@ -40,8 +38,6 @@ func (inNode *InputNode) Operate(ctx context.Context, msgs []Msg) ([]Msg, contex
 
 	// TODO: add status
 	if msgPack == nil {
-		log.Println("null msg pack")
-		trace.LogError(sp, errors.New("null msg pack"))
 		return nil, ctx
 	}
 
diff --git a/internal/util/flowgraph/node.go b/internal/util/flowgraph/node.go
index a8e78c740b0628fc6fd1456e365a772ff6a49cac..e3ac1ec58443736f28c45f5a333490539b98d7aa 100644
--- a/internal/util/flowgraph/node.go
+++ b/internal/util/flowgraph/node.go
@@ -77,7 +77,7 @@ func (nodeCtx *nodeCtx) Start(ctx context.Context, wg *sync.WaitGroup) {
 				log.Println("nodeCtx.downstream length = ", len(nodeCtx.downstream))
 			}
 			if len(res) < downstreamLength {
-				log.Println("node result length = ", len(res))
+				// log.Println("node result length = ", len(res))
 				break
 			}
 
diff --git a/tests/python_test/collection/test_describe_collection.py b/tests/python_test/collection/test_describe_collection.py
index e5d958976dcd1189bbf9320eceab850d2d6b630f..89772dec5943ec1a78ebee606dcad78df92fab26 100644
--- a/tests/python_test/collection/test_describe_collection.py
+++ b/tests/python_test/collection/test_describe_collection.py
@@ -68,10 +68,11 @@ class TestDescribeCollection:
     @pytest.mark.tags("0331", "l1")
     def test_describe_collection_after_index_created(self, connect, collection, get_simple_index):
         connect.create_index(collection, default_float_vec_field_name, get_simple_index)
-        index = connect.describe_index(collection, default_float_vec_field_name)
-        assert index["index_type"] == get_simple_index["index_type"]
-        assert index["metric_type"] == get_simple_index["metric_type"]
-        assert index["params"] == get_simple_index["params"]
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, default_float_vec_field_name)
+            assert index["index_type"] == get_simple_index["index_type"]
+            assert index["metric_type"] == get_simple_index["metric_type"]
+            assert index["params"] == get_simple_index["params"]
 
     @pytest.mark.level(2)
     @pytest.mark.tags("0331")
diff --git a/tests/python_test/entity/test_insert.py b/tests/python_test/entity/test_insert.py
index d5b90ce98a53ed9f0dfadd2db797be5ef39ee080..8001e280af1a2b273dc78b36eda86e7807e2e007 100644
--- a/tests/python_test/entity/test_insert.py
+++ b/tests/python_test/entity/test_insert.py
@@ -133,8 +133,9 @@ class TestInsertBase:
         assert len(ids) == default_nb
         connect.flush([collection])
         connect.create_index(collection, field_name, get_simple_index)
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
 
     @pytest.mark.timeout(ADD_TIMEOUT)
     @pytest.mark.tags("0331", "l1")
@@ -147,8 +148,9 @@ class TestInsertBase:
         connect.create_index(collection, field_name, get_simple_index)
         ids = connect.insert(collection, default_entities)
         assert len(ids) == default_nb
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
 
     @pytest.mark.timeout(ADD_TIMEOUT)
     @pytest.mark.tags("0331", "l1")
@@ -507,9 +509,10 @@ class TestInsertBase:
         with pytest.raises(Exception):
             connect.insert(collection, tmp_entity)
 
-    @pytest.mark.level(2)
-    @pytest.mark.timeout(30)
-    @pytest.mark.tags("0331")
+# todo fix timeout
+#     @pytest.mark.level(2)
+#     @pytest.mark.timeout(30)
+#     @pytest.mark.tags("0331")
     def test_collection_insert_rows_count_multi_threading(self, args, collection):
         '''
         target: test collection rows_count is correct or not with multi threading
@@ -839,8 +842,9 @@ class TestInsertMultiCollections:
         connect.create_index(collection, field_name, get_simple_index)
         ids = connect.insert(collection_name, default_entity)
         assert len(ids) == 1
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
         connect.drop_collection(collection_name)
 
     @pytest.mark.timeout(ADD_TIMEOUT)
@@ -856,8 +860,9 @@ class TestInsertMultiCollections:
         ids = connect.insert(collection, default_entity)
         connect.flush([collection])
         connect.create_index(collection_name, field_name, get_simple_index)
-        index = connect.describe_index(collection_name, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection_name, field_name)
+            assert index == get_simple_index
         stats = connect.get_collection_stats(collection)
         assert stats[row_count] == 1
 
diff --git a/tests/python_test/entity/test_search.py b/tests/python_test/entity/test_search.py
index 50a13e3742f02c0be159e1f894a3f0fa24686e4f..b4c8345e6080cd6f02e61dfbae52ea8dc4ca5d4a 100644
--- a/tests/python_test/entity/test_search.py
+++ b/tests/python_test/entity/test_search.py
@@ -25,12 +25,12 @@ default_binary_query, default_binary_query_vecs = gen_query_vectors(binary_field
                                                                     nq)
 
 
-def init_data(connect, collection, nb=1200, partition_tags=None, auto_id=True):
+def init_data(connect, collection, nb=3000, partition_tags=None, auto_id=True):
     '''
     Generate entities and add it in collection
     '''
     global entities
-    if nb == 1200:
+    if nb == 3000:
         insert_entities = entities
     else:
         insert_entities = gen_entities(nb, is_normal=True)
@@ -48,14 +48,14 @@ def init_data(connect, collection, nb=1200, partition_tags=None, auto_id=True):
     return insert_entities, ids
 
 
-def init_binary_data(connect, collection, nb=1200, insert=True, partition_tags=None):
+def init_binary_data(connect, collection, nb=3000, insert=True, partition_tags=None):
     '''
     Generate entities and add it in collection
     '''
     ids = []
     global binary_entities
     global raw_vectors
-    if nb == 1200:
+    if nb == 3000:
         insert_entities = binary_entities
         insert_raw_vectors = raw_vectors
     else:
@@ -92,7 +92,7 @@ class TestSearchBase:
         # if str(connect._cmd("mode")) == "CPU":
         #     if request.param["index_type"] in index_cpu_not_support():
         #         pytest.skip("sq8h not support in CPU mode")
-        return request.param
+        return copy.deepcopy(request.param)
 
     @pytest.fixture(
         scope="function",
@@ -257,7 +257,6 @@ class TestSearchBase:
             assert res2[0][0].id == res[0][1].id
             assert res2[0][0].entity.get("int64") == res[0][1].entity.get("int64")
 
-    # TODO:
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_search_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
@@ -287,7 +286,7 @@ class TestSearchBase:
             assert res[0]._distances[0] < epsilon
             assert check_id_result(res[0], ids[0])
 
-    # @pytest.mark.tags("0331")
+    @pytest.mark.tags("0331")
     def test_search_after_index_different_metric_type(self, connect, collection, get_simple_index):
         '''
         target: test search with different metric_type
@@ -311,12 +310,7 @@ class TestSearchBase:
             with pytest.raises(Exception) as e:
                 res = connect.search(collection, query)
 
-    # TODO: need to enable 
-    # description: create/load/search
-    # @pytest.mark.tags("0331")
-    def _test_search_after_index_different_metric_type_2(self, connect, collection, get_simple_index):
-        pass
-
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_search_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
         '''
@@ -350,7 +344,9 @@ class TestSearchBase:
             res = connect.search(collection, query, partition_tags=[default_tag])
             assert len(res[0]) == 0
 
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
+    @pytest.mark.timeout(600)
     def test_search_index_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
         '''
         target: test basic search function, all the search params is correct, test all index params, and build
@@ -368,18 +364,19 @@ class TestSearchBase:
         connect.create_index(collection, field_name, get_simple_index)
         search_param = get_search_param(index_type)
         query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
-        for tags in [[default_tag], [default_tag, "new_tag"]]:
-            if top_k > max_top_k:
-                with pytest.raises(Exception) as e:
-                    res = connect.search(collection, query, partition_tags=tags)
-            else:
-                connect.load_partitions(collection, tags)
-                res = connect.search(collection, query, partition_tags=tags)
-                assert len(res) == nq
-                assert len(res[0]) >= top_k
-                assert res[0]._distances[0] < epsilon
-                assert check_id_result(res[0], ids[0])
+        if top_k > max_top_k:
+            with pytest.raises(Exception) as e:
+                res = connect.search(collection, query, partition_tags=[default_tag])
+        else:
+            connect.load_partitions(collection, [default_tag])
+            res = connect.search(collection, query, partition_tags=[default_tag])
+            assert len(res) == nq
+            assert len(res[0]) == top_k
+            assert res[0]._distances[0] < epsilon
+            assert check_id_result(res[0], ids[0])
 
+
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_search_index_partition_not_existed(self, connect, collection, get_top_k, get_nq, get_simple_index):
         '''
@@ -396,10 +393,9 @@ class TestSearchBase:
             with pytest.raises(Exception) as e:
                 res = connect.search(collection, query, partition_tags=["new_tag"])
         else:
-            connect.load_partitions(collection, ["new_tag"])
-            res = connect.search(collection, query, partition_tags=["new_tag"])
-            assert len(res) == nq
-            assert len(res[0]) == 0
+            connect.load_collection(collection)
+            with pytest.raises(Exception) as e:
+                connect.search(collection, query, partition_tags=["new_tag"])
 
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
@@ -473,10 +469,7 @@ class TestSearchBase:
             assert res[1]._distances[0] < epsilon
             connect.release_collection(collection)
 
-    #
-    # test for ip metric
-    #
-    # @pytest.mark.tags("0331")
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_search_ip_flat(self, connect, collection, get_simple_index, get_top_k, get_nq):
         '''
@@ -520,7 +513,7 @@ class TestSearchBase:
         assert check_id_result(res[0], ids[0])
         assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
 
-    # @pytest.mark.tags("0331")
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_search_ip_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
         '''
@@ -630,8 +623,7 @@ class TestSearchBase:
         res = connect.search(collection, query)
         assert abs(np.sqrt(res[0]._distances[0]) - min(distance_0, distance_1)) <= gen_inaccuracy(res[0]._distances[0])
 
-    # TODO
-    # @pytest.mark.tags("0331")
+    @pytest.mark.tags("0331")
     def test_search_distance_l2_after_index(self, connect, id_collection, get_simple_index):
         '''
         target: search collection, and check the result: distance
@@ -662,7 +654,7 @@ class TestSearchBase:
         # TODO:
         # assert abs(np.sqrt(res[0]._distances[0]) - min_distance) <= tmp_epsilon
 
-    # @pytest.mark.tags("0331", "l1")
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_search_distance_ip(self, connect, collection):
         '''
@@ -685,7 +677,7 @@ class TestSearchBase:
         res = connect.search(collection, query)
         assert abs(res[0]._distances[0] - max(distance_0, distance_1)) <= epsilon
 
-    # @pytest.mark.tags("0331")
+    @pytest.mark.tags("0331")
     def test_search_distance_ip_after_index(self, connect, id_collection, get_simple_index):
         '''
         target: search collection, and check the result: distance
@@ -769,7 +761,6 @@ class TestSearchBase:
         res = connect.search(binary_collection, query)
         assert abs(res[0][0].distance - min(distance_0, distance_1).astype(float)) <= epsilon
 
-    # TODO
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_search_distance_substructure_flat_index(self, connect, binary_collection):
@@ -850,7 +841,7 @@ class TestSearchBase:
         assert res[1][0].id in ids
         assert res[1][0].distance <= epsilon
 
-    # @pytest.mark.tags("0331")
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_search_distance_tanimoto_flat_index(self, connect, binary_collection):
         '''
@@ -870,7 +861,7 @@ class TestSearchBase:
 
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
-    @pytest.mark.timeout(30)
+    @pytest.mark.timeout(300)
     def test_search_concurrent_multithreads(self, connect, args):
         '''
         target: test concurrent search with multiprocessess
@@ -906,7 +897,7 @@ class TestSearchBase:
 
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
-    @pytest.mark.timeout(30)
+    @pytest.mark.timeout(300)
     def test_search_concurrent_multithreads_single_connection(self, connect, args):
         '''
         target: test concurrent search with multiprocessess
@@ -1071,6 +1062,7 @@ class TestSearchDSL(object):
     ******************************************************************
     """
 
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_term_value_not_in(self, connect, collection):
         '''
@@ -1087,7 +1079,7 @@ class TestSearchDSL(object):
         assert len(res[0]) == 0
         # TODO:
 
-    # TODO:
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_term_value_all_in(self, connect, collection):
         '''
@@ -1103,7 +1095,7 @@ class TestSearchDSL(object):
         assert len(res[0]) == 1
         # TODO:
 
-    # TODO:
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_term_values_not_in(self, connect, collection):
         '''
@@ -1120,6 +1112,7 @@ class TestSearchDSL(object):
         assert len(res[0]) == 0
         # TODO:
 
+    @pytest.mark.tags("0331")
     def test_query_term_values_all_in(self, connect, collection):
         '''
         method: build query with vector and term expr, with all term can be filtered
@@ -1139,6 +1132,7 @@ class TestSearchDSL(object):
                 assert result.id in ids[:limit]
         # TODO:
 
+    @pytest.mark.tags("0331")
     def test_query_term_values_parts_in(self, connect, collection):
         '''
         method: build query with vector and term expr, with parts of term can be filtered
@@ -1155,7 +1149,7 @@ class TestSearchDSL(object):
         assert len(res[0]) == default_top_k
         # TODO:
 
-    # TODO:
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_term_values_repeat(self, connect, collection):
         '''
@@ -1209,7 +1203,6 @@ class TestSearchDSL(object):
     ******************************************************************
     """
 
-    # TODO
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_term_key_error(self, connect, collection):
@@ -1244,7 +1237,7 @@ class TestSearchDSL(object):
         with pytest.raises(Exception) as e:
             res = connect.search(collection, query)
 
-    # TODO
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_term_field_named_term(self, connect, collection):
         '''
@@ -1266,7 +1259,7 @@ class TestSearchDSL(object):
         expr = {"must": [gen_default_vector_expr(default_query),
                          term_param]}
         query = update_query_expr(default_query, expr=expr)
-        connect.load_collection(collection)
+        connect.load_collection(collection_term)
         res = connect.search(collection_term, query)
         assert len(res) == nq
         assert len(res[0]) == default_top_k
@@ -1293,7 +1286,6 @@ class TestSearchDSL(object):
     ******************************************************************
     """
 
-    # TODO
     @pytest.mark.tags("0331", "l1")
     def test_query_range_key_error(self, connect, collection):
         '''
@@ -1313,7 +1305,6 @@ class TestSearchDSL(object):
     def get_invalid_range(self, request):
         return request.param
 
-    # TODO
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_range_wrong_format(self, connect, collection, get_invalid_range):
@@ -1343,6 +1334,7 @@ class TestSearchDSL(object):
         with pytest.raises(Exception) as e:
             res = connect.search(collection, query)
 
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_range_invalid_ranges(self, connect, collection):
         '''
@@ -1365,6 +1357,7 @@ class TestSearchDSL(object):
     def get_valid_ranges(self, request):
         return request.param
 
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_range_valid_ranges(self, connect, collection, get_valid_ranges):
         '''
@@ -1401,7 +1394,7 @@ class TestSearchDSL(object):
     ************************************************************************
     """
 
-    # TODO
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_multi_term_has_common(self, connect, collection):
         '''
@@ -1418,7 +1411,7 @@ class TestSearchDSL(object):
         assert len(res) == nq
         assert len(res[0]) == default_top_k
 
-    # TODO
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_multi_term_no_common(self, connect, collection):
         '''
@@ -1435,7 +1428,7 @@ class TestSearchDSL(object):
         assert len(res) == nq
         assert len(res[0]) == 0
 
-    # TODO
+    @pytest.mark.tags("0331")
     def test_query_multi_term_different_fields(self, connect, collection):
         '''
          method: build query with multi range with same field, and ranges no common
@@ -1452,7 +1445,6 @@ class TestSearchDSL(object):
         assert len(res) == nq
         assert len(res[0]) == 0
 
-    # TODO
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_single_term_multi_fields(self, connect, collection):
@@ -1469,7 +1461,7 @@ class TestSearchDSL(object):
         with pytest.raises(Exception) as e:
             res = connect.search(collection, query)
 
-    # TODO
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_multi_range_has_common(self, connect, collection):
         '''
@@ -1486,7 +1478,7 @@ class TestSearchDSL(object):
         assert len(res) == nq
         assert len(res[0]) == default_top_k
 
-    # TODO
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_multi_range_no_common(self, connect, collection):
         '''
@@ -1503,7 +1495,7 @@ class TestSearchDSL(object):
         assert len(res) == nq
         assert len(res[0]) == 0
 
-    # TODO
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_multi_range_different_fields(self, connect, collection):
         '''
@@ -1520,7 +1512,6 @@ class TestSearchDSL(object):
         assert len(res) == nq
         assert len(res[0]) == 0
 
-    # TODO
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_single_range_multi_fields(self, connect, collection):
@@ -1543,7 +1534,7 @@ class TestSearchDSL(object):
     ******************************************************************
     """
 
-    # TODO
+    @pytest.mark.tags("0331")
     @pytest.mark.level(2)
     def test_query_single_term_range_has_common(self, connect, collection):
         '''
@@ -1560,7 +1551,7 @@ class TestSearchDSL(object):
         assert len(res) == nq
         assert len(res[0]) == default_top_k
 
-    # TODO
+    @pytest.mark.tags("0331")
     def test_query_single_term_range_no_common(self, connect, collection):
         '''
         method: build query with single term single range
@@ -1582,7 +1573,6 @@ class TestSearchDSL(object):
     ******************************************************************
     """
 
-    # TODO
     @pytest.mark.tags("0331")
     def test_query_multi_vectors_same_field(self, connect, collection):
         '''
diff --git a/tests/python_test/requirements.txt b/tests/python_test/requirements.txt
index 2591eddcdce482e742444cd2b263a99b459c7997..14ecf9befb36661b63c5725ac18416b4b64bba42 100644
--- a/tests/python_test/requirements.txt
+++ b/tests/python_test/requirements.txt
@@ -10,6 +10,9 @@ allure-pytest==2.7.0
 pytest-print==0.2.1
 pytest-level==0.1.1
 pytest-xdist==2.2.1
-pymilvus-distributed==0.0.50
+pymilvus-distributed==0.0.54
 pytest-rerunfailures==9.1.1
 git+https://github.com/Projectplace/pytest-tags
+ndg-httpsclient
+pyopenssl
+pyasn1
diff --git a/tests/python_test/test_index.py b/tests/python_test/test_index.py
index 689c5a4e9b19067fb97dabfca99c289245fa9107..82e43fe361d71a4a69722aa397d91d09861268f1 100644
--- a/tests/python_test/test_index.py
+++ b/tests/python_test/test_index.py
@@ -57,8 +57,9 @@ class TestIndexBase:
         '''
         ids = connect.insert(collection, default_entities)
         connect.create_index(collection, field_name, get_simple_index)
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
 
     @pytest.mark.tags("0331", "l1")
     def test_create_index_on_field_not_existed(self, connect, collection, get_simple_index):
@@ -94,8 +95,9 @@ class TestIndexBase:
         expected: return search success
         '''
         connect.create_index(collection, field_name, get_simple_index)
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
 
     @pytest.mark.tags("0331")
     @pytest.mark.timeout(BUILD_TIMEOUT)
@@ -108,8 +110,9 @@ class TestIndexBase:
         connect.create_partition(collection, default_tag)
         ids = connect.insert(collection, default_entities, partition_tag=default_tag)
         connect.create_index(collection, field_name, get_simple_index)
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
 
     @pytest.mark.tags("0331", "l1")
     @pytest.mark.timeout(BUILD_TIMEOUT)
@@ -123,8 +126,9 @@ class TestIndexBase:
         ids = connect.insert(collection, default_entities, partition_tag=default_tag)
         connect.flush([collection])
         connect.create_index(collection, field_name, get_simple_index)
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
 
     @pytest.mark.tags("0331")
     def test_create_index_without_connect(self, dis_connect, collection):
@@ -169,8 +173,9 @@ class TestIndexBase:
 
         def build(connect):
             connect.create_index(collection, field_name, default_index)
-            index = connect.describe_index(collection, field_name)
-            assert index == default_index
+            if default_index["index_type"] != "FLAT":
+                index = connect.describe_index(collection, field_name)
+                assert index == default_index
 
         threads_num = 8
         threads = []
@@ -209,8 +214,9 @@ class TestIndexBase:
         connect.flush([collection])
         stats = connect.get_collection_stats(collection)
         assert stats["row_count"] == default_nb
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
 
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
@@ -223,8 +229,9 @@ class TestIndexBase:
         '''
         connect.create_index(collection, field_name, get_simple_index)
         connect.create_index(collection, field_name, get_simple_index)
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
 
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
@@ -243,7 +250,8 @@ class TestIndexBase:
             connect.release_collection(collection)
             connect.load_collection(collection)
         index = connect.describe_index(collection, field_name)
-        assert index == indexs[-1]
+        # assert index == indexs[-1]
+        assert not index    # FLAT is the last index_type, drop all indexes in server
 
     @pytest.mark.tags("0331", "l1")
     @pytest.mark.timeout(BUILD_TIMEOUT)
@@ -256,8 +264,9 @@ class TestIndexBase:
         ids = connect.insert(collection, default_entities)
         get_simple_index["metric_type"] = "IP"
         connect.create_index(collection, field_name, get_simple_index)
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
 
     @pytest.mark.tags("0331", "l1")
     @pytest.mark.timeout(BUILD_TIMEOUT)
@@ -269,8 +278,9 @@ class TestIndexBase:
         '''
         get_simple_index["metric_type"] = "IP"
         connect.create_index(collection, field_name, get_simple_index)
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
 
     @pytest.mark.tags("0331")
     @pytest.mark.timeout(BUILD_TIMEOUT)
@@ -284,8 +294,9 @@ class TestIndexBase:
         ids = connect.insert(collection, default_entities, partition_tag=default_tag)
         get_simple_index["metric_type"] = "IP"
         connect.create_index(collection, field_name, get_simple_index)
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
 
     @pytest.mark.tags("0331", "l1")
     @pytest.mark.timeout(BUILD_TIMEOUT)
@@ -300,8 +311,9 @@ class TestIndexBase:
         connect.flush([collection])
         get_simple_index["metric_type"] = "IP"
         connect.create_index(collection, field_name, get_simple_index)
-        index = connect.describe_index(collection, field_name)
-        assert index == get_simple_index
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == get_simple_index
 
     @pytest.mark.tags("0331", "l1")
     @pytest.mark.timeout(BUILD_TIMEOUT)
@@ -339,8 +351,9 @@ class TestIndexBase:
         def build(connect):
             default_index["metric_type"] = "IP"
             connect.create_index(collection, field_name, default_index)
-            index = connect.describe_index(collection, field_name)
-            assert index == default_index
+            if default_index["index_type"] != "FLAT":
+                index = connect.describe_index(collection, field_name)
+                assert index == default_index
 
         threads_num = 8
         threads = []
@@ -380,8 +393,9 @@ class TestIndexBase:
         connect.flush([collection])
         stats = connect.get_collection_stats(collection)
         assert stats["row_count"] == default_nb
-        index = connect.describe_index(collection, field_name)
-        assert index == default_index
+        if default_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == default_index
 
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
@@ -395,8 +409,9 @@ class TestIndexBase:
         default_index["metric_type"] = "IP"
         connect.create_index(collection, field_name, default_index)
         connect.create_index(collection, field_name, default_index)
-        index = connect.describe_index(collection, field_name)
-        assert index == default_index
+        if default_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, field_name)
+            assert index == default_index
 
     @pytest.mark.tags("0331")
     @pytest.mark.level(2)
@@ -419,7 +434,8 @@ class TestIndexBase:
             connect.release_collection(collection)
             connect.load_collection(collection)
         index = connect.describe_index(collection, field_name)
-        assert index == indexs[-1]
+        # assert index == indexs[-1]
+        assert not index
 
     """
     ******************************************************************