Commit 01e9dc8e authored by Yihao Dai, committed by yefu.chen

Remove collection name


Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
parent f3aad3a7
Showing 212 additions and 225 deletions
@@ -16,7 +16,6 @@
 #include "knowhere/index/vector_index/VecIndex.h"

 struct LoadIndexInfo {
-    std::string field_name;
     int64_t field_id;
     std::map<std::string, std::string> index_params;
     milvus::knowhere::VecIndexPtr index;
...
@@ -59,11 +59,9 @@ AppendIndexParam(CLoadIndexInfo c_load_index_info, const char* c_index_key, cons
 }

 CStatus
-AppendFieldInfo(CLoadIndexInfo c_load_index_info, const char* c_field_name, int64_t field_id) {
+AppendFieldInfo(CLoadIndexInfo c_load_index_info, int64_t field_id) {
     try {
         auto load_index_info = (LoadIndexInfo*)c_load_index_info;
-        std::string field_name(c_field_name);
-        load_index_info->field_name = field_name;
         load_index_info->field_id = field_id;
         auto status = CStatus();
@@ -97,7 +95,6 @@ AppendIndex(CLoadIndexInfo c_load_index_info, CBinarySet c_binary_set) {
     load_index_info->index =
         milvus::knowhere::VecIndexFactory::GetInstance().CreateVecIndex(index_params["index_type"], mode);
     load_index_info->index->Load(*binary_set);
     auto status = CStatus();
     status.error_code = Success;
     status.error_msg = "";
...
@@ -33,7 +33,7 @@
 CStatus
 AppendIndexParam(CLoadIndexInfo c_load_index_info, const char* index_key, const char* index_value);

 CStatus
-AppendFieldInfo(CLoadIndexInfo c_load_index_info, const char* field_name, int64_t field_id);
+AppendFieldInfo(CLoadIndexInfo c_load_index_info, int64_t field_id);

 CStatus
 AppendIndex(CLoadIndexInfo c_load_index_info, CBinarySet c_binary_set);
...
@@ -781,7 +781,7 @@ TEST(CApiTest, LoadIndexInfo) {
     status = AppendIndexParam(c_load_index_info, index_param_key2.data(), index_param_value2.data());
     assert(status.error_code == Success);
     std::string field_name = "field0";
-    status = AppendFieldInfo(c_load_index_info, field_name.data(), 0);
+    status = AppendFieldInfo(c_load_index_info, 0);
     assert(status.error_code == Success);
     status = AppendIndex(c_load_index_info, c_binary_set);
     assert(status.error_code == Success);
@@ -937,7 +937,7 @@ TEST(CApiTest, UpdateSegmentIndex_Without_Predicate) {
     AppendIndexParam(c_load_index_info, index_type_key.c_str(), index_type_value.c_str());
     AppendIndexParam(c_load_index_info, index_mode_key.c_str(), index_mode_value.c_str());
     AppendIndexParam(c_load_index_info, metric_type_key.c_str(), metric_type_value.c_str());
-    AppendFieldInfo(c_load_index_info, "fakevec", 100);
+    AppendFieldInfo(c_load_index_info, 100);
     AppendIndex(c_load_index_info, (CBinarySet)&binary_set);

     status = UpdateSegmentIndex(segment, c_load_index_info);
@@ -1074,7 +1074,7 @@ TEST(CApiTest, UpdateSegmentIndex_With_float_Predicate_Range) {
     AppendIndexParam(c_load_index_info, index_type_key.c_str(), index_type_value.c_str());
     AppendIndexParam(c_load_index_info, index_mode_key.c_str(), index_mode_value.c_str());
     AppendIndexParam(c_load_index_info, metric_type_key.c_str(), metric_type_value.c_str());
-    AppendFieldInfo(c_load_index_info, "fakevec", 100);
+    AppendFieldInfo(c_load_index_info, 100);
     AppendIndex(c_load_index_info, (CBinarySet)&binary_set);

     status = UpdateSegmentIndex(segment, c_load_index_info);
@@ -1211,7 +1211,7 @@ TEST(CApiTest, UpdateSegmentIndex_With_float_Predicate_Term) {
     AppendIndexParam(c_load_index_info, index_type_key.c_str(), index_type_value.c_str());
     AppendIndexParam(c_load_index_info, index_mode_key.c_str(), index_mode_value.c_str());
     AppendIndexParam(c_load_index_info, metric_type_key.c_str(), metric_type_value.c_str());
-    AppendFieldInfo(c_load_index_info, "fakevec", 100);
+    AppendFieldInfo(c_load_index_info, 100);
     AppendIndex(c_load_index_info, (CBinarySet)&binary_set);

     status = UpdateSegmentIndex(segment, c_load_index_info);
@@ -1350,7 +1350,7 @@ TEST(CApiTest, UpdateSegmentIndex_With_binary_Predicate_Range) {
     AppendIndexParam(c_load_index_info, index_type_key.c_str(), index_type_value.c_str());
     AppendIndexParam(c_load_index_info, index_mode_key.c_str(), index_mode_value.c_str());
     AppendIndexParam(c_load_index_info, metric_type_key.c_str(), metric_type_value.c_str());
-    AppendFieldInfo(c_load_index_info, "fakevec", 100);
+    AppendFieldInfo(c_load_index_info, 100);
     AppendIndex(c_load_index_info, (CBinarySet)&binary_set);

     status = UpdateSegmentIndex(segment, c_load_index_info);
@@ -1488,7 +1488,7 @@ TEST(CApiTest, UpdateSegmentIndex_With_binary_Predicate_Term) {
     AppendIndexParam(c_load_index_info, index_type_key.c_str(), index_type_value.c_str());
     AppendIndexParam(c_load_index_info, index_mode_key.c_str(), index_mode_value.c_str());
     AppendIndexParam(c_load_index_info, metric_type_key.c_str(), metric_type_value.c_str());
-    AppendFieldInfo(c_load_index_info, "fakevec", 100);
+    AppendFieldInfo(c_load_index_info, 100);
     AppendIndex(c_load_index_info, (CBinarySet)&binary_set);

     status = UpdateSegmentIndex(segment, c_load_index_info);
@@ -1665,7 +1665,7 @@ TEST(CApiTest, SealedSegment_search_float_Predicate_Range) {
     AppendIndexParam(c_load_index_info, index_type_key.c_str(), index_type_value.c_str());
     AppendIndexParam(c_load_index_info, index_mode_key.c_str(), index_mode_value.c_str());
     AppendIndexParam(c_load_index_info, metric_type_key.c_str(), metric_type_value.c_str());
-    AppendFieldInfo(c_load_index_info, "fakevec", 100);
+    AppendFieldInfo(c_load_index_info, 100);
     AppendIndex(c_load_index_info, (CBinarySet)&binary_set);

     auto load_index_info = (LoadIndexInfo*)c_load_index_info;
...
@@ -105,7 +105,6 @@ TEST(Sealed, without_predicate) {
     auto ref_result = QueryResultToJson(qr);

     LoadIndexInfo load_info;
-    load_info.field_name = "fakevec";
     load_info.field_id = fake_id.get();
     load_info.index = indexing;
     load_info.index_params["metric_type"] = "L2";
@@ -198,7 +197,6 @@ TEST(Sealed, with_predicate) {
     auto result = indexing->Query(query_dataset, conf, nullptr);

     LoadIndexInfo load_info;
-    load_info.field_name = "fakevec";
     load_info.field_id = fake_id.get();
     load_info.index = indexing;
     load_info.index_params["metric_type"] = "L2";
@@ -312,7 +310,6 @@ TEST(Sealed, LoadFieldData) {
     LoadIndexInfo vec_info;
     vec_info.field_id = fakevec_id.get();
-    vec_info.field_name = "fakevec";
     vec_info.index = indexing;
     vec_info.index_params["metric_type"] = milvus::knowhere::Metric::L2;
     segment->LoadIndex(vec_info);
...
@@ -98,7 +98,7 @@ func TestGrpcService(t *testing.T) {
 	var binlogLock sync.Mutex
 	binlogPathArray := make([]string, 0, 16)
-	core.BuildIndexReq = func(binlog []string, typeParams []*commonpb.KeyValuePair, indexParams []*commonpb.KeyValuePair) (typeutil.UniqueID, error) {
+	core.BuildIndexReq = func(binlog []string, typeParams []*commonpb.KeyValuePair, indexParams []*commonpb.KeyValuePair, indexID typeutil.UniqueID, indexName string) (typeutil.UniqueID, error) {
 		binlogLock.Lock()
 		defer binlogLock.Unlock()
 		binlogPathArray = append(binlogPathArray, binlog...)
...
@@ -247,7 +247,7 @@ func (it *IndexBuildTask) Execute() error {
 	}

 	var indexCodec storage.IndexCodec
-	serializedIndexBlobs, err := indexCodec.Serialize(getStorageBlobs(indexBlobs), indexParams)
+	serializedIndexBlobs, err := indexCodec.Serialize(getStorageBlobs(indexBlobs), indexParams, it.cmd.Req.IndexName, it.cmd.Req.IndexID)
 	if err != nil {
 		return err
 	}
...
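
The codec now receives the index's identity alongside its blobs. As a rough illustration of why that matters, a serializer can tag the blob set so that a later deserialization step can tell which index the blobs belong to. The helper and blob key below are hypothetical, not the actual storage.IndexCodec internals:

package storage

import "fmt"

// Blob mirrors the key/value shape that index blobs travel in.
type Blob struct {
	Key   string
	Value []byte
}

// serializeWithIdentity is a hypothetical sketch: it appends one extra
// blob carrying the index name and ID that Serialize now receives.
func serializeWithIdentity(blobs []*Blob, indexName string, indexID int64) []*Blob {
	meta := []byte(fmt.Sprintf(`{"indexName":%q,"indexID":%d}`, indexName, indexID))
	return append(blobs, &Blob{Key: "indexMeta", Value: meta})
}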
@@ -152,7 +152,7 @@ type Core struct {
 	GetBinlogFilePathsFromDataServiceReq func(segID typeutil.UniqueID, fieldID typeutil.UniqueID) ([]string, error)

 	//TODO, call index builder's client to build index, return build id
-	BuildIndexReq func(binlog []string, typeParams []*commonpb.KeyValuePair, indexParams []*commonpb.KeyValuePair) (typeutil.UniqueID, error)
+	BuildIndexReq func(binlog []string, typeParams []*commonpb.KeyValuePair, indexParams []*commonpb.KeyValuePair, indexID typeutil.UniqueID, indexName string) (typeutil.UniqueID, error)

 	//TODO, proxy service interface, notify proxy service to drop collection
 	InvalidateCollectionMetaCache func(ts typeutil.Timestamp, dbName string, collectionName string) error
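
Anything that plugs into this hook, including test doubles, must now accept the extra indexID and indexName arguments even if it ignores them. A minimal sketch; the package name and import paths are assumptions:

package master

import (
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)

// buildIndexStub matches the new BuildIndexReq signature. A stub like
// this can stand in when no index service is wired up; a real
// implementation forwards indexID and indexName onward.
func buildIndexStub(binlog []string, typeParams []*commonpb.KeyValuePair,
	indexParams []*commonpb.KeyValuePair, indexID typeutil.UniqueID,
	indexName string) (typeutil.UniqueID, error) {
	_, _ = indexID, indexName        // ignored by the stub
	return typeutil.UniqueID(1), nil // arbitrary build ID
}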
@@ -671,11 +671,13 @@ func (c *Core) SetDataService(s DataServiceInterface) error {
 }

 func (c *Core) SetIndexService(s IndexServiceInterface) error {
-	c.BuildIndexReq = func(binlog []string, typeParams []*commonpb.KeyValuePair, indexParams []*commonpb.KeyValuePair) (typeutil.UniqueID, error) {
+	c.BuildIndexReq = func(binlog []string, typeParams []*commonpb.KeyValuePair, indexParams []*commonpb.KeyValuePair, indexID typeutil.UniqueID, indexName string) (typeutil.UniqueID, error) {
 		rsp, err := s.BuildIndex(&indexpb.BuildIndexRequest{
 			DataPaths:   binlog,
 			TypeParams:  typeParams,
 			IndexParams: indexParams,
+			IndexID:     indexID,
+			IndexName:   indexName,
 		})
 		if err != nil {
 			return 0, err
...
@@ -628,7 +628,7 @@ func (t *CreateIndexTask) BuildIndex() error {
 			})
 		}
 	}
-	bldID, err = t.core.BuildIndexReq(binlogs, t.fieldSchema.TypeParams, t.indexParams)
+	bldID, err = t.core.BuildIndexReq(binlogs, t.fieldSchema.TypeParams, t.indexParams, idxID, t.indexName)
 	if err != nil {
 		return err
 	}
...
@@ -116,9 +116,16 @@ message InsertRequest {
 message SearchRequest {
   common.MsgBase base = 1;
   string result_channelID = 2;
-  common.Blob query = 3;
+  int64 dbID = 3;
+  int64 collectionID = 4;
+  repeated int64 partitionIDs = 5;
+  string dsl = 6;
+  // serialized `PlaceholderGroup`
+  bytes placeholder_group = 7;
+  common.Blob query = 8;
 }

 message SearchResults {
   common.MsgBase base = 1;
   common.Status status = 2;
...
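
On the Go side, a search message is now addressed by IDs and carries its vectors as a serialized PlaceholderGroup rather than a single opaque query blob. A sketch of the producer side, assuming the generated package name internalpb2; the variables are placeholders supplied by the caller:

req := &internalpb2.SearchRequest{
	Base:             searchMsgBase,         // *commonpb.MsgBase prepared by the caller
	ResultChannelID:  "searchResult-0",
	DbID:             0,
	CollectionID:     collectionID,          // resolved from meta; no collection name anymore
	PartitionIDs:     []int64{partitionID},  // which partitions to search
	Dsl:              dslString,
	PlaceholderGroup: placeholderGroupBytes, // proto-serialized PlaceholderGroup
}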
This diff is collapsed.
@@ -23,9 +23,9 @@ type Collection struct {
 	partitions []*Partition
 }

-func (c *Collection) Name() string {
-	return c.schema.Name
-}
+//func (c *Collection) Name() string {
+//	return c.schema.Name
+//}

 func (c *Collection) ID() UniqueID {
 	return c.id
@@ -43,9 +43,6 @@ func newCollection(collectionID UniqueID, schema *schemapb.CollectionSchema) *Co
 	/*
 		CCollection
 		NewCollection(const char* schema_proto_blob);
-
-		const char*
-		GetCollectionName(CCollection collection);
 	*/
 	schemaBlob := proto.MarshalTextString(schema)
...
@@ -39,9 +39,8 @@ type collectionReplica interface {
 	addCollection(collectionID UniqueID, schema *schemapb.CollectionSchema) error
 	removeCollection(collectionID UniqueID) error
 	getCollectionByID(collectionID UniqueID) (*Collection, error)
-	getCollectionByName(collectionName string) (*Collection, error)
 	hasCollection(collectionID UniqueID) bool
-	getVecFieldsByCollectionID(collectionID UniqueID) (map[int64]string, error)
+	getVecFieldsByCollectionID(collectionID UniqueID) ([]int64, error)

 	// partition
 	// Partition tags in different collections are not unique,
@@ -150,19 +149,6 @@ func (colReplica *collectionReplicaImpl) getCollectionByIDPrivate(collectionID U
 	return nil, errors.New("cannot find collection, id = " + strconv.FormatInt(collectionID, 10))
 }

-func (colReplica *collectionReplicaImpl) getCollectionByName(collectionName string) (*Collection, error) {
-	colReplica.mu.RLock()
-	defer colReplica.mu.RUnlock()
-
-	for _, collection := range colReplica.collections {
-		if collection.Name() == collectionName {
-			return collection, nil
-		}
-	}
-
-	return nil, errors.New("Cannot found collection: " + collectionName)
-}
-
 func (colReplica *collectionReplicaImpl) hasCollection(collectionID UniqueID) bool {
 	colReplica.mu.RLock()
 	defer colReplica.mu.RUnlock()
@@ -175,7 +161,7 @@ func (colReplica *collectionReplicaImpl) hasCollection(collectionID UniqueID) bo
 	return false
 }

-func (colReplica *collectionReplicaImpl) getVecFieldsByCollectionID(collectionID UniqueID) (map[int64]string, error) {
+func (colReplica *collectionReplicaImpl) getVecFieldsByCollectionID(collectionID UniqueID) ([]int64, error) {
 	colReplica.mu.RLock()
 	defer colReplica.mu.RUnlock()
@@ -184,10 +170,10 @@ func (colReplica *collectionReplicaImpl) getVecFieldsByCollectionID(collectionID
 	if err != nil {
 		return nil, err
 	}

-	vecFields := make(map[int64]string)
+	vecFields := make([]int64, 0)
 	for _, field := range col.Schema().Fields {
 		if field.DataType == schemapb.DataType_VECTOR_BINARY || field.DataType == schemapb.DataType_VECTOR_FLOAT {
-			vecFields[field.FieldID] = field.Name
+			vecFields = append(vecFields, field.FieldID)
 		}
 	}
...
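
Callers that used to range over the field-ID-to-name map now get only the IDs; a sketch of the consuming side (replica and the collection ID are illustrative):

// The name half of the old map[int64]string is gone; everything
// downstream is keyed purely by field ID.
vecFieldIDs, err := replica.getVecFieldsByCollectionID(UniqueID(0))
if err != nil {
	log.Println(err)
	return
}
for _, fieldID := range vecFieldIDs {
	fmt.Println("vector field:", fieldID)
}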
@@ -9,20 +9,20 @@ import (
 //----------------------------------------------------------------------------------------------------- collection
 func TestCollectionReplica_getCollectionNum(t *testing.T) {
 	node := newQueryNodeMock()
-	initTestMeta(t, node, "collection0", 0, 0)
+	initTestMeta(t, node, 0, 0)
 	assert.Equal(t, node.replica.getCollectionNum(), 1)
 	node.Stop()
 }

 func TestCollectionReplica_addCollection(t *testing.T) {
 	node := newQueryNodeMock()
-	initTestMeta(t, node, "collection0", 0, 0)
+	initTestMeta(t, node, 0, 0)
 	node.Stop()
 }

 func TestCollectionReplica_removeCollection(t *testing.T) {
 	node := newQueryNodeMock()
-	initTestMeta(t, node, "collection0", 0, 0)
+	initTestMeta(t, node, 0, 0)
 	assert.Equal(t, node.replica.getCollectionNum(), 1)

 	err := node.replica.removeCollection(0)
@@ -33,37 +33,19 @@ func TestCollectionReplica_removeCollection(t *testing.T) {
 func TestCollectionReplica_getCollectionByID(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)
 	targetCollection, err := node.replica.getCollectionByID(collectionID)
 	assert.NoError(t, err)
 	assert.NotNil(t, targetCollection)
-	assert.Equal(t, targetCollection.Name(), collectionName)
 	assert.Equal(t, targetCollection.ID(), collectionID)
 	node.Stop()
 }

-func TestCollectionReplica_getCollectionByName(t *testing.T) {
-	node := newQueryNodeMock()
-	collectionName := "collection0"
-	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
-
-	targetCollection, err := node.replica.getCollectionByName(collectionName)
-	assert.NoError(t, err)
-	assert.NotNil(t, targetCollection)
-	assert.Equal(t, targetCollection.Name(), collectionName)
-	assert.Equal(t, targetCollection.ID(), collectionID)
-	node.Stop()
-}
-
 func TestCollectionReplica_hasCollection(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

 	hasCollection := node.replica.hasCollection(collectionID)
 	assert.Equal(t, hasCollection, true)
@@ -76,9 +58,8 @@ func TestCollectionReplica_hasCollection(t *testing.T) {
 //----------------------------------------------------------------------------------------------------- partition
 func TestCollectionReplica_getPartitionNum(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

 	partitionTags := []string{"a", "b", "c"}
 	for _, tag := range partitionTags {
@@ -97,9 +78,8 @@ func TestCollectionReplica_getPartitionNum(t *testing.T) {
 func TestCollectionReplica_addPartition(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

 	partitionTags := []string{"a", "b", "c"}
 	for _, tag := range partitionTags {
@@ -114,9 +94,8 @@ func TestCollectionReplica_addPartition(t *testing.T) {
 func TestCollectionReplica_removePartition(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

 	partitionTags := []string{"a", "b", "c"}
@@ -134,11 +113,10 @@ func TestCollectionReplica_removePartition(t *testing.T) {
 func TestCollectionReplica_addPartitionsByCollectionMeta(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

-	collectionMeta := genTestCollectionMeta(collectionName, collectionID, false)
+	collectionMeta := genTestCollectionMeta(collectionID, false)
 	collectionMeta.PartitionTags = []string{"p0", "p1", "p2"}
 	err := node.replica.addPartitionsByCollectionMeta(collectionMeta)
@@ -158,11 +136,10 @@ func TestCollectionReplica_addPartitionsByCollectionMeta(t *testing.T) {
 func TestCollectionReplica_removePartitionsByCollectionMeta(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

-	collectionMeta := genTestCollectionMeta(collectionName, collectionID, false)
+	collectionMeta := genTestCollectionMeta(collectionID, false)
 	collectionMeta.PartitionTags = []string{"p0"}
 	err := node.replica.addPartitionsByCollectionMeta(collectionMeta)
@@ -183,11 +160,10 @@ func TestCollectionReplica_removePartitionsByCollectionMeta(t *testing.T) {
 func TestCollectionReplica_getPartitionByTag(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

-	collectionMeta := genTestCollectionMeta(collectionName, collectionID, false)
+	collectionMeta := genTestCollectionMeta(collectionID, false)

 	for _, tag := range collectionMeta.PartitionTags {
 		err := node.replica.addPartition2(collectionID, tag)
@@ -202,11 +178,10 @@ func TestCollectionReplica_getPartitionByTag(t *testing.T) {
 func TestCollectionReplica_hasPartition(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

-	collectionMeta := genTestCollectionMeta(collectionName, collectionID, false)
+	collectionMeta := genTestCollectionMeta(collectionID, false)
 	err := node.replica.addPartition2(collectionID, collectionMeta.PartitionTags[0])
 	assert.NoError(t, err)
 	hasPartition := node.replica.hasPartition(collectionID, "default")
@@ -219,9 +194,8 @@ func TestCollectionReplica_hasPartition(t *testing.T) {
 //----------------------------------------------------------------------------------------------------- segment
 func TestCollectionReplica_addSegment(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

 	const segmentNum = 3
 	tag := "default"
@@ -238,9 +212,8 @@ func TestCollectionReplica_addSegment(t *testing.T) {
 func TestCollectionReplica_removeSegment(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

 	const segmentNum = 3
 	tag := "default"
@@ -260,9 +233,8 @@ func TestCollectionReplica_removeSegment(t *testing.T) {
 func TestCollectionReplica_getSegmentByID(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

 	const segmentNum = 3
 	tag := "default"
@@ -280,9 +252,8 @@ func TestCollectionReplica_getSegmentByID(t *testing.T) {
 func TestCollectionReplica_hasSegment(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

 	const segmentNum = 3
 	tag := "default"
@@ -304,9 +275,8 @@ func TestCollectionReplica_hasSegment(t *testing.T) {
 func TestCollectionReplica_freeAll(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

 	node.Stop()
...
@@ -8,11 +8,10 @@ import (
 func TestCollection_Partitions(t *testing.T) {
 	node := newQueryNodeMock()
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	initTestMeta(t, node, collectionName, collectionID, 0)
+	initTestMeta(t, node, collectionID, 0)

-	collection, err := node.replica.getCollectionByName(collectionName)
+	collection, err := node.replica.getCollectionByID(collectionID)
 	assert.NoError(t, err)

 	partitions := collection.Partitions()
@@ -20,22 +19,18 @@ func TestCollection_Partitions(t *testing.T) {
 }

 func TestCollection_newCollection(t *testing.T) {
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	collectionMeta := genTestCollectionMeta(collectionName, collectionID, false)
+	collectionMeta := genTestCollectionMeta(collectionID, false)

 	collection := newCollection(collectionMeta.ID, collectionMeta.Schema)
-	assert.Equal(t, collection.Name(), collectionName)
 	assert.Equal(t, collection.ID(), collectionID)
 }

 func TestCollection_deleteCollection(t *testing.T) {
-	collectionName := "collection0"
 	collectionID := UniqueID(0)
-	collectionMeta := genTestCollectionMeta(collectionName, collectionID, false)
+	collectionMeta := genTestCollectionMeta(collectionID, false)

 	collection := newCollection(collectionMeta.ID, collectionMeta.Schema)
-	assert.Equal(t, collection.Name(), collectionName)
 	assert.Equal(t, collection.ID(), collectionID)

 	deleteCollection(collection)
 }
@@ -16,7 +16,7 @@ import (
 // NOTE: start pulsar before test
 func TestDataSyncService_Start(t *testing.T) {
 	node := newQueryNodeMock()
-	initTestMeta(t, node, "collection0", 0, 0)
+	initTestMeta(t, node, 0, 0)
 	// test data generate
 	const msgLength = 10
 	const DIM = 16
@@ -61,12 +61,12 @@ func TestDataSyncService_Start(t *testing.T) {
 					Timestamp: uint64(i + 1000),
 					SourceID:  0,
 				},
-				CollectionName: "collection0",
-				PartitionName:  "default",
-				SegmentID:      int64(0),
-				ChannelID:      "0",
-				Timestamps:     []uint64{uint64(i + 1000), uint64(i + 1000)},
-				RowIDs:         []int64{int64(i), int64(i)},
+				CollectionID:  UniqueID(0),
+				PartitionName: "default",
+				SegmentID:     int64(0),
+				ChannelID:     "0",
+				Timestamps:    []uint64{uint64(i + 1000), uint64(i + 1000)},
+				RowIDs:        []int64{int64(i), int64(i)},
 				RowData: []*commonpb.Blob{
 					{Value: rawData},
 					{Value: rawData},
...
@@ -37,7 +37,7 @@ func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
 	}

 	var ddMsg = ddMsg{
-		collectionRecords: make(map[string][]metaOperateRecord),
+		collectionRecords: make(map[UniqueID][]metaOperateRecord),
 		partitionRecords:  make(map[string][]metaOperateRecord),
 		timeRange: TimeRange{
 			timestampMin: msMsg.TimestampMin(),
@@ -108,8 +108,7 @@ func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
 		return
 	}

-	collectionName := schema.Name
-	ddNode.ddMsg.collectionRecords[collectionName] = append(ddNode.ddMsg.collectionRecords[collectionName],
+	ddNode.ddMsg.collectionRecords[collectionID] = append(ddNode.ddMsg.collectionRecords[collectionID],
 		metaOperateRecord{
 			createOrDrop: true,
 			timestamp:    msg.Base.Timestamp,
@@ -125,8 +124,7 @@ func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
 	//	return
 	//}

-	collectionName := msg.CollectionName
-	ddNode.ddMsg.collectionRecords[collectionName] = append(ddNode.ddMsg.collectionRecords[collectionName],
+	ddNode.ddMsg.collectionRecords[collectionID] = append(ddNode.ddMsg.collectionRecords[collectionID],
 		metaOperateRecord{
 			createOrDrop: false,
 			timestamp:    msg.Base.Timestamp,
...
@@ -110,7 +110,7 @@ func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg
 	//}

 	// No dd record, do all insert requests.
-	records, ok := fdmNode.ddMsg.collectionRecords[msg.CollectionName]
+	records, ok := fdmNode.ddMsg.collectionRecords[msg.CollectionID]
 	if !ok {
 		return msg
 	}
...
@@ -81,12 +81,7 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
 		// check if segment exists, if not, create this segment
 		if !iNode.replica.hasSegment(task.SegmentID) {
-			collection, err := iNode.replica.getCollectionByName(task.CollectionName)
-			if err != nil {
-				log.Println(err)
-				continue
-			}
-			err = iNode.replica.addSegment2(task.SegmentID, task.PartitionName, collection.ID(), segTypeGrowing)
+			err := iNode.replica.addSegment2(task.SegmentID, task.PartitionName, task.CollectionID, segTypeGrowing)
 			if err != nil {
 				log.Println(err)
 				continue
...
@@ -14,7 +14,7 @@ type key2SegMsg struct {
 }

 type ddMsg struct {
-	collectionRecords map[string][]metaOperateRecord
+	collectionRecords map[UniqueID][]metaOperateRecord
 	partitionRecords  map[string][]metaOperateRecord
 	gcRecord          *gcRecord
 	timeRange         TimeRange
...
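
Taken together, the flow-graph changes make the collection ID the only key: ddNode writes records under it and filterDmNode reads them back via msg.CollectionID. An illustrative round trip, not actual flow-graph code:

// Illustrative only: create/drop records keyed by collection ID.
records := make(map[UniqueID][]metaOperateRecord)

collectionID := UniqueID(0)
records[collectionID] = append(records[collectionID],
	metaOperateRecord{createOrDrop: true, timestamp: 1000})

// filterDmNode side: look up by the insert message's CollectionID.
if recs, ok := records[collectionID]; ok {
	_ = recs // decide whether the insert message should pass the filter
}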