Commit 854accf9 authored by neza2017, committed by yefu.chen

Remove master and writenode


Signed-off-by: neza2017 <yefu.chen@zilliz.com>
parent d5346e13
Showing with 0 additions and 1943 deletions
@@ -13,10 +13,6 @@ dir ('build/docker/deploy') {
withCredentials([usernamePassword(credentialsId: "${env.DOCKER_CREDENTIALS_ID}", usernameVariable: 'DOCKER_USERNAME', passwordVariable: 'DOCKER_PASSWORD')]) {
sh 'docker login -u ${DOCKER_USERNAME} -p ${DOCKER_PASSWORD} ${DOKCER_REGISTRY_URL}'
sh 'docker pull ${SOURCE_REPO}/master:${SOURCE_TAG} || true'
sh 'docker-compose build --force-rm master'
sh 'docker-compose push master'
sh 'docker pull ${SOURCE_REPO}/proxyservice:${SOURCE_TAG} || true'
sh 'docker-compose build --force-rm proxyservice'
sh 'docker-compose push proxyservice'
@@ -5,7 +5,6 @@ try {
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d minio'
dir ('build/docker/deploy') {
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} pull'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d master'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d proxyservice'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d proxynode'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d indexservice'
@@ -78,11 +78,6 @@ endif
verifiers: getdeps cppcheck fmt static-check ruleguard
master: build-cpp
@echo "Building each component's binary to './bin'"
@echo "Building masterservice ..."
@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/masterservice $(PWD)/cmd/masterservice/main.go 1>/dev/null
# Builds various components locally.
proxynode: build-cpp
@@ -96,13 +91,6 @@ querynode: build-cpp
@echo "Building query node ..."
@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/querynode $(PWD)/cmd/querynode/querynode.go 1>/dev/null
# Builds various components locally.
writenode: build-cpp
@echo "Building each component's binary to './bin'"
@echo "Building write node ..."
@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/writenode $(PWD)/cmd/writenode/writenode.go 1>/dev/null
# Builds various components locally.
datanode: build-cpp
@echo "Building each component's binary to './bin'"
@@ -134,8 +122,6 @@ build-go: build-cpp
@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/proxynode $(PWD)/cmd/proxy/node/proxy_node.go 1>/dev/null
@echo "Building query service ..."
@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/queryservice $(PWD)/cmd/queryservice/queryservice.go 1>/dev/null
@echo "Building query node ..."
@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/writenode $(PWD)/cmd/writenode/writenode.go 1>/dev/null
@echo "Building binlog ..."
@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/binlog $(PWD)/cmd/binlog/main.go 1>/dev/null
@echo "Building singlenode ..."
@@ -179,17 +165,14 @@ test-cpp: build-cpp-with-unittest
docker: verifiers
@echo "Building query node docker image '$(TAG)'"
@echo "Building proxy docker image '$(TAG)'"
@echo "Building master docker image '$(TAG)'"
# Builds each component and installs it to $GOPATH/bin.
install: all
@echo "Installing binary to './bin'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/queryservice $(GOPATH)/bin/queryservice
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/querynode $(GOPATH)/bin/querynode
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/master $(GOPATH)/bin/master
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/proxynode $(GOPATH)/bin/proxynode
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/proxyservice $(GOPATH)/bin/proxyservice
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/writenode $(GOPATH)/bin/writenode
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/singlenode $(GOPATH)/bin/singlenode
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/indexservice $(GOPATH)/bin/indexservice
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/indexnode $(GOPATH)/bin/indexnode
@@ -202,12 +185,10 @@ clean:
@find . -name '*~' | xargs rm -fv
@rm -rf bin/
@rm -rf lib/
@rm -rf $(GOPATH)/bin/master
@rm -rf $(GOPATH)/bin/proxynode
@rm -rf $(GOPATH)/bin/proxyservice
@rm -rf $(GOPATH)/bin/queryservice
@rm -rf $(GOPATH)/bin/querynode
@rm -rf $(GOPATH)/bin/writenode
@rm -rf $(GOPATH)/bin/singlenode
@rm -rf $(GOPATH)/bin/indexservice
@rm -rf $(GOPATH)/bin/indexnode
version: '3.5'
services:
master:
image: ${TARGET_REPO}/master:${TARGET_TAG}
build:
context: ../../../
dockerfile: build/docker/deploy/master/DockerFile
cache_from:
- ${SOURCE_REPO}/master:${SOURCE_TAG}
environment:
PULSAR_ADDRESS: ${PULSAR_ADDRESS}
ETCD_ADDRESS: ${ETCD_ADDRESS}
INDEX_SERVICE_ADDRESS: ${INDEX_SERVICE_ADDRESS}
networks:
- milvus
proxyservice:
image: ${TARGET_REPO}/proxyservice:${TARGET_TAG}
build:
@@ -70,21 +56,6 @@ services:
networks:
- milvus
# writenode:
# image: ${TARGET_REPO}/writenode:${TARGET_TAG}
# build:
# context: ../../../
# dockerfile: build/docker/deploy/writenode/DockerFile
# cache_from:
# - ${SOURCE_REPO}/writenode:${SOURCE_TAG}
# environment:
# PULSAR_ADDRESS: ${PULSAR_ADDRESS}
# ETCD_ADDRESS: ${ETCD_ADDRESS}
# MASTER_ADDRESS: ${MASTER_ADDRESS}
# MINIO_ADDRESS: ${MINIO_ADDRESS}
# networks:
# - milvus
datanode:
image: ${TARGET_REPO}/datanode:${TARGET_TAG}
build:
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
FROM alpine:3.12.1
COPY ./bin/master /milvus-distributed/bin/master
COPY ./configs/ /milvus-distributed/configs/
WORKDIR /milvus-distributed/
CMD ["./bin/master"]
EXPOSE 53100
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
FROM milvusdb/milvus-distributed-dev:amd64-ubuntu18.04-latest AS openblas
#FROM alpine
FROM ubuntu:bionic-20200921
RUN apt-get update && apt-get install -y --no-install-recommends libtbb-dev gfortran
#RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories
#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories \
# && apk add --no-cache libtbb gfortran
COPY --from=openblas /usr/lib/libopenblas-r0.3.9.so /usr/lib/
RUN ln -s /usr/lib/libopenblas-r0.3.9.so /usr/lib/libopenblas.so.0 && \
ln -s /usr/lib/libopenblas.so.0 /usr/lib/libopenblas.so
COPY ./bin/writenode /milvus-distributed/bin/writenode
COPY ./configs/ /milvus-distributed/configs/
COPY ./lib/ /milvus-distributed/lib/
ENV LD_LIBRARY_PATH=/milvus-distributed/lib:$LD_LIBRARY_PATH:/usr/lib
WORKDIR /milvus-distributed/
CMD ["./bin/writenode"]
package main
import (
"context"
"flag"
"log"
"os"
"os/signal"
"runtime/pprof"
"syscall"
"github.com/zilliztech/milvus-distributed/internal/master"
"go.uber.org/zap"
)
func main() {
cpuprofile := flag.String("cpuprofile", "", "write cpu profile to file")
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
log.Fatal(err)
}
if err := pprof.StartCPUProfile(f); err != nil {
log.Fatal(err)
}
defer pprof.StopCPUProfile()
}
master.Init()
// Creates server.
ctx, cancel := context.WithCancel(context.Background())
svr, err := master.CreateServer(ctx)
if err != nil {
log.Print("create server failed", zap.Error(err))
}
if err := svr.Run(int64(master.Params.Port)); err != nil {
log.Fatal("run server failed", zap.Error(err))
}
sc := make(chan os.Signal, 1)
signal.Notify(sc,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT)
sig := <-sc
log.Print("Got signal to exit", zap.String("signal", sig.String()))
cancel()
svr.Close()
}
@@ -7,7 +7,6 @@ import (
"log"
"os"
"os/signal"
"runtime/pprof"
"sync"
"syscall"
"time"
@@ -15,51 +14,10 @@ import (
"go.uber.org/zap"
"github.com/zilliztech/milvus-distributed/internal/indexnode"
"github.com/zilliztech/milvus-distributed/internal/master"
"github.com/zilliztech/milvus-distributed/internal/proxynode"
"github.com/zilliztech/milvus-distributed/internal/querynode"
"github.com/zilliztech/milvus-distributed/internal/writenode"
)
func InitMaster(cpuprofile *string, wg *sync.WaitGroup) {
defer wg.Done()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
log.Fatal(err)
}
if err := pprof.StartCPUProfile(f); err != nil {
log.Fatal(err)
}
defer pprof.StopCPUProfile()
}
master.Init()
// Creates server.
ctx, cancel := context.WithCancel(context.Background())
svr, err := master.CreateServer(ctx)
if err != nil {
log.Print("create server failed", zap.Error(err))
}
if err := svr.Run(int64(master.Params.Port)); err != nil {
log.Fatal("run server failed", zap.Error(err))
}
sc := make(chan os.Signal, 1)
signal.Notify(sc,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT)
sig := <-sc
log.Print("Got signal to exit", zap.String("signal", sig.String()))
cancel()
svr.Close()
}
func InitProxy(wg *sync.WaitGroup) {
defer wg.Done()
//proxynode.Init()
@@ -179,49 +137,9 @@ func InitIndexBuilder(wg *sync.WaitGroup) {
}
}
func InitWriteNode(wg *sync.WaitGroup) {
defer wg.Done()
writenode.Init()
fmt.Println("WriteNodeID is", writenode.Params.WriteNodeID)
// Creates server.
ctx, cancel := context.WithCancel(context.Background())
svr := writenode.NewWriteNode(ctx, 111111)
sc := make(chan os.Signal, 1)
signal.Notify(sc,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT)
var sig os.Signal
go func() {
sig = <-sc
cancel()
}()
if err := svr.Start(); err != nil {
log.Fatal("run server failed", zap.Error(err))
}
<-ctx.Done()
log.Print("Got signal to exit", zap.String("signal", sig.String()))
svr.Close()
switch sig {
case syscall.SIGTERM:
exit(0)
default:
exit(1)
}
}
func main() {
var wg sync.WaitGroup
cpuprofile := flag.String("cpuprofile", "", "write cpu profile to file")
flag.Parse()
wg.Add(1)
go InitMaster(cpuprofile, &wg)
time.Sleep(time.Second * 1)
wg.Add(1)
go InitProxy(&wg)
@@ -229,8 +147,6 @@ func main() {
go InitQueryNode(&wg)
wg.Add(1)
go InitIndexBuilder(&wg)
wg.Add(1)
go InitWriteNode(&wg)
wg.Wait()
}
package main
import (
"context"
"fmt"
"log"
"os"
"os/signal"
"syscall"
"go.uber.org/zap"
"github.com/zilliztech/milvus-distributed/internal/writenode"
)
func main() {
writenode.Init()
fmt.Println("WriteNodeID is", writenode.Params.WriteNodeID)
// Creates server.
ctx, cancel := context.WithCancel(context.Background())
svr := writenode.NewWriteNode(ctx, 111111)
sc := make(chan os.Signal, 1)
signal.Notify(sc,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT)
var sig os.Signal
go func() {
sig = <-sc
cancel()
}()
if err := svr.Start(); err != nil {
log.Fatal("run server failed", zap.Error(err))
}
<-ctx.Done()
log.Print("Got signal to exit", zap.String("signal", sig.String()))
svr.Close()
switch sig {
case syscall.SIGTERM:
exit(0)
default:
exit(1)
}
}
func exit(code int) {
os.Exit(code)
}
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
writeNode:
stats:
publishInterval: 1000 # milliseconds
dataSync:
flowGraph:
maxQueueLength: 1024
maxParallelism: 1024
msgStream:
dataDefinition:
recvBufSize: 64 # msgPack chan buffer size
pulsarBufSize: 64 # pulsar chan buffer size
insert:
#streamBufSize: 1024 # msgPack chan buffer size
recvBufSize: 1024 # msgPack chan buffer size
pulsarBufSize: 1024 # pulsar chan buffer size
delete:
#streamBufSize: 1024 # msgPack chan buffer size
recvBufSize: 1024 # msgPack chan buffer size
pulsarBufSize: 1024 # pulsar chan buffer size
flush:
# max buffer size to flush
insertBufSize: 500
ddBufSize: 20
package indexnode
import (
"context"
"fmt"
"log"
"os"
"strconv"
"testing"
"github.com/zilliztech/milvus-distributed/internal/master"
"go.etcd.io/etcd/clientv3"
"go.uber.org/zap"
)
var ctx context.Context
var cancel func()
var buildClient *NodeImpl
var masterPort = 53101
var masterServer *master.Master
func makeMasterAddress(port int64) string {
masterAddr := "127.0.0.1:" + strconv.FormatInt(port, 10)
return masterAddr
}
func refreshMasterAddress() {
masterAddr := makeMasterAddress(int64(masterPort))
Params.MasterAddress = masterAddr
master.Params.Port = masterPort
}
func startMaster(ctx context.Context) {
master.Init()
refreshMasterAddress()
etcdAddr := master.Params.EtcdAddress
metaRootPath := master.Params.MetaRootPath
etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
if err != nil {
panic(err)
}
_, err = etcdCli.Delete(context.TODO(), metaRootPath, clientv3.WithPrefix())
if err != nil {
panic(err)
}
svr, err := master.CreateServer(ctx)
masterServer = svr
if err != nil {
log.Print("create server failed", zap.Error(err))
}
if err := svr.Run(int64(master.Params.Port)); err != nil {
log.Fatal("run server failed", zap.Error(err))
}
fmt.Println("Waiting for server!", svr.IsServing())
}
func startBuilder(ctx context.Context) {
var err error
buildClient, err = NewNodeImpl(ctx)
if err != nil {
log.Print("create builder failed", zap.Error(err))
}
// TODO: change to wait until master is ready
if err := buildClient.Start(); err != nil {
log.Fatal("run builder failed", zap.Error(err))
}
}
func setup() {
Params.Init()
ctx, cancel = context.WithCancel(context.Background())
startMaster(ctx)
startBuilder(ctx)
}
func shutdown() {
cancel()
buildClient.Stop()
masterServer.Close()
}
func TestMain(m *testing.M) {
setup()
code := m.Run()
shutdown()
os.Exit(code)
}
//func TestBuilder_GRPC(t *testing.T) {
// typeParams := make(map[string]string)
// typeParams["a"] = "1"
// indexParams := make(map[string]string)
// indexParams["b"] = "2"
// columnDataPaths := []string{"dataA", "dataB"}
// indexID, err := buildClient.BuildIndex(columnDataPaths, typeParams, indexParams)
// assert.Nil(t, err)
//
// time.Sleep(time.Second * 3)
//
// description, err := buildClient.GetIndexStates([]UniqueID{indexID})
// assert.Nil(t, err)
// assert.Equal(t, commonpb.IndexState_INPROGRESS, description.States[0].State)
// assert.Equal(t, indexID, description.States[0].IndexID)
//
// indexDataPaths, err := buildClient.GetIndexFilePaths([]UniqueID{indexID})
// assert.Nil(t, err)
// assert.Nil(t, indexDataPaths[0])
//}
# How to start a master
## Requirements
### Start an etcd v3 instance
```
./etcd -listen-peer-urls=http://192.168.1.10:12380 -advertise-client-urls=http://192.168.1.10:12379 -listen-client-urls http://0.0.0.0:12379,http://0.0.0.0:14001 -initial-advertise-peer-urls=http://192.168.1.10:12380
```
## Start from code
```
go run cmd/master.go
```
## Start with docker
## What rules does the master use to write data to kv storage?
1. Find the root path variable `ETCD_ROOT_PATH`, which is defined in common/config.go.
2. Add the prefix path `segment` if the resource is a segment.
3. Add the prefix path `collection` if the resource is a collection.
4. Append the resource UUID.
### Example
If the master creates a collection with UUID `46e468ee-b34a-419d-85ed-80c56bfa4e90`,
the corresponding key in etcd is `$(ETCD_ROOT_PATH)/collection/46e468ee-b34a-419d-85ed-80c56bfa4e90`.
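A minimal Go sketch of this key-construction rule, for illustration only: the helper names (`collectionKey`, `segmentKey`) and reading the root path from an environment variable are assumptions for the example, not the implementation in common/config.go.
```
package main

import (
	"fmt"
	"os"
	"path"
)

// collectionKey builds the etcd key for a collection following the rule above:
// root path, then the "collection" prefix, then the resource UUID.
func collectionKey(rootPath, uuid string) string {
	return path.Join(rootPath, "collection", uuid)
}

// segmentKey builds the etcd key for a segment resource.
func segmentKey(rootPath, uuid string) string {
	return path.Join(rootPath, "segment", uuid)
}

func main() {
	// Assumed for the sketch: the root path is supplied via an environment variable.
	root := os.Getenv("ETCD_ROOT_PATH")
	fmt.Println(collectionKey(root, "46e468ee-b34a-419d-85ed-80c56bfa4e90"))
	fmt.Println(segmentKey(root, "46e468ee-b34a-419d-85ed-80c56bfa4e90"))
}
```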
package master
import (
"sync"
"time"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
writerclient "github.com/zilliztech/milvus-distributed/internal/writenode/client"
)
type WriteNodeClient interface {
FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error
DescribeSegment(segmentID UniqueID) (*writerclient.SegmentDescription, error)
GetInsertBinlogPaths(segmentID UniqueID) (map[UniqueID][]string, error)
}
type MockWriteNodeClient struct {
segmentID UniqueID
flushTime time.Time
partitionTag string
timestamp Timestamp
collectionID UniqueID
lock sync.RWMutex
}
func (m *MockWriteNodeClient) FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error {
m.lock.Lock()
defer m.lock.Unlock()
m.flushTime = time.Now()
m.segmentID = segmentID
m.collectionID = collectionID
m.partitionTag = partitionTag
m.timestamp = timestamp
return nil
}
func (m *MockWriteNodeClient) DescribeSegment(segmentID UniqueID) (*writerclient.SegmentDescription, error) {
now := time.Now()
m.lock.RLock()
defer m.lock.RUnlock()
if now.Sub(m.flushTime).Seconds() > 2 {
return &writerclient.SegmentDescription{
SegmentID: segmentID,
IsClosed: true,
OpenTime: 0,
CloseTime: 1,
}, nil
}
return &writerclient.SegmentDescription{
SegmentID: segmentID,
IsClosed: false,
OpenTime: 0,
CloseTime: 1,
}, nil
}
func (m *MockWriteNodeClient) GetInsertBinlogPaths(segmentID UniqueID) (map[UniqueID][]string, error) {
return map[UniqueID][]string{
1: {"/binlog/insert/file_1"},
100: {"/binlog/insert/file_100"},
}, nil
}
type BuildIndexClient interface {
BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error)
GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error)
GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error)
}
type MockBuildIndexClient struct {
buildTime time.Time
}
func (m *MockBuildIndexClient) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
m.buildTime = time.Now()
return &indexpb.BuildIndexResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
},
IndexID: int64(1),
}, nil
}
func (m *MockBuildIndexClient) GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
now := time.Now()
ret := &indexpb.IndexStatesResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
},
}
var indexStates []*indexpb.IndexInfo
if now.Sub(m.buildTime).Seconds() > 2 {
for _, indexID := range req.IndexIDs {
indexState := &indexpb.IndexInfo{
State: commonpb.IndexState_FINISHED,
IndexID: indexID,
}
indexStates = append(indexStates, indexState)
}
ret.States = indexStates
return ret, nil
}
for _, indexID := range req.IndexIDs {
indexState := &indexpb.IndexInfo{
State: commonpb.IndexState_INPROGRESS,
IndexID: indexID,
}
indexStates = append(indexStates, indexState)
}
ret.States = indexStates
return ret, nil
}
func (m *MockBuildIndexClient) GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
var filePathInfos []*indexpb.IndexFilePathInfo
for _, indexID := range req.IndexIDs {
filePaths := &indexpb.IndexFilePathInfo{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
},
IndexID: indexID,
IndexFilePaths: []string{"/binlog/index/file_1", "/binlog/index/file_2", "/binlog/index/file_3"},
}
filePathInfos = append(filePathInfos, filePaths)
}
return &indexpb.IndexFilePathsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
},
FilePaths: filePathInfos,
}, nil
}
type LoadIndexClient interface {
LoadIndex(indexPaths []string, segmentID int64, fieldID int64, fieldName string, indexParams map[string]string) error
}
type MockLoadIndexClient struct {
}
func (m *MockLoadIndexClient) LoadIndex(indexPaths []string, segmentID int64, fieldID int64, fieldName string, indexParams map[string]string) error {
return nil
}
package master
import (
"errors"
"log"
"github.com/golang/protobuf/proto"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)
type createCollectionTask struct {
baseTask
req *milvuspb.CreateCollectionRequest
}
type dropCollectionTask struct {
baseTask
req *milvuspb.DropCollectionRequest
segManager SegmentManager
}
type hasCollectionTask struct {
baseTask
hasCollection bool
req *milvuspb.HasCollectionRequest
}
type describeCollectionTask struct {
baseTask
description *milvuspb.DescribeCollectionResponse
req *milvuspb.DescribeCollectionRequest
}
type showCollectionsTask struct {
baseTask
stringListResponse *milvuspb.ShowCollectionResponse
req *milvuspb.ShowCollectionRequest
}
//////////////////////////////////////////////////////////////////////////
func (t *createCollectionTask) Type() commonpb.MsgType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.Base.MsgType
}
func (t *createCollectionTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return t.req.Base.Timestamp, nil
}
func (t *createCollectionTask) Execute() error {
if t.req == nil {
return errors.New("null request")
}
var schema schemapb.CollectionSchema
err := proto.UnmarshalMerge(t.req.Schema, &schema)
if err != nil {
return err
}
for index, singleFiled := range schema.Fields {
singleFiled.FieldID = int64(index + 100)
}
zeroField := &schemapb.FieldSchema{
FieldID: int64(0),
Name: "RowID",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT64,
}
oneField := &schemapb.FieldSchema{
FieldID: int64(1),
Name: "Timestamp",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT64,
}
schema.Fields = append(schema.Fields, zeroField, oneField)
collectionID, err := t.sch.globalIDAllocator()
if err != nil {
return err
}
ts, err := t.Ts()
if err != nil {
return err
}
collection := etcdpb.CollectionMeta{
ID: collectionID,
Schema: &schema,
CreateTime: ts,
SegmentIDs: make([]UniqueID, 0),
PartitionTags: make([]string, 0),
}
err = t.mt.AddCollection(&collection)
if err != nil {
return err
}
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: t.req.Base.Timestamp,
EndTimestamp: t.req.Base.Timestamp,
HashValues: []uint32{0},
}
createCollectionMsg := &internalpb2.CreateCollectionRequest{
Base: t.req.Base,
DbName: "",
CollectionName: t.req.CollectionName,
DbID: 0,
CollectionID: collectionID,
}
createCollectionMsg.Schema, err = proto.Marshal(&schema)
if err != nil {
return err
}
timeTickMsg := &ms.CreateCollectionMsg{
BaseMsg: baseMsg,
CreateCollectionRequest: *createCollectionMsg,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
func (t *dropCollectionTask) Type() commonpb.MsgType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.Base.MsgType
}
func (t *dropCollectionTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return t.req.Base.Timestamp, nil
}
func (t *dropCollectionTask) Execute() error {
if t.req == nil {
return errors.New("null request")
}
collectionName := t.req.CollectionName
collectionMeta, err := t.mt.GetCollectionByName(collectionName)
if err != nil {
return err
}
collectionID := collectionMeta.ID
err = t.mt.DeleteCollection(collectionID)
if err != nil {
return err
}
// Between deleting the collection from the meta table and dropping it in the segment manager,
// the segment manager may still receive a time tick from a write node; at that point this
// collection can no longer be found in the meta table.
if err = t.segManager.DropCollection(collectionID); err != nil {
return err
}
ts, err := t.Ts()
if err != nil {
return err
}
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: ts,
EndTimestamp: ts,
HashValues: []uint32{0},
}
dropReq := internalpb2.DropCollectionRequest{
Base: t.req.Base,
DbName: "",
CollectionName: t.req.CollectionName,
DbID: 0,
CollectionID: collectionID,
}
timeTickMsg := &ms.DropCollectionMsg{
BaseMsg: baseMsg,
DropCollectionRequest: dropReq,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
func (t *hasCollectionTask) Type() commonpb.MsgType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.Base.MsgType
}
func (t *hasCollectionTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return t.req.Base.Timestamp, nil
}
func (t *hasCollectionTask) Execute() error {
if t.req == nil {
return errors.New("null request")
}
collectionName := t.req.CollectionName
_, err := t.mt.GetCollectionByName(collectionName)
if err == nil {
t.hasCollection = true
}
return nil
}
//////////////////////////////////////////////////////////////////////////
func (t *describeCollectionTask) Type() commonpb.MsgType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.Base.MsgType
}
func (t *describeCollectionTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return t.req.Base.Timestamp, nil
}
func (t *describeCollectionTask) filterSchema() error {
// remove system field
var newFields []*schemapb.FieldSchema
for _, fieldMeta := range t.description.Schema.Fields {
fieldID := fieldMeta.FieldID
// TODO: do not hardcode the system field ID boundary
if fieldID < 100 {
continue
}
newFields = append(newFields, fieldMeta)
}
t.description.Schema.Fields = newFields
return nil
}
func (t *describeCollectionTask) Execute() error {
if t.req == nil {
return errors.New("null request")
}
collectionName := t.req.CollectionName
collection, err := t.mt.GetCollectionByName(collectionName)
if err != nil {
return err
}
cloneSchema := proto.Clone(collection.Schema)
t.description.Schema = cloneSchema.(*schemapb.CollectionSchema)
return t.filterSchema()
}
//////////////////////////////////////////////////////////////////////////
func (t *showCollectionsTask) Type() commonpb.MsgType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.Base.MsgType
}
func (t *showCollectionsTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return t.req.Base.Timestamp, nil
}
func (t *showCollectionsTask) Execute() error {
if t.req == nil {
return errors.New("null request")
}
colls, err := t.mt.ListCollections()
if err != nil {
return err
}
t.stringListResponse.CollectionNames = colls
return nil
}
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
master: # 21
address: localhost
port: 53100
pulsarmoniterinterval: 1
pulsartopic: "monitor-topic"
proxyidlist: [1, 2]
proxyTimeSyncChannels: ["proxy1", "proxy2"]
proxyTimeSyncSubName: "proxy-topic"
softTimeTickBarrierInterval: 500
writeidlist: [3, 4]
writeTimeSyncChannels: ["write3", "write4"]
writeTimeSyncSubName: "write-topic"
dmTimeSyncChannels: ["dm5", "dm6"]
k2sTimeSyncChannels: ["k2s7", "k2s8"]
defaultSizePerRecord: 1024
minimumAssignSize: 1048576
segmentThreshold: 536870912
segmentExpireDuration: 2000
segmentThresholdFactor: 0.75
querynodenum: 1
writenodenum: 1
statsChannels: "statistic"
etcd: # 4
address: localhost
port: 2379
rootpath: by-dev
segthreshold: 10000
timesync: # 1
interval: 400
storage: # 5
driver: TIKV
address: localhost
port: 2379
accesskey:
secretkey:
pulsar: # 6
authentication: false
user: user-default
token: eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY
address: localhost
port: 6650
topicnum: 128
reader: # 7
clientid: 0
stopflag: -1
readerqueuesize: 10000
searchchansize: 10000
key2segchansize: 10000
topicstart: 0
topicend: 128
writer: # 8
clientid: 0
stopflag: -2
readerqueuesize: 10000
searchbyidchansize: 10000
parallelism: 100
topicstart: 0
topicend: 128
bucket: "zilliz-hz"
proxy: # 21
timezone: UTC+8
proxy_id: 1
numReaderNodes: 2
tsoSaveInterval: 200
timeTickInterval: 200
pulsarTopics:
readerTopicPrefix: "milvusReader"
numReaderTopics: 2
deleteTopic: "milvusDeleter"
queryTopic: "milvusQuery"
resultTopic: "milvusResult"
resultGroup: "milvusResultGroup"
timeTickTopic: "milvusTimeTick"
network:
address: 0.0.0.0
port: 19530
logs:
level: debug
trace.enable: true
path: /tmp/logs
max_log_file_size: 1024MB
log_rotate_num: 0
storage:
path: /var/lib/milvus
auto_flush_interval: 1
package master
// system field id:
// 0: unique row id
// 1: timestamp
// 100: first user field id
// 101: second user field id
// 102: ...
const (
RowIDField = 0
TimeStampField = 1
)
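The constants above encode the field-ID convention used throughout this package: IDs 0 and 1 are reserved for the system RowID and Timestamp fields, and user fields start at 100 (`createCollectionTask.Execute` assigns `index + 100`, and `describeCollectionTask.filterSchema` drops IDs below 100). A minimal, self-contained sketch of that filtering step is shown below; the struct and helper names are illustrative and not part of the removed code.
```
package main

import "fmt"

const (
	RowIDField     = 0
	TimeStampField = 1
	// startOfUserFieldID is an assumed name for the hardcoded boundary (100)
	// used by describeCollectionTask.filterSchema.
	startOfUserFieldID = 100
)

type fieldSchema struct {
	FieldID int64
	Name    string
}

// filterSystemFields keeps only user-defined fields, mirroring the
// "fieldID < 100" check in the removed filterSchema method.
func filterSystemFields(fields []fieldSchema) []fieldSchema {
	var userFields []fieldSchema
	for _, f := range fields {
		if f.FieldID < startOfUserFieldID {
			continue
		}
		userFields = append(userFields, f)
	}
	return userFields
}

func main() {
	fields := []fieldSchema{
		{FieldID: RowIDField, Name: "RowID"},
		{FieldID: TimeStampField, Name: "Timestamp"},
		{FieldID: 100, Name: "vec"},
	}
	fmt.Println(filterSystemFields(fields)) // prints [{100 vec}]
}
```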
package master
import (
"context"
"log"
"time"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/errors"
)
type FlushScheduler struct {
client WriteNodeClient
metaTable *metaTable
segmentFlushChan chan UniqueID
segmentDescribeChan chan UniqueID
indexBuilderSch persistenceScheduler
ctx context.Context
cancel context.CancelFunc
globalTSOAllocator func() (Timestamp, error)
}
func NewFlushScheduler(ctx context.Context, client WriteNodeClient, metaTable *metaTable, buildScheduler *IndexBuildScheduler, globalTSOAllocator func() (Timestamp, error)) *FlushScheduler {
ctx2, cancel := context.WithCancel(ctx)
return &FlushScheduler{
client: client,
metaTable: metaTable,
indexBuilderSch: buildScheduler,
segmentFlushChan: make(chan UniqueID, 100),
segmentDescribeChan: make(chan UniqueID, 100),
ctx: ctx2,
cancel: cancel,
globalTSOAllocator: globalTSOAllocator,
}
}
func (scheduler *FlushScheduler) schedule(id interface{}) error {
segmentID := id.(UniqueID)
segmentMeta, err := scheduler.metaTable.GetSegmentByID(segmentID)
if err != nil {
return err
}
ts, err := scheduler.globalTSOAllocator()
if err != nil {
return err
}
// TODO: set the correct timestamp
err = scheduler.client.FlushSegment(segmentID, segmentMeta.CollectionID, segmentMeta.PartitionTag, ts)
if err != nil {
log.Println("flushsegment: ", segmentID, " error :", err.Error())
return err
}
//log.Printf("flush segment %d", segmentID)
scheduler.segmentDescribeChan <- segmentID
return nil
}
func (scheduler *FlushScheduler) describe() error {
timeTick := time.Tick(100 * time.Millisecond)
descTasks := make(map[UniqueID]bool)
closable := make([]UniqueID, 0)
for {
select {
case <-scheduler.ctx.Done():
{
log.Printf("broadcast context done, exit")
return errors.New("broadcast done exit")
}
case <-timeTick:
for singleSegmentID := range descTasks {
description, err := scheduler.client.DescribeSegment(singleSegmentID)
if err != nil {
log.Printf("describe segment %d err %s", singleSegmentID, err.Error())
continue
}
if !description.IsClosed {
//log.Println("describe segment ", singleSegmentID, " IsClosed :False")
continue
}
log.Printf("flush segment %d is closed", singleSegmentID)
mapData, err := scheduler.client.GetInsertBinlogPaths(singleSegmentID)
if err != nil {
log.Printf("get insert binlog paths err, segID: %d, err: %s", singleSegmentID, err.Error())
continue
}
segMeta, err := scheduler.metaTable.GetSegmentByID(singleSegmentID)
if err != nil {
log.Printf("get segment from metable failed, segID: %d, err: %s", singleSegmentID, err.Error())
continue
}
for fieldID, data := range mapData {
// check field indexable
indexable, err := scheduler.metaTable.IsIndexable(segMeta.CollectionID, fieldID)
if err != nil {
log.Printf("check field indexable from meta table failed, collID: %d, fieldID: %d, err %s", segMeta.CollectionID, fieldID, err.Error())
continue
}
if !indexable {
continue
}
info := &IndexBuildInfo{
segmentID: singleSegmentID,
fieldID: fieldID,
binlogFilePath: data,
}
err = scheduler.indexBuilderSch.Enqueue(info)
log.Printf("segment %d field %d enqueue build index scheduler", singleSegmentID, fieldID)
if err != nil {
log.Printf("index build enqueue failed, %s", err.Error())
continue
}
}
// Save data to meta table
segMeta.BinlogFilePaths = make([]*etcdpb.FieldBinlogFiles, 0)
for k, v := range mapData {
segMeta.BinlogFilePaths = append(segMeta.BinlogFilePaths, &etcdpb.FieldBinlogFiles{
FieldID: k,
BinlogFiles: v,
})
}
if err = scheduler.metaTable.UpdateSegment(segMeta); err != nil {
return err
}
log.Printf("flush segment %d finished", singleSegmentID)
closable = append(closable, singleSegmentID)
}
// remove closed segment and clear closable
for _, segID := range closable {
delete(descTasks, segID)
}
closable = closable[:0]
case segID := <-scheduler.segmentDescribeChan:
descTasks[segID] = false
}
}
}
func (scheduler *FlushScheduler) scheduleLoop() {
for {
select {
case id := <-scheduler.segmentFlushChan:
err := scheduler.schedule(id)
if err != nil {
log.Println(err)
}
case <-scheduler.ctx.Done():
log.Print("server is closed, exit flush scheduler loop")
return
}
}
}
func (scheduler *FlushScheduler) Enqueue(id interface{}) error {
scheduler.segmentFlushChan <- id.(UniqueID)
return nil
}
func (scheduler *FlushScheduler) Start() error {
go scheduler.scheduleLoop()
go scheduler.describe()
return nil
}
func (scheduler *FlushScheduler) Close() {
scheduler.cancel()
}
package master
import (
"log"
"sync/atomic"
"time"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.uber.org/zap"
)
// Allocator is a Timestamp Oracle allocator.
type Allocator interface {
// Initialize is used to initialize a TSO allocator.
// It will synchronize TSO with etcd and initialize the
// memory for later allocation work.
Initialize() error
// UpdateTSO is used to update the TSO in memory and the time window in etcd.
UpdateTSO() error
// SetTSO sets the physical part with the given tso. It's mainly used for BR restore
// and cannot forcibly set the TSO to a value smaller than the current one.
SetTSO(tso uint64) error
// GenerateTSO is used to generate a given number of TSOs.
// Make sure you have initialized the TSO allocator before calling.
GenerateTSO(count uint32) (uint64, error)
// Reset is used to reset the TSO allocator.
Reset()
}
// GlobalTSOAllocator is the global single point TSO allocator.
type GlobalTSOAllocator struct {
tso *timestampOracle
}
// NewGlobalTSOAllocator creates a new global TSO allocator.
func NewGlobalTSOAllocator(key string, kvBase kv.TxnBase) *GlobalTSOAllocator {
var saveInterval = 3 * time.Second
return &GlobalTSOAllocator{
tso: &timestampOracle{
kvBase: kvBase,
saveInterval: saveInterval,
maxResetTSGap: func() time.Duration { return 3 * time.Second },
key: key,
},
}
}
// Initialize will initialize the created global TSO allocator.
func (gta *GlobalTSOAllocator) Initialize() error {
return gta.tso.InitTimestamp()
}
// UpdateTSO is used to update the TSO in memory and the time window in etcd.
func (gta *GlobalTSOAllocator) UpdateTSO() error {
return gta.tso.UpdateTimestamp()
}
// SetTSO sets the physical part with given tso.
func (gta *GlobalTSOAllocator) SetTSO(tso uint64) error {
return gta.tso.ResetUserTimestamp(tso)
}
// GenerateTSO is used to generate a given number of TSOs.
// Make sure you have initialized the TSO allocator before calling.
func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (uint64, error) {
var physical, logical int64
if count == 0 {
return 0, errors.New("tso count should be positive")
}
maxRetryCount := 10
for i := 0; i < maxRetryCount; i++ {
current := (*atomicObject)(atomic.LoadPointer(&gta.tso.TSO))
if current == nil || current.physical.Equal(typeutil.ZeroTime) {
// If it's leader, maybe SyncTimestamp hasn't completed yet
log.Println("sync hasn't completed yet, wait for a while")
time.Sleep(200 * time.Millisecond)
continue
}
physical = current.physical.UnixNano() / int64(time.Millisecond)
logical = atomic.AddInt64(&current.logical, int64(count))
if logical >= maxLogical {
log.Println("logical part outside of max logical interval, please check ntp time",
zap.Int("retry-count", i))
time.Sleep(UpdateTimestampStep)
continue
}
return tsoutil.ComposeTS(physical, logical), nil
}
return 0, errors.New("can not get timestamp")
}
func (gta *GlobalTSOAllocator) Alloc(count uint32) (typeutil.Timestamp, error) {
//return gta.tso.SyncTimestamp()
start, err := gta.GenerateTSO(count)
if err != nil {
return typeutil.ZeroTimestamp, err
}
//ret := make([]typeutil.Timestamp, count)
//for i:=uint32(0); i < count; i++{
// ret[i] = start + uint64(i)
//}
return start, err
}
func (gta *GlobalTSOAllocator) AllocOne() (typeutil.Timestamp, error) {
return gta.GenerateTSO(1)
}
// Reset is used to reset the TSO allocator.
func (gta *GlobalTSOAllocator) Reset() {
gta.tso.ResetTimestamp()
}
package master
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
)
var gTestTsoAllocator Allocator
var gTestIDAllocator *GlobalIDAllocator
func TestGlobalTSOAllocator_All(t *testing.T) {
Init()
gTestTsoAllocator = NewGlobalTSOAllocator("timestamp", tsoutil.NewTSOKVBase([]string{Params.EtcdAddress}, "/test/root/kv", "tso"))
gTestIDAllocator = NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{Params.EtcdAddress}, "/test/root/kv", "gid"))
t.Run("Initialize", func(t *testing.T) {
err := gTestTsoAllocator.Initialize()
assert.Nil(t, err)
})
t.Run("GenerateTSO", func(t *testing.T) {
count := 1000
perCount := uint32(100)
startTs, err := gTestTsoAllocator.GenerateTSO(perCount)
assert.Nil(t, err)
lastPhysical, lastLogical := tsoutil.ParseTS(startTs)
for i := 0; i < count; i++ {
ts, _ := gTestTsoAllocator.GenerateTSO(perCount)
physical, logical := tsoutil.ParseTS(ts)
if lastPhysical.Equal(physical) {
diff := logical - lastLogical
assert.Equal(t, uint64(perCount), diff)
}
lastPhysical, lastLogical = physical, logical
}
})
t.Run("SetTSO", func(t *testing.T) {
curTime := time.Now()
nextTime := curTime.Add(2 * time.Second)
physical := nextTime.UnixNano() / int64(time.Millisecond)
logical := int64(0)
err := gTestTsoAllocator.SetTSO(tsoutil.ComposeTS(physical, logical))
assert.Nil(t, err)
})
t.Run("UpdateTSO", func(t *testing.T) {
err := gTestTsoAllocator.UpdateTSO()
assert.Nil(t, err)
})
t.Run("Reset", func(t *testing.T) {
gTestTsoAllocator.Reset()
})
t.Run("Initialize", func(t *testing.T) {
err := gTestIDAllocator.Initialize()
assert.Nil(t, err)
})
t.Run("AllocOne", func(t *testing.T) {
one, err := gTestIDAllocator.AllocOne()
assert.Nil(t, err)
ano, err := gTestIDAllocator.AllocOne()
assert.Nil(t, err)
assert.NotEqual(t, one, ano)
})
t.Run("Alloc", func(t *testing.T) {
count := uint32(2 << 10)
idStart, idEnd, err := gTestIDAllocator.Alloc(count)
assert.Nil(t, err)
assert.Equal(t, count, uint32(idEnd-idStart))
})
}
package master
import (
"context"
"fmt"
"time"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
)
const slowThreshold = 5 * time.Millisecond
func (s *Master) CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
var t task = &createCollectionTask{
req: in,
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
}
response := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
}
var err = s.scheduler.Enqueue(t)
if err != nil {
response.Reason = "Enqueue failed: " + err.Error()
return response, nil
}
err = t.WaitToFinish(ctx)
if err != nil {
response.Reason = "Create collection failed: " + err.Error()
return response, nil
}
response.ErrorCode = commonpb.ErrorCode_SUCCESS
return response, nil
}
func (s *Master) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
var t task = &dropCollectionTask{
req: in,
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
segManager: s.segmentManager,
}
response := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
}
var err = s.scheduler.Enqueue(t)
if err != nil {
response.Reason = "Enqueue failed: " + err.Error()
return response, nil
}
err = t.WaitToFinish(ctx)
if err != nil {
response.Reason = "Drop collection failed: " + err.Error()
return response, nil
}
response.ErrorCode = commonpb.ErrorCode_SUCCESS
return response, nil
}
func (s *Master) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
var t task = &hasCollectionTask{
req: in,
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
hasCollection: false,
}
st := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
}
response := &milvuspb.BoolResponse{
Status: st,
Value: false,
}
var err = s.scheduler.Enqueue(t)
if err != nil {
st.Reason = "Enqueue failed: " + err.Error()
return response, nil
}
err = t.WaitToFinish(ctx)
if err != nil {
st.Reason = "Has collection failed: " + err.Error()
return response, nil
}
st.ErrorCode = commonpb.ErrorCode_SUCCESS
response.Value = t.(*hasCollectionTask).hasCollection
return response, nil
}
func (s *Master) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
var t task = &describeCollectionTask{
req: in,
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
description: nil,
}
response := &milvuspb.DescribeCollectionResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
},
Schema: nil,
}
t.(*describeCollectionTask).description = response
var err = s.scheduler.Enqueue(t)
if err != nil {
response.Status.Reason = "Enqueue failed: " + err.Error()
return response, nil
}
err = t.WaitToFinish(ctx)
if err != nil {
response.Status.Reason = "Describe collection failed: " + err.Error()
return response, nil
}
response.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
return response, nil
}
func (s *Master) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
var t task = &showCollectionsTask{
req: in,
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
stringListResponse: nil,
}
response := &milvuspb.ShowCollectionResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "",
},
CollectionNames: nil,
}
t.(*showCollectionsTask).stringListResponse = response
var err = s.scheduler.Enqueue(t)
if err != nil {
response.Status.Reason = "Enqueue failed: " + err.Error()
return response, nil
}
err = t.WaitToFinish(ctx)
if err != nil {
response.Status.Reason = "Show Collections failed: " + err.Error()
return response, nil
}
response.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
return response, nil
}
//////////////////////////////////////////////////////////////////////////
func (s *Master) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
var t task = &createPartitionTask{
req: in,
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
}
var err = s.scheduler.Enqueue(t)
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "Enqueue failed",
}, nil
}
err = t.WaitToFinish(ctx)
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "WaitToFinish failed",
}, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
}, nil
}
func (s *Master) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
var t task = &dropPartitionTask{
req: in,
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
}
var err = s.scheduler.Enqueue(t)
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "Enqueue failed",
}, nil
}
err = t.WaitToFinish(ctx)
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "WaitToFinish failed",
}, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
}, nil
}
func (s *Master) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
var t task = &hasPartitionTask{
req: in,
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
hasPartition: false,
}
var err = s.scheduler.Enqueue(t)
if err != nil {
return &milvuspb.BoolResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "Enqueue failed",
},
Value: t.(*hasPartitionTask).hasPartition,
}, nil
}
err = t.WaitToFinish(ctx)
if err != nil {
return &milvuspb.BoolResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: err.Error(),
},
Value: t.(*hasPartitionTask).hasPartition,
}, nil
}
return &milvuspb.BoolResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
},
Value: t.(*hasPartitionTask).hasPartition,
}, nil
}
func (s *Master) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
var t task = &showPartitionTask{
req: in,
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
resp: nil,
}
var err = s.scheduler.Enqueue(t)
if err != nil {
return &milvuspb.ShowPartitionResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "Enqueue failed",
},
PartitionNames: nil,
}, nil
}
err = t.WaitToFinish(ctx)
if err != nil {
return &milvuspb.ShowPartitionResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "WaitToFinish failed",
},
PartitionNames: nil,
}, nil
}
return t.(*showPartitionTask).resp, nil
}
//----------------------------------------Internal GRPC Service--------------------------------
func (s *Master) AllocTimestamp(ctx context.Context, request *masterpb.TsoRequest) (*masterpb.TsoResponse, error) {
count := request.GetCount()
ts, err := s.tsoAllocator.Alloc(count)
if err != nil {
return &masterpb.TsoResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR},
}, nil
}
response := &masterpb.TsoResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS},
Timestamp: ts,
Count: count,
}
return response, nil
}
func (s *Master) AllocID(ctx context.Context, request *masterpb.IDRequest) (*masterpb.IDResponse, error) {
count := request.GetCount()
ts, err := s.idAllocator.AllocOne()
if err != nil {
return &masterpb.IDResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR},
}, nil
}
response := &masterpb.IDResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS},
ID: ts,
Count: count,
}
return response, nil
}
func (s *Master) AssignSegmentID(ctx context.Context, request *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error) {
segInfos, _ := s.segmentManager.AssignSegment(request.SegIDRequests)
return &datapb.AssignSegIDResponse{
SegIDAssignments: segInfos,
}, nil
}
func (s *Master) CreateIndex(ctx context.Context, req *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
ret := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
}
task := &createIndexTask{
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
req: req,
indexBuildScheduler: s.indexBuildSch,
indexLoadScheduler: s.indexLoadSch,
segManager: s.segmentManager,
}
err := s.scheduler.Enqueue(task)
if err != nil {
ret.Reason = "Enqueue failed: " + err.Error()
return ret, nil
}
err = task.WaitToFinish(ctx)
if err != nil {
ret.Reason = "Create Index error: " + err.Error()
return ret, nil
}
ret.ErrorCode = commonpb.ErrorCode_SUCCESS
return ret, nil
}
func (s *Master) DescribeIndex(ctx context.Context, req *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
resp := &milvuspb.DescribeIndexResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR},
//CollectionName: req.CollectionName,
//FieldName: req.FieldName,
}
//resp.
task := &describeIndexTask{
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
req: req,
resp: resp,
}
if err := s.scheduler.Enqueue(task); err != nil {
task.resp.Status.Reason = fmt.Sprintf("Enqueue failed: %s", err.Error())
return task.resp, nil
}
if err := task.WaitToFinish(ctx); err != nil {
task.resp.Status.Reason = fmt.Sprintf("Describe Index failed: %s", err.Error())
return task.resp, nil
}
resp.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
return task.resp, nil
}
func (s *Master) GetIndexState(ctx context.Context, req *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error) {
resp := &milvuspb.IndexStateResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
},
State: commonpb.IndexState_NONE,
}
task := &getIndexStateTask{
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
req: req,
resp: resp,
runtimeStats: s.runtimeStats,
}
if err := s.scheduler.Enqueue(task); err != nil {
task.resp.Status.Reason = "Enqueue failed: " + err.Error()
return task.resp, nil
}
if err := task.WaitToFinish(ctx); err != nil {
resp.Status.Reason = "Describe index progress failed: " + err.Error()
return task.resp, nil
}
task.resp.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
return task.resp, nil
}
func (s *Master) GetCollectionStatistics(ctx context.Context, request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
panic("implement me")
}
func (s *Master) GetPartitionStatistics(ctx context.Context, request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) {
panic("implement me")
}
func (s *Master) GetComponentStatesRPC(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ComponentStates, error) {
panic("implement me")
}
func (s *Master) GetTimeTickChannelRPC(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
panic("implement me")
}
func (s *Master) GetStatisticsChannelRPC(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
panic("implement me")
}
func (s *Master) DescribeSegment(ctx context.Context, request *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
panic("implement me")
}
func (s *Master) ShowSegments(ctx context.Context, request *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
panic("implement me")
}
func (s *Master) GetDdChannelRPC(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
panic("implement me")
}