Unverified commit 07d9879e authored by maomao, committed by GitHub

Fix #533: unit test failures of TestStorage, TestCatalog and TestAOEEngine (#544)

parent cf27b685
@@ -14,7 +14,7 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2
github.com/google/btree v1.0.1
- github.com/matrixorigin/matrixcube v0.0.0-20210826142433-a6a297af7b00
+ github.com/matrixorigin/matrixcube v0.0.0-20210828045306-6108206b608a
github.com/panjf2000/ants/v2 v2.4.5
github.com/pierrec/lz4 v2.6.0+incompatible
github.com/pingcap/errors v0.11.5-0.20201029093017-5a7df2af2ac7
......
@@ -3,6 +3,7 @@ package test
import (
"fmt"
"github.com/fagongzi/log"
"github.com/matrixorigin/matrixcube/raftstore"
"github.com/stretchr/testify/require"
stdLog "log"
"matrixone/pkg/container/types"
@@ -15,6 +16,7 @@ import (
daoe "matrixone/pkg/vm/engine/aoe/dist/aoe"
"matrixone/pkg/vm/engine/aoe/dist/config"
"matrixone/pkg/vm/engine/aoe/dist/testutil"
e "matrixone/pkg/vm/engine/aoe/storage"
md "matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
"matrixone/pkg/vm/metadata"
"testing"
@@ -48,19 +50,39 @@ func TestCatalog(t *testing.T) {
c := testutil.NewTestAOECluster(t,
func(node int) *config.Config {
c := &config.Config{}
- c.ClusterConfig.PreAllocatedGroupNum = 5
+ c.ClusterConfig.PreAllocatedGroupNum = 20
c.ServerConfig.ExternalServer = true
return c
},
testutil.WithTestAOEClusterAOEStorageFunc(func(path string) (*daoe.Storage, error) {
- return daoe.NewStorage(path)
- }), testutil.WithTestAOEClusterUsePebble())
+ opts := &e.Options{}
+ mdCfg := &md.Configuration{
+ Dir: path,
+ SegmentMaxBlocks: blockCntPerSegment,
+ BlockMaxRows: blockRows,
+ }
+ opts.CacheCfg = &e.CacheCfg{
+ IndexCapacity: blockRows * blockCntPerSegment * 80,
+ InsertCapacity: blockRows * uint64(colCnt) * 2000,
+ DataCapacity: blockRows * uint64(colCnt) * 2000,
+ }
+ opts.MetaCleanerCfg = &e.MetaCleanerCfg{
+ Interval: time.Duration(1) * time.Second,
+ }
+ opts.Meta.Conf = mdCfg
+ return daoe.NewStorageWithOptions(path, opts)
+ }),
+ testutil.WithTestAOEClusterUsePebble(),
+ testutil.WithTestAOEClusterRaftClusterOptions(
+ raftstore.WithTestClusterLogLevel("error"),
+ raftstore.WithTestClusterDataPath("./test")))
defer func() {
logutil.Debug(">>>>>>>>>>>>>>>>> call stop")
c.Stop()
}()
c.Start()
- c.RaftCluster.WaitShardByCount(t, 1, time.Second*10)
+ c.RaftCluster.WaitLeadersByCount(t, 20, time.Second*30)
stdLog.Printf("app all started.")
catalog := catalog2.DefaultCatalog(c.CubeDrivers[0])
......
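For reference, a minimal sketch of how the cache capacities in the new storage factory above scale with the test constants; the concrete values of blockRows and blockCntPerSegment here are assumptions for illustration, not necessarily what the test package defines.

package main

import "fmt"

func main() {
	const (
		blockRows          uint64 = 10000 // assumed value
		blockCntPerSegment uint64 = 4     // assumed value
		colCnt                    = 4     // same as the test constant shown further below
	)
	// Mirrors the expressions used for e.CacheCfg in the hunk above.
	indexCapacity := blockRows * blockCntPerSegment * 80
	insertCapacity := blockRows * uint64(colCnt) * 2000
	dataCapacity := blockRows * uint64(colCnt) * 2000
	fmt.Println(indexCapacity, insertCapacity, dataCapacity) // 3200000 80000000 80000000
}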
@@ -175,9 +175,14 @@ func TestAOEEngine(t *testing.T) {
require.Equal(t, segmentCnt, len(tb.Segments()))
logutil.Infof("table name is %s, segment size is %d, segments is %v\n", tName, len(tb.Segments()), tb.Segments())
}
+ if restart {
+ time.Sleep(3 * time.Second)
+ doRestartEngine(t)
+ }
}
- func TestAOEEngineRestart(t *testing.T) {
+ func doRestartEngine(t *testing.T) {
putil.SetLogger(log.NewLoggerWithPrefix("prophet"))
c := testutil.NewTestAOECluster(t,
func(node int) *config.Config {
......
@@ -32,6 +32,7 @@ const (
colCnt = 4
segmentCnt = 5
blockCnt = blockCntPerSegment * segmentCnt
+ restart = false
)
var tableInfo *aoe.TableInfo
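With this change the restart verification becomes opt-in: the former TestAOEEngineRestart and TestRestartStorage entry points are turned into plain helpers, and doRestartEngine is only invoked when the restart constant above is true. To exercise the restart path locally one would flip that constant, e.g.

	restart = true // re-opens the cluster over the ./test data left by the first pass

while the committed default of false keeps the regular unit-test run short.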
@@ -70,7 +71,7 @@ func TestStorage(t *testing.T) {
}),
testutil.WithTestAOEClusterUsePebble(),
testutil.WithTestAOEClusterRaftClusterOptions(
raftstore.WithTestClusterLogLevel("info"),
raftstore.WithTestClusterLogLevel("error"),
raftstore.WithTestClusterDataPath("./test")))
defer func() {
stdLog.Printf(">>>>>>>>>>>>>>>>> call stop")
@@ -214,9 +215,13 @@ func TestStorage(t *testing.T) {
time.Sleep(3 * time.Second)
+ if restart {
+ doRestartEngine(t)
+ }
}
- func TestRestartStorage(t *testing.T) {
+ func doRestartStorage(t *testing.T) {
c := testutil.NewTestAOECluster(t,
func(node int) *config.Config {
c := &config.Config{}
@@ -243,7 +248,7 @@ func TestRestartStorage(t *testing.T) {
}), testutil.WithTestAOEClusterUsePebble(),
testutil.WithTestAOEClusterRaftClusterOptions(
raftstore.WithTestClusterRecreate(false),
raftstore.WithTestClusterLogLevel("info"),
raftstore.WithTestClusterLogLevel("error"),
raftstore.WithTestClusterDataPath("./test")))
defer func() {
logutil.Debug(">>>>>>>>>>>>>>>>> call stop")
......
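The restart helpers rebuild the test cluster over whatever the first pass left in ./test, with recreation disabled so the on-disk state is reused rather than wiped. A rough sketch of that pattern, assuming only the testutil and raftstore options that already appear in the hunks above (the real helpers also pass the AOE storage factory and config fields, omitted here):

c := testutil.NewTestAOECluster(t,
	func(node int) *config.Config {
		return &config.Config{} // the real helpers also fill in cluster/server settings
	},
	testutil.WithTestAOEClusterUsePebble(),
	testutil.WithTestAOEClusterRaftClusterOptions(
		raftstore.WithTestClusterRecreate(false), // keep the data written by the first run
		raftstore.WithTestClusterLogLevel("error"),
		raftstore.WithTestClusterDataPath("./test")))
defer c.Stop()
c.Start()
// ...followed by the same catalog/segment assertions as the first pass.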
# Code generated by tool; DO NOT EDIT.
#
# These values must be different from each other:
# port
# nodeID
# prophetEmbedEtcdJoinAddr
#
# addr-raft
# addr-client
# dir-data
#[prophet]
# name
# rpc-addr
# external-etcd
# [prophet.embed-etcd]
# client-urls
# peer-urls
#
# Name: rootpassword
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: []
# Comment: root password
# UpdateMode: dynamic
rootpassword = ""
# Name: dumpdatabase
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [default]
# Comment: dump database name
# UpdateMode: dynamic
dumpdatabase = "default"
# Name: port
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [6001 6001 6010]
# Comment: port
# UpdateMode: dynamic
port = 6001
# Name: host
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [localhost 127.0.0.1 0.0.0.0]
# Comment: listening ip
# UpdateMode: dynamic
host = "localhost"
# Name: sendRow
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: []
# Comment: send data row while producing
# UpdateMode: dynamic
sendRow = false
# Name: dumpEnv
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: []
# Comment: dump Environment with memEngine Null nodes for testing
# UpdateMode: dynamic
dumpEnv = false
# Name: hostMmuLimitation
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: host mmu limitation. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
hostMmuLimitation = 1099511627776
# Name: guestMmuLimitation
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: guest mmu limitation. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
guestMmuLimitation = 1099511627776
# Name: mempoolMaxSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: mempool maxsize. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
mempoolMaxSize = 1099511627776
# Name: mempoolFactor
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [8]
# Comment: mempool factor. default: 8
# UpdateMode: dynamic
mempoolFactor = 8
# Name: processLimitationSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.Size. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationSize = 42949672960
# Name: processLimitationBatchRows
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.BatchRows. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationBatchRows = 42949672960
# Name: processLimitationBatchSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [0]
# Comment: process.Limitation.BatchSize. default: 0
# UpdateMode: dynamic
processLimitationBatchSize = 0
# Name: processLimitationPartitionRows
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.PartitionRows. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationPartitionRows = 42949672960
# Name: countOfRowsPerSendingToClient
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [10]
# Comment: send the count of rows to the client
# UpdateMode: dynamic
countOfRowsPerSendingToClient = 10
# Name: periodOfEpochTimer
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [5]
# Comment: the period of epoch timer in second
# UpdateMode: dynamic
periodOfEpochTimer = 5
# Name: periodOfPersistence
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the period of persistence in second
# UpdateMode: dynamic
periodOfPersistence = 20
# Name: periodOfDDLDeleteTimer
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the period of the ddl delete in second
# UpdateMode: dynamic
periodOfDDLDeleteTimer = 20
# Name: timeoutOfHeartbeat
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the timeout of heartbeat in second
# UpdateMode: dynamic
timeoutOfHeartbeat = 20
# Name: rejectWhenHeartbeatFromPDLeaderIsTimeout
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [false]
# Comment: default is false. the server will reject the connection and sql request when the heartbeat from pdleader is timeout.
# UpdateMode: dynamic
rejectWhenHeartbeatFromPDLeaderIsTimeout = false
# Name: enableEpochLogging
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [false]
# Comment: default is false. Print logs when the server calls catalog service to run the ddl.
# UpdateMode: dynamic
enableEpochLogging = false
# Name: recordTimeElapsedOfSqlRequest
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [true]
# Comment: record the time elapsed of executing sql request
# UpdateMode: dynamic
recordTimeElapsedOfSqlRequest = true
# Name: nodeID
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [0 0 10]
# Comment: the Node ID of the cube
# UpdateMode: dynamic
nodeID = 0
# Name: cubeDir
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [./cube]
# Comment: the root direction of the cube
# UpdateMode: dynamic
cubeDir = "./cube"
# Name: prophetEmbedEtcdJoinAddr
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [http://localhost:40000 http://127.0.0.1:40000]
# Comment: the join address of prophet of the cube
# UpdateMode: dynamic
prophetEmbedEtcdJoinAddr = "http://localhost:40000"
# Name: maxReplicas
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [1 1 1]
# Comment: the number of replicas for each resource
# UpdateMode: dynamic
maxReplicas = 1
# Name: lengthOfQueryPrinted
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [50 -1 10000]
# Comment: the length of query printed into console. -1, complete string. 0, empty string. >0 , length of characters at the header of the string.
# UpdateMode: dynamic
lengthOfQueryPrinted = 50
# Cluster Configs
pre-allocated-group-num = 20
max-group-num = 0
# Logger Configs
# Log level: debug, info, warn, error, fatal.
level = "debug"
# Log format, one of json or text.
format = "console"
# Enable automatic timestamps in log output, if not set, it will be defaulted to true.
# enable-timestamp = true
# Enable annotating logs with the full stack error message, if not set, it will be defaulted to false.
# enable-error-stack = false
# Encoding sets the logger's encoding. Valid values are "json" and
# "console", as well as any third-party encodings registered via
# RegisterEncoder.
encoding = "console"
# Cube Configs
addr-raft = "localhost:10000"
addr-client = "localhost:20000"
dir-data = "./cube0/node"
dir-deploy = ""
version = ""
githash = ""
capacity = 0
use-memory-as-storage = false
shard-groups = 0
[replication]
max-peer-down-time = "0s"
shard-heartbeat-duration = "100ms"
store-heartbeat-duration = "1s"
shard-split-check-duration = "0s"
shard-state-check-duration = "0s"
disable-shard-split = false
allow-remove-leader = false
shard-capacity-bytes = 0
shard-split-check-bytes = 0
[snapshot]
max-concurrency-snap-chunks = 0
snap-chunk-size = 0
[raft]
enable-pre-vote = false
tick-interval = "600ms"
heartbeat-ticks = 0
election-timeout-ticks = 0
max-size-per-msg = 0
max-inflight-msgs = 0
max-entry-bytes = 314572800
send-raft-batch-size = 0
[raft.raft-log]
disable-sync = false
compact-duration = "0s"
compact-threshold = 0
max-allow-transfer-lag = 0
ForceCompactCount = 0
ForceCompactBytes = 0
CompactProtectLag = 0
[worker]
raft-apply-worker = 0
raft-msg-worker = 0
raft-event-workers = 0
[prophet]
name = "node0"
data-dir = ""
rpc-addr = "localhost:30000"
rpc-timeout = "0s"
storage-node = true
external-etcd = [""]
lease = 0
[prophet.embed-etcd]
join = ""
client-urls = "http://localhost:40000"
peer-urls = "http://localhost:50000"
advertise-client-urls = ""
advertise-peer-urls = ""
initial-cluster = ""
initial-cluster-state = ""
tick-interval = "600ms"
election-interval = "3s"
enable-prevote = false
auto-compaction-mode = ""
auto-compaction-retention = ""
quota-backend-bytes = 0
[prophet.schedule]
max-snapshot-count = 0
max-pending-peer-count = 0
max-merge-resource-size = 0
max-merge-resource-keys = 0
split-merge-interval = "0s"
enable-one-way-merge = false
enable-cross-table-merge = false
patrol-resource-interval = "0s"
max-container-down-time = "0s"
leader-schedule-limit = 0
leader-schedule-policy = ""
resource-schedule-limit = 0
replica-schedule-limit = 0
merge-schedule-limit = 0
hot-resource-schedule-limit = 0
hot-resource-cache-hits-threshold = 0
tolerant-size-ratio = 0.0
low-space-ratio = 0.0
high-space-ratio = 0.0
resource-score-formula-version = ""
scheduler-max-waiting-operator = 0
enable-remove-down-replica = false
enable-replace-offline-replica = false
enable-make-up-replica = false
enable-remove-extra-replica = false
enable-location-replacement = false
enable-debug-metrics = false
enable-joint-consensus = true
container-limit-mode = ""
[prophet.replication]
max-replicas = 3
strictly-match-label = false
enable-placement-rules = false
isolation-level = ""
[metric]
addr = ""
interval = 0
job = ""
instance = ""
# File logging.
[file]
# Log file name.
filename = ""
# Max log file size in MB (upper limit to 4096MB).
max-size = 300
# Max log file keep days. No clean up by default.
max-days = 0
# Maximum number of old log files to retain. No clean up by default.
max-backups = 0
# Code generated by tool; DO NOT EDIT.
#
# These values must be different from each other:
# port
# nodeID
# prophetEmbedEtcdJoinAddr
#
# addr-raft
# addr-client
# dir-data
#[prophet]
# name
# rpc-addr
# external-etcd
# [prophet.embed-etcd]
# client-urls
# peer-urls
#
# Name: rootpassword
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: []
# Comment: root password
# UpdateMode: dynamic
rootpassword = ""
# Name: dumpdatabase
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [default]
# Comment: dump database name
# UpdateMode: dynamic
dumpdatabase = "default"
# Name: port
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [6001 6001 6010]
# Comment: port
# UpdateMode: dynamic
port = 6002
# Name: host
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [localhost 127.0.0.1 0.0.0.0]
# Comment: listening ip
# UpdateMode: dynamic
host = "localhost"
# Name: sendRow
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: []
# Comment: send data row while producing
# UpdateMode: dynamic
sendRow = false
# Name: dumpEnv
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: []
# Comment: dump Environment with memEngine Null nodes for testing
# UpdateMode: dynamic
dumpEnv = false
# Name: hostMmuLimitation
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: host mmu limitation. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
hostMmuLimitation = 1099511627776
# Name: guestMmuLimitation
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: guest mmu limitation. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
guestMmuLimitation = 1099511627776
# Name: mempoolMaxSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: mempool maxsize. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
mempoolMaxSize = 1099511627776
# Name: mempoolFactor
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [8]
# Comment: mempool factor. default: 8
# UpdateMode: dynamic
mempoolFactor = 8
# Name: processLimitationSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.Size. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationSize = 42949672960
# Name: processLimitationBatchRows
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.BatchRows. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationBatchRows = 42949672960
# Name: processLimitationBatchSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [0]
# Comment: process.Limitation.BatchSize. default: 0
# UpdateMode: dynamic
processLimitationBatchSize = 0
# Name: processLimitationPartitionRows
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.PartitionRows. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationPartitionRows = 42949672960
# Name: countOfRowsPerSendingToClient
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [10]
# Comment: send the count of rows to the client
# UpdateMode: dynamic
countOfRowsPerSendingToClient = 10
# Name: periodOfEpochTimer
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [5]
# Comment: the period of epoch timer in second
# UpdateMode: dynamic
periodOfEpochTimer = 5
# Name: periodOfPersistence
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the period of persistence in second
# UpdateMode: dynamic
periodOfPersistence = 20
# Name: periodOfDDLDeleteTimer
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the period of the ddl delete in second
# UpdateMode: dynamic
periodOfDDLDeleteTimer = 20
# Name: timeoutOfHeartbeat
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the timeout of heartbeat in second
# UpdateMode: dynamic
timeoutOfHeartbeat = 20
# Name: rejectWhenHeartbeatFromPDLeaderIsTimeout
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [false]
# Comment: default is false. the server will reject the connection and sql request when the heartbeat from pdleader is timeout.
# UpdateMode: dynamic
rejectWhenHeartbeatFromPDLeaderIsTimeout = false
# Name: enableEpochLogging
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [false]
# Comment: default is false. Print logs when the server calls catalog service to run the ddl.
# UpdateMode: dynamic
enableEpochLogging = false
# Name: recordTimeElapsedOfSqlRequest
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [true]
# Comment: record the time elapsed of executing sql request
# UpdateMode: dynamic
recordTimeElapsedOfSqlRequest = true
# Name: nodeID
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [0 0 10]
# Comment: the Node ID of the cube
# UpdateMode: dynamic
nodeID = 1
# Name: cubeDir
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [./cube]
# Comment: the root direction of the cube
# UpdateMode: dynamic
cubeDir = "./cube"
# Name: prophetEmbedEtcdJoinAddr
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [http://localhost:40000 http://127.0.0.1:40000]
# Comment: the join address of prophet of the cube
# UpdateMode: dynamic
prophetEmbedEtcdJoinAddr = "http://localhost:40000"
# Name: maxReplicas
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [1 1 1]
# Comment: the number of replicas for each resource
# UpdateMode: dynamic
maxReplicas = 1
# Name: lengthOfQueryPrinted
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [50 -1 10000]
# Comment: the length of query printed into console. -1, complete string. 0, empty string. >0 , length of characters at the header of the string.
# UpdateMode: dynamic
lengthOfQueryPrinted = 50
# Cluster Configs
pre-allocated-group-num = 20
max-group-num = 0
# Logger Configs
# Log level: debug, info, warn, error, fatal.
level = "debug"
# Log format, one of json or text.
format = "console"
# Enable automatic timestamps in log output, if not set, it will be defaulted to true.
# enable-timestamp = true
# Enable annotating logs with the full stack error message, if not set, it will be defaulted to false.
# enable-error-stack = false
# Encoding sets the logger's encoding. Valid values are "json" and
# "console", as well as any third-party encodings registered via
# RegisterEncoder.
encoding = "console"
# Cube Configs
addr-raft = "localhost:10001"
addr-client = "localhost:20001"
dir-data = "./cube1/node"
dir-deploy = ""
version = ""
githash = ""
capacity = 0
use-memory-as-storage = false
shard-groups = 0
[replication]
max-peer-down-time = "0s"
shard-heartbeat-duration = "100ms"
store-heartbeat-duration = "1s"
shard-split-check-duration = "0s"
shard-state-check-duration = "0s"
disable-shard-split = false
allow-remove-leader = false
shard-capacity-bytes = 0
shard-split-check-bytes = 0
[snapshot]
max-concurrency-snap-chunks = 0
snap-chunk-size = 0
[raft]
enable-pre-vote = false
tick-interval = "600ms"
heartbeat-ticks = 0
election-timeout-ticks = 0
max-size-per-msg = 0
max-inflight-msgs = 0
max-entry-bytes = 314572800
send-raft-batch-size = 0
[raft.raft-log]
disable-sync = false
compact-duration = "0s"
compact-threshold = 0
max-allow-transfer-lag = 0
ForceCompactCount = 0
ForceCompactBytes = 0
CompactProtectLag = 0
[worker]
raft-apply-worker = 0
raft-msg-worker = 0
raft-event-workers = 0
[prophet]
name = "node1"
data-dir = ""
rpc-addr = "localhost:30001"
rpc-timeout = "0s"
storage-node = true
external-etcd = [""]
lease = 0
[prophet.embed-etcd]
join = ""
client-urls = "http://localhost:40001"
peer-urls = "http://localhost:50001"
advertise-client-urls = ""
advertise-peer-urls = ""
initial-cluster = ""
initial-cluster-state = ""
tick-interval = "600ms"
election-interval = "3s"
enable-prevote = false
auto-compaction-mode = ""
auto-compaction-retention = ""
quota-backend-bytes = 0
[prophet.schedule]
max-snapshot-count = 0
max-pending-peer-count = 0
max-merge-resource-size = 0
max-merge-resource-keys = 0
split-merge-interval = "0s"
enable-one-way-merge = false
enable-cross-table-merge = false
patrol-resource-interval = "0s"
max-container-down-time = "0s"
leader-schedule-limit = 0
leader-schedule-policy = ""
resource-schedule-limit = 0
replica-schedule-limit = 0
merge-schedule-limit = 0
hot-resource-schedule-limit = 0
hot-resource-cache-hits-threshold = 0
tolerant-size-ratio = 0.0
low-space-ratio = 0.0
high-space-ratio = 0.0
resource-score-formula-version = ""
scheduler-max-waiting-operator = 0
enable-remove-down-replica = false
enable-replace-offline-replica = false
enable-make-up-replica = false
enable-remove-extra-replica = false
enable-location-replacement = false
enable-debug-metrics = false
enable-joint-consensus = true
container-limit-mode = ""
[prophet.replication]
max-replicas = 3
strictly-match-label = false
enable-placement-rules = false
isolation-level = ""
[metric]
addr = ""
interval = 0
job = ""
instance = ""
# File logging.
[file]
# Log file name.
filename = ""
# Max log file size in MB (upper limit to 4096MB).
max-size = 300
# Max log file keep days. No clean up by default.
max-days = 0
# Maximum number of old log files to retain. No clean up by default.
max-backups = 0
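The two generated node configurations above differ only in their per-node values: port (6001 vs 6002), nodeID (0 vs 1), addr-raft and addr-client, dir-data (./cube0/node vs ./cube1/node), and the prophet name and addresses. A hedged sanity-check sketch for that invariant, assuming the files are saved as node0.toml and node1.toml and using the BurntSushi/toml decoder (an assumed choice, not necessarily the project's own):

package main

import (
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var n0, n1 map[string]interface{}
	if _, err := toml.DecodeFile("node0.toml", &n0); err != nil { // assumed file name
		log.Fatal(err)
	}
	if _, err := toml.DecodeFile("node1.toml", &n1); err != nil { // assumed file name
		log.Fatal(err)
	}
	// Top-level keys that must not collide between the two nodes.
	for _, k := range []string{"port", "nodeID", "addr-raft", "addr-client", "dir-data"} {
		if n0[k] == n1[k] {
			log.Fatalf("key %q must be different on each node, both are %v", k, n0[k])
		}
	}
	log.Println("per-node values differ as expected")
}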