Unverified commit 346e1ddf, authored by Jin Hai and committed by GitHub

Remove auto-generated config files (#697)


Signed-off-by: jinhai <haijin.chn@gmail.com>
parent fcc0b040
@@ -13,3 +13,4 @@ vendor/
 gen_config
 main
 mo-server
+*.toml
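
The hunk above is the whole mechanism: the per-node TOML files that follow were emitted by a generator (presumably the gen_config binary already listed in .gitignore) and are removed from version control here, with *.toml ignored so they cannot be re-committed by accident. The deleted files differ only in the handful of per-node values their headers call out. As a rough illustration of what such a generator reduces to — the file name, field selection, and layout are assumptions for this sketch, not the actual gen_config code:

package main

import (
	"fmt"
	"os"
)

// writeNodeConfig writes the per-node fragment of one generated TOML file.
// Only the values that the file headers say must differ between nodes are
// derived from the node index; everything else would come from a shared
// template. This is an illustrative sketch, not the real gen_config tool.
func writeNodeConfig(id int) error {
	f, err := os.Create(fmt.Sprintf("cube%d.toml", id)) // assumed file name
	if err != nil {
		return err
	}
	defer f.Close()

	// Offsets match the deleted files below: port 6001/6002, raft and
	// client addresses stepped by one, data dir ./cube0 and ./cube1.
	_, err = fmt.Fprintf(f,
		"port = %d\nnodeID = %d\naddr-raft = \"localhost:%d\"\naddr-client = \"localhost:%d\"\ndir-data = \"./cube%d/node\"\n",
		6001+id, id, 10000+id, 20000+id, id)
	return err
}

func main() {
	for id := 0; id < 2; id++ {
		if err := writeNodeConfig(id); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}
}

The deleted files themselves follow.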
# Code generated by tool; DO NOT EDIT.
#
# These values must be different from each other:
# port
# nodeID
# prophetEmbedEtcdJoinAddr
#
# addr-raft
# addr-client
# dir-data
#[prophet]
# name
# rpc-addr
# external-etcd
# [prophet.embed-etcd]
# client-urls
# peer-urls
#
# Name: rootpassword
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: []
# Comment: root password
# UpdateMode: dynamic
rootpassword = ""
# Name: dumpdatabase
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [default]
# Comment: dump database name
# UpdateMode: dynamic
dumpdatabase = "default"
# Name: port
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [6001 6001 6010]
# Comment: port
# UpdateMode: dynamic
port = 6001
# Name: host
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [localhost 127.0.0.1 0.0.0.0]
# Comment: the listening IP
# UpdateMode: dynamic
host = "localhost"
# Name: sendRow
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: []
# Comment: send data rows while producing them
# UpdateMode: dynamic
sendRow = false
# Name: dumpEnv
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: []
# Comment: dump the environment with memEngine Null nodes, for testing
# UpdateMode: dynamic
dumpEnv = false
# Name: hostMmuLimitation
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: host mmu limitation. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
hostMmuLimitation = 1099511627776
# Name: guestMmuLimitation
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: guest mmu limitation. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
guestMmuLimitation = 1099511627776
# Name: mempoolMaxSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: mempool maxsize. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
mempoolMaxSize = 1099511627776
# Name: mempoolFactor
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [8]
# Comment: mempool factor. default: 8
# UpdateMode: dynamic
mempoolFactor = 8
# Name: processLimitationSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.Size. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationSize = 42949672960
# Name: processLimitationBatchRows
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.BatchRows. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationBatchRows = 42949672960
# Name: processLimitationBatchSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [0]
# Comment: process.Limitation.BatchSize. default: 0
# UpdateMode: dynamic
processLimitationBatchSize = 0
# Name: processLimitationPartitionRows
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.PartitionRows. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationPartitionRows = 42949672960
# Name: countOfRowsPerSendingToClient
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [10]
# Comment: the number of rows sent to the client at a time
# UpdateMode: dynamic
countOfRowsPerSendingToClient = 10
# Name: periodOfEpochTimer
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [5]
# Comment: the period of the epoch timer, in seconds
# UpdateMode: dynamic
periodOfEpochTimer = 5
# Name: periodOfPersistence
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the period of persistence, in seconds
# UpdateMode: dynamic
periodOfPersistence = 20
# Name: periodOfDDLDeleteTimer
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the period of the DDL delete timer, in seconds
# UpdateMode: dynamic
periodOfDDLDeleteTimer = 20
# Name: timeoutOfHeartbeat
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the heartbeat timeout, in seconds
# UpdateMode: dynamic
timeoutOfHeartbeat = 20
# Name: rejectWhenHeartbeatFromPDLeaderIsTimeout
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [false]
# Comment: default is false. The server will reject connections and SQL requests when the heartbeat from the PD leader times out.
# UpdateMode: dynamic
rejectWhenHeartbeatFromPDLeaderIsTimeout = false
# Name: enableEpochLogging
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [false]
# Comment: default is false. Print logs when the server calls the catalog service to run DDL.
# UpdateMode: dynamic
enableEpochLogging = false
# Name: recordTimeElapsedOfSqlRequest
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [true]
# Comment: record the time elapsed while executing a SQL request
# UpdateMode: dynamic
recordTimeElapsedOfSqlRequest = true
# Name: nodeID
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [0 0 10]
# Comment: the Node ID of the cube
# UpdateMode: dynamic
nodeID = 0
# Name: cubeDir
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [./cube]
# Comment: the root directory of the cube
# UpdateMode: dynamic
cubeDir = "./cube"
# Name: prophetEmbedEtcdJoinAddr
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [http://localhost:40000 http://127.0.0.1:40000]
# Comment: the join address of the cube's prophet
# UpdateMode: dynamic
prophetEmbedEtcdJoinAddr = "http://localhost:40000"
# Name: maxReplicas
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [1 1 1]
# Comment: the number of replicas for each resource
# UpdateMode: dynamic
maxReplicas = 1
# Name: lengthOfQueryPrinted
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [50 -1 10000]
# Comment: the length of the query printed to the console. -1: the complete string; 0: an empty string; >0: that many characters from the head of the string.
# UpdateMode: dynamic
lengthOfQueryPrinted = 50
# Cluster Configs
pre-allocated-group-num = 20
max-group-num = 0
# Logger Configs
# Log level: debug, info, warn, error, fatal.
level = "debug"
# Log format, one of json or text.
format = "console"
# Enable automatic timestamps in log output; if not set, this defaults to true.
# enable-timestamp = true
# Enable annotating logs with the full stack error message; if not set, this defaults to false.
# enable-error-stack = false
# Encoding sets the logger's encoding. Valid values are "json" and
# "console", as well as any third-party encodings registered via
# RegisterEncoder.
encoding = "console"
# Cube Configs
addr-raft = "localhost:10000"
addr-client = "localhost:20000"
dir-data = "./cube0/node"
dir-deploy = ""
version = ""
githash = ""
capacity = 0
use-memory-as-storage = false
shard-groups = 0
[replication]
max-peer-down-time = "0s"
shard-heartbeat-duration = "100ms"
store-heartbeat-duration = "1s"
shard-split-check-duration = "0s"
shard-state-check-duration = "0s"
disable-shard-split = false
allow-remove-leader = false
shard-capacity-bytes = 0
shard-split-check-bytes = 0
[snapshot]
max-concurrency-snap-chunks = 0
snap-chunk-size = 0
[raft]
enable-pre-vote = false
tick-interval = "600ms"
heartbeat-ticks = 0
election-timeout-ticks = 0
max-size-per-msg = 0
max-inflight-msgs = 0
max-entry-bytes = 314572800
send-raft-batch-size = 0
[raft.raft-log]
disable-sync = false
compact-duration = "0s"
compact-threshold = 0
max-allow-transfer-lag = 0
ForceCompactCount = 0
ForceCompactBytes = 0
CompactProtectLag = 0
[worker]
raft-apply-worker = 0
raft-msg-worker = 0
raft-event-workers = 0
[prophet]
name = "node0"
data-dir = ""
rpc-addr = "localhost:30000"
rpc-timeout = "0s"
storage-node = true
external-etcd = [""]
lease = 0
[prophet.embed-etcd]
join = ""
client-urls = "http://localhost:40000"
peer-urls = "http://localhost:50000"
advertise-client-urls = ""
advertise-peer-urls = ""
initial-cluster = ""
initial-cluster-state = ""
tick-interval = "600ms"
election-interval = "3s"
enable-prevote = false
auto-compaction-mode = ""
auto-compaction-retention = ""
quota-backend-bytes = 0
[prophet.schedule]
max-snapshot-count = 0
max-pending-peer-count = 0
max-merge-resource-size = 0
max-merge-resource-keys = 0
split-merge-interval = "0s"
enable-one-way-merge = false
enable-cross-table-merge = false
patrol-resource-interval = "0s"
max-container-down-time = "0s"
leader-schedule-limit = 0
leader-schedule-policy = ""
resource-schedule-limit = 0
replica-schedule-limit = 0
merge-schedule-limit = 0
hot-resource-schedule-limit = 0
hot-resource-cache-hits-threshold = 0
tolerant-size-ratio = 0.0
low-space-ratio = 0.0
high-space-ratio = 0.0
resource-score-formula-version = ""
scheduler-max-waiting-operator = 0
enable-remove-down-replica = false
enable-replace-offline-replica = false
enable-make-up-replica = false
enable-remove-extra-replica = false
enable-location-replacement = false
enable-debug-metrics = false
enable-joint-consensus = true
container-limit-mode = ""
[prophet.replication]
max-replicas = 3
strictly-match-label = false
enable-placement-rules = false
isolation-level = ""
[metric]
addr = ""
interval = 0
job = ""
instance = ""
# File logging.
[file]
# Log file name.
filename = ""
# Max log file size in MB (upper limit: 4096MB).
max-size = 300
# Max days to keep log files. No cleanup by default.
max-days = 0
# Maximum number of old log files to retain. No cleanup by default.
max-backups = 0
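
The header of the file above warns that port, nodeID, prophetEmbedEtcdJoinAddr, the cube addresses (addr-raft, addr-client, dir-data), and the prophet addresses must differ from node to node. A minimal sketch of how that invariant could be checked across a set of generated files, assuming the github.com/BurntSushi/toml decoder (an assumption; any TOML library would do) and limiting the check to a few top-level keys, with the nested [prophet] keys omitted for brevity:

package main

import (
	"fmt"
	"os"

	"github.com/BurntSushi/toml"
)

// mustDiffer lists top-level keys that the generated header says must be
// unique per node.
var mustDiffer = []string{"port", "nodeID", "addr-raft", "addr-client", "dir-data"}

func main() {
	seen := map[string]map[string]bool{} // key -> set of observed values
	for _, path := range os.Args[1:] {
		var cfg map[string]interface{}
		if _, err := toml.DecodeFile(path, &cfg); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		for _, k := range mustDiffer {
			v := fmt.Sprint(cfg[k])
			if seen[k] == nil {
				seen[k] = map[string]bool{}
			}
			if seen[k][v] {
				fmt.Printf("duplicate %s = %s in %s\n", k, v, path)
			}
			seen[k][v] = true
		}
	}
}

Invoked as, say, go run checkcfg.go node0.toml node1.toml (hypothetical file names), it prints a line for every value that repeats where the header says it must not.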
# Code generated by tool; DO NOT EDIT.
#
# These values must be different from each other:
# port
# nodeID
# prophetEmbedEtcdJoinAddr
#
# addr-raft
# addr-client
# dir-data
#[prophet]
# name
# rpc-addr
# external-etcd
# [prophet.embed-etcd]
# client-urls
# peer-urls
#
# Name: rootpassword
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: []
# Comment: root password
# UpdateMode: dynamic
rootpassword = ""
# Name: dumpdatabase
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [default]
# Comment: dump database name
# UpdateMode: dynamic
dumpdatabase = "default"
# Name: port
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [6001 6001 6010]
# Comment: port
# UpdateMode: dynamic
port = 6002
# Name: host
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [localhost 127.0.0.1 0.0.0.0]
# Comment: the listening IP
# UpdateMode: dynamic
host = "localhost"
# Name: sendRow
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: []
# Comment: send data rows while producing them
# UpdateMode: dynamic
sendRow = false
# Name: dumpEnv
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: []
# Comment: dump the environment with memEngine Null nodes, for testing
# UpdateMode: dynamic
dumpEnv = false
# Name: hostMmuLimitation
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: host mmu limitation. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
hostMmuLimitation = 1099511627776
# Name: guestMmuLimitation
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: guest mmu limitation. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
guestMmuLimitation = 1099511627776
# Name: mempoolMaxSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: mempool maxsize. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
mempoolMaxSize = 1099511627776
# Name: mempoolFactor
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [8]
# Comment: mempool factor. default: 8
# UpdateMode: dynamic
mempoolFactor = 8
# Name: processLimitationSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.Size. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationSize = 42949672960
# Name: processLimitationBatchRows
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.BatchRows. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationBatchRows = 42949672960
# Name: processLimitationBatchSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [0]
# Comment: process.Limitation.BatchSize. default: 0
# UpdateMode: dynamic
processLimitationBatchSize = 0
# Name: processLimitationPartitionRows
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.PartitionRows. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationPartitionRows = 42949672960
# Name: countOfRowsPerSendingToClient
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [10]
# Comment: the number of rows sent to the client at a time
# UpdateMode: dynamic
countOfRowsPerSendingToClient = 10
# Name: periodOfEpochTimer
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [5]
# Comment: the period of the epoch timer, in seconds
# UpdateMode: dynamic
periodOfEpochTimer = 5
# Name: periodOfPersistence
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the period of persistence, in seconds
# UpdateMode: dynamic
periodOfPersistence = 20
# Name: periodOfDDLDeleteTimer
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the period of the DDL delete timer, in seconds
# UpdateMode: dynamic
periodOfDDLDeleteTimer = 20
# Name: timeoutOfHeartbeat
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the heartbeat timeout, in seconds
# UpdateMode: dynamic
timeoutOfHeartbeat = 20
# Name: rejectWhenHeartbeatFromPDLeaderIsTimeout
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [false]
# Comment: default is false. The server will reject connections and SQL requests when the heartbeat from the PD leader times out.
# UpdateMode: dynamic
rejectWhenHeartbeatFromPDLeaderIsTimeout = false
# Name: enableEpochLogging
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [false]
# Comment: default is false. Print logs when the server calls the catalog service to run DDL.
# UpdateMode: dynamic
enableEpochLogging = false
# Name: recordTimeElapsedOfSqlRequest
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [true]
# Comment: record the time elapsed while executing a SQL request
# UpdateMode: dynamic
recordTimeElapsedOfSqlRequest = true
# Name: nodeID
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [0 0 10]
# Comment: the Node ID of the cube
# UpdateMode: dynamic
nodeID = 1
# Name: cubeDir
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [./cube]
# Comment: the root directory of the cube
# UpdateMode: dynamic
cubeDir = "./cube"
# Name: prophetEmbedEtcdJoinAddr
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [http://localhost:40000 http://127.0.0.1:40000]
# Comment: the join address of the cube's prophet
# UpdateMode: dynamic
prophetEmbedEtcdJoinAddr = "http://localhost:40000"
# Name: maxReplicas
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [1 1 1]
# Comment: the number of replicas for each resource
# UpdateMode: dynamic
maxReplicas = 1
# Name: lengthOfQueryPrinted
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [50 -1 10000]
# Comment: the length of the query printed to the console. -1: the complete string; 0: an empty string; >0: that many characters from the head of the string.
# UpdateMode: dynamic
lengthOfQueryPrinted = 50
# Cluster Configs
pre-allocated-group-num = 20
max-group-num = 0
# Logger Configs
# Log level: debug, info, warn, error, fatal.
level = "debug"
# Log format, one of json or text.
format = "console"
# Enable automatic timestamps in log output; if not set, this defaults to true.
# enable-timestamp = true
# Enable annotating logs with the full stack error message; if not set, this defaults to false.
# enable-error-stack = false
# Encoding sets the logger's encoding. Valid values are "json" and
# "console", as well as any third-party encodings registered via
# RegisterEncoder.
encoding = "console"
# Cube Configs
addr-raft = "localhost:10001"
addr-client = "localhost:20001"
dir-data = "./cube1/node"
dir-deploy = ""
version = ""
githash = ""
capacity = 0
use-memory-as-storage = false
shard-groups = 0
[replication]
max-peer-down-time = "0s"
shard-heartbeat-duration = "100ms"
store-heartbeat-duration = "1s"
shard-split-check-duration = "0s"
shard-state-check-duration = "0s"
disable-shard-split = false
allow-remove-leader = false
shard-capacity-bytes = 0
shard-split-check-bytes = 0
[snapshot]
max-concurrency-snap-chunks = 0
snap-chunk-size = 0
[raft]
enable-pre-vote = false
tick-interval = "600ms"
heartbeat-ticks = 0
election-timeout-ticks = 0
max-size-per-msg = 0
max-inflight-msgs = 0
max-entry-bytes = 314572800
send-raft-batch-size = 0
[raft.raft-log]
disable-sync = false
compact-duration = "0s"
compact-threshold = 0
max-allow-transfer-lag = 0
ForceCompactCount = 0
ForceCompactBytes = 0
CompactProtectLag = 0
[worker]
raft-apply-worker = 0
raft-msg-worker = 0
raft-event-workers = 0
[prophet]
name = "node1"
data-dir = ""
rpc-addr = "localhost:30001"
rpc-timeout = "0s"
storage-node = true
external-etcd = [""]
lease = 0
[prophet.embed-etcd]
join = ""
client-urls = "http://localhost:40001"
peer-urls = "http://localhost:50001"
advertise-client-urls = ""
advertise-peer-urls = ""
initial-cluster = ""
initial-cluster-state = ""
tick-interval = "600ms"
election-interval = "3s"
enable-prevote = false
auto-compaction-mode = ""
auto-compaction-retention = ""
quota-backend-bytes = 0
[prophet.schedule]
max-snapshot-count = 0
max-pending-peer-count = 0
max-merge-resource-size = 0
max-merge-resource-keys = 0
split-merge-interval = "0s"
enable-one-way-merge = false
enable-cross-table-merge = false
patrol-resource-interval = "0s"
max-container-down-time = "0s"
leader-schedule-limit = 0
leader-schedule-policy = ""
resource-schedule-limit = 0
replica-schedule-limit = 0
merge-schedule-limit = 0
hot-resource-schedule-limit = 0
hot-resource-cache-hits-threshold = 0
tolerant-size-ratio = 0.0
low-space-ratio = 0.0
high-space-ratio = 0.0
resource-score-formula-version = ""
scheduler-max-waiting-operator = 0
enable-remove-down-replica = false
enable-replace-offline-replica = false
enable-make-up-replica = false
enable-remove-extra-replica = false
enable-location-replacement = false
enable-debug-metrics = false
enable-joint-consensus = true
container-limit-mode = ""
[prophet.replication]
max-replicas = 3
strictly-match-label = false
enable-placement-rules = false
isolation-level = ""
[metric]
addr = ""
interval = 0
job = ""
instance = ""
# File logging.
[file]
# Log file name.
filename = ""
# Max log file size in MB (upper limit: 4096MB).
max-size = 300
# Max days to keep log files. No cleanup by default.
max-days = 0
# Maximum number of old log files to retain. No cleanup by default.
max-backups = 0
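
A side note on the duration-valued settings in these files (shard-heartbeat-duration = "100ms", tick-interval = "600ms", election-interval = "3s", and the many "0s" placeholders): the strings use Go's duration syntax, so a Go TOML loader would typically decode them with time.ParseDuration; "0s" likely means "fall back to the built-in default" rather than a literal zero, though that is an assumption about the loader. A quick check:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Sample values copied from the config above.
	for _, s := range []string{"100ms", "600ms", "3s", "0s"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(s, "parses to", d)
	}
}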
# Code generated by tool; DO NOT EDIT.
#
# These values must be different from each other:
# port
# nodeID
# prophetEmbedEtcdJoinAddr
#
# addr-raft
# addr-client
# dir-data
#[prophet]
# name
# rpc-addr
# external-etcd
# [prophet.embed-etcd]
# client-urls
# peer-urls
#
# Name: rootpassword
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: []
# Comment: root password
# UpdateMode: dynamic
rootpassword = ""
# Name: dumpdatabase
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [default]
# Comment: dump database name
# UpdateMode: dynamic
dumpdatabase = "default"
# Name: port
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [6001 6001 6010]
# Comment: port
# UpdateMode: dynamic
port = 6001
# Name: host
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [localhost 127.0.0.1 0.0.0.0]
# Comment: the listening IP
# UpdateMode: dynamic
host = "localhost"
# Name: sendRow
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: []
# Comment: send data rows while producing them
# UpdateMode: dynamic
sendRow = false
# Name: dumpEnv
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: []
# Comment: dump the environment with memEngine Null nodes, for testing
# UpdateMode: dynamic
dumpEnv = false
# Name: hostMmuLimitation
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: host mmu limitation. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
hostMmuLimitation = 1099511627776
# Name: guestMmuLimitation
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: guest mmu limitation. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
guestMmuLimitation = 1099511627776
# Name: mempoolMaxSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [1099511627776]
# Comment: mempool maxsize. default: 1 << 40 = 1099511627776
# UpdateMode: dynamic
mempoolMaxSize = 1099511627776
# Name: mempoolFactor
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [8]
# Comment: mempool factor. default: 8
# UpdateMode: dynamic
mempoolFactor = 8
# Name: processLimitationSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.Size. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationSize = 42949672960
# Name: processLimitationBatchRows
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.BatchRows. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationBatchRows = 42949672960
# Name: processLimitationBatchSize
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [0]
# Comment: process.Limitation.BatchSize. default: 0
# UpdateMode: dynamic
processLimitationBatchSize = 0
# Name: processLimitationPartitionRows
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [42949672960]
# Comment: process.Limitation.PartitionRows. default: 10 << 32 = 42949672960
# UpdateMode: dynamic
processLimitationPartitionRows = 42949672960
# Name: countOfRowsPerSendingToClient
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [10]
# Comment: the number of rows sent to the client at a time
# UpdateMode: dynamic
countOfRowsPerSendingToClient = 10
# Name: periodOfEpochTimer
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [5]
# Comment: the period of the epoch timer, in seconds
# UpdateMode: dynamic
periodOfEpochTimer = 5
# Name: periodOfPersistence
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the period of persistence, in seconds
# UpdateMode: dynamic
periodOfPersistence = 20
# Name: periodOfDDLDeleteTimer
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the period of the DDL delete timer, in seconds
# UpdateMode: dynamic
periodOfDDLDeleteTimer = 20
# Name: timeoutOfHeartbeat
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [20]
# Comment: the heartbeat timeout, in seconds
# UpdateMode: dynamic
timeoutOfHeartbeat = 20
# Name: rejectWhenHeartbeatFromPDLeaderIsTimeout
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [false]
# Comment: default is false. The server will reject connections and SQL requests when the heartbeat from the PD leader times out.
# UpdateMode: dynamic
rejectWhenHeartbeatFromPDLeaderIsTimeout = false
# Name: enableEpochLogging
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [false]
# Comment: default is false. Print logs when the server calls the catalog service to run DDL.
# UpdateMode: dynamic
enableEpochLogging = false
# Name: recordTimeElapsedOfSqlRequest
# Scope: [global]
# Access: [file]
# DataType: bool
# DomainType: set
# Values: [true]
# Comment: record the time elapsed while executing a SQL request
# UpdateMode: dynamic
recordTimeElapsedOfSqlRequest = true
# Name: nodeID
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [0 0 10]
# Comment: the Node ID of the cube
# UpdateMode: dynamic
nodeID = 0
# Name: cubeDirPrefix
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [./cube]
# Comment: the root directory prefix of the cube. The actual dir is cubeDirPrefix + nodeID
# UpdateMode: dynamic
cubeDirPrefix = "./cube"
# Name: prophetEmbedEtcdJoinAddr
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [http://localhost:40000 http://127.0.0.1:40000]
# Comment: the join address of the cube's prophet
# UpdateMode: dynamic
prophetEmbedEtcdJoinAddr = "http://localhost:40000"
# Name: maxReplicas
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [1 1 1]
# Comment: the number of replicas for each resource
# UpdateMode: dynamic
maxReplicas = 1
# Name: lengthOfQueryPrinted
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [50 -1 10000]
# Comment: the length of the query printed to the console. -1: the complete string; 0: an empty string; >0: that many characters from the head of the string.
# UpdateMode: dynamic
lengthOfQueryPrinted = 50
# Name: batchSizeInLoadData
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [40000]
# Comment: the number of rows per vector of a batch in load data
# UpdateMode: dynamic
batchSizeInLoadData = 40000
# Name: blockCountInLoadData
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: range
# Values: [20 2 100000]
# Comment: the number of read buffers in load data
# UpdateMode: dynamic
blockCountInLoadData = 20
# Name: blockSizeInLoadData
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [4194304]
# Comment: default is 4MB = 4194304 bytes. The size of every read buffer in load data
# UpdateMode: dynamic
blockSizeInLoadData = 4194304
# Name: loadDataParserType
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [0 1]
# Comment: default is 0. The parser type for load data: 0 - simdcsv; 1 - handwritten
# UpdateMode: dynamic
loadDataParserType = 0
# Name: loadDataConcurrencyCount
# Scope: [global]
# Access: [file]
# DataType: int64
# DomainType: set
# Values: [16]
# Comment: default is 16. The number of goroutines writing batches into the storage.
# UpdateMode: dynamic
loadDataConcurrencyCount = 16
# Name: cubeLogLevel
# Scope: [global]
# Access: [file]
# DataType: string
# DomainType: set
# Values: [error info debug warning warn fatal]
# Comment: default is error. The log level for cube.
# UpdateMode: dynamic
cubeLogLevel = "error"
# Cluster Configs
pre-allocated-group-num = 20
max-group-num = 0
# Logger Configs
level = "info" # debug, info, warn, error, fatal.
format = "console" # json, console.
# log file config
filename = "" # log file.
max-size = 512 # maximum log file size in MB.
max-days = 0 # maximum number of days to keep log files.
max-backups = 0 # maximum number of old log files to retain.
# Cube Configs
addr-raft = "localhost:10000"
addr-client = "localhost:20000"
dir-data = "./cube0/node"
dir-deploy = ""
version = ""
githash = ""
capacity = 0
use-memory-as-storage = false
shard-groups = 0
[replication]
max-peer-down-time = "0s"
shard-heartbeat-duration = "100ms"
store-heartbeat-duration = "1s"
shard-split-check-duration = "0s"
shard-state-check-duration = "0s"
disable-shard-split = false
allow-remove-leader = false
shard-capacity-bytes = 0
shard-split-check-bytes = 0
[snapshot]
max-concurrency-snap-chunks = 0
snap-chunk-size = 0
[raft]
enable-pre-vote = false
tick-interval = "600ms"
heartbeat-ticks = 0
election-timeout-ticks = 0
max-size-per-msg = 0
max-inflight-msgs = 0
max-entry-bytes = 314572800
send-raft-batch-size = 0
[raft.raft-log]
disable-sync = false
compact-duration = "0s"
compact-threshold = 0
max-allow-transfer-lag = 0
ForceCompactCount = 0
ForceCompactBytes = 0
CompactProtectLag = 0
[worker]
raft-apply-worker = 0
raft-msg-worker = 0
raft-event-workers = 0
[prophet]
name = "node0"
data-dir = ""
rpc-addr = "localhost:30000"
rpc-timeout = "0s"
storage-node = true
external-etcd = [""]
lease = 0
[prophet.embed-etcd]
join = ""
client-urls = "http://localhost:40000"
peer-urls = "http://localhost:50000"
advertise-client-urls = ""
advertise-peer-urls = ""
initial-cluster = ""
initial-cluster-state = ""
tick-interval = "0s"
election-interval = "0s"
enable-prevote = false
auto-compaction-mode = ""
auto-compaction-retention = ""
quota-backend-bytes = 0
[prophet.schedule]
max-snapshot-count = 0
max-pending-peer-count = 0
max-merge-resource-size = 0
max-merge-resource-keys = 0
split-merge-interval = "0s"
enable-one-way-merge = false
enable-cross-table-merge = false
patrol-resource-interval = "0s"
max-container-down-time = "0s"
leader-schedule-limit = 0
leader-schedule-policy = ""
resource-schedule-limit = 0
replica-schedule-limit = 0
merge-schedule-limit = 0
hot-resource-schedule-limit = 0
hot-resource-cache-hits-threshold = 0
tolerant-size-ratio = 0.0
low-space-ratio = 0.0
high-space-ratio = 0.0
resource-score-formula-version = ""
scheduler-max-waiting-operator = 0
enable-remove-down-replica = false
enable-replace-offline-replica = false
enable-make-up-replica = false
enable-remove-extra-replica = false
enable-location-replacement = false
enable-debug-metrics = false
enable-joint-consensus = true
container-limit-mode = ""
[prophet.replication]
max-replicas = 1
strictly-match-label = false
enable-placement-rules = false
isolation-level = ""
[metric]
addr = ""
interval = 0
job = ""
instance = ""
# Storage Configs
[meta.conf]
block-max-rows = 40000
segment-max-blocks = 40
[scheduler-cfg]
block-writers = 8
segment-writers = 4
[cache-cfg]
index-cache-size = 134217728 # 128M
insert-cache-size = 4294967296 # 4G
data-cache-size = 4294967296 # 4G
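
Unlike the first two files, this one sets cubeDirPrefix rather than a fixed cubeDir; its comment states that the actual directory is cubeDirPrefix + nodeID, which matches dir-data = "./cube0/node" above. For concreteness, the derivation in Go:

package main

import "fmt"

func main() {
	cubeDirPrefix := "./cube" // value from the config above
	nodeID := 0               // likewise
	// Per the generated comment: "The actual dir is cubeDirPrefix + nodeID".
	dir := fmt.Sprintf("%s%d", cubeDirPrefix, nodeID)
	fmt.Println(dir) // prints ./cube0
}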