Commit aa227e55 authored by lni, committed by GitHub

logservice: added config files for 3 nodes log service (#4264)

Parent: ba2c84b2
# Bootstrap Example
The following assumes you are in MO's source directory.
Build mo-service:
```
make service
```
Start 3 mo-service instances configured as Log Service nodes, each in its own terminal on the same machine.
```
./mo-service -cfg etc/bootstrap-example/log-node-1.toml
```
```
./mo-service -cfg etc/bootstrap-example/log-node-2.toml
```
```
./mo-service -cfg etc/bootstrap-example/log-node-3.toml
```
It takes several seconds to start each process on macOS, as the SSD is slow when invoking fsync(). After about 10-20 seconds, you should see a minimal MO cluster made up of just 3 Log Service instances. Each instance hosts a HAKeeper replica and a Log Shard replica.

You should see plenty of log output in your terminals. A tag such as "[00001:62146]" means Shard 1, ReplicaID 62146. Each terminal should show log messages related to replicas from both Shard 0 (the HAKeeper shard) and Shard 1 (the regular Log shard).
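Purely as an illustration of that tag format (this helper is not part of the MO codebase; the name parseReplicaTag is made up here), a few lines of Go can split such a tag into its shard and replica IDs:
```
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseReplicaTag splits a log tag such as "[00001:62146]" into its
// shard ID and replica ID. Illustrative only, not MatrixOne code.
func parseReplicaTag(tag string) (shardID, replicaID uint64, err error) {
	trimmed := strings.Trim(tag, "[]")
	parts := strings.Split(trimmed, ":")
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("unexpected tag: %q", tag)
	}
	if shardID, err = strconv.ParseUint(parts[0], 10, 64); err != nil {
		return 0, 0, err
	}
	if replicaID, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
		return 0, 0, err
	}
	return shardID, replicaID, nil
}

func main() {
	shard, replica, _ := parseReplicaTag("[00001:62146]")
	fmt.Println(shard, replica) // prints: 1 62146
}
```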
## etc/bootstrap-example/log-node-1.toml
```
# service node type, [DN|CN|LOG]
service-type = "LOG"

[log]
level = "debug"
format = "json"
max-size = 512

[logservice]
deployment-id = 1
uuid = "7c4dccb4-4d3c-41f8-b482-5251dc7a41bf"
data-dir = "node-1-data"
raft-address = "127.0.0.1:32000"
logservice-address = "127.0.0.1:32001"
gossip-address = "127.0.0.1:32002"
gossip-seed-addresses = "127.0.0.1:32002;127.0.0.1:32012;127.0.0.1:32022"

[logservice.BootstrapConfig]
bootstrap-cluster = true
num-of-log-shards = 1
num-of-dn-shards = 1
num-of-log-shard-replicas = 3
init-hakeeper-members = "131072:7c4dccb4-4d3c-41f8-b482-5251dc7a41bf;131073:8c4dccb4-4d3c-41f8-b482-5251dc7a41bf;131074:9c4dccb4-4d3c-41f8-b482-5251dc7a41bf"
hakeeper-replica-id = 131072

[logservice.HAKeeperClientConfig]
hakeeper-service-addresses = "127.0.0.1:32001"
```
## etc/bootstrap-example/log-node-2.toml
```
# service node type, [DN|CN|LOG]
service-type = "LOG"

[log]
level = "debug"
format = "json"
max-size = 512

[logservice]
deployment-id = 1
uuid = "8c4dccb4-4d3c-41f8-b482-5251dc7a41bf"
data-dir = "node-2-data"
raft-address = "127.0.0.1:32010"
logservice-address = "127.0.0.1:32011"
gossip-address = "127.0.0.1:32012"
gossip-seed-addresses = "127.0.0.1:32002;127.0.0.1:32012;127.0.0.1:32022"

[logservice.BootstrapConfig]
bootstrap-cluster = true
num-of-log-shards = 1
num-of-dn-shards = 1
num-of-log-shard-replicas = 3
init-hakeeper-members = "131072:7c4dccb4-4d3c-41f8-b482-5251dc7a41bf;131073:8c4dccb4-4d3c-41f8-b482-5251dc7a41bf;131074:9c4dccb4-4d3c-41f8-b482-5251dc7a41bf"
hakeeper-replica-id = 131073

[logservice.HAKeeperClientConfig]
hakeeper-service-addresses = "127.0.0.1:32001"
```
## etc/bootstrap-example/log-node-3.toml
```
# service node type, [DN|CN|LOG]
service-type = "LOG"

[log]
level = "debug"
format = "json"
max-size = 512

[logservice]
deployment-id = 1
data-dir = "node-3-data"
uuid = "9c4dccb4-4d3c-41f8-b482-5251dc7a41bf"
raft-address = "127.0.0.1:32020"
logservice-address = "127.0.0.1:32021"
gossip-address = "127.0.0.1:32022"
gossip-seed-addresses = "127.0.0.1:32002;127.0.0.1:32012;127.0.0.1:32022"

[logservice.BootstrapConfig]
bootstrap-cluster = true
num-of-log-shards = 1
num-of-dn-shards = 1
num-of-log-shard-replicas = 3
init-hakeeper-members = "131072:7c4dccb4-4d3c-41f8-b482-5251dc7a41bf;131073:8c4dccb4-4d3c-41f8-b482-5251dc7a41bf;131074:9c4dccb4-4d3c-41f8-b482-5251dc7a41bf"
hakeeper-replica-id = 131074

[logservice.HAKeeperClientConfig]
hakeeper-service-addresses = "127.0.0.1:32001"
```
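The init-hakeeper-members setting is shared verbatim by all three files: it maps the initial HAKeeper replica IDs 131072, 131073 and 131074 to the UUIDs of the three nodes, and each node's own hakeeper-replica-id selects one entry from that list. The following sketch is illustrative only (parseInitHAKeeperMembers is not a MatrixOne function); it just shows how the semicolon- and colon-separated format decodes:
```
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseInitHAKeeperMembers decodes a string of the form
// "replicaID:uuid;replicaID:uuid;..." as used by init-hakeeper-members
// above. Illustrative only.
func parseInitHAKeeperMembers(s string) (map[uint64]string, error) {
	members := make(map[uint64]string)
	for _, entry := range strings.Split(s, ";") {
		parts := strings.SplitN(entry, ":", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("malformed entry: %q", entry)
		}
		id, err := strconv.ParseUint(parts[0], 10, 64)
		if err != nil {
			return nil, err
		}
		members[id] = parts[1]
	}
	return members, nil
}

func main() {
	m, err := parseInitHAKeeperMembers(
		"131072:7c4dccb4-4d3c-41f8-b482-5251dc7a41bf;" +
			"131073:8c4dccb4-4d3c-41f8-b482-5251dc7a41bf;" +
			"131074:9c4dccb4-4d3c-41f8-b482-5251dc7a41bf")
	if err != nil {
		panic(err)
	}
	fmt.Println(m[131072]) // 7c4dccb4-4d3c-41f8-b482-5251dc7a41bf
}
```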
## Code changes
```
@@ -189,7 +189,6 @@ func (s *stateMachine) handleUpdateCommandsCmd(cmd []byte) sm.Result {
 	if err := b.Unmarshal(data); err != nil {
 		panic(err)
 	}
-	plog.Infof("incoming term: %d, rsm term: %d", b.Term, s.state.Term)
 	if s.state.Term > b.Term {
 		return sm.Result{}
 	}
```
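The hunk above only removes a debug log line; the surrounding logic is the interesting part: handleUpdateCommandsCmd drops any command whose term is older than the state machine's current term. Below is a minimal, self-contained sketch of that guard using hypothetical types rather than MO's actual sm.Result and state machine (updating the stored term afterwards is an assumption made for the example):
```
package main

import "fmt"

// command is a stand-in for the unmarshalled update-commands payload;
// only the Term field matters for the guard shown here.
type command struct {
	Term uint64
}

// stateMachine is a hypothetical replicated state machine that tracks
// the highest term it has applied so far.
type stateMachine struct {
	term uint64
}

// apply ignores commands carrying a term older than the state machine's
// current term, mirroring the `if s.state.Term > b.Term` check above.
func (s *stateMachine) apply(c command) bool {
	if s.term > c.Term {
		return false // stale command, ignored
	}
	s.term = c.Term
	return true
}

func main() {
	sm := &stateMachine{term: 5}
	fmt.Println(sm.apply(command{Term: 4})) // false: stale
	fmt.Println(sm.apply(command{Term: 6})) // true: applied
}
```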
```
@@ -17,6 +17,8 @@ package logservice
 import (
 	"context"
 	"time"
+
+	"github.com/lni/dragonboat/v4"
 )

 func (s *Service) BootstrapHAKeeper(ctx context.Context, cfg Config) error {
@@ -26,9 +28,14 @@ func (s *Service) BootstrapHAKeeper(ctx context.Context, cfg Config) error {
 	}
 	replicaID := cfg.BootstrapConfig.HAKeeperReplicaID
 	if err := s.store.startHAKeeperReplica(replicaID, members, false); err != nil {
+		// let's be a little bit less strict, when HAKeeper replica is already
+		// running as a result of store.startReplicas(), we just ignore the
+		// dragonboat.ErrShardAlreadyExist error below.
+		if err != dragonboat.ErrShardAlreadyExist {
 			plog.Errorf("failed to start hakeeper replica, %v", err)
 			return err
 		}
+	}
 	numOfLogShards := cfg.BootstrapConfig.NumOfLogShards
 	numOfDNShards := cfg.BootstrapConfig.NumOfDNShards
 	numOfLogReplicas := cfg.BootstrapConfig.NumOfLogShardReplicas
@@ -38,7 +45,6 @@ func (s *Service) BootstrapHAKeeper(ctx context.Context, cfg Config) error {
 		return nil
 	default:
 	}
-	plog.Infof("trying to set initial cluster info")
 	if err := s.store.setInitialClusterInfo(numOfLogShards,
 		numOfDNShards, numOfLogReplicas); err != nil {
 		plog.Errorf("failed to set initial cluster info, %v", err)
```
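The change to BootstrapHAKeeper makes starting the HAKeeper replica idempotent: when the replica has already been launched by store.startReplicas(), dragonboat.ErrShardAlreadyExist is treated as benign instead of aborting bootstrap. A generic sketch of that pattern is shown below; startReplica, ensureReplica and errShardAlreadyExist are hypothetical stand-ins, not MO or dragonboat APIs (the real code compares with != while errors.Is is used here as the more general idiom):
```
package main

import (
	"errors"
	"fmt"
)

// errShardAlreadyExist stands in for dragonboat.ErrShardAlreadyExist.
var errShardAlreadyExist = errors.New("shard already exist")

// startReplica is a hypothetical starter that fails when the replica
// is already running.
func startReplica(started map[uint64]bool, replicaID uint64) error {
	if started[replicaID] {
		return errShardAlreadyExist
	}
	started[replicaID] = true
	return nil
}

// ensureReplica treats "already exist" as success, mirroring the
// bootstrap change above.
func ensureReplica(started map[uint64]bool, replicaID uint64) error {
	if err := startReplica(started, replicaID); err != nil {
		if !errors.Is(err, errShardAlreadyExist) {
			return err
		}
	}
	return nil
}

func main() {
	started := map[uint64]bool{131072: true}
	fmt.Println(ensureReplica(started, 131072)) // <nil>: already running is fine
	fmt.Println(ensureReplica(started, 131073)) // <nil>: freshly started
}
```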