Mirror of https://github.com/fluencelabs/tendermint, synced 2025-04-25 06:42:16 +00:00
* use READ lock/unlock in ConsensusState#GetLastHeight

Refs #2721

* do not use defers when there's no need

* fix peer formatting (output its address instead of the pointer)

```
[54310]: E[11-02|11:59:39.851] Connection failed @ sendRoutine module=p2p peer=0xb78f00 conn=MConn{74.207.236.148:26656} err="pong timeout"
```

https://github.com/tendermint/tendermint/issues/2721#issuecomment-435326581

* panic if peer has no state

https://github.com/tendermint/tendermint/issues/2721#issuecomment-435347165

It's confusing that sometimes we check whether a peer has a state, but most of the time we expect it to be there:

1. add79700b5/mempool/reactor.go (L138)
2. add79700b5/rpc/core/consensus.go (L196)

(edited) I will change everything to always assume the peer has a state and panic otherwise. That should help identify issues earlier.

* abci/localclient: extend lock on app callback

The app callback should be protected by the lock as well (note this was already done for InitChainAsync; why not for the others?). Otherwise, when we execute the block, a tx might come in and call the callback at the same time we're updating it in execBlockOnProxyApp => DATA RACE

Fixes #2721

Consensus state is locked:

```
goroutine 113333 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00180009c, 0xc0000c7e00)
	/usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*RWMutex).RLock(0xc001800090)
	/usr/local/go/src/sync/rwmutex.go:50 +0x4e
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).GetRoundState(0xc001800000, 0x0)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:218 +0x46
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusReactor).queryMaj23Routine(0xc0017def80, 0x11104a0, 0xc0072488f0, 0xc0072489c0)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/reactor.go:735 +0x16d
created by github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusReactor).AddPeer
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/reactor.go:172 +0x236
```

because localClient is locked:

```
goroutine 1899 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00003363c, 0xc0000cb500)
	/usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*Mutex).Lock(0xc000033638)
	/usr/local/go/src/sync/mutex.go:134 +0xff
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client.(*localClient).SetResponseCallback(0xc0001fb560, 0xc007868540)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client/local_client.go:32 +0x33
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy.(*appConnConsensus).SetResponseCallback(0xc00002f750, 0xc007868540)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy/app_conn.go:57 +0x40
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state.execBlockOnProxyApp(0x1104e20, 0xc002ca0ba0, 0x11092a0, 0xc00002f750, 0xc0001fe960, 0xc000bfc660, 0x110cfe0, 0xc000090330, 0xc9d12, 0xc000d9d5a0, ...)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state/execution.go:230 +0x1fd
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state.(*BlockExecutor).ApplyBlock(0xc002c2a230, 0x7, 0x0, 0xc000eae880, 0x6, 0xc002e52c60, 0x16, 0x1f927, 0xc9d12, 0xc000d9d5a0, ...)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state/execution.go:96 +0x142
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).finalizeCommit(0xc001800000, 0x1f928)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1339 +0xa3e
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).tryFinalizeCommit(0xc001800000, 0x1f928)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1270 +0x451
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit.func1(0xc001800000, 0x0, 0x1f928)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1218 +0x90
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit(0xc001800000, 0x1f928, 0x0)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1247 +0x6b8
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote(0xc001800000, 0xc003d8dea0, 0xc000cf4cc0, 0x28, 0xf1, 0xc003bc7ad0, 0xc003bc7b10)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1659 +0xbad
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote(0xc001800000, 0xc003d8dea0, 0xc000cf4cc0, 0x28, 0xf1, 0xf1, 0xf1)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1517 +0x59
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg(0xc001800000, 0xd98200, 0xc0070dbed0, 0xc000cf4cc0, 0x28)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:660 +0x64b
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine(0xc001800000, 0x0)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:617 +0x670
created by github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).OnStart
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:311 +0x132
```

A tx comes in and CheckTx is executed right when we execute the block:

```
goroutine 111044 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00003363c, 0x0)
	/usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*Mutex).Lock(0xc000033638)
	/usr/local/go/src/sync/mutex.go:134 +0xff
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client.(*localClient).CheckTxAsync(0xc0001fb0e0, 0xc002d94500, 0x13f, 0x280, 0x0)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client/local_client.go:85 +0x47
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy.(*appConnMempool).CheckTxAsync(0xc00002f720, 0xc002d94500, 0x13f, 0x280, 0x1)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy/app_conn.go:114 +0x51
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/mempool.(*Mempool).CheckTx(0xc002d3a320, 0xc002d94500, 0x13f, 0x280, 0xc0072355f0, 0x0, 0x0)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/mempool/mempool.go:316 +0x17b
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/core.BroadcastTxSync(0xc002d94500, 0x13f, 0x280, 0x0, 0x0, 0x0)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go:93 +0xb8
reflect.Value.call(0xd85560, 0x10326c0, 0x13, 0xec7b8b, 0x4, 0xc00663f180, 0x1, 0x1, 0xc00663f180, 0xc00663f188, ...)
	/usr/local/go/src/reflect/value.go:447 +0x449
reflect.Value.Call(0xd85560, 0x10326c0, 0x13, 0xc00663f180, 0x1, 0x1, 0x0, 0x0, 0xc005cc9344)
	/usr/local/go/src/reflect/value.go:308 +0xa4
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.makeHTTPHandler.func2(0x1102060, 0xc00663f100, 0xc0082d7900)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go:269 +0x188
net/http.HandlerFunc.ServeHTTP(0xc002c81f20, 0x1102060, 0xc00663f100, 0xc0082d7900)
	/usr/local/go/src/net/http/server.go:1964 +0x44
net/http.(*ServeMux).ServeHTTP(0xc002c81b60, 0x1102060, 0xc00663f100, 0xc0082d7900)
	/usr/local/go/src/net/http/server.go:2361 +0x127
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.maxBytesHandler.ServeHTTP(0x10f8a40, 0xc002c81b60, 0xf4240, 0x1102060, 0xc00663f100, 0xc0082d7900)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:219 +0xcf
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.RecoverAndLogHandler.func1(0x1103220, 0xc00121e620, 0xc0082d7900)
	/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:192 +0x394
net/http.HandlerFunc.ServeHTTP(0xc002c06ea0, 0x1103220, 0xc00121e620, 0xc0082d7900)
	/usr/local/go/src/net/http/server.go:1964 +0x44
net/http.serverHandler.ServeHTTP(0xc001a1aa90, 0x1103220, 0xc00121e620, 0xc0082d7900)
	/usr/local/go/src/net/http/server.go:2741 +0xab
net/http.(*conn).serve(0xc00785a3c0, 0x11041a0, 0xc000f844c0)
	/usr/local/go/src/net/http/server.go:1847 +0x646
created by net/http.(*Server).Serve
	/usr/local/go/src/net/http/server.go:2851 +0x2f5
```

* consensus: use read lock in Receive#VoteMessage

* use defer to unlock mutex because application might panic

* use defer in every method of the localClient

* add a changelog entry

* drain channels before Unsubscribe(All)

Read 55362ed766/libs/pubsub/pubsub.go (L13) for the detailed explanation of the issue. We'll need to fix it someday. Make sure to keep an eye on https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md

* retry instead of panic when peer has no state in reactors other than consensus

In the /dump_consensus_state RPC endpoint, skip a peer with no state.

* rpc/core/mempool: simplify error messages

* rpc/core/mempool: use time.After instead of timer

Also, do not log the DeliverTx result (to be consistent with other methods).

* unlock before calling the callback in reqRes#SetCallback
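
The race described above is avoided by having every localClient method take the same mutex and release it with defer, so the app callback can never be read or swapped while another ABCI call holds the client. Below is a minimal sketch of that pattern, assuming a heavily simplified client type: the method names mirror the real ones, but the bodies and signatures are illustrative only, not the actual abci/client/local_client.go code.

```go
// Sketch of the "one mutex, always deferred" locking pattern from the commit
// message above. Simplified and hypothetical; not the real localClient.
package main

import (
	"fmt"
	"sync"
)

type localClient struct {
	mtx sync.Mutex
	cb  func(req, res string)
}

// SetResponseCallback swaps the callback while holding the lock.
func (c *localClient) SetResponseCallback(cb func(req, res string)) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.cb = cb
}

// CheckTxAsync holds the same lock while it invokes the callback, so it cannot
// interleave with SetResponseCallback (the DATA RACE described above).
func (c *localClient) CheckTxAsync(tx string) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	if c.cb != nil {
		c.cb("CheckTx:"+tx, "code=0")
	}
}

func main() {
	cli := &localClient{}
	cli.SetResponseCallback(func(req, res string) { fmt.Println(req, res) })
	cli.CheckTxAsync("tx1")
}
```

Using defer rather than explicit Unlock calls also keeps the client usable if the application panics inside the callback, which is the other locking bullet in the commit message.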
349 lines | 8.8 KiB | Go
package consensus

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"

	"github.com/pkg/errors"

	bc "github.com/tendermint/tendermint/blockchain"
	cfg "github.com/tendermint/tendermint/config"
	cmn "github.com/tendermint/tendermint/libs/common"
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

const (
	// event bus subscriber
	subscriber = "replay-file"
)

//--------------------------------------------------------
// replay messages interactively or all at once

// replay the wal file
func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) {
	consensusState := newConsensusStateForReplay(config, csConfig)

	if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil {
		cmn.Exit(fmt.Sprintf("Error during consensus replay: %v", err))
	}
}
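
// Hypothetical usage (not part of this file): a replay-console style command
// could call RunReplayFile with console=true, e.g.
//
//	c := cfg.DefaultConfig()
//	RunReplayFile(c.BaseConfig, c.Consensus, true)
//
// where cfg.DefaultConfig comes from the tendermint config package; the real
// command wiring lives elsewhere in the repository.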

// Replay msgs in file or start the console
func (cs *ConsensusState) ReplayFile(file string, console bool) error {

	if cs.IsRunning() {
		return errors.New("cs is already running, cannot replay")
	}
	if cs.wal != nil {
		return errors.New("cs wal is open, cannot replay")
	}

	cs.startForReplay()

	// ensure all new step events are regenerated as expected
	newStepCh := make(chan interface{}, 1)

	ctx := context.Background()
	err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh)
	if err != nil {
		return errors.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)
	}
	defer func() {
		// drain newStepCh to make sure we don't block
	LOOP:
		for {
			select {
			case <-newStepCh:
			default:
				break LOOP
			}
		}
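		// Per the commit message above, channels must be drained before
		// Unsubscribe(All); see the pubsub.go comment it references for why a
		// pending send on a full channel could otherwise block us here.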
		cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
	}()

	// just open the file for reading, no need to use wal
	fp, err := os.OpenFile(file, os.O_RDONLY, 0600)
	if err != nil {
		return err
	}

	pb := newPlayback(file, fp, cs, cs.state.Copy())
	defer pb.fp.Close() // nolint: errcheck

	var nextN int // apply N msgs in a row
	var msg *TimedWALMessage
	for {
		if nextN == 0 && console {
			nextN = pb.replayConsoleLoop()
		}

		msg, err = pb.dec.Decode()
		if err == io.EOF {
			return nil
		} else if err != nil {
			return err
		}

		if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil {
			return err
		}

		if nextN > 0 {
			nextN--
		}
		pb.count++
	}
	return nil
}

//------------------------------------------------
// playback manager

type playback struct {
	cs *ConsensusState

	fp    *os.File
	dec   *WALDecoder
	count int // how many lines/msgs into the file are we

	// replays can be reset to beginning
	fileName     string   // so we can close/reopen the file
	genesisState sm.State // so the replay session knows where to restart from
}

func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState sm.State) *playback {
	return &playback{
		cs:           cs,
		fp:           fp,
		fileName:     fileName,
		genesisState: genState,
		dec:          NewWALDecoder(fp),
	}
}

// go back count steps by resetting the state and running (pb.count - count) steps
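// For example, with pb.count == 10, replayReset(3, ch) rebuilds the consensus
// state from genesis and replays the first 7 messages, i.e. it goes back 3 steps.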
func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
	pb.cs.Stop()
	pb.cs.Wait()

	newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec,
		pb.cs.blockStore, pb.cs.mempool, pb.cs.evpool)
	newCS.SetEventBus(pb.cs.eventBus)
	newCS.startForReplay()

	if err := pb.fp.Close(); err != nil {
		return err
	}
	fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0600)
	if err != nil {
		return err
	}
	pb.fp = fp
	pb.dec = NewWALDecoder(fp)
	count = pb.count - count
	fmt.Printf("Resetting from %d to %d\n", pb.count, count)
	pb.count = 0
	pb.cs = newCS
	var msg *TimedWALMessage
	for i := 0; i < count; i++ {
		msg, err = pb.dec.Decode()
		if err == io.EOF {
			return nil
		} else if err != nil {
			return err
		}
		if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil {
			return err
		}
		pb.count++
	}
	return nil
}

func (cs *ConsensusState) startForReplay() {
	cs.Logger.Error("Replay commands are disabled until someone updates them and writes tests")
	/* TODO:!
	// since we replay tocks we just ignore ticks
	go func() {
		for {
			select {
			case <-cs.tickChan:
			case <-cs.Quit:
				return
			}
		}
	}()*/
}

// console function for parsing input and running commands
func (pb *playback) replayConsoleLoop() int {
	for {
		fmt.Printf("> ")
		bufReader := bufio.NewReader(os.Stdin)
		line, more, err := bufReader.ReadLine()
		if more {
			cmn.Exit("input is too long")
		} else if err != nil {
			cmn.Exit(err.Error())
		}

		tokens := strings.Split(string(line), " ")
		if len(tokens) == 0 {
			continue
		}

		switch tokens[0] {
		case "next":
			// "next" -> replay next message
			// "next N" -> replay next N messages

			if len(tokens) == 1 {
				return 0
			}
			i, err := strconv.Atoi(tokens[1])
			if err != nil {
				fmt.Println("next takes an integer argument")
			} else {
				return i
			}

		case "back":
			// "back" -> go back one message
			// "back N" -> go back N messages

			// NOTE: "back" is not supported in the state machine design,
			// so we restart and replay up to the desired point

			ctx := context.Background()
			// ensure all new step events are regenerated as expected
			newStepCh := make(chan interface{}, 1)

			err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh)
			if err != nil {
				cmn.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep))
			}
			defer func() {
				// drain newStepCh to make sure we don't block
			LOOP:
				for {
					select {
					case <-newStepCh:
					default:
						break LOOP
					}
				}
				pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
			}()

			if len(tokens) == 1 {
				if err := pb.replayReset(1, newStepCh); err != nil {
					pb.cs.Logger.Error("Replay reset error", "err", err)
				}
			} else {
				i, err := strconv.Atoi(tokens[1])
				if err != nil {
					fmt.Println("back takes an integer argument")
				} else if i > pb.count {
					fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count)
				} else {
					if err := pb.replayReset(i, newStepCh); err != nil {
						pb.cs.Logger.Error("Replay reset error", "err", err)
					}
				}
			}

		case "rs":
			// "rs" -> print entire round state
			// "rs short" -> print height/round/step
			// "rs <field>" -> print another field of the round state

			rs := pb.cs.RoundState
			if len(tokens) == 1 {
				fmt.Println(rs)
			} else {
				switch tokens[1] {
				case "short":
					fmt.Printf("%v/%v/%v\n", rs.Height, rs.Round, rs.Step)
				case "validators":
					fmt.Println(rs.Validators)
				case "proposal":
					fmt.Println(rs.Proposal)
				case "proposal_block":
					fmt.Printf("%v %v\n", rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort())
				case "locked_round":
					fmt.Println(rs.LockedRound)
				case "locked_block":
					fmt.Printf("%v %v\n", rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort())
				case "votes":
					fmt.Println(rs.Votes.StringIndented(" "))

				default:
					fmt.Println("Unknown option", tokens[1])
				}
			}
		case "n":
			fmt.Println(pb.count)
		}
	}
	return 0
}
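
// An illustrative console session (all commands are handled by the switch in
// replayConsoleLoop above):
//
//	> next       replay the next message
//	> next 5     replay the next five messages
//	> back 2     restart from genesis and replay to two steps before here
//	> rs short   print height/round/step
//	> n          print how many messages have been replayed so far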

//--------------------------------------------------------------------------------

// convenience for replay mode
func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *ConsensusState {
	dbType := dbm.DBBackendType(config.DBBackend)
	// Get BlockStore
	blockStoreDB := dbm.NewDB("blockstore", dbType, config.DBDir())
	blockStore := bc.NewBlockStore(blockStoreDB)

	// Get State
	stateDB := dbm.NewDB("state", dbType, config.DBDir())
	gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
	if err != nil {
		cmn.Exit(err.Error())
	}
	state, err := sm.MakeGenesisState(gdoc)
	if err != nil {
		cmn.Exit(err.Error())
	}

	// Create proxyAppConn connection (consensus, mempool, query)
	clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
	proxyApp := proxy.NewAppConns(clientCreator)
	err = proxyApp.Start()
	if err != nil {
		cmn.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err))
	}
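
	// The handshake brings the app's state up to date with the block store
	// before the WAL replay begins (assumed behavior of Handshaker, described
	// here for orientation only).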
	handshaker := NewHandshaker(stateDB, state, blockStore, gdoc)
	err = handshaker.Handshake(proxyApp)
	if err != nil {
		cmn.Exit(fmt.Sprintf("Error on handshake: %v", err))
	}

	eventBus := types.NewEventBus()
	if err := eventBus.Start(); err != nil {
		cmn.Exit(fmt.Sprintf("Failed to start event bus: %v", err))
	}

	mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{}
	blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)

	consensusState := NewConsensusState(csConfig, state.Copy(), blockExec,
		blockStore, mempool, evpool)

	consensusState.SetEventBus(eventBus)
	return consensusState
}