Mirror of https://github.com/fluencelabs/tendermint (synced 2025-06-14 13:51:21 +00:00)
start eventBus & indexerService before replay and use them while replaying blocks (#3194)
so that if we did not index the last block (because of a panic or something else), we index it during replay. Closes #3186
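Roughly, the change makes the node create and start the event bus and the indexer service before the ABCI handshake, and hands the bus to the Handshaker so that blocks replayed during the handshake publish events like any live block. Below is a minimal sketch of that ordering; the helper function name and its placement are illustrative (not part of the commit), logger setup and the txIndexer selection shown in the node/node.go hunk further down are omitted, and the imports assume the tendermint package layout of this era.

package node // illustrative placement; the real wiring lives in NewNode (node/node.go)

import (
	cs "github.com/tendermint/tendermint/consensus"
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/state/txindex"
	"github.com/tendermint/tendermint/types"
)

// startBusIndexerAndHandshake (hypothetical name) shows the ordering this change enforces:
// event bus first, then indexer service, then the handshake that may replay blocks.
func startBusIndexerAndHandshake(
	stateDB dbm.DB, state sm.State, blockStore sm.BlockStore, genDoc *types.GenesisDoc,
	txIndexer txindex.TxIndexer, proxyApp proxy.AppConns,
) error {
	eventBus := types.NewEventBus()
	if err := eventBus.Start(); err != nil {
		return err
	}
	// The indexer service subscribes to tx events on the bus, so it must be
	// running before any block (including a replayed one) is executed.
	indexerService := txindex.NewIndexerService(txIndexer, eventBus)
	if err := indexerService.Start(); err != nil {
		return err
	}
	handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
	handshaker.SetEventBus(eventBus) // without this, replay publishes to types.NopEventBus{}
	return handshaker.Handshake(proxyApp)
}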
@@ -21,3 +21,4 @@ Special thanks to external contributors on this release:
 ### IMPROVEMENTS:
 
 ### BUG FIXES:
+- [node] \#3186 EventBus and indexerService should be started before first block (for replay last block on handshake) execution
@@ -196,6 +196,7 @@ type Handshaker struct {
     stateDB      dbm.DB
     initialState sm.State
     store        sm.BlockStore
+    eventBus     types.BlockEventPublisher
     genDoc       *types.GenesisDoc
     logger       log.Logger
 
@@ -209,6 +210,7 @@ func NewHandshaker(stateDB dbm.DB, state sm.State,
         stateDB:      stateDB,
         initialState: state,
         store:        store,
+        eventBus:     types.NopEventBus{},
         genDoc:       genDoc,
         logger:       log.NewNopLogger(),
         nBlocks:      0,
@@ -219,6 +221,12 @@ func (h *Handshaker) SetLogger(l log.Logger) {
     h.logger = l
 }
 
+// SetEventBus - sets the event bus for publishing block related events.
+// If not called, it defaults to types.NopEventBus.
+func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) {
+    h.eventBus = eventBus
+}
+
 func (h *Handshaker) NBlocks() int {
     return h.nBlocks
 }
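Callers that never invoke the new setter keep the old behaviour: events from replayed blocks are published into types.NopEventBus{} and silently dropped. A short usage note (the surrounding variables are assumed from the other hunks; this is not additional code from the commit):

// Without the setter (old behaviour): replay publishes to types.NopEventBus{}.
h := NewHandshaker(stateDB, state, blockStore, genDoc)

// With the setter (new behaviour): replayed blocks publish to the real bus,
// so subscribers such as the indexer service see them.
h.SetEventBus(eventBus)
err := h.Handshake(proxyApp)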
@@ -432,6 +440,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
     meta := h.store.LoadBlockMeta(height)
 
     blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, sm.MockMempool{}, sm.MockEvidencePool{})
+    blockExec.SetEventBus(h.eventBus)
 
     var err error
     state, err = blockExec.ApplyBlock(state, meta.BlockID, block)
@@ -326,17 +326,18 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
         cmn.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err))
     }
 
-    handshaker := NewHandshaker(stateDB, state, blockStore, gdoc)
-    err = handshaker.Handshake(proxyApp)
-    if err != nil {
-        cmn.Exit(fmt.Sprintf("Error on handshake: %v", err))
-    }
-
     eventBus := types.NewEventBus()
     if err := eventBus.Start(); err != nil {
         cmn.Exit(fmt.Sprintf("Failed to start event bus: %v", err))
     }
 
+    handshaker := NewHandshaker(stateDB, state, blockStore, gdoc)
+    handshaker.SetEventBus(eventBus)
+    err = handshaker.Handshake(proxyApp)
+    if err != nil {
+        cmn.Exit(fmt.Sprintf("Error on handshake: %v", err))
+    }
+
     mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{}
     blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
 
node/node.go (73 lines changed)
@@ -217,11 +217,51 @@ func NewNode(config *cfg.Config,
         return nil, fmt.Errorf("Error starting proxy app connections: %v", err)
     }
 
+    // EventBus and IndexerService must be started before the handshake because
+    // we might need to index the txs of the replayed block as this might not have happened
+    // when the node stopped last time (i.e. the node stopped after it saved the block
+    // but before it indexed the txs, or, endblocker panicked)
+    eventBus := types.NewEventBus()
+    eventBus.SetLogger(logger.With("module", "events"))
+
+    err = eventBus.Start()
+    if err != nil {
+        return nil, err
+    }
+
+    // Transaction indexing
+    var txIndexer txindex.TxIndexer
+    switch config.TxIndex.Indexer {
+    case "kv":
+        store, err := dbProvider(&DBContext{"tx_index", config})
+        if err != nil {
+            return nil, err
+        }
+        if config.TxIndex.IndexTags != "" {
+            txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " ")))
+        } else if config.TxIndex.IndexAllTags {
+            txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
+        } else {
+            txIndexer = kv.NewTxIndex(store)
+        }
+    default:
+        txIndexer = &null.TxIndex{}
+    }
+
+    indexerService := txindex.NewIndexerService(txIndexer, eventBus)
+    indexerService.SetLogger(logger.With("module", "txindex"))
+
+    err = indexerService.Start()
+    if err != nil {
+        return nil, err
+    }
+
     // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
     // and replays any blocks as necessary to sync tendermint with the app.
     consensusLogger := logger.With("module", "consensus")
     handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
     handshaker.SetLogger(consensusLogger)
+    handshaker.SetEventBus(eventBus)
     if err := handshaker.Handshake(proxyApp); err != nil {
         return nil, fmt.Errorf("Error during handshake: %v", err)
     }
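The comment in this hunk describes the window the change closes: the node can persist a block and then stop (a crash, or an EndBlock panic) before the indexer ever sees its txs; on restart the handshake replays that block, and because the bus and the indexer service are already running, the txs get indexed. An illustrative way to confirm this after startup uses the indexer's Get lookup; the hash variable is assumed to be the hash of a tx from the replayed block, and this check is not part of the commit:

// hash is the hash of a transaction from the block that was replayed (assumed).
res, err := txIndexer.Get(hash)
if err == nil && res != nil {
	logger.Info("tx from replayed block is indexed", "height", res.Height, "index", res.Index)
}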
@@ -343,35 +383,10 @@ func NewNode(config *cfg.Config,
     consensusReactor := cs.NewConsensusReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics))
     consensusReactor.SetLogger(consensusLogger)
 
-    eventBus := types.NewEventBus()
-    eventBus.SetLogger(logger.With("module", "events"))
-
     // services which will be publishing and/or subscribing for messages (events)
     // consensusReactor will set it on consensusState and blockExecutor
     consensusReactor.SetEventBus(eventBus)
 
-    // Transaction indexing
-    var txIndexer txindex.TxIndexer
-    switch config.TxIndex.Indexer {
-    case "kv":
-        store, err := dbProvider(&DBContext{"tx_index", config})
-        if err != nil {
-            return nil, err
-        }
-        if config.TxIndex.IndexTags != "" {
-            txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " ")))
-        } else if config.TxIndex.IndexAllTags {
-            txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
-        } else {
-            txIndexer = kv.NewTxIndex(store)
-        }
-    default:
-        txIndexer = &null.TxIndex{}
-    }
-
-    indexerService := txindex.NewIndexerService(txIndexer, eventBus)
-    indexerService.SetLogger(logger.With("module", "txindex"))
-
     p2pLogger := logger.With("module", "p2p")
     nodeInfo, err := makeNodeInfo(
         config,
@@ -534,11 +549,6 @@ func (n *Node) OnStart() error {
         time.Sleep(genTime.Sub(now))
     }
 
-    err := n.eventBus.Start()
-    if err != nil {
-        return err
-    }
-
     // Add private IDs to addrbook to block those peers being added
     n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))
 
@@ -582,8 +592,7 @@ func (n *Node) OnStart() error {
         }
     }
 
-    // start tx indexer
-    return n.indexerService.Start()
+    return nil
 }
 
 // OnStop stops the Node. It implements cmn.Service.
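With both services started in NewNode, OnStart has nothing event-bus related left to do and simply returns nil. A sanity check one could add is sketched below; it is purely illustrative (not in the commit) and assumes the node struct's eventBus and indexerService fields plus the IsRunning helper inherited from cmn.BaseService:

// Illustrative invariant check near the end of (*Node).OnStart.
if !n.eventBus.IsRunning() || !n.indexerService.IsRunning() {
	return fmt.Errorf("eventBus and indexerService are expected to be started in NewNode")
}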
@@ -49,8 +49,7 @@ func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption {
 
 // NewBlockExecutor returns a new BlockExecutor with a NopEventBus.
 // Call SetEventBus to provide one.
-func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus,
-    mempool Mempool, evpool EvidencePool, options ...BlockExecutorOption) *BlockExecutor {
+func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, mempool Mempool, evpool EvidencePool, options ...BlockExecutorOption) *BlockExecutor {
     res := &BlockExecutor{
         db:       db,
         proxyApp: proxyApp,
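As the doc comment above states, the constructor leaves the executor with a NopEventBus until SetEventBus is called, which is exactly what replayBlock now does. A usage sketch based on the signature shown, with the surrounding variables assumed from context:

// Inside the state package, so no sm. prefix is needed.
blockExec := NewBlockExecutor(stateDB, logger, proxyApp.Consensus(), MockMempool{}, MockEvidencePool{})
blockExec.SetEventBus(eventBus) // otherwise block/tx events from ApplyBlock go to the default NopEventBus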
@@ -309,8 +309,8 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
 
     state, stateDB := state(1, 1)
 
-    blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(),
-        MockMempool{}, MockEvidencePool{})
+    blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), MockMempool{}, MockEvidencePool{})
     eventBus := types.NewEventBus()
     err = eventBus.Start()
     require.NoError(t, err)