package consensus

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/go-kit/kit/log/term"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	cstypes "github.com/tendermint/tendermint/consensus/types"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/privval"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
	dbm "github.com/tendermint/tm-cmn/db"
)

const (
	testSubscriber = "test-client"
)

// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()

// genesis, chain_id, priv_val
var config *cfg.Config // NOTE: must be reset for each _test.go file
var consensusReplayConfig *cfg.Config
var ensureTimeout = time.Millisecond * 100

func ensureDir(dir string, mode os.FileMode) {
	if err := cmn.EnsureDir(dir, mode); err != nil {
		panic(err)
	}
}

func ResetConfig(name string) *cfg.Config {
	return cfg.ResetTestRoot(name)
}

//-------------------------------------------------------------------------------
// validator stub (a kvstore consensus peer we control)

type validatorStub struct {
	Index  int // Validator index. NOTE: we don't assume validator set changes.
	Height int64
	Round  int
	types.PrivValidator
}

var testMinPower int64 = 10

func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validatorStub {
	return &validatorStub{
		Index:         valIndex,
		PrivValidator: privValidator,
	}
}

func (vs *validatorStub) signVote(voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
	addr := vs.PrivValidator.GetPubKey().Address()
	vote := &types.Vote{
		ValidatorIndex:   vs.Index,
		ValidatorAddress: addr,
		Height:           vs.Height,
		Round:            vs.Round,
		Timestamp:        tmtime.Now(),
		Type:             voteType,
		BlockID:          types.BlockID{Hash: hash, PartsHeader: header},
	}
	err := vs.PrivValidator.SignVote(config.ChainID(), vote)
	return vote, err
}

// Sign vote for type/hash/header
func signVote(vs *validatorStub, voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
	v, err := vs.signVote(voteType, hash, header)
	if err != nil {
		panic(fmt.Errorf("failed to sign vote: %v", err))
	}
	return v
}

func signVotes(voteType types.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote {
	votes := make([]*types.Vote, len(vss))
	for i, vs := range vss {
		votes[i] = signVote(vs, voteType, hash, header)
	}
	return votes
}

func incrementHeight(vss ...*validatorStub) {
	for _, vs := range vss {
		vs.Height++
	}
}

func incrementRound(vss ...*validatorStub) {
	for _, vs := range vss {
		vs.Round++
	}
}
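
// Usage sketch (illustrative; blockHash, blockParts, and vss are hypothetical
// and would come from a real proposal in a test): sign a prevote from each
// stub, then advance the stubs for the next round of signing.
//
//	votes := signVotes(types.PrevoteType, blockHash, blockParts.Header(), vss...)
//	incrementRound(vss...)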

type ValidatorStubsByAddress []*validatorStub

func (vss ValidatorStubsByAddress) Len() int {
	return len(vss)
}

func (vss ValidatorStubsByAddress) Less(i, j int) bool {
	return bytes.Compare(vss[i].GetPubKey().Address(), vss[j].GetPubKey().Address()) == -1
}

func (vss ValidatorStubsByAddress) Swap(i, j int) {
	it := vss[i]
	vss[i] = vss[j]
	vss[i].Index = i
	vss[j] = it
	vss[j].Index = j
}

//-------------------------------------------------------------------------------
// Functions for transitioning the consensus state

func startTestRound(cs *ConsensusState, height int64, round int) {
	cs.enterNewRound(height, round)
	cs.startRoutines(0)
}

// Create a proposal block from cs1 but sign it with vs.
func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round int) (proposal *types.Proposal, block *types.Block) {
	cs1.mtx.Lock()
	block, blockParts := cs1.createProposalBlock()
	validRound := cs1.ValidRound
	chainID := cs1.state.ChainID
	cs1.mtx.Unlock()
	if block == nil {
		panic("Failed to createProposalBlock. Did you forget to add a commit for the previous block?")
	}

	// Make proposal
	polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}
	proposal = types.NewProposal(height, round, polRound, propBlockID)
	if err := vs.SignProposal(chainID, proposal); err != nil {
		panic(err)
	}
	return
}

func addVotes(to *ConsensusState, votes ...*types.Vote) {
	for _, vote := range votes {
		to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{vote}}
	}
}

func signAddVotes(to *ConsensusState, voteType types.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub) {
	votes := signVotes(voteType, hash, header, vss...)
	addVotes(to, votes...)
}
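
// Usage sketch (illustrative; cs and vss are hypothetical and would come from
// randConsensusState, defined below): build a proposal signed by another
// validator, then have the remaining stubs prevote for it.
//
//	prop, block := decideProposal(cs, vss[1], cs.Height, cs.Round)
//	signAddVotes(cs, types.PrevoteType, block.Hash(), prop.BlockID.PartsHeader, vss[1:]...)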

func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *validatorStub, blockHash []byte) {
	prevotes := cs.Votes.Prevotes(round)
	address := privVal.GetPubKey().Address()
	var vote *types.Vote
	if vote = prevotes.GetByAddress(address); vote == nil {
		panic("Failed to find prevote from validator")
	}
	if blockHash == nil {
		if vote.BlockID.Hash != nil {
			panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockID.Hash))
		}
	} else {
		if !bytes.Equal(vote.BlockID.Hash, blockHash) {
			panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash))
		}
	}
}

func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorStub, blockHash []byte) {
	votes := cs.LastCommit
	address := privVal.GetPubKey().Address()
	var vote *types.Vote
	if vote = votes.GetByAddress(address); vote == nil {
		panic("Failed to find precommit from validator")
	}
	if !bytes.Equal(vote.BlockID.Hash, blockHash) {
		panic(fmt.Sprintf("Expected precommit to be for %X, got %X", blockHash, vote.BlockID.Hash))
	}
}

func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) {
	precommits := cs.Votes.Precommits(thisRound)
	address := privVal.GetPubKey().Address()
	var vote *types.Vote
	if vote = precommits.GetByAddress(address); vote == nil {
		panic("Failed to find precommit from validator")
	}

	if votedBlockHash == nil {
		if vote.BlockID.Hash != nil {
			panic("Expected precommit to be for nil")
		}
	} else {
		if !bytes.Equal(vote.BlockID.Hash, votedBlockHash) {
			panic("Expected precommit to be for proposal block")
		}
	}

	if lockedBlockHash == nil {
		if cs.LockedRound != lockRound || cs.LockedBlock != nil {
			panic(fmt.Sprintf("Expected to be locked on nil at round %d. Got locked at round %d with block %v", lockRound, cs.LockedRound, cs.LockedBlock))
		}
	} else {
		if cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash) {
			panic(fmt.Sprintf("Expected block to be locked on round %d, got %d. Got locked block %X, expected %X", lockRound, cs.LockedRound, cs.LockedBlock.Hash(), lockedBlockHash))
		}
	}
}

func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) {
	// verify the prevote
	validatePrevote(t, cs, thisRound, privVal, votedBlockHash)
	// verify precommit
	cs.mtx.Lock()
	validatePrecommit(t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash)
	cs.mtx.Unlock()
}

func subscribeToVoter(cs *ConsensusState, addr []byte) <-chan tmpubsub.Message {
	votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote)
	if err != nil {
		panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote))
	}
	ch := make(chan tmpubsub.Message)
	go func() {
		for msg := range votesSub.Out() {
			vote := msg.Data().(types.EventDataVote)
			// we only fire for our own votes
			if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
				ch <- msg
			}
		}
	}()
	return ch
}
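
// Usage sketch (illustrative; cs, addr, height, and round are hypothetical):
// watch for the state's own votes while a round is driven forward.
//
//	voteCh := subscribeToVoter(cs, addr)
//	startTestRound(cs, height, round)
//	ensurePrevote(voteCh, height, round) // ensurePrevote is defined below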

//-------------------------------------------------------------------------------
// consensus states

func newConsensusState(state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
	config := cfg.ResetTestRoot("consensus_state_test")
	return newConsensusStateWithConfig(config, state, pv, app)
}

func newConsensusStateWithConfig(thisConfig *cfg.Config, state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
	blockDB := dbm.NewMemDB()
	return newConsensusStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB)
}

func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.State, pv types.PrivValidator, app abci.Application, blockDB dbm.DB) *ConsensusState {
	// Get BlockStore
	blockStore := store.NewBlockStore(blockDB)

	// one for mempool, one for consensus
	mtx := new(sync.Mutex)
	proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
	proxyAppConnCon := abcicli.NewLocalClient(mtx, app)

	// Make Mempool
	mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
	mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
	if thisConfig.Consensus.WaitForTxs() {
		mempool.EnableTxsAvailable()
	}

	// mock the evidence pool
	evpool := sm.MockEvidencePool{}

	// Make ConsensusState
	stateDB := blockDB
	sm.SaveState(stateDB, state) // save height 1's validator info
	blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
	cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
	cs.SetLogger(log.TestingLogger().With("module", "consensus"))
	cs.SetPrivValidator(pv)

	eventBus := types.NewEventBus()
	eventBus.SetLogger(log.TestingLogger().With("module", "events"))
	eventBus.Start()
	cs.SetEventBus(eventBus)
	return cs
}
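
// Construction sketch (illustrative): the typical path through the helpers
// above pairs a random genesis state with the counter app, all backed by an
// in-memory DB.
//
//	state, privVals := randGenesisState(1, false, 10) // randGenesisState is defined below
//	cs := newConsensusState(state, privVals[0], counter.NewCounterApplication(true))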

func loadPrivValidator(config *cfg.Config) *privval.FilePV {
	privValidatorKeyFile := config.PrivValidatorKeyFile()
	ensureDir(filepath.Dir(privValidatorKeyFile), 0700)
	privValidatorStateFile := config.PrivValidatorStateFile()
	privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
	privValidator.Reset()
	return privValidator
}

func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
	// Get State
	state, privVals := randGenesisState(nValidators, false, 10)

	vss := make([]*validatorStub, nValidators)

	cs := newConsensusState(state, privVals[0], counter.NewCounterApplication(true))

	for i := 0; i < nValidators; i++ {
		vss[i] = NewValidatorStub(privVals[i], i)
	}
	// cs starts at height 1, so bring the other stubs up to match
	incrementHeight(vss[1:]...)

	return cs, vss
}
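
// Usage sketch (illustrative): a test usually grabs a state plus stubs and
// then kicks off the round explicitly.
//
//	cs, vss := randConsensusState(4)
//	startTestRound(cs, cs.Height, cs.Round)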

//-------------------------------------------------------------------------------

func ensureNoNewEvent(ch <-chan tmpubsub.Message, timeout time.Duration,
	errorMessage string) {
	select {
	case <-time.After(timeout):
		break
	case <-ch:
		panic(errorMessage)
	}
}

func ensureNoNewEventOnChannel(ch <-chan tmpubsub.Message) {
	ensureNoNewEvent(
		ch,
		ensureTimeout,
		"We should be stuck waiting, not receiving new event on the channel")
}

func ensureNoNewRoundStep(stepCh <-chan tmpubsub.Message) {
	ensureNoNewEvent(
		stepCh,
		ensureTimeout,
		"We should be stuck waiting, not receiving NewRoundStep event")
}

func ensureNoNewUnlock(unlockCh <-chan tmpubsub.Message) {
	ensureNoNewEvent(
		unlockCh,
		ensureTimeout,
		"We should be stuck waiting, not receiving Unlock event")
}

func ensureNoNewTimeout(stepCh <-chan tmpubsub.Message, timeout int64) {
	timeoutDuration := time.Duration(timeout*10) * time.Nanosecond
	ensureNoNewEvent(
		stepCh,
		timeoutDuration,
		"We should be stuck waiting, not receiving NewTimeout event")
}

func ensureNewEvent(ch <-chan tmpubsub.Message, height int64, round int, timeout time.Duration, errorMessage string) {
	select {
	case <-time.After(timeout):
		panic(errorMessage)
	case msg := <-ch:
		roundStateEvent, ok := msg.Data().(types.EventDataRoundState)
		if !ok {
			panic(fmt.Sprintf("expected an EventDataRoundState, got %T. Wrong subscription channel?",
				msg.Data()))
		}
		if roundStateEvent.Height != height {
			panic(fmt.Sprintf("expected height %v, got %v", height, roundStateEvent.Height))
		}
		if roundStateEvent.Round != round {
			panic(fmt.Sprintf("expected round %v, got %v", round, roundStateEvent.Round))
		}
		// TODO: We could also check for a step at this point!
	}
}

func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int) {
	select {
	case <-time.After(ensureTimeout):
		panic("Timeout expired while waiting for NewRound event")
	case msg := <-roundCh:
		newRoundEvent, ok := msg.Data().(types.EventDataNewRound)
		if !ok {
			panic(fmt.Sprintf("expected an EventDataNewRound, got %T. Wrong subscription channel?",
				msg.Data()))
		}
		if newRoundEvent.Height != height {
			panic(fmt.Sprintf("expected height %v, got %v", height, newRoundEvent.Height))
		}
		if newRoundEvent.Round != round {
			panic(fmt.Sprintf("expected round %v, got %v", round, newRoundEvent.Round))
		}
	}
}

func ensureNewTimeout(timeoutCh <-chan tmpubsub.Message, height int64, round int, timeout int64) {
	timeoutDuration := time.Duration(timeout*10) * time.Nanosecond
	ensureNewEvent(timeoutCh, height, round, timeoutDuration,
		"Timeout expired while waiting for NewTimeout event")
}

func ensureNewProposal(proposalCh <-chan tmpubsub.Message, height int64, round int) {
	select {
	case <-time.After(ensureTimeout):
		panic("Timeout expired while waiting for NewProposal event")
	case msg := <-proposalCh:
		proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal)
		if !ok {
			panic(fmt.Sprintf("expected an EventDataCompleteProposal, got %T. Wrong subscription channel?",
				msg.Data()))
		}
		if proposalEvent.Height != height {
			panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height))
		}
		if proposalEvent.Round != round {
			panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round))
		}
	}
}

func ensureNewValidBlock(validBlockCh <-chan tmpubsub.Message, height int64, round int) {
	ensureNewEvent(validBlockCh, height, round, ensureTimeout,
		"Timeout expired while waiting for NewValidBlock event")
}

func ensureNewBlock(blockCh <-chan tmpubsub.Message, height int64) {
	select {
	case <-time.After(ensureTimeout):
		panic("Timeout expired while waiting for NewBlock event")
	case msg := <-blockCh:
		blockEvent, ok := msg.Data().(types.EventDataNewBlock)
		if !ok {
			panic(fmt.Sprintf("expected an EventDataNewBlock, got %T. Wrong subscription channel?",
				msg.Data()))
		}
		if blockEvent.Block.Height != height {
			panic(fmt.Sprintf("expected height %v, got %v", height, blockEvent.Block.Height))
		}
	}
}

func ensureNewBlockHeader(blockCh <-chan tmpubsub.Message, height int64, blockHash cmn.HexBytes) {
	select {
	case <-time.After(ensureTimeout):
		panic("Timeout expired while waiting for NewBlockHeader event")
	case msg := <-blockCh:
		blockHeaderEvent, ok := msg.Data().(types.EventDataNewBlockHeader)
		if !ok {
			panic(fmt.Sprintf("expected an EventDataNewBlockHeader, got %T. Wrong subscription channel?",
				msg.Data()))
		}
		if blockHeaderEvent.Header.Height != height {
			panic(fmt.Sprintf("expected height %v, got %v", height, blockHeaderEvent.Header.Height))
		}
		if !bytes.Equal(blockHeaderEvent.Header.Hash(), blockHash) {
			panic(fmt.Sprintf("expected header %X, got %X", blockHash, blockHeaderEvent.Header.Hash()))
		}
	}
}

func ensureNewUnlock(unlockCh <-chan tmpubsub.Message, height int64, round int) {
	ensureNewEvent(unlockCh, height, round, ensureTimeout,
		"Timeout expired while waiting for NewUnlock event")
}

func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int, propID types.BlockID) {
	select {
	case <-time.After(ensureTimeout):
		panic("Timeout expired while waiting for NewProposal event")
	case msg := <-proposalCh:
		proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal)
		if !ok {
			panic(fmt.Sprintf("expected an EventDataCompleteProposal, got %T. Wrong subscription channel?",
				msg.Data()))
		}
		if proposalEvent.Height != height {
			panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height))
		}
		if proposalEvent.Round != round {
			panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round))
		}
		if !proposalEvent.BlockID.Equals(propID) {
			panic("Proposed block does not match expected block")
		}
	}
}

func ensurePrecommit(voteCh <-chan tmpubsub.Message, height int64, round int) {
	ensureVote(voteCh, height, round, types.PrecommitType)
}

func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int) {
	ensureVote(voteCh, height, round, types.PrevoteType)
}

func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int,
	voteType types.SignedMsgType) {
	select {
	case <-time.After(ensureTimeout):
		panic("Timeout expired while waiting for NewVote event")
	case msg := <-voteCh:
		voteEvent, ok := msg.Data().(types.EventDataVote)
		if !ok {
			panic(fmt.Sprintf("expected an EventDataVote, got %T. Wrong subscription channel?",
				msg.Data()))
		}
		vote := voteEvent.Vote
		if vote.Height != height {
			panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height))
		}
		if vote.Round != round {
			panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round))
		}
		if vote.Type != voteType {
			panic(fmt.Sprintf("expected type %v, got %v", voteType, vote.Type))
		}
	}
}

func ensureNewEventOnChannel(ch <-chan tmpubsub.Message) {
	select {
	case <-time.After(ensureTimeout):
		panic("Timeout expired while waiting for new activity on the channel")
	case <-ch:
	}
}
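
// Usage sketch (illustrative; the subscribe step is shown with a hypothetical
// helper, since real tests subscribe through cs.eventBus): the ensure* helpers
// above are meant to be paired with event-bus subscriptions.
//
//	newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)
//	startTestRound(cs, height, round)
//	ensureNewRound(newRoundCh, height, round)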

//-------------------------------------------------------------------------------
// consensus nets

// consensusLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func consensusLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	}).With("module", "consensus")
}

func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
	appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*ConsensusState, cleanupFunc) {
	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
	css := make([]*ConsensusState, nValidators)
	logger := consensusLogger()
	configRootDirs := make([]string, 0, nValidators)
	for i := 0; i < nValidators; i++ {
		stateDB := dbm.NewMemDB() // each state needs its own db
		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		configRootDirs = append(configRootDirs, thisConfig.RootDir)
		for _, opt := range configOpts {
			opt(thisConfig)
		}
		ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
		app := appFunc()
		vals := types.TM2PB.ValidatorUpdates(state.Validators)
		app.InitChain(abci.RequestInitChain{Validators: vals})

		css[i] = newConsensusStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
		css[i].SetTimeoutTicker(tickerFunc())
		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
	}
	return css, func() {
		for _, dir := range configRootDirs {
			os.RemoveAll(dir)
		}
	}
}
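
// Usage sketch (illustrative; the test name is hypothetical): callers should
// defer the returned cleanupFunc so each validator's config root is removed
// after the test.
//
//	css, cleanup := randConsensusNet(4, "consensus_net_test", newMockTickerFunc(true), newCounter)
//	defer cleanup()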

// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func(string) abci.Application) ([]*ConsensusState, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
	genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
	css := make([]*ConsensusState, nPeers)
	logger := consensusLogger()
	var peer0Config *cfg.Config
	configRootDirs := make([]string, 0, nPeers)
	for i := 0; i < nPeers; i++ {
		stateDB := dbm.NewMemDB() // each state needs its own db
		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
		configRootDirs = append(configRootDirs, thisConfig.RootDir)
		ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
		if i == 0 {
			peer0Config = thisConfig
		}
		var privVal types.PrivValidator
		if i < nValidators {
			privVal = privVals[i]
		} else {
			tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_")
			if err != nil {
				panic(err)
			}
			tempStateFile, err := ioutil.TempFile("", "priv_validator_state_")
			if err != nil {
				panic(err)
			}

			privVal = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
		}

		app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
		vals := types.TM2PB.ValidatorUpdates(state.Validators)
		if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok {
			// Simulate the handshake and receive the app version; without
			// this, the replay test fails.
			state.Version.Consensus.App = kvstore.ProtocolVersion
		}
		app.InitChain(abci.RequestInitChain{Validators: vals})
		// sm.SaveState(stateDB, state) // height 1's validator info is already saved by LoadStateFromDBOrGenesisDoc above

		css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app)
		css[i].SetTimeoutTicker(tickerFunc())
		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
	}
	return css, genDoc, peer0Config, func() {
		for _, dir := range configRootDirs {
			os.RemoveAll(dir)
		}
	}
}
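
// Usage sketch (illustrative; the test name is hypothetical): four validators
// plus one non-validator peer; newPersistentKVStoreWithPath (defined below)
// matches the appFunc(string) signature.
//
//	css, genDoc, peer0Config, cleanup := randConsensusNetWithPeers(4, 5, "consensus_peers_test", newMockTickerFunc(true), newPersistentKVStoreWithPath)
//	defer cleanup()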

func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
	for i, s := range switches {
		if peer.NodeInfo().ID() == s.NodeInfo().ID() {
			return i
		}
	}
	panic("didn't find peer in switches")
}

//-------------------------------------------------------------------------------
// genesis

func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
	validators := make([]types.GenesisValidator, numValidators)
	privValidators := make([]types.PrivValidator, numValidators)
	for i := 0; i < numValidators; i++ {
		val, privVal := types.RandValidator(randPower, minPower)
		validators[i] = types.GenesisValidator{
			PubKey: val.PubKey,
			Power:  val.VotingPower,
		}
		privValidators[i] = privVal
	}
	sort.Sort(types.PrivValidatorsByAddress(privValidators))

	return &types.GenesisDoc{
		GenesisTime: tmtime.Now(),
		ChainID:     config.ChainID(),
		Validators:  validators,
	}, privValidators
}

func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) {
	genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
	s0, _ := sm.MakeGenesisState(genDoc)
	return s0, privValidators
}

//------------------------------------
// mock ticker

func newMockTickerFunc(onlyOnce bool) func() TimeoutTicker {
	return func() TimeoutTicker {
		return &mockTicker{
			c:        make(chan timeoutInfo, 10),
			onlyOnce: onlyOnce,
		}
	}
}

// mock ticker only fires on RoundStepNewHeight
// and only once if onlyOnce=true
type mockTicker struct {
	c chan timeoutInfo

	mtx      sync.Mutex
	onlyOnce bool
	fired    bool
}

func (m *mockTicker) Start() error {
	return nil
}

func (m *mockTicker) Stop() error {
	return nil
}

func (m *mockTicker) ScheduleTimeout(ti timeoutInfo) {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	if m.onlyOnce && m.fired {
		return
	}
	if ti.Step == cstypes.RoundStepNewHeight {
		m.c <- ti
		m.fired = true
	}
}

func (m *mockTicker) Chan() <-chan timeoutInfo {
	return m.c
}

func (*mockTicker) SetLogger(log.Logger) {}
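
// Usage sketch (illustrative): installing the mock ticker keeps timeouts
// deterministic, since only the RoundStepNewHeight timeout ever fires, and at
// most once when onlyOnce is true.
//
//	cs.SetTimeoutTicker(newMockTickerFunc(true)())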

//------------------------------------

func newCounter() abci.Application {
	return counter.NewCounterApplication(true)
}

func newPersistentKVStore() abci.Application {
	dir, err := ioutil.TempDir("", "persistent-kvstore")
	if err != nil {
		panic(err)
	}
	return kvstore.NewPersistentKVStoreApplication(dir)
}

func newPersistentKVStoreWithPath(dbDir string) abci.Application {
	return kvstore.NewPersistentKVStoreApplication(dbDir)
}