tendermint/consensus/common_test.go
Anton Kaliaev f6539737de
new pubsub package
comment out failing consensus tests for now

rewrite rpc httpclient to use new pubsub package

import pubsub as tmpubsub, query as tmquery

make event IDs constants
EventKey -> EventTypeKey

rename EventsPubsub to PubSub

mempool does not use pubsub

rename eventsSub to pubsub

new subscribe API

fix channel size issues and consensus test bugs

refactor rpc client

add missing discardFromChan method

add mutex

rename pubsub to eventBus

remove IsRunning from WSRPCConnection interface (not needed)

add a comment in broadcastNewRoundStepsAndVotes

rename registerEventCallbacks to broadcastNewRoundStepsAndVotes

See https://dave.cheney.net/2014/03/19/channel-axioms

stop eventBuses after reactor tests

remove unnecessary Unsubscribe

return subscribe helper function

move discardFromChan to where it is used

subscribe now returns an err

this gives us the ability to refuse to subscribe if pubsub is at its max
capacity.
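For illustration, a minimal sketch of the new API, mirroring the
subscribeToVoter helper in the file below (the panic is just how these
tests surface the error):

    voteCh := make(chan interface{})
    if err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote, voteCh); err != nil {
        // pubsub refused the subscription, e.g. because it is at max capacity
        panic(fmt.Sprintf("failed to subscribe %s to %v: %v", testSubscriber, types.EventQueryVote, err))
    }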

use context to control overflow

cache queries

handle err when subscribing in replay_test

rename testClientID to testSubscriber

extract var

set channel buffer capacity to 1 in replay_file

fix byzantine_test

unsubscribe from single event, not all events
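A minimal sketch, assuming the EventBus exposes the same Unsubscribe
signature as the pubsub package it wraps:

    // drop only the vote subscription; the client's other subscriptions
    // stay intact (UnsubscribeAll would remove them all)
    if err := eventBus.Unsubscribe(context.Background(), testSubscriber, types.EventQueryVote); err != nil {
        panic(err)
    }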

refactor httpclient to return events to appropriate channels

return failing testReplayCrashBeforeWriteVote test

fix TestValidatorSetChanges

refactor code a bit

fix testReplayCrashBeforeWriteVote

add comment

fix TestValidatorSetChanges

fixes from Bucky's review

update comment [ci skip]

test TxEventBuffer

update changelog

fix TestValidatorSetChanges (2nd attempt)

only do wg.Done when no errors

benchmark event bus

create pubsub server inside NewEventBus

only expose config params (later if needed)

set buffer capacity to 0 so we are not testing cache

new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ}

This should allow subscribing to all transactions, or to a specific one
using a query: "tm.events.type = Tx AND tx.hash = '013ABF99434...'"
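A hedged sketch of such a subscription, assuming tmquery.MustParse and
string values quoted per the query grammar (XYZ is a placeholder hash,
as above):

    // subscribe to the single transaction whose tx.hash tag matches
    q := tmquery.MustParse("tm.events.type = 'Tx' AND tx.hash = 'XYZ'")
    txCh := make(chan interface{})
    if err := eventBus.Subscribe(context.Background(), testSubscriber, q, txCh); err != nil {
        panic(err)
    }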

use TimeoutCommit instead of afterPublishEventNewBlockTimeout

TimeoutCommit is the time a node waits after committing a block before
it goes into the next height. So it finishes everything from the last
block, but then waits a bit. The idea is that this gives it time to hear
more votes from other validators, strengthening the commit it includes
in the next block, and also to hear about new transactions.
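A minimal sketch of how a test can wait out this pause, assuming
TimeoutCommit is still the millisecond integer from the consensus
config of this era:

    // wait for the node's own post-commit pause instead of a hard-coded timeout
    time.Sleep(time.Duration(thisConfig.Consensus.TimeoutCommit) * time.Millisecond)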

waitForBlockWithUpdatedVals

rewrite WAL crash tests

Task:
test that we can recover from any WAL crash.

Solution:
the old tests relied on the event hub running in the same thread (we
were injecting the private validator's last signature).

for the rewrite, we considered two possible solutions: write a "fuzzy"
testing system where the WAL crashes upon receiving a new message, or
inject failures and trigger them in tests using something like
https://github.com/coreos/gofail.

remove sleep

no cs.Lock around wal.Save

test different cases (empty block, non-empty block, ...)

comments

add comments

test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks

fixes as per Bucky's last review

reset subscriptions on UnsubscribeAll

use a simple counter to track the message for which we panicked

also, set a smaller part size for all test cases
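
A hedged sketch of that counter-based crash injection; crashingWAL, WAL,
and WALMessage follow this package's naming, but the exact fields and
methods are illustrative:

    // crashingWAL wraps a real WAL and panics once it has written
    // msgIndex messages, simulating a crash at exactly that point.
    type crashingWAL struct {
        next        WAL // the WAL being wrapped
        msgIndex    int // message index to crash on
        msgsWritten int // simple counter of messages written so far
    }

    func (w *crashingWAL) Save(m WALMessage) {
        if w.msgsWritten == w.msgIndex {
            panic(fmt.Sprintf("simulated WAL crash on message %d", w.msgIndex))
        }
        w.msgsWritten++
        w.next.Save(m)
    }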
2017-10-30 00:32:22 -05:00


package consensus

import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path"
"sort"
"sync"
"testing"
"time"

"github.com/go-kit/kit/log/term"
abcicli "github.com/tendermint/abci/client"
"github.com/tendermint/abci/example/counter"
"github.com/tendermint/abci/example/dummy"
abci "github.com/tendermint/abci/types"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
cstypes "github.com/tendermint/tendermint/consensus/types"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log"
)

const (
testSubscriber = "test-client"
)
// genesis, chain_id, priv_val
var config *cfg.Config // NOTE: must be reset for each _test.go file
var ensureTimeout = time.Second * 2
func ensureDir(dir string, mode os.FileMode) {
if err := cmn.EnsureDir(dir, mode); err != nil {
panic(err)
}
}
func ResetConfig(name string) *cfg.Config {
return cfg.ResetTestRoot(name)
}

//-------------------------------------------------------------------------------
// validator stub (a dummy consensus peer we control)
type validatorStub struct {
Index int // Validator index. NOTE: we don't assume validator set changes.
Height int
Round int
types.PrivValidator
}
var testMinPower = 10
func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validatorStub {
return &validatorStub{
Index: valIndex,
PrivValidator: privValidator,
}
}
func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
vote := &types.Vote{
ValidatorIndex: vs.Index,
ValidatorAddress: vs.PrivValidator.GetAddress(),
Height: vs.Height,
Round: vs.Round,
Type: voteType,
BlockID: types.BlockID{hash, header},
}
err := vs.PrivValidator.SignVote(config.ChainID, vote)
return vote, err
}
// Sign vote for type/hash/header
func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSetHeader) *types.Vote {
v, err := vs.signVote(voteType, hash, header)
if err != nil {
panic(fmt.Errorf("failed to sign vote: %v", err))
}
return v
}
func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote {
votes := make([]*types.Vote, len(vss))
for i, vs := range vss {
votes[i] = signVote(vs, voteType, hash, header)
}
return votes
}
func incrementHeight(vss ...*validatorStub) {
for _, vs := range vss {
vs.Height += 1
}
}
func incrementRound(vss ...*validatorStub) {
for _, vs := range vss {
vs.Round += 1
}
}

//-------------------------------------------------------------------------------
// Functions for transitioning the consensus state
func startTestRound(cs *ConsensusState, height, round int) {
cs.enterNewRound(height, round)
cs.startRoutines(0)
}
// Create proposal block from cs1 but sign it with vs
func decideProposal(cs1 *ConsensusState, vs *validatorStub, height, round int) (proposal *types.Proposal, block *types.Block) {
block, blockParts := cs1.createProposalBlock()
if block == nil { // on error
panic("error creating proposal block")
}
// Make proposal
polRound, polBlockID := cs1.Votes.POLInfo()
proposal = types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID)
if err := vs.SignProposal(config.ChainID, proposal); err != nil {
panic(err)
}
return
}
func addVotes(to *ConsensusState, votes ...*types.Vote) {
for _, vote := range votes {
to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{vote}}
}
}
func signAddVotes(to *ConsensusState, voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) {
votes := signVotes(voteType, hash, header, vss...)
addVotes(to, votes...)
}
func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *validatorStub, blockHash []byte) {
prevotes := cs.Votes.Prevotes(round)
var vote *types.Vote
if vote = prevotes.GetByAddress(privVal.GetAddress()); vote == nil {
panic("Failed to find prevote from validator")
}
if blockHash == nil {
if vote.BlockID.Hash != nil {
panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockID.Hash))
}
} else {
if !bytes.Equal(vote.BlockID.Hash, blockHash) {
panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash))
}
}
}
func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorStub, blockHash []byte) {
votes := cs.LastCommit
var vote *types.Vote
if vote = votes.GetByAddress(privVal.GetAddress()); vote == nil {
panic("Failed to find precommit from validator")
}
if !bytes.Equal(vote.BlockID.Hash, blockHash) {
panic(fmt.Sprintf("Expected precommit to be for %X, got %X", blockHash, vote.BlockID.Hash))
}
}
func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) {
precommits := cs.Votes.Precommits(thisRound)
var vote *types.Vote
if vote = precommits.GetByAddress(privVal.GetAddress()); vote == nil {
panic("Failed to find precommit from validator")
}
if votedBlockHash == nil {
if vote.BlockID.Hash != nil {
panic("Expected precommit to be for nil")
}
} else {
if !bytes.Equal(vote.BlockID.Hash, votedBlockHash) {
panic("Expected precommit to be for proposal block")
}
}
if lockedBlockHash == nil {
if cs.LockedRound != lockRound || cs.LockedBlock != nil {
panic(fmt.Sprintf("Expected to be locked on nil at round %d. Got locked at round %d with block %v", lockRound, cs.LockedRound, cs.LockedBlock))
}
} else {
if cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash) {
panic(fmt.Sprintf("Expected block to be locked on round %d, got %d. Got locked block %X, expected %X", lockRound, cs.LockedRound, cs.LockedBlock.Hash(), lockedBlockHash))
}
}
}
func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) {
// verify the prevote
validatePrevote(t, cs, thisRound, privVal, votedBlockHash)
// verify precommit
cs.mtx.Lock()
validatePrecommit(t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash)
cs.mtx.Unlock()
}
// subscribeToVoter subscribes to vote events and relays only the votes cast by addr
func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
voteCh0 := make(chan interface{})
err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote, voteCh0)
if err != nil {
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote))
}
voteCh := make(chan interface{})
go func() {
for v := range voteCh0 {
vote := v.(types.TMEventData).Unwrap().(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
voteCh <- v
}
}
}()
return voteCh
}

//-------------------------------------------------------------------------------
// consensus states
func newConsensusState(state *sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
return newConsensusStateWithConfig(config, state, pv, app)
}
func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
blockDB := dbm.NewMemDB()
return newConsensusStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB)
}
func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state *sm.State, pv types.PrivValidator, app abci.Application, blockDB dbm.DB) *ConsensusState {
// Get BlockStore
blockStore := bc.NewBlockStore(blockDB)
// one for mempool, one for consensus
mtx := new(sync.Mutex)
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
}
// Make ConsensusReactor
cs := NewConsensusState(thisConfig.Consensus, state, proxyAppConnCon, blockStore, mempool)
cs.SetLogger(log.TestingLogger())
cs.SetPrivValidator(pv)
eventBus := types.NewEventBus()
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
eventBus.Start()
cs.SetEventBus(eventBus)
return cs
}
func loadPrivValidator(config *cfg.Config) *types.PrivValidatorFS {
privValidatorFile := config.PrivValidatorFile()
ensureDir(path.Dir(privValidatorFile), 0700)
privValidator := types.LoadOrGenPrivValidatorFS(privValidatorFile)
privValidator.Reset()
return privValidator
}
func fixedConsensusStateDummy(config *cfg.Config, logger log.Logger) *ConsensusState {
stateDB := dbm.NewMemDB()
state, _ := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
state.SetLogger(logger.With("module", "state"))
privValidator := loadPrivValidator(config)
cs := newConsensusState(state, privValidator, dummy.NewDummyApplication())
cs.SetLogger(logger)
return cs
}
func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
// Get State
state, privVals := randGenesisState(nValidators, false, 10)
vss := make([]*validatorStub, nValidators)
cs := newConsensusState(state, privVals[0], counter.NewCounterApplication(true))
cs.SetLogger(log.TestingLogger())
for i := 0; i < nValidators; i++ {
vss[i] = NewValidatorStub(privVals[i], i)
}
// since cs1 starts at 1
incrementHeight(vss[1:]...)
return cs, vss
}

//-------------------------------------------------------------------------------
func ensureNoNewStep(stepCh <-chan interface{}) {
timer := time.NewTimer(ensureTimeout)
select {
case <-timer.C:
break
case <-stepCh:
panic("We should be stuck waiting, not moving to the next step")
}
}
func ensureNewStep(stepCh <-chan interface{}) {
timer := time.NewTimer(ensureTimeout)
select {
case <-timer.C:
panic("We shouldnt be stuck waiting")
case <-stepCh:
break
}
}

//-------------------------------------------------------------------------------
// consensus nets
// consensusLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func consensusLogger() log.Logger {
return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
for i := 0; i < len(keyvals)-1; i += 2 {
if keyvals[i] == "validator" {
return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
}
}
return term.FgBgColor{}
})
}
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application, configOpts ...func(*cfg.Config)) []*ConsensusState {
genDoc, privVals := randGenesisDoc(nValidators, false, 10)
css := make([]*ConsensusState, nValidators)
logger := consensusLogger()
for i := 0; i < nValidators; i++ {
db := dbm.NewMemDB() // each state needs its own db
state, _ := sm.MakeGenesisState(db, genDoc)
state.SetLogger(logger.With("module", "state", "validator", i))
state.Save()
thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i))
for _, opt := range configOpts {
opt(thisConfig)
}
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.Validators(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
css[i].SetLogger(logger.With("validator", i))
css[i].SetTimeoutTicker(tickerFunc())
}
return css
}
// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
genDoc, privVals := randGenesisDoc(nValidators, false, int64(testMinPower))
css := make([]*ConsensusState, nPeers)
logger := consensusLogger()
for i := 0; i < nPeers; i++ {
db := dbm.NewMemDB() // each state needs its own db
state, _ := sm.MakeGenesisState(db, genDoc)
state.SetLogger(logger.With("module", "state", "validator", i))
state.Save()
thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
var privVal types.PrivValidator
if i < nValidators {
privVal = privVals[i]
} else {
_, tempFilePath := cmn.Tempfile("priv_validator_")
privVal = types.GenPrivValidatorFS(tempFilePath)
}
app := appFunc()
vals := types.TM2PB.Validators(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app)
css[i].SetLogger(logger.With("validator", i))
css[i].SetTimeoutTicker(tickerFunc())
}
return css
}
func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
for i, s := range switches {
if bytes.Equal(peer.NodeInfo().PubKey.Address(), s.NodeInfo().PubKey.Address()) {
return i
}
}
panic("didnt find peer in switches")
return -1
}

//-------------------------------------------------------------------------------
// genesis
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidatorFS) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]*types.PrivValidatorFS, numValidators)
for i := 0; i < numValidators; i++ {
val, privVal := types.RandValidator(randPower, minPower)
validators[i] = types.GenesisValidator{
PubKey: val.PubKey,
Power: val.VotingPower,
}
privValidators[i] = privVal
}
sort.Sort(types.PrivValidatorsByAddress(privValidators))
return &types.GenesisDoc{
GenesisTime: time.Now(),
ChainID: config.ChainID,
Validators: validators,
}, privValidators
}
func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidatorFS) {
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
db := dbm.NewMemDB()
s0, _ := sm.MakeGenesisState(db, genDoc)
s0.SetLogger(log.TestingLogger().With("module", "state"))
s0.Save()
return s0, privValidators
}

//------------------------------------
// mock ticker
func newMockTickerFunc(onlyOnce bool) func() TimeoutTicker {
return func() TimeoutTicker {
return &mockTicker{
c: make(chan timeoutInfo, 10),
onlyOnce: onlyOnce,
}
}
}
// mock ticker only fires on RoundStepNewHeight
// and only once if onlyOnce=true
type mockTicker struct {
c chan timeoutInfo
mtx sync.Mutex
onlyOnce bool
fired bool
}
func (m *mockTicker) Start() (bool, error) {
return true, nil
}
func (m *mockTicker) Stop() bool {
return true
}
func (m *mockTicker) ScheduleTimeout(ti timeoutInfo) {
m.mtx.Lock()
defer m.mtx.Unlock()
if m.onlyOnce && m.fired {
return
}
if ti.Step == cstypes.RoundStepNewHeight {
m.c <- ti
m.fired = true
}
}
func (m *mockTicker) Chan() <-chan timeoutInfo {
return m.c
}
func (mockTicker) SetLogger(log.Logger) {
}

//------------------------------------
func newCounter() abci.Application {
return counter.NewCounterApplication(true)
}
func newPersistentDummy() abci.Application {
dir, _ := ioutil.TempDir("/tmp", "persistent-dummy")
return dummy.NewPersistentDummyApplication(dir)
}