cs/replay: execCommitBlock should not read from state.lastValidators (#3067)
* execCommitBlock should not read from state.lastValidators (see the sketch after this list)
* fix height 1
* fix blockchain/reactor_test
* fix consensus/mempool_test
* fix consensus/reactor_test
* fix consensus/replay_test
* add CHANGELOG
* fix consensus/reactor_test
* fix consensus/replay_test
* add a test for replay validators change
* fix mem_pool test
* fix byzantine test
* remove redundant code
* reduce validator change blocks to 6
* fix
* return peer0 config
* separate testName
* separate testName 1
* separate testName 2
* separate app db path
* separate app db path 1
* add a lock before startNet
* move the lock to reactor_test
* simulate just once
* try to find problem
* handshake: only saveState when app version changed
* update gometalinter to 3.0.0 (#3233)
  in an attempt to fix https://circleci.com/gh/tendermint/tendermint/43165
  also:
  - code is simplified by running gofmt -s .
  - remove unused vars
  - enable linters we're currently passing
  - remove deprecated linters
  (cherry picked from commit d470945503)
* gofmt code
* goimport code
* change the bool name to testValidatorsChange
* adjust receive kvstore.ProtocolVersion
* adjust receive kvstore.ProtocolVersion 1
* adjust receive kvstore.ProtocolVersion 3
* fix merge execution.go
* fix merge develop
* fix merge develop 1
* fix run cleanupFunc
* adjust code according to reviewers' opinion
* modify the func name to match the convention
* simplify "simulate a chain containing some validator change txs" 1
* test CI error
* Merge remote-tracking branch 'upstream/develop' into fixReplay 1
* fix pubsub_test
* subscribeUnbuffered vote channel
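
The headline change (first bullet above): handshake replay re-executes blocks at historical heights, so execCommitBlock can no longer take its validator set from state.lastValidators, which only describes the latest height; it has to look the validators up for the height actually being replayed (an sm.LoadValidators-style, per-height lookup). The toy below is a minimal, self-contained sketch of that idea only; the store type and replayBlock helper are invented for illustration and are not the actual execution.go patch.

package main

import "fmt"

// Toy model of the bug class fixed here: when replaying historical blocks,
// the validator set must come from the height being replayed, not from the
// latest state's LastValidators, which only matches the newest height.
type valSet string

type store struct {
	validatorsAt map[int64]valSet // what a per-height lookup (sm.LoadValidators style) provides
	lastVals     valSet           // what state.LastValidators holds: the latest height only
}

// replayBlock is a hypothetical stand-in for execCommitBlock's validator lookup.
func replayBlock(s *store, height int64) valSet {
	return s.validatorsAt[height] // height-specific, correct for every replayed block
}

func main() {
	s := &store{
		validatorsAt: map[int64]valSet{1: "valsA", 2: "valsA", 3: "valsB"},
		lastVals:     "valsB", // only right for height 3
	}
	for h := int64(1); h <= 3; h++ {
		fmt.Printf("height %d: replay with %s (state.LastValidators would always give %s)\n",
			h, replayBlock(s, h), s.lastVals)
	}
}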
@@ -14,6 +14,8 @@ import (
+	"path"

 	"github.com/go-kit/kit/log/term"

 	abcicli "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/counter"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
@@ -119,6 +121,24 @@ func incrementRound(vss ...*validatorStub) {
 	}
 }

+type ValidatorStubsByAddress []*validatorStub
+
+func (vss ValidatorStubsByAddress) Len() int {
+	return len(vss)
+}
+
+func (vss ValidatorStubsByAddress) Less(i, j int) bool {
+	return bytes.Compare(vss[i].GetPubKey().Address(), vss[j].GetPubKey().Address()) == -1
+}
+
+func (vss ValidatorStubsByAddress) Swap(i, j int) {
+	it := vss[i]
+	vss[i] = vss[j]
+	vss[i].Index = i
+	vss[j] = it
+	vss[j].Index = j
+}
+
 //-------------------------------------------------------------------------------
 // Functions for transitioning the consensus state
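
The new ValidatorStubsByAddress type implements sort.Interface so tests (for example the new replay test with validator changes) can put stubs into a deterministic, address-sorted order while keeping each stub's Index in sync. Below is a simplified, self-contained illustration of the same pattern; the stub type and addresses are invented for the example, while the real code sorts *validatorStub values by GetPubKey().Address(), presumably via sort.Sort(ValidatorStubsByAddress(vss)).

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// Simplified stand-in for the test's validatorStub: just an address and an index.
type stub struct {
	Addr  []byte
	Index int
}

// stubsByAddress mirrors ValidatorStubsByAddress: sort by address, then fix up indices.
type stubsByAddress []*stub

func (s stubsByAddress) Len() int           { return len(s) }
func (s stubsByAddress) Less(i, j int) bool { return bytes.Compare(s[i].Addr, s[j].Addr) == -1 }
func (s stubsByAddress) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
	s[i].Index = i
	s[j].Index = j
}

func main() {
	vss := []*stub{{Addr: []byte{0xBB}}, {Addr: []byte{0xAA}}, {Addr: []byte{0xCC}}}
	sort.Sort(stubsByAddress(vss))
	for _, v := range vss {
		fmt.Printf("index=%d addr=%X\n", v.Index, v.Addr)
	}
}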
@@ -228,7 +248,7 @@ func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lo
 }

 func subscribeToVoter(cs *ConsensusState, addr []byte) <-chan tmpubsub.Message {
-	votesSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote)
+	votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote)
 	if err != nil {
 		panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote))
 	}
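
Switching the vote subscription from Subscribe to SubscribeUnbuffered (the "subscribeUnbuffered vote channel" bullet) keeps the test and the event bus in lockstep: with an unbuffered subscription the publisher waits for the test to consume each vote, so assertions see every event in order rather than racing a fixed-size buffer. That is the likely motivation; the toy below illustrates the drop-versus-block trade-off with plain Go channels and is not the tmpubsub implementation.

package main

import (
	"fmt"
	"time"
)

// Toy contrast between best-effort delivery into a small buffer and
// unbuffered, lockstep delivery. Not the tmpubsub implementation.
func main() {
	buffered := make(chan int, 1)
	dropped := 0
	for i := 0; i < 3; i++ {
		select {
		case buffered <- i: // fits while the buffer has room
		default: // buffer full: the event is lost
			dropped++
		}
	}
	fmt.Println("best-effort delivery dropped", dropped, "events")

	unbuffered := make(chan int) // like an unbuffered subscription channel
	go func() {
		for i := 0; i < 3; i++ {
			unbuffered <- i // blocks until the consumer is ready
		}
		close(unbuffered)
	}()
	for v := range unbuffered {
		time.Sleep(10 * time.Millisecond) // slow consumer, but nothing is lost
		fmt.Println("received", v)
	}
}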
@@ -278,7 +298,8 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
 	evpool := sm.MockEvidencePool{}

 	// Make ConsensusState
-	stateDB := dbm.NewMemDB()
+	stateDB := blockDB
+	sm.SaveState(stateDB, state) //for save height 1's validators info
 	blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
 	cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
 	cs.SetLogger(log.TestingLogger().With("module", "consensus"))
@@ -564,7 +585,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
 		vals := types.TM2PB.ValidatorUpdates(state.Validators)
 		app.InitChain(abci.RequestInitChain{Validators: vals})

-		css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
+		css[i] = newConsensusStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
 		css[i].SetTimeoutTicker(tickerFunc())
 		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
 	}
@@ -576,12 +597,11 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
 }

 // nPeers = nValidators + nNotValidator
-func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker,
-	appFunc func() abci.Application) ([]*ConsensusState, cleanupFunc) {
-
+func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func(string) abci.Application) ([]*ConsensusState, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
 	genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
 	css := make([]*ConsensusState, nPeers)
 	logger := consensusLogger()
+	var peer0Config *cfg.Config
 	configRootDirs := make([]string, 0, nPeers)
 	for i := 0; i < nPeers; i++ {
 		stateDB := dbm.NewMemDB() // each state needs its own db
@@ -589,6 +609,9 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
 		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
 		configRootDirs = append(configRootDirs, thisConfig.RootDir)
 		ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
+		if i == 0 {
+			peer0Config = thisConfig
+		}
 		var privVal types.PrivValidator
 		if i < nValidators {
 			privVal = privVals[i]
@@ -605,15 +628,19 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
 			privVal = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
 		}

-		app := appFunc()
+		app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
 		vals := types.TM2PB.ValidatorUpdates(state.Validators)
+		if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok {
+			state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version. If don't do this, replay test will fail
+		}
 		app.InitChain(abci.RequestInitChain{Validators: vals})
+		//sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above

 		css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app)
 		css[i].SetTimeoutTicker(tickerFunc())
 		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
 	}
-	return css, func() {
+	return css, genDoc, peer0Config, func() {
 		for _, dir := range configRootDirs {
 			os.RemoveAll(dir)
 		}
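
With appFunc now taking a db dir, each simulated peer gets its own application database under the node's data directory (the "separate app db path" commits above), so persistent kvstore instances cannot clobber each other across peers or test names. A small self-contained sketch of the path scheme follows; the base directory and test name are illustrative, while the real tests use config.DBDir().

package main

import (
	"fmt"
	"path"
)

func main() {
	// Mirrors app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
	// from the hunk above: every simulated peer gets its own app db directory.
	dbDir := "/tmp/tendermint_test/data" // illustrative; the real tests use config.DBDir()
	testName := "consensus_replay_test"  // illustrative test name
	for i := 0; i < 3; i++ {
		fmt.Println(path.Join(dbDir, fmt.Sprintf("%s_%d", testName, i)))
	}
}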
@@ -719,3 +746,7 @@ func newPersistentKVStore() abci.Application {
 	}
 	return kvstore.NewPersistentKVStoreApplication(dir)
 }
+
+func newPersistentKVStoreWithPath(dbDir string) abci.Application {
+	return kvstore.NewPersistentKVStoreApplication(dbDir)
+}
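
The new newPersistentKVStoreWithPath helper is what supplies the path-aware appFunc func(string) abci.Application argument that the updated randConsensusNetWithPeers expects. The sketch below only shows how a constructor with that shape is wired up; the helper body is copied from the hunk above, the import paths match the tendermint tree this diff targets, and the /tmp path is illustrative.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
)

// The constructor shape expected by the updated test helper:
// given a per-peer db dir, return an abci.Application.
func newPersistentKVStoreWithPath(dbDir string) abci.Application {
	return kvstore.NewPersistentKVStoreApplication(dbDir)
}

func main() {
	var appFunc func(string) abci.Application = newPersistentKVStoreWithPath
	app := appFunc("/tmp/kvstore_example_db") // illustrative path
	fmt.Printf("built %T\n", app)
}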