mirror of
https://github.com/fluencelabs/tendermint
synced 2025-04-25 14:52:17 +00:00
improve ResetTestRootWithChainID() concurrency safety (#3291)
* improve ResetTestRootWithChainID() concurrency safety

  Rely on ioutil.TempDir() to create test root directories and ensure
  multiple same-chain-id test cases can run in parallel.

* Update config/toml.go

  Co-Authored-By: alessio <quadrispro@ubuntu.com>

* clean up test directories after completion

  Closes: #1034

* Remove redundant EnsureDir call
* s/PanicSanity()/panic()/s
* Put create dir functionality back in ResetTestRootWithChainID
* Place test directories in OS's tempdir

  In modern UNIX and UNIX-like systems /tmp is very often mounted as tmpfs.
  This might speed test execution a bit.

* Set 0700 to a const
* rootsDirs -> configRootDirs
* Don't double remove directories
* Avoid global variables
* Fix consensus tests
* Reduce defer stack
* Address review comments
* Try to fix tests
* Update CHANGELOG_PENDING.md

  Co-Authored-By: alessio <quadrispro@ubuntu.com>

* Update consensus/common_test.go

  Co-Authored-By: alessio <quadrispro@ubuntu.com>

* Update consensus/common_test.go

  Co-Authored-By: alessio <quadrispro@ubuntu.com>
This commit is contained in:
parent
af8793c01a
commit
59cc6d36c9
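
The heart of the change is a single pattern applied across the test suites: create each test root with ioutil.TempDir() under the OS temp directory (unique per call, so same-chain-id tests can run in parallel) and hand the caller a cleanup function that removes it. A minimal Go sketch of the idea, with hypothetical names (the real implementation is ResetTestRootWithChainID in config/toml.go, shown in the diff below):

	package sketch

	import (
		"fmt"
		"io/ioutil"
		"os"
		"path/filepath"
	)

	// resetTestRoot is illustrative only. ioutil.TempDir guarantees a fresh,
	// uniquely named directory per call, which is what makes concurrent tests
	// that share a chain id safe.
	func resetTestRoot(chainID, testName string) (string, func(), error) {
		rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
		if err != nil {
			return "", nil, err
		}
		// mirror the config/ and data/ subdirs the real helper creates
		for _, sub := range []string{"config", "data"} {
			if err := os.MkdirAll(filepath.Join(rootDir, sub), 0700); err != nil {
				return "", nil, err
			}
		}
		return rootDir, func() { os.RemoveAll(rootDir) }, nil
	}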
@@ -20,6 +20,8 @@ Special thanks to external contributors on this release:
 
 ### IMPROVEMENTS:
 
+- [config] \#3291 Make config.ResetTestRootWithChainID() create concurrency-safe test directories.
+
 ### BUG FIXES:
 
 * [consensus] \#3297 Flush WAL on stop to prevent data corruption during
@@ -1,6 +1,7 @@
 package blockchain
 
 import (
+	"os"
 	"sort"
 	"testing"
 	"time"
@@ -125,6 +126,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
 
 func TestNoBlockResponse(t *testing.T) {
 	config = cfg.ResetTestRoot("blockchain_reactor_test")
+	defer os.RemoveAll(config.RootDir)
 	genDoc, privVals := randGenesisDoc(1, false, 30)
 
 	maxBlockHeight := int64(65)
@@ -184,6 +186,7 @@ func TestNoBlockResponse(t *testing.T) {
 // that seems extreme.
 func TestBadBlockStopsPeer(t *testing.T) {
 	config = cfg.ResetTestRoot("blockchain_reactor_test")
+	defer os.RemoveAll(config.RootDir)
 	genDoc, privVals := randGenesisDoc(1, false, 30)
 
 	maxBlockHeight := int64(148)
@@ -3,6 +3,7 @@ package blockchain
 import (
 	"bytes"
 	"fmt"
+	"os"
 	"runtime/debug"
 	"strings"
 	"testing"
@@ -21,13 +22,16 @@ import (
 	tmtime "github.com/tendermint/tendermint/types/time"
 )
 
+// A cleanupFunc cleans up any config / test files created for a particular test.
+type cleanupFunc func()
+
 // make a Commit with a single vote containing just the height and a timestamp
 func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
 	commitSigs := []*types.CommitSig{{Height: height, Timestamp: timestamp}}
 	return types.NewCommit(types.BlockID{}, commitSigs)
 }
 
-func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
+func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) {
 	config := cfg.ResetTestRoot("blockchain_reactor_test")
 	// blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
 	// stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
@@ -37,7 +41,7 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
 	if err != nil {
 		panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
 	}
-	return state, NewBlockStore(blockDB)
+	return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) }
 }
 
 func TestLoadBlockStoreStateJSON(t *testing.T) {
@@ -87,19 +91,32 @@ func freshBlockStore() (*BlockStore, db.DB) {
 }
 
 var (
-	state, _ = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
-
-	block       = makeBlock(1, state, new(types.Commit))
-	partSet     = block.MakePartSet(2)
-	part1       = partSet.GetPart(0)
-	part2       = partSet.GetPart(1)
-	seenCommit1 = makeTestCommit(10, tmtime.Now())
+	state       sm.State
+	block       *types.Block
+	partSet     *types.PartSet
+	part1       *types.Part
+	part2       *types.Part
+	seenCommit1 *types.Commit
 )
 
+func TestMain(m *testing.M) {
+	var cleanup cleanupFunc
+	state, _, cleanup = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
+	block = makeBlock(1, state, new(types.Commit))
+	partSet = block.MakePartSet(2)
+	part1 = partSet.GetPart(0)
+	part2 = partSet.GetPart(1)
+	seenCommit1 = makeTestCommit(10, tmtime.Now())
+	code := m.Run()
+	cleanup()
+	os.Exit(code)
+}
+
 // TODO: This test should be simplified ...
 
 func TestBlockStoreSaveLoadBlock(t *testing.T) {
-	state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
+	state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
+	defer cleanup()
 	require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
 
 	// check there are no blocks at various heights
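
The TestMain hook used above is the standard Go hook for package-level setup and teardown. Note that cleanup must be called explicitly before os.Exit: os.Exit terminates the process immediately and deferred calls never run, which is why these TestMain bodies avoid defer. The shape, reduced to a sketch (setup/teardown are illustrative names):

	func TestMain(m *testing.M) {
		teardown := setup() // create temp dirs and shared fixtures
		code := m.Run()     // run every test in the package
		teardown()          // must precede os.Exit, which skips defers
		os.Exit(code)
	}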
@@ -350,7 +367,8 @@ func TestLoadBlockMeta(t *testing.T) {
 }
 
 func TestBlockFetchAtHeight(t *testing.T) {
-	state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
+	state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
+	defer cleanup()
 	require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
 	block := makeBlock(bs.Height()+1, state, new(types.Commit))
 
@@ -3,13 +3,16 @@ package config
 import (
 	"bytes"
 	"fmt"
-	"os"
+	"io/ioutil"
 	"path/filepath"
 	"text/template"
 
 	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
+// DefaultDirPerm is the default permissions used when creating directories.
+const DefaultDirPerm = 0700
+
 var configTemplate *template.Template
 
 func init() {
@@ -24,13 +27,13 @@ func init() {
 // EnsureRoot creates the root, config, and data directories if they don't exist,
 // and panics if it fails.
 func EnsureRoot(rootDir string) {
-	if err := cmn.EnsureDir(rootDir, 0700); err != nil {
+	if err := cmn.EnsureDir(rootDir, DefaultDirPerm); err != nil {
 		cmn.PanicSanity(err.Error())
 	}
-	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
 		cmn.PanicSanity(err.Error())
 	}
-	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
 		cmn.PanicSanity(err.Error())
 	}
 
@@ -322,29 +325,17 @@ func ResetTestRoot(testName string) *Config {
 }
 
 func ResetTestRootWithChainID(testName string, chainID string) *Config {
-	rootDir := os.ExpandEnv("$HOME/.tendermint_test")
-	rootDir = filepath.Join(rootDir, testName)
-	// Remove ~/.tendermint_test_bak
-	if cmn.FileExists(rootDir + "_bak") {
-		if err := os.RemoveAll(rootDir + "_bak"); err != nil {
-			cmn.PanicSanity(err.Error())
-		}
-	}
-	// Move ~/.tendermint_test to ~/.tendermint_test_bak
-	if cmn.FileExists(rootDir) {
-		if err := os.Rename(rootDir, rootDir+"_bak"); err != nil {
-			cmn.PanicSanity(err.Error())
-		}
-	}
-	// Create new dir
-	if err := cmn.EnsureDir(rootDir, 0700); err != nil {
-		cmn.PanicSanity(err.Error())
-	}
-	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
-		cmn.PanicSanity(err.Error())
-	}
-	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
-		cmn.PanicSanity(err.Error())
-	}
+	// create a unique, concurrency-safe test directory under os.TempDir()
+	rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
+	if err != nil {
+		panic(err)
+	}
+	// ensure config and data subdirs are created
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
+		panic(err)
+	}
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
+		panic(err)
+	}
 
 	baseConfig := DefaultBaseConfig()
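
With ResetTestRootWithChainID allocating a fresh directory per call, call sites take on the responsibility of deleting it. The per-test idiom, sketched (test and root names illustrative):

	func TestSomething(t *testing.T) {
		config := cfg.ResetTestRoot("some_test") // unique dir under os.TempDir()
		defer os.RemoveAll(config.RootDir)       // delete it when the test returns
		// ... exercise config ...
	}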
@@ -48,6 +48,7 @@ func TestEnsureTestRoot(t *testing.T) {
 
 	// create root dir
 	cfg := ResetTestRoot(testName)
+	defer os.RemoveAll(cfg.RootDir)
 	rootDir := cfg.RootDir
 
 	// make sure config is set properly
@@ -13,10 +13,6 @@ import (
 	"github.com/tendermint/tendermint/types"
 )
 
-func init() {
-	config = ResetConfig("consensus_byzantine_test")
-}
-
 //----------------------------------------------
 // byzantine failures
 
@@ -29,7 +25,8 @@ func init() {
 func TestByzantine(t *testing.T) {
 	N := 4
 	logger := consensusLogger().With("test", "byzantine")
-	css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
+	css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
+	defer cleanup()
 
 	// give the byzantine validator a normal ticker
 	ticker := NewTimeoutTicker()
@@ -37,8 +37,12 @@ const (
 	testSubscriber = "test-client"
 )
 
+// A cleanupFunc cleans up any config / test files created for a particular test.
+type cleanupFunc func()
+
 // genesis, chain_id, priv_val
 var config *cfg.Config // NOTE: must be reset for each _test.go file
+var consensusReplayConfig *cfg.Config
 var ensureTimeout = time.Millisecond * 100
 
 func ensureDir(dir string, mode os.FileMode) {
@@ -248,6 +252,7 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
 // consensus states
 
 func newConsensusState(state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
+	config := cfg.ResetTestRoot("consensus_state_test")
 	return newConsensusStateWithConfig(config, state, pv, app)
 }
 
@@ -406,7 +411,7 @@ func ensureNewRound(roundCh <-chan interface{}, height int64, round int) {
 }
 
 func ensureNewTimeout(timeoutCh <-chan interface{}, height int64, round int, timeout int64) {
-	timeoutDuration := time.Duration(timeout*3) * time.Nanosecond
+	timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
 	ensureNewEvent(timeoutCh, height, round, timeoutDuration,
 		"Timeout expired while waiting for NewTimeout event")
 }
@@ -560,14 +565,17 @@ func consensusLogger() log.Logger {
 	}).With("module", "consensus")
 }
 
-func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application, configOpts ...func(*cfg.Config)) []*ConsensusState {
+func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
+	appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*ConsensusState, cleanupFunc) {
 	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
 	css := make([]*ConsensusState, nValidators)
 	logger := consensusLogger()
+	configRootDirs := make([]string, 0, nValidators)
 	for i := 0; i < nValidators; i++ {
 		stateDB := dbm.NewMemDB() // each state needs its own db
 		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
 		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		configRootDirs = append(configRootDirs, thisConfig.RootDir)
 		for _, opt := range configOpts {
 			opt(thisConfig)
 		}
@@ -580,18 +588,26 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
 		css[i].SetTimeoutTicker(tickerFunc())
 		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
 	}
-	return css
+	return css, func() {
+		for _, dir := range configRootDirs {
+			os.RemoveAll(dir)
+		}
+	}
 }
 
 // nPeers = nValidators + nNotValidator
-func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
+func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker,
+	appFunc func() abci.Application) ([]*ConsensusState, cleanupFunc) {
+
 	genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
 	css := make([]*ConsensusState, nPeers)
 	logger := consensusLogger()
+	configRootDirs := make([]string, 0, nPeers)
 	for i := 0; i < nPeers; i++ {
 		stateDB := dbm.NewMemDB() // each state needs its own db
 		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
 		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		configRootDirs = append(configRootDirs, thisConfig.RootDir)
 		ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
 		var privVal types.PrivValidator
 		if i < nValidators {
@@ -617,7 +633,11 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
 		css[i].SetTimeoutTicker(tickerFunc())
 		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
 	}
-	return css
+	return css, func() {
+		for _, dir := range configRootDirs {
+			os.RemoveAll(dir)
+		}
+	}
 }
 
 func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
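
Both constructors now pair the consensus states with a closure that deletes every per-validator config root recorded in configRootDirs, so a call site cleans up with a single defer. A sketch of the call-site pattern (test body illustrative):

	func TestSomething(t *testing.T) {
		css, cleanup := randConsensusNet(4, "some_test", newMockTickerFunc(true), newCounter)
		defer cleanup() // removes every config root the constructor created
		_ = css         // ... drive the consensus states ...
	}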
@@ -713,6 +733,9 @@ func newCounter() abci.Application {
 }
 
 func newPersistentKVStore() abci.Application {
-	dir, _ := ioutil.TempDir("/tmp", "persistent-kvstore")
+	dir, err := ioutil.TempDir("", "persistent-kvstore")
+	if err != nil {
+		panic(err)
+	}
 	return kvstore.NewPersistentKVStoreApplication(dir)
 }
@@ -3,6 +3,7 @@ package consensus
 import (
 	"encoding/binary"
 	"fmt"
+	"os"
 	"testing"
 	"time"
 
@@ -14,10 +15,6 @@ import (
 	"github.com/tendermint/tendermint/types"
 )
 
-func init() {
-	config = ResetConfig("consensus_mempool_test")
-}
-
 // for testing
 func assertMempool(txn txNotifier) sm.Mempool {
 	return txn.(sm.Mempool)
@@ -25,6 +22,7 @@ func assertMempool(txn txNotifier) sm.Mempool {
 
 func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
 	config := ResetConfig("consensus_mempool_txs_available_test")
+	defer os.RemoveAll(config.RootDir)
 	config.Consensus.CreateEmptyBlocks = false
 	state, privVals := randGenesisState(1, false, 10)
 	cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
@@ -43,6 +41,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
 
 func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
 	config := ResetConfig("consensus_mempool_txs_available_test")
+	defer os.RemoveAll(config.RootDir)
 	config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
 	state, privVals := randGenesisState(1, false, 10)
 	cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
@@ -58,6 +57,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
 
 func TestMempoolProgressInHigherRound(t *testing.T) {
 	config := ResetConfig("consensus_mempool_txs_available_test")
+	defer os.RemoveAll(config.RootDir)
 	config.Consensus.CreateEmptyBlocks = false
 	state, privVals := randGenesisState(1, false, 10)
 	cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
|
@ -27,10 +27,6 @@ import (
|
|||||||
"github.com/tendermint/tendermint/types"
|
"github.com/tendermint/tendermint/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
|
||||||
config = ResetConfig("consensus_reactor_test")
|
|
||||||
}
|
|
||||||
|
|
||||||
//----------------------------------------------
|
//----------------------------------------------
|
||||||
// in-process testnets
|
// in-process testnets
|
||||||
|
|
||||||
@@ -86,7 +82,8 @@ func stopConsensusNet(logger log.Logger, reactors []*ConsensusReactor, eventBuse
 // Ensure a testnet makes blocks
 func TestReactorBasic(t *testing.T) {
 	N := 4
-	css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	defer cleanup()
 	reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
 	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
 	// wait till everyone makes the first new block
@@ -116,6 +113,7 @@ func TestReactorWithEvidence(t *testing.T) {
 		stateDB := dbm.NewMemDB() // each state needs its own db
 		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
 		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		defer os.RemoveAll(thisConfig.RootDir)
 		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
 		app := appFunc()
 		vals := types.TM2PB.ValidatorUpdates(state.Validators)
@@ -218,10 +216,11 @@ func (m *mockEvidencePool) IsCommitted(types.Evidence) bool { return false }
 // Ensure a testnet makes blocks when there are txs
 func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
 	N := 4
-	css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
+	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
 		func(c *cfg.Config) {
 			c.Consensus.CreateEmptyBlocks = false
 		})
+	defer cleanup()
 	reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
 	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
 
@@ -239,7 +238,8 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
 // Test we record stats about votes and block parts from other peers.
 func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
 	N := 4
-	css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	defer cleanup()
 	reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
 	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
 
@@ -263,7 +263,8 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
 func TestReactorVotingPowerChange(t *testing.T) {
 	nVals := 4
 	logger := log.TestingLogger()
-	css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
+	css, cleanup := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
+	defer cleanup()
 	reactors, eventChans, eventBuses := startConsensusNet(t, css, nVals)
 	defer stopConsensusNet(logger, reactors, eventBuses)
 
@@ -324,8 +325,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
 func TestReactorValidatorSetChanges(t *testing.T) {
 	nPeers := 7
 	nVals := 4
-	css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
-
+	css, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
+	defer cleanup()
 	logger := log.TestingLogger()
 
 	reactors, eventChans, eventBuses := startConsensusNet(t, css, nPeers)
@@ -422,7 +423,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 // Check we can make blocks with skip_timeout_commit=false
 func TestReactorWithTimeoutCommit(t *testing.T) {
 	N := 4
-	css := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
+	css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
+	defer cleanup()
 	// override default SkipTimeoutCommit == true for tests
 	for i := 0; i < N; i++ {
 		css[i].config.SkipTimeoutCommit = false
@@ -8,6 +8,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path"
+	"path/filepath"
 	"runtime"
 	"testing"
 	"time"
@@ -30,10 +31,19 @@ import (
 	"github.com/tendermint/tendermint/types"
 )
 
-var consensusReplayConfig *cfg.Config
-
-func init() {
+func TestMain(m *testing.M) {
+	config = ResetConfig("consensus_reactor_test")
 	consensusReplayConfig = ResetConfig("consensus_replay_test")
+	configStateTest := ResetConfig("consensus_state_test")
+	configMempoolTest := ResetConfig("consensus_mempool_test")
+	configByzantineTest := ResetConfig("consensus_byzantine_test")
+	code := m.Run()
+	os.RemoveAll(config.RootDir)
+	os.RemoveAll(consensusReplayConfig.RootDir)
+	os.RemoveAll(configStateTest.RootDir)
+	os.RemoveAll(configMempoolTest.RootDir)
+	os.RemoveAll(configByzantineTest.RootDir)
+	os.Exit(code)
 }
 
 // These tests ensure we can always recover from failure at any part of the consensus process.
@@ -51,7 +61,8 @@ func init() {
 // and which ones we need the wal for - then we'd also be able to only flush the
 // wal writer when we need to, instead of with every message.
 
-func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
+func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
+	lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
 	logger := log.TestingLogger()
 	state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile())
 	privValidator := loadPrivValidator(consensusReplayConfig)
@@ -59,7 +70,6 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64,
 	cs.SetLogger(logger)
 
 	bytes, _ := ioutil.ReadFile(cs.config.WalFile())
-	// fmt.Printf("====== WAL: \n\r%s\n", bytes)
 	t.Logf("====== WAL: \n\r%X\n", bytes)
 
 	err := cs.Start()
@@ -110,21 +120,22 @@ func TestWALCrash(t *testing.T) {
 			3},
 	}
 
-	for _, tc := range testCases {
+	for i, tc := range testCases {
+		consensusReplayConfig := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i))
 		t.Run(tc.name, func(t *testing.T) {
-			crashWALandCheckLiveness(t, tc.initFn, tc.heightToStop)
+			crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop)
 		})
 	}
 }
 
-func crashWALandCheckLiveness(t *testing.T, initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
+func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
+	initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
 	walPaniced := make(chan error)
 	crashingWal := &crashingWAL{panicCh: walPaniced, heightToStop: heightToStop}
 
 	i := 1
 LOOP:
 	for {
-		// fmt.Printf("====== LOOP %d\n", i)
 		t.Logf("====== LOOP %d\n", i)
 
 		// create consensus state from a clean slate
@@ -142,6 +153,7 @@ LOOP:
 
 		// clean up WAL file from the previous iteration
 		walFile := cs.config.WalFile()
+		ensureDir(filepath.Dir(walFile), 0700)
 		os.Remove(walFile)
 
 		// set crashing WAL
|
|||||||
t.Logf("WAL paniced: %v", err)
|
t.Logf("WAL paniced: %v", err)
|
||||||
|
|
||||||
// make sure we can make blocks after a crash
|
// make sure we can make blocks after a crash
|
||||||
startNewConsensusStateAndWaitForBlock(t, cs.Height, blockDB, stateDB)
|
startNewConsensusStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)
|
||||||
|
|
||||||
// stop consensus state and transactions sender (initFn)
|
// stop consensus state and transactions sender (initFn)
|
||||||
cs.Stop()
|
cs.Stop()
|
||||||
@ -269,29 +281,37 @@ var modes = []uint{0, 1, 2}
|
|||||||
|
|
||||||
// Sync from scratch
|
// Sync from scratch
|
||||||
func TestHandshakeReplayAll(t *testing.T) {
|
func TestHandshakeReplayAll(t *testing.T) {
|
||||||
for _, m := range modes {
|
for i, m := range modes {
|
||||||
testHandshakeReplay(t, 0, m)
|
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
|
||||||
|
defer os.RemoveAll(config.RootDir)
|
||||||
|
testHandshakeReplay(t, config, 0, m)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sync many, not from scratch
|
// Sync many, not from scratch
|
||||||
func TestHandshakeReplaySome(t *testing.T) {
|
func TestHandshakeReplaySome(t *testing.T) {
|
||||||
for _, m := range modes {
|
for i, m := range modes {
|
||||||
testHandshakeReplay(t, 1, m)
|
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
|
||||||
|
defer os.RemoveAll(config.RootDir)
|
||||||
|
testHandshakeReplay(t, config, 1, m)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sync from lagging by one
|
// Sync from lagging by one
|
||||||
func TestHandshakeReplayOne(t *testing.T) {
|
func TestHandshakeReplayOne(t *testing.T) {
|
||||||
for _, m := range modes {
|
for i, m := range modes {
|
||||||
testHandshakeReplay(t, NUM_BLOCKS-1, m)
|
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
|
||||||
|
defer os.RemoveAll(config.RootDir)
|
||||||
|
testHandshakeReplay(t, config, NUM_BLOCKS-1, m)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sync from caught up
|
// Sync from caught up
|
||||||
func TestHandshakeReplayNone(t *testing.T) {
|
func TestHandshakeReplayNone(t *testing.T) {
|
||||||
for _, m := range modes {
|
for i, m := range modes {
|
||||||
testHandshakeReplay(t, NUM_BLOCKS, m)
|
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
|
||||||
|
defer os.RemoveAll(config.RootDir)
|
||||||
|
testHandshakeReplay(t, config, NUM_BLOCKS, m)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -311,10 +331,8 @@ func tempWALWithData(data []byte) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
|
// Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
|
||||||
func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
|
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint) {
|
||||||
config := ResetConfig("proxy_test_")
|
walBody, err := WALWithNBlocks(t, NUM_BLOCKS)
|
||||||
|
|
||||||
walBody, err := WALWithNBlocks(NUM_BLOCKS)
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
walFile := tempWALWithData(walBody)
|
walFile := tempWALWithData(walBody)
|
||||||
config.Consensus.SetWalFile(walFile)
|
config.Consensus.SetWalFile(walFile)
|
||||||
@@ -631,6 +649,7 @@ func TestInitChainUpdateValidators(t *testing.T) {
 	clientCreator := proxy.NewLocalClientCreator(app)
 
 	config := ResetConfig("proxy_test_")
+	defer os.RemoveAll(config.RootDir)
 	privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
 	stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0)
 
@@ -18,10 +18,6 @@ import (
 	"github.com/tendermint/tendermint/types"
 )
 
-func init() {
-	config = ResetConfig("consensus_state_test")
-}
-
 /*
 
 ProposeSuite
@@ -2,6 +2,7 @@ package types
 
 import (
 	"fmt"
+	"os"
 	"testing"
 
 	cfg "github.com/tendermint/tendermint/config"
@@ -11,8 +12,11 @@ import (
 
 var config *cfg.Config // NOTE: must be reset for each _test.go file
 
-func init() {
+func TestMain(m *testing.M) {
 	config = cfg.ResetTestRoot("consensus_height_vote_set_test")
+	code := m.Run()
+	os.RemoveAll(config.RootDir)
+	os.Exit(code)
 }
 
 func TestPeerCatchupRounds(t *testing.T) {
|
@ -7,7 +7,7 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -28,8 +28,9 @@ import (
 // stripped down version of node (proxy app, event bus, consensus state) with a
 // persistent kvstore application and special consensus wal instance
 // (byteBufferWAL) and waits until numBlocks are created. If the node fails to produce given numBlocks, it returns an error.
-func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) {
-	config := getConfig()
+func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
+	config := getConfig(t)
+	defer os.RemoveAll(config.RootDir)
 
 	app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator"))
 
@@ -102,11 +103,11 @@ func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) {
 }
 
 //WALWithNBlocks returns a WAL content with numBlocks.
-func WALWithNBlocks(numBlocks int) (data []byte, err error) {
+func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) {
 	var b bytes.Buffer
 	wr := bufio.NewWriter(&b)
 
-	if err := WALGenerateNBlocks(wr, numBlocks); err != nil {
+	if err := WALGenerateNBlocks(t, wr, numBlocks); err != nil {
 		return []byte{}, err
 	}
 
@@ -114,18 +115,6 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
 	return b.Bytes(), nil
 }
 
-// f**ing long, but unique for each test
-func makePathname() string {
-	// get path
-	p, err := os.Getwd()
-	if err != nil {
-		panic(err)
-	}
-	// fmt.Println(p)
-	sep := string(filepath.Separator)
-	return strings.Replace(p, sep, "_", -1)
-}
-
 func randPort() int {
 	// returns between base and base + spread
 	base, spread := 20000, 20000
@@ -140,9 +129,8 @@ func makeAddrs() (string, string, string) {
 }
 
 // getConfig returns a config for test cases
-func getConfig() *cfg.Config {
-	pathname := makePathname()
-	c := cfg.ResetTestRoot(fmt.Sprintf("%s_%d", pathname, cmn.RandInt()))
+func getConfig(t *testing.T) *cfg.Config {
+	c := cfg.ResetTestRoot(t.Name())
 
 	// and we use random ports to run in parallel
 	tm, rpc, grpc := makeAddrs()
@@ -48,7 +48,7 @@ func TestWALTruncate(t *testing.T) {
 
 	//60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), when headBuf is full, truncate content will Flush to the file.
 	//at this time, RotateFile is called, truncate content exist in each file.
-	err = WALGenerateNBlocks(wal.Group(), 60)
+	err = WALGenerateNBlocks(t, wal.Group(), 60)
 	require.NoError(t, err)
 
 	time.Sleep(1 * time.Millisecond) //wait groupCheckDuration, make sure RotateFile run
@@ -116,7 +116,7 @@ func TestWALWritePanicsIfMsgIsTooBig(t *testing.T) {
 }
 
 func TestWALSearchForEndHeight(t *testing.T) {
-	walBody, err := WALWithNBlocks(6)
+	walBody, err := WALWithNBlocks(t, 6)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -15,7 +15,8 @@ import (
 
 func TestMain(m *testing.M) {
 	app := kvstore.NewKVStoreApplication()
-	node := rpctest.StartTendermint(app)
+	node, cleanup := rpctest.StartTendermint(app)
+	defer cleanup()
 
 	code := m.Run()
 
|
|||||||
assert, require := assert.New(t), require.New(t)
|
assert, require := assert.New(t), require.New(t)
|
||||||
|
|
||||||
cfg := rpctest.GetConfig()
|
cfg := rpctest.GetConfig()
|
||||||
|
defer os.RemoveAll(cfg.RootDir)
|
||||||
rpcAddr := cfg.RPC.ListenAddress
|
rpcAddr := cfg.RPC.ListenAddress
|
||||||
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
|
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@@ -27,13 +27,15 @@ var waitForEventTimeout = 5 * time.Second
 // TODO fix tests!!
 
 func TestMain(m *testing.M) {
+	var cleanup func()
 	app := kvstore.NewKVStoreApplication()
-	node = rpctest.StartTendermint(app)
+	node, cleanup = rpctest.StartTendermint(app)
 
 	code := m.Run()
 
 	node.Stop()
 	node.Wait()
+	cleanup()
 	os.Exit(code)
 }
 
@@ -11,7 +11,8 @@ import (
 func BenchmarkReap(b *testing.B) {
 	app := kvstore.NewKVStoreApplication()
 	cc := proxy.NewLocalClientCreator(app)
-	mempool := newMempoolWithApp(cc)
+	mempool, cleanup := newMempoolWithApp(cc)
+	defer cleanup()
 
 	size := 10000
 	for i := 0; i < size; i++ {
@@ -25,7 +25,7 @@ import (
 	"github.com/tendermint/tendermint/types"
 )
 
-func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
+func newMempoolWithApp(cc proxy.ClientCreator) (*Mempool, func()) {
 	config := cfg.ResetTestRoot("mempool_test")
 
 	appConnMem, _ := cc.NewABCIClient()
@@ -36,7 +36,7 @@ func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
 	}
 	mempool := NewMempool(config.Mempool, appConnMem, 0)
 	mempool.SetLogger(log.TestingLogger())
-	return mempool
+	return mempool, func() { os.RemoveAll(config.RootDir) }
 }
 
 func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
@@ -82,7 +82,8 @@ func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs {
 func TestReapMaxBytesMaxGas(t *testing.T) {
 	app := kvstore.NewKVStoreApplication()
 	cc := proxy.NewLocalClientCreator(app)
-	mempool := newMempoolWithApp(cc)
+	mempool, cleanup := newMempoolWithApp(cc)
+	defer cleanup()
 
 	// Ensure gas calculation behaves as expected
 	checkTxs(t, mempool, 1)
@@ -130,7 +131,8 @@ func TestReapMaxBytesMaxGas(t *testing.T) {
 func TestMempoolFilters(t *testing.T) {
 	app := kvstore.NewKVStoreApplication()
 	cc := proxy.NewLocalClientCreator(app)
-	mempool := newMempoolWithApp(cc)
+	mempool, cleanup := newMempoolWithApp(cc)
+	defer cleanup()
 	emptyTxArr := []types.Tx{[]byte{}}
 
 	nopPreFilter := func(tx types.Tx) error { return nil }
@@ -168,7 +170,8 @@ func TestMempoolFilters(t *testing.T) {
 func TestMempoolUpdateAddsTxsToCache(t *testing.T) {
 	app := kvstore.NewKVStoreApplication()
 	cc := proxy.NewLocalClientCreator(app)
-	mempool := newMempoolWithApp(cc)
+	mempool, cleanup := newMempoolWithApp(cc)
+	defer cleanup()
 	mempool.Update(1, []types.Tx{[]byte{0x01}}, nil, nil)
 	err := mempool.CheckTx([]byte{0x01}, nil)
 	if assert.Error(t, err) {
@@ -179,7 +182,8 @@ func TestMempoolUpdateAddsTxsToCache(t *testing.T) {
 func TestTxsAvailable(t *testing.T) {
 	app := kvstore.NewKVStoreApplication()
 	cc := proxy.NewLocalClientCreator(app)
-	mempool := newMempoolWithApp(cc)
+	mempool, cleanup := newMempoolWithApp(cc)
+	defer cleanup()
 	mempool.EnableTxsAvailable()
 
 	timeoutMS := 500
@@ -224,7 +228,9 @@ func TestSerialReap(t *testing.T) {
 	app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"})
 	cc := proxy.NewLocalClientCreator(app)
 
-	mempool := newMempoolWithApp(cc)
+	mempool, cleanup := newMempoolWithApp(cc)
+	defer cleanup()
+
 	appConnCon, _ := cc.NewABCIClient()
 	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
 	err := appConnCon.Start()
@@ -364,6 +370,7 @@ func TestMempoolCloseWAL(t *testing.T) {
 	// 3. Create the mempool
 	wcfg := cfg.DefaultMempoolConfig()
 	wcfg.RootDir = rootDir
+	defer os.RemoveAll(wcfg.RootDir)
 	app := kvstore.NewKVStoreApplication()
 	cc := proxy.NewLocalClientCreator(app)
 	appConnMem, _ := cc.NewABCIClient()
@@ -406,7 +413,8 @@ func txMessageSize(tx types.Tx) int {
 func TestMempoolMaxMsgSize(t *testing.T) {
 	app := kvstore.NewKVStoreApplication()
 	cc := proxy.NewLocalClientCreator(app)
-	mempl := newMempoolWithApp(cc)
+	mempl, cleanup := newMempoolWithApp(cc)
+	defer cleanup()
 
 	testCases := []struct {
 		len int
@@ -49,7 +49,8 @@ func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor
 	for i := 0; i < N; i++ {
 		app := kvstore.NewKVStoreApplication()
 		cc := proxy.NewLocalClientCreator(app)
-		mempool := newMempoolWithApp(cc)
+		mempool, cleanup := newMempoolWithApp(cc)
+		defer cleanup()
 
 		reactors[i] = NewMempoolReactor(config.Mempool, mempool) // so we dont start the consensus states
 		reactors[i].SetLogger(logger.With("validator", i))
@@ -31,6 +31,7 @@ import (
 
 func TestNodeStartStop(t *testing.T) {
 	config := cfg.ResetTestRoot("node_node_test")
+	defer os.RemoveAll(config.RootDir)
 
 	// create & start node
 	n, err := DefaultNewNode(config, log.TestingLogger())
@@ -90,6 +91,7 @@ func TestSplitAndTrimEmpty(t *testing.T) {
 
 func TestNodeDelayedStart(t *testing.T) {
 	config := cfg.ResetTestRoot("node_delayed_start_test")
+	defer os.RemoveAll(config.RootDir)
 	now := tmtime.Now()
 
 	// create & start node
@@ -104,6 +106,7 @@ func TestNodeDelayedStart(t *testing.T) {
 
 func TestNodeSetAppVersion(t *testing.T) {
 	config := cfg.ResetTestRoot("node_app_version_test")
+	defer os.RemoveAll(config.RootDir)
 
 	// create & start node
 	n, err := DefaultNewNode(config, log.TestingLogger())
@@ -124,6 +127,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {
 	addr := "tcp://" + testFreeAddr(t)
 
 	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
+	defer os.RemoveAll(config.RootDir)
 	config.BaseConfig.PrivValidatorListenAddr = addr
 
 	dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey())
@@ -153,6 +157,7 @@ func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
 	addrNoPrefix := testFreeAddr(t)
 
 	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
+	defer os.RemoveAll(config.RootDir)
 	config.BaseConfig.PrivValidatorListenAddr = addrNoPrefix
 
 	_, err := DefaultNewNode(config, log.TestingLogger())
@@ -164,6 +169,7 @@ func TestNodeSetPrivValIPC(t *testing.T) {
 	defer os.Remove(tmpfile) // clean up
 
 	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
+	defer os.RemoveAll(config.RootDir)
 	config.BaseConfig.PrivValidatorListenAddr = "unix://" + tmpfile
 
 	dialer := privval.DialUnixFn(tmpfile)
@@ -200,6 +206,7 @@ func testFreeAddr(t *testing.T) string {
 // mempool and evidence pool and validate it.
 func TestCreateProposalBlock(t *testing.T) {
 	config := cfg.ResetTestRoot("node_create_proposal")
+	defer os.RemoveAll(config.RootDir)
 	cc := proxy.NewLocalClientCreator(kvstore.NewKVStoreApplication())
 	proxyApp := proxy.NewAppConns(cc)
 	err := proxyApp.Start()
@@ -13,12 +13,14 @@ var node *nm.Node
 
 func TestMain(m *testing.M) {
 	// start a tendermint node (and kvstore) in the background to test against
+	var cleanup func()
 	app := kvstore.NewKVStoreApplication()
-	node = rpctest.StartTendermint(app)
+	node, cleanup = rpctest.StartTendermint(app)
 	code := m.Run()
 
 	// and shut down proper at the end
 	node.Stop()
 	node.Wait()
+	cleanup()
 	os.Exit(code)
 }
@@ -15,12 +15,13 @@ import (
 func TestMain(m *testing.M) {
 	// start a tendermint node in the background to test against
 	app := kvstore.NewKVStoreApplication()
-	node := rpctest.StartTendermint(app)
+	node, cleanup := rpctest.StartTendermint(app)
 	code := m.Run()
 
 	// and shut down proper at the end
 	node.Stop()
 	node.Wait()
+	cleanup()
 	os.Exit(code)
 }
 
|
@ -100,8 +100,8 @@ func GetGRPCClient() core_grpc.BroadcastAPIClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// StartTendermint starts a test tendermint server in a go routine and returns when it is initialized
|
// StartTendermint starts a test tendermint server in a go routine and returns when it is initialized
|
||||||
func StartTendermint(app abci.Application) *nm.Node {
|
func StartTendermint(app abci.Application) (*nm.Node, func()) {
|
||||||
node := NewTendermint(app)
|
node, cleanup := NewTendermint(app)
|
||||||
err := node.Start()
|
err := node.Start()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
@@ -113,11 +113,11 @@ func StartTendermint(app abci.Application) *nm.Node {
 
 	fmt.Println("Tendermint running!")
 
-	return node
+	return node, cleanup
 }
 
 // NewTendermint creates a new tendermint server and sleeps forever
-func NewTendermint(app abci.Application) *nm.Node {
+func NewTendermint(app abci.Application) (*nm.Node, func()) {
 	// Create & start node
 	config := GetConfig()
 	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
@@ -138,5 +138,5 @@ func NewTendermint(app abci.Application) *nm.Node {
 	if err != nil {
 		panic(err)
 	}
-	return node
+	return node, func() { os.RemoveAll(config.RootDir) }
 }
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"math"
 	"math/big"
+	"os"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -28,7 +29,7 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, State) {
 	state, err := LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
 	assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile")
 
-	tearDown := func(t *testing.T) {}
+	tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) }
 
 	return tearDown, stateDB, state
 }
@@ -802,10 +803,10 @@ func TestLargeGenesisValidator(t *testing.T) {
 func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
 	const valSetSize = 2
 	tearDown, stateDB, state := setupTestCase(t)
+	defer tearDown(t)
 	state.Validators = genValSet(valSetSize)
 	state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
 	SaveState(stateDB, state)
-	defer tearDown(t)
 
 	nextHeight := state.LastBlockHeight + 1
 
@@ -825,11 +826,11 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
 func TestManyValidatorChangesSaveLoad(t *testing.T) {
 	const valSetSize = 7
 	tearDown, stateDB, state := setupTestCase(t)
+	defer tearDown(t)
 	require.Equal(t, int64(0), state.LastBlockHeight)
 	state.Validators = genValSet(valSetSize)
 	state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
 	SaveState(stateDB, state)
-	defer tearDown(t)
 
 	_, valOld := state.Validators.GetByIndex(0)
 	var pubkeyOld = valOld.PubKey