tendermint/node/node_test.go
Ethan Buchman 45b70ae031
fix non deterministic test failures and race in privval socket (#3258)
* node: decrease retry conn timeout in test

Should fix #3256

The retry timeout was set to the default, which is the same as the
accept timeout, so it's no wonder this would fail. Here we decrease the
retry timeout so we can retry many times before the accept timeout expires.
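
A minimal sketch of the timing relationship (the numbers and variable names below are illustrative only, not the actual privval settings):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative values: if the dialer retries at the same interval as
	// the listener's accept timeout, at most one attempt fits in the
	// accept window. A smaller retry interval fits many attempts.
	acceptTimeout := 100 * time.Millisecond
	retryTimeout := 10 * time.Millisecond
	fmt.Printf("attempts per accept window: ~%d\n", int(acceptTimeout/retryTimeout))
}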

* p2p: increase handshake timeout in test

This fails sometimes, presumably because the handshake timeout is so low
(only 50ms). So increase it to 1s. Should fix #3187

* privval: fix race with ping. closes #3237

Pings happen in a go-routine and can happen concurrently with other
messages. Since we use a request/response protocol, we expect to send a
request and get back the corresponding response. But with pings
happening concurrently, this assumption could be violated. We were using
a mutex, but only an RWMutex, where the RLock was being held for sending
messages - this was to allow the underlying connection to be replaced if
it fails. Turns out we actually need to use a full lock (not just a read
lock) to prevent multiple requests from happening concurrently.
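
A sketch of the locking change being described (the type and method below are hypothetical stand-ins, not the actual privval API):

package sketch

import (
	"net"
	"sync"
)

// signerConn guards a shared request/response connection.
type signerConn struct {
	mtx  sync.Mutex // was sync.RWMutex, with RLock held around writeRead
	conn net.Conn
}

// writeRead sends one request and reads its response. Under the old
// RWMutex, a ping and a sign request could both hold the RLock at once,
// interleave their writes, and each read the other's response. A full
// Lock makes the whole exchange atomic; replacing conn after a failure
// takes the same lock, so exclusion is preserved.
func (s *signerConn) writeRead(req []byte) ([]byte, error) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if _, err := s.conn.Write(req); err != nil {
		return nil, err
	}
	buf := make([]byte, 1024)
	n, err := s.conn.Read(buf)
	return buf[:n], err
}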

* node: fix test name. DelayedStop -> DelayedStart

* autofile: Wait() method

In TestWALTruncate in consensus/wal_test.go we remove the WAL
directory at the end of the test. However, wal.Stop() does not
properly wait for the autofile group to finish shutting down, so it
was possible that the group's go-routine was still running when the
cleanup happened, causing a panic since the directory had disappeared.
Here we add a Wait() method to properly wait until the go-routine exits
so we can safely clean up. This fixes #2852.
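
The shape of that fix, sketched under assumptions (the real implementation lives in libs/autofile; names here are stand-ins):

package sketch

import "time"

// group stands in for the autofile group: a background goroutine plus
// a done channel that is closed only once the goroutine has exited.
type group struct {
	quit chan struct{}
	done chan struct{}
}

func newGroup() *group {
	g := &group{quit: make(chan struct{}), done: make(chan struct{})}
	go g.run()
	return g
}

func (g *group) run() {
	defer close(g.done) // signal that shutdown is fully complete
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-g.quit:
			return
		case <-ticker.C:
			// flush / rotate files here
		}
	}
}

func (g *group) Stop() { close(g.quit) }

// Wait blocks until the goroutine has exited; after Stop(); Wait()
// returns, it is safe to remove the WAL directory.
func (g *group) Wait() { <-g.done }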
2019-02-06 10:24:43 -05:00

305 lines
7.8 KiB
Go

package node

import (
	"context"
	"fmt"
	"net"
	"os"
	"syscall"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/evidence"
	cmn "github.com/tendermint/tendermint/libs/common"
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
	"github.com/tendermint/tendermint/version"
)
func TestNodeStartStop(t *testing.T) {
	config := cfg.ResetTestRoot("node_node_test")

	// create & start node
	n, err := DefaultNewNode(config, log.TestingLogger())
	require.NoError(t, err)
	err = n.Start()
	require.NoError(t, err)
	t.Logf("Started node %v", n.sw.NodeInfo())

	// wait for the node to produce a block
	blockCh := make(chan interface{})
	err = n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock, blockCh)
	require.NoError(t, err)
	select {
	case <-blockCh:
	case <-time.After(10 * time.Second):
		t.Fatal("timed out waiting for the node to produce a block")
	}

	// stop the node
	go func() {
		n.Stop()
	}()

	select {
	case <-n.Quit():
	case <-time.After(5 * time.Second):
		// send SIGABRT to ourselves so the runtime dumps all goroutine
		// stacks, which helps diagnose where the shutdown is stuck
		pid := os.Getpid()
		p, err := os.FindProcess(pid)
		if err != nil {
			panic(err)
		}
		err = p.Signal(syscall.SIGABRT)
		fmt.Println(err)
		t.Fatal("timed out waiting for shutdown")
	}
}
func TestSplitAndTrimEmpty(t *testing.T) {
	testCases := []struct {
		s        string
		sep      string
		cutset   string
		expected []string
	}{
		{"a,b,c", ",", " ", []string{"a", "b", "c"}},
		{" a , b , c ", ",", " ", []string{"a", "b", "c"}},
		{" a, b, c ", ",", " ", []string{"a", "b", "c"}},
		{" a, ", ",", " ", []string{"a"}},
		{" ", ",", " ", []string{}},
	}

	for _, tc := range testCases {
		assert.Equal(t, tc.expected, splitAndTrimEmpty(tc.s, tc.sep, tc.cutset), "%s", tc.s)
	}
}
func TestNodeDelayedStart(t *testing.T) {
	config := cfg.ResetTestRoot("node_delayed_start_test")
	now := tmtime.Now()

	// create & start node
	n, err := DefaultNewNode(config, log.TestingLogger())
	require.NoError(t, err)
	n.GenesisDoc().GenesisTime = now.Add(2 * time.Second)

	err = n.Start()
	require.NoError(t, err)

	startTime := tmtime.Now()
	assert.True(t, startTime.After(n.GenesisDoc().GenesisTime))
}
func TestNodeSetAppVersion(t *testing.T) {
	config := cfg.ResetTestRoot("node_app_version_test")

	// create node
	n, err := DefaultNewNode(config, log.TestingLogger())
	require.NoError(t, err)

	// default config uses the kvstore app
	var appVersion version.Protocol = kvstore.ProtocolVersion

	// check version is set in state
	state := sm.LoadState(n.stateDB)
	assert.Equal(t, appVersion, state.Version.Consensus.App)

	// check version is set in node info
	assert.Equal(t, appVersion, n.nodeInfo.(p2p.DefaultNodeInfo).ProtocolVersion.App)
}
func TestNodeSetPrivValTCP(t *testing.T) {
	addr := "tcp://" + testFreeAddr(t)

	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
	config.BaseConfig.PrivValidatorListenAddr = addr

	dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey())
	pvsc := privval.NewRemoteSigner(
		log.TestingLogger(),
		config.ChainID(),
		types.NewMockPV(),
		dialer,
	)
	privval.RemoteSignerConnDeadline(100 * time.Millisecond)(pvsc)

	go func() {
		err := pvsc.Start()
		if err != nil {
			panic(err)
		}
	}()
	defer pvsc.Stop()

	n, err := DefaultNewNode(config, log.TestingLogger())
	require.NoError(t, err)
	assert.IsType(t, &privval.SocketVal{}, n.PrivValidator())
}
// address without a protocol must result in error
func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
	addrNoPrefix := testFreeAddr(t)

	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
	config.BaseConfig.PrivValidatorListenAddr = addrNoPrefix

	_, err := DefaultNewNode(config, log.TestingLogger())
	assert.Error(t, err)
}
func TestNodeSetPrivValIPC(t *testing.T) {
	tmpfile := "/tmp/kms." + cmn.RandStr(6) + ".sock"
	defer os.Remove(tmpfile) // clean up

	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
	config.BaseConfig.PrivValidatorListenAddr = "unix://" + tmpfile

	dialer := privval.DialUnixFn(tmpfile)
	pvsc := privval.NewRemoteSigner(
		log.TestingLogger(),
		config.ChainID(),
		types.NewMockPV(),
		dialer,
	)
	privval.RemoteSignerConnDeadline(100 * time.Millisecond)(pvsc)

	go func() {
		// don't use require in a goroutine: FailNow must be called from
		// the test goroutine, so panic instead (as in the TCP test above)
		if err := pvsc.Start(); err != nil {
			panic(err)
		}
	}()
	defer pvsc.Stop()

	n, err := DefaultNewNode(config, log.TestingLogger())
	require.NoError(t, err)
	assert.IsType(t, &privval.SocketVal{}, n.PrivValidator())
}
// testFreeAddr claims a free port so we don't block on listener being ready.
func testFreeAddr(t *testing.T) string {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	defer ln.Close()

	return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port)
}
// create a proposal block using real and full
// mempool and evidence pool and validate it.
func TestCreateProposalBlock(t *testing.T) {
	config := cfg.ResetTestRoot("node_create_proposal")
	cc := proxy.NewLocalClientCreator(kvstore.NewKVStoreApplication())
	proxyApp := proxy.NewAppConns(cc)
	err := proxyApp.Start()
	require.NoError(t, err)
	defer proxyApp.Stop()

	logger := log.TestingLogger()

	var height int64 = 1
	state, stateDB := state(1, height)
	maxBytes := 16384
	state.ConsensusParams.BlockSize.MaxBytes = int64(maxBytes)
	proposerAddr, _ := state.Validators.GetByIndex(0)

	// Make Mempool
	memplMetrics := mempl.PrometheusMetrics("node_test")
	mempool := mempl.NewMempool(
		config.Mempool,
		proxyApp.Mempool(),
		state.LastBlockHeight,
		mempl.WithMetrics(memplMetrics),
		mempl.WithPreCheck(sm.TxPreCheck(state)),
		mempl.WithPostCheck(sm.TxPostCheck(state)),
	)
	mempool.SetLogger(logger)

	// Make EvidencePool
	types.RegisterMockEvidencesGlobal()
	evidence.RegisterMockEvidences()
	evidenceDB := dbm.NewMemDB()
	evidenceStore := evidence.NewEvidenceStore(evidenceDB)
	evidencePool := evidence.NewEvidencePool(stateDB, evidenceStore)
	evidencePool.SetLogger(logger)

	// fill the evidence pool with more evidence
	// than can fit in a block
	minEvSize := 12
	numEv := (maxBytes / types.MaxEvidenceBytesDenominator) / minEvSize
	for i := 0; i < numEv; i++ {
		ev := types.NewMockRandomGoodEvidence(1, proposerAddr, cmn.RandBytes(minEvSize))
		err := evidencePool.AddEvidence(ev)
		assert.NoError(t, err)
	}

	// fill the mempool with more txs
	// than can fit in a block
	txLength := 1000
	for i := 0; i < maxBytes/txLength; i++ {
		tx := cmn.RandBytes(txLength)
		err := mempool.CheckTx(tx, nil)
		assert.NoError(t, err)
	}

	blockExec := sm.NewBlockExecutor(
		stateDB,
		logger,
		proxyApp.Consensus(),
		mempool,
		evidencePool,
	)

	commit := &types.Commit{}
	block, _ := blockExec.CreateProposalBlock(
		height,
		state, commit,
		proposerAddr,
	)

	err = blockExec.ValidateBlock(state, block)
	assert.NoError(t, err)
}
func state(nVals int, height int64) (sm.State, dbm.DB) {
	vals := make([]types.GenesisValidator, nVals)
	for i := 0; i < nVals; i++ {
		secret := []byte(fmt.Sprintf("test%d", i))
		pk := ed25519.GenPrivKeyFromSecret(secret)
		vals[i] = types.GenesisValidator{
			Address: pk.PubKey().Address(),
			PubKey:  pk.PubKey(),
			Power:   1000,
			Name:    fmt.Sprintf("test%d", i),
		}
	}
	s, _ := sm.MakeGenesisState(&types.GenesisDoc{
		ChainID:    "test-chain",
		Validators: vals,
		AppHash:    nil,
	})

	// save validators to db for each height up to the requested one
	stateDB := dbm.NewMemDB()
	sm.SaveState(stateDB, s)
	for i := 1; i < int(height); i++ {
		s.LastBlockHeight++
		s.LastValidators = s.Validators.Copy()
		sm.SaveState(stateDB, s)
	}
	return s, stateDB
}