fixes from Bucky's review
@@ -141,10 +141,12 @@ func TestRmBadTx(t *testing.T) {
 		}
 
 		// check for the tx
-		txs := cs.mempool.Reap(1)
-		if len(txs) == 0 {
-			emptyMempoolCh <- struct{}{}
-			return
+		for {
+			txs := cs.mempool.Reap(1)
+			if len(txs) == 0 {
+				emptyMempoolCh <- struct{}{}
+			}
+			time.Sleep(10 * time.Millisecond)
 		}
 	}()
 
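Note: the hunk above replaces a one-shot mempool check with a polling loop: the goroutine re-reaps the mempool every 10ms and signals on emptyMempoolCh once the tx is gone. A minimal runnable sketch of that poll-and-signal idiom (the atomic flag and channel wiring are illustrative stand-ins, not code from this commit):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var txRemoved atomic.Bool // stand-in for "cs.mempool.Reap(1) came back empty"
	emptyMempoolCh := make(chan struct{})

	// poll-and-signal goroutine, the same shape as the hunk above,
	// except this sketch returns after signalling
	go func() {
		for {
			if txRemoved.Load() {
				emptyMempoolCh <- struct{}{}
				return
			}
			time.Sleep(10 * time.Millisecond)
		}
	}()

	// simulate the bad tx being removed a little later
	time.AfterFunc(50*time.Millisecond, func() { txRemoved.Store(true) })

	select {
	case <-emptyMempoolCh:
		fmt.Println("mempool is empty")
	case <-time.After(5 * time.Second):
		fmt.Println("timed out waiting for the tx to be removed")
	}
}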
@@ -9,27 +9,30 @@ import (
 	"github.com/tendermint/tmlibs/log"
 
 	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/types"
 )
 
 func TestNodeStartStop(t *testing.T) {
 	config := cfg.ResetTestRoot("node_node_test")
 
-	// Create & start node
+	// create & start node
 	n, err := DefaultNewNode(config, log.TestingLogger())
 	assert.NoError(t, err, "expected no err on DefaultNewNode")
 	n.Start()
 	t.Logf("Started node %v", n.sw.NodeInfo())
 
-	ticker := time.NewTicker(10 * time.Millisecond)
+	// wait for the node to produce a block
+	blockCh := make(chan struct{})
+	types.AddListenerForEvent(n.EventSwitch(), "node_test", types.EventStringNewBlock(), func(types.TMEventData) {
+		blockCh <- struct{}{}
+	})
 	select {
-	case <-ticker.C:
-		if n.IsRunning() {
-			return
-		}
+	case <-blockCh:
 	case <-time.After(5 * time.Second):
-		t.Fatal("timed out waiting for start")
+		t.Fatal("timed out waiting for the node to produce a block")
 	}
 
+	// stop the node
 	go func() {
 		n.Stop()
 	}()
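Note: instead of polling n.IsRunning on a 10ms ticker (which, without a loop, ran at most once), the test now subscribes to the NewBlock event and blocks until the callback fires or a five-second deadline passes. A small sketch of that wait-for-event shape, with a dummy event source standing in for tendermint's EventSwitch/AddListenerForEvent API:

package main

import (
	"fmt"
	"time"
)

// addListener is a dummy event source standing in for
// types.AddListenerForEvent: it fires cb once, a bit later.
func addListener(cb func()) {
	time.AfterFunc(100*time.Millisecond, cb)
}

func main() {
	// buffered so the callback never blocks if the waiter already gave up
	blockCh := make(chan struct{}, 1)
	addListener(func() { blockCh <- struct{}{} })

	select {
	case <-blockCh:
		fmt.Println("saw a new block")
	case <-time.After(5 * time.Second):
		fmt.Println("timed out waiting for the node to produce a block")
	}
}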
@@ -109,26 +109,28 @@ func TestPEXReactorRunning(t *testing.T) {
 
 func assertSomePeersWithTimeout(t *testing.T, switches []*Switch, checkPeriod, timeout time.Duration) {
 	ticker := time.NewTicker(checkPeriod)
-	select {
-	case <-ticker.C:
-		// check peers are connected
-		allGood := true
-		for _, s := range switches {
-			outbound, inbound, _ := s.NumPeers()
-			if outbound+inbound == 0 {
-				allGood = false
-			}
-		}
-		if allGood {
-			return
-		}
-	case <-time.After(timeout):
-		numPeersStr := ""
-		for i, s := range switches {
-			outbound, inbound, _ := s.NumPeers()
-			numPeersStr += fmt.Sprintf("%d => {outbound: %d, inbound: %d}, ", i, outbound, inbound)
-		}
-		t.Errorf("expected all switches to be connected to at least one peer (switches: %s)", numPeersStr)
-	}
+	for {
+		select {
+		case <-ticker.C:
+			// check peers are connected
+			allGood := true
+			for _, s := range switches {
+				outbound, inbound, _ := s.NumPeers()
+				if outbound+inbound == 0 {
+					allGood = false
+				}
+			}
+			if allGood {
+				return
+			}
+		case <-time.After(timeout):
+			numPeersStr := ""
+			for i, s := range switches {
+				outbound, inbound, _ := s.NumPeers()
+				numPeersStr += fmt.Sprintf("%d => {outbound: %d, inbound: %d}, ", i, outbound, inbound)
+			}
+			t.Errorf("expected all switches to be connected to at least one peer (switches: %s)", numPeersStr)
+		}
+	}
 }
 
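Note: the substantive fix in this hunk (and the next one) is the for wrapped around the select. A bare select gives the ticker exactly one chance to fire, so the old helper checked the switches once after checkPeriod and returned; the loop keeps re-checking until the peers show up or the timeout wins. One caveat: time.After(timeout) evaluated inside the select is re-armed on every iteration, so while the ticker keeps winning, the timeout branch may never fire. A sketch that pins a fixed overall deadline by creating the timeout channel once, before the loop (a variant, not this commit's exact code):

package main

import (
	"fmt"
	"time"
)

// retryUntil re-checks ok every checkPeriod until it returns true or the
// overall timeout elapses. The deadline channel is created once, outside
// the loop, so it is not re-armed on each iteration.
func retryUntil(ok func() bool, checkPeriod, timeout time.Duration) bool {
	ticker := time.NewTicker(checkPeriod)
	defer ticker.Stop()
	deadline := time.After(timeout)
	for {
		select {
		case <-ticker.C:
			if ok() {
				return true
			}
		case <-deadline:
			return false
		}
	}
}

func main() {
	start := time.Now()
	// stand-in for "every switch reports outbound+inbound > 0"
	connected := retryUntil(func() bool {
		return time.Since(start) > 300*time.Millisecond
	}, 100*time.Millisecond, time.Second)
	fmt.Println("connected:", connected)
}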
@@ -138,16 +138,19 @@ func TestSwitches(t *testing.T) {
 
 func assertMsgReceivedWithTimeout(t *testing.T, msg string, channel byte, reactor *TestReactor, checkPeriod, timeout time.Duration) {
 	ticker := time.NewTicker(checkPeriod)
-	select {
-	case <-ticker.C:
-		msgs := reactor.getMsgs(channel)
-		if len(msgs) > 0 {
-			if !bytes.Equal(msgs[0].Bytes, wire.BinaryBytes(msg)) {
-				t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(msg), msgs[0].Bytes)
-			}
-		}
-	case <-time.After(timeout):
-		t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel)
-	}
+	for {
+		select {
+		case <-ticker.C:
+			msgs := reactor.getMsgs(channel)
+			if len(msgs) > 0 {
+				if !bytes.Equal(msgs[0].Bytes, wire.BinaryBytes(msg)) {
+					t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(msg), msgs[0].Bytes)
+				}
+				return
+			}
+		case <-time.After(timeout):
+			t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel)
+		}
+	}
 }
 
@@ -174,19 +177,14 @@ func TestConnAddrFilter(t *testing.T) {
 		s2.addPeerWithConnection(c2)
 	}()
 
-	assertNoPeersWithTimeout(t, s1, 100*time.Millisecond, 400*time.Millisecond)
-	assertNoPeersWithTimeout(t, s2, 100*time.Millisecond, 400*time.Millisecond)
+	assertNoPeersAfterTimeout(t, s1, 400*time.Millisecond)
+	assertNoPeersAfterTimeout(t, s2, 400*time.Millisecond)
 }
 
-func assertNoPeersWithTimeout(t *testing.T, sw *Switch, checkPeriod, timeout time.Duration) {
-	ticker := time.NewTicker(checkPeriod)
-	select {
-	case <-ticker.C:
-		if sw.Peers().Size() != 0 {
-			t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size())
-		}
-	case <-time.After(timeout):
-		return
-	}
+func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) {
+	time.Sleep(timeout)
+	if sw.Peers().Size() != 0 {
+		t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size())
+	}
 }
 
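Note: the rename from assertNoPeersWithTimeout to assertNoPeersAfterTimeout tracks a real semantic change. This is a negative assertion: the peer count must stay zero for the whole window, so there is nothing to observe early and polling buys nothing. Sleeping for the full timeout and checking the count once is simpler and equivalent, which is why the checkPeriod argument disappears from every call site below.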
@@ -214,8 +212,8 @@ func TestConnPubKeyFilter(t *testing.T) {
 		s2.addPeerWithConnection(c2)
 	}()
 
-	assertNoPeersWithTimeout(t, s1, 100*time.Millisecond, 400*time.Millisecond)
-	assertNoPeersWithTimeout(t, s2, 100*time.Millisecond, 400*time.Millisecond)
+	assertNoPeersAfterTimeout(t, s1, 400*time.Millisecond)
+	assertNoPeersAfterTimeout(t, s2, 400*time.Millisecond)
 }
 
 func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
@@ -238,7 +236,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
 	// simulate failure by closing connection
 	peer.CloseConn()
 
-	assertNoPeersWithTimeout(t, sw, 100*time.Millisecond, 100*time.Millisecond)
+	assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond)
 	assert.False(peer.IsRunning())
 }
 