tendermint/evidence/reactor_test.go
Anton Kaliaev 5a6822c8ac abci: localClient improvements & bugfixes & pubsub Unsubscribe issues (#2748)
* use READ lock/unlock in ConsensusState#GetLastHeight

Refs #2721
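
A minimal sketch of the change, with illustrative names rather than the actual ConsensusState fields:

```
import "sync"

type state struct {
	mtx        sync.RWMutex
	lastHeight int64
}

// GetLastHeight only reads, so a read lock suffices and lets concurrent
// readers proceed instead of contending on an exclusive write lock.
func (s *state) GetLastHeight() int64 {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	return s.lastHeight
}
```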

* do not use defers when there's no need

* fix peer formatting (output its address instead of the pointer)

```
[54310]: E[11-02|11:59:39.851] Connection failed @ sendRoutine              module=p2p peer=0xb78f00 conn=MConn{74.207.236.148:26656} err="pong timeout"
```

https://github.com/tendermint/tendermint/issues/2721#issuecomment-435326581

* panic if peer has no state

https://github.com/tendermint/tendermint/issues/2721#issuecomment-435347165

It's confusing that sometimes we check if a peer has a state, but most of
the time we expect it to be there

1. add79700b5/mempool/reactor.go (L138)
2. add79700b5/rpc/core/consensus.go (L196)

I will change everything to always assume a peer has a state and panic
otherwise

that should help identify issues earlier
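
A sketch of the resulting pattern; peer.Get and types.PeerStateKey are the real p2p APIs, while PeerState and mustGetPeerState stand in for each reactor's own types:

```
// mustGetPeerState fails loudly instead of silently skipping a peer
// whose state was never set.
func mustGetPeerState(peer p2p.Peer) *PeerState {
	ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
	if !ok {
		panic(fmt.Sprintf("peer %v has no state", peer))
	}
	return ps
}
```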

* abci/localclient: extend lock on app callback

The app callback should be protected by the lock as well (note this was
already done for InitChainAsync, so why not for the others?). Otherwise,
when we execute the block, a tx might come in and call the callback at the
same time we're updating it in execBlockOnProxyApp => DATA RACE (see the
sketch after the traces below)

Fixes #2721

The consensus state is locked:

```
goroutine 113333 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00180009c, 0xc0000c7e00)
        /usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*RWMutex).RLock(0xc001800090)
        /usr/local/go/src/sync/rwmutex.go:50 +0x4e
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).GetRoundState(0xc001800000, 0x0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:218 +0x46
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusReactor).queryMaj23Routine(0xc0017def80, 0x11104a0, 0xc0072488f0, 0xc007248
9c0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/reactor.go:735 +0x16d
created by github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusReactor).AddPeer
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/reactor.go:172 +0x236
```

because the localClient is locked:

```
goroutine 1899 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00003363c, 0xc0000cb500)
        /usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*Mutex).Lock(0xc000033638)
        /usr/local/go/src/sync/mutex.go:134 +0xff
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client.(*localClient).SetResponseCallback(0xc0001fb560, 0xc007868540)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client/local_client.go:32 +0x33
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy.(*appConnConsensus).SetResponseCallback(0xc00002f750, 0xc007868540)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy/app_conn.go:57 +0x40
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state.execBlockOnProxyApp(0x1104e20, 0xc002ca0ba0, 0x11092a0, 0xc00002f750, 0xc0001fe960, 0xc000bfc660, 0x110cfe0, 0xc000090330, 0xc9d12, 0xc000d9d5a0, ...)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state/execution.go:230 +0x1fd
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state.(*BlockExecutor).ApplyBlock(0xc002c2a230, 0x7, 0x0, 0xc000eae880, 0x6, 0xc002e52c60, 0x16, 0x1f927, 0xc9d12, 0xc000d9d5a0, ...)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state/execution.go:96 +0x142
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).finalizeCommit(0xc001800000, 0x1f928)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1339 +0xa3e
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).tryFinalizeCommit(0xc001800000, 0x1f928)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1270 +0x451
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit.func1(0xc001800000, 0x0, 0x1f928)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1218 +0x90
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit(0xc001800000, 0x1f928, 0x0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1247 +0x6b8
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote(0xc001800000, 0xc003d8dea0, 0xc000cf4cc0, 0x28, 0xf1, 0xc003bc7ad0, 0xc003bc7b10)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1659 +0xbad
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote(0xc001800000, 0xc003d8dea0, 0xc000cf4cc0, 0x28, 0xf1, 0xf1, 0xf1)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1517 +0x59
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg(0xc001800000, 0xd98200, 0xc0070dbed0, 0xc000cf4cc0, 0x28)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:660 +0x64b
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine(0xc001800000, 0x0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:617 +0x670
created by github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).OnStart
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:311 +0x132
```

Meanwhile, a tx comes in and CheckTx is executed right as we execute the block:

```
goroutine 111044 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00003363c, 0x0)
        /usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*Mutex).Lock(0xc000033638)
        /usr/local/go/src/sync/mutex.go:134 +0xff
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client.(*localClient).CheckTxAsync(0xc0001fb0e0, 0xc002d94500, 0x13f, 0x280, 0x0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client/local_client.go:85 +0x47
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy.(*appConnMempool).CheckTxAsync(0xc00002f720, 0xc002d94500, 0x13f, 0x280, 0x1)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy/app_conn.go:114 +0x51
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/mempool.(*Mempool).CheckTx(0xc002d3a320, 0xc002d94500, 0x13f, 0x280, 0xc0072355f0, 0x0, 0x0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/mempool/mempool.go:316 +0x17b
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/core.BroadcastTxSync(0xc002d94500, 0x13f, 0x280, 0x0, 0x0, 0x0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go:93 +0xb8
reflect.Value.call(0xd85560, 0x10326c0, 0x13, 0xec7b8b, 0x4, 0xc00663f180, 0x1, 0x1, 0xc00663f180, 0xc00663f188, ...)
        /usr/local/go/src/reflect/value.go:447 +0x449
reflect.Value.Call(0xd85560, 0x10326c0, 0x13, 0xc00663f180, 0x1, 0x1, 0x0, 0x0, 0xc005cc9344)
        /usr/local/go/src/reflect/value.go:308 +0xa4
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.makeHTTPHandler.func2(0x1102060, 0xc00663f100, 0xc0082d7900)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go:269 +0x188
net/http.HandlerFunc.ServeHTTP(0xc002c81f20, 0x1102060, 0xc00663f100, 0xc0082d7900)
        /usr/local/go/src/net/http/server.go:1964 +0x44
net/http.(*ServeMux).ServeHTTP(0xc002c81b60, 0x1102060, 0xc00663f100, 0xc0082d7900)
        /usr/local/go/src/net/http/server.go:2361 +0x127
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.maxBytesHandler.ServeHTTP(0x10f8a40, 0xc002c81b60, 0xf4240, 0x1102060, 0xc00663f100, 0xc0082d7900)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:219 +0xcf
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.RecoverAndLogHandler.func1(0x1103220, 0xc00121e620, 0xc0082d7900)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:192 +0x394
net/http.HandlerFunc.ServeHTTP(0xc002c06ea0, 0x1103220, 0xc00121e620, 0xc0082d7900)
        /usr/local/go/src/net/http/server.go:1964 +0x44
net/http.serverHandler.ServeHTTP(0xc001a1aa90, 0x1103220, 0xc00121e620, 0xc0082d7900)
        /usr/local/go/src/net/http/server.go:2741 +0xab
net/http.(*conn).serve(0xc00785a3c0, 0x11041a0, 0xc000f844c0)
        /usr/local/go/src/net/http/server.go:1847 +0x646
created by net/http.(*Server).Serve
        /usr/local/go/src/net/http/server.go:2851 +0x2f5
```
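
A sketch of the extended-lock fix described above, mirroring abci/client/local_client.go of that era (treat the exact names as approximate):

```
func (app *localClient) DeliverTxAsync(tx []byte) *ReqRes {
	app.mtx.Lock()
	defer app.mtx.Unlock() // the lock now spans the app call AND the callback

	// Previously the lock was released before the callback ran, so a
	// concurrent SetResponseCallback from execBlockOnProxyApp could race
	// with app.callback below.
	res := app.Application.DeliverTx(tx)
	return app.callback(
		types.ToRequestDeliverTx(tx),
		types.ToResponseDeliverTx(res),
	)
}
```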

* consensus: use read lock in Receive#VoteMessage

* use defer to unlock the mutex because the application might panic

* use defer in every method of the localClient
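
A toy sketch of why defer matters here (guardedCall is illustrative, not a real tendermint helper):

```
import "sync"

// guardedCall releases the mutex even if f panics: defer runs during
// panic unwinding, so a panicking application cannot leave the client
// locked forever.
func guardedCall(mtx *sync.Mutex, f func()) {
	mtx.Lock()
	defer mtx.Unlock()
	f()
}
```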

* add a changelog entry

* drain channels before Unsubscribe(All)

Read 55362ed766/libs/pubsub/pubsub.go (L13)
for the detailed explanation of the issue.

We'll need to fix it someday. Make sure to keep an eye on
https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md
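
In the meantime, a hedged sketch of the drain-before-unsubscribe workaround (the helper and channel type are illustrative):

```
// drain discards everything currently buffered in ch, so a pubsub
// goroutine blocked on a send to this subscriber can make progress and
// Unsubscribe(All) can complete instead of deadlocking.
func drain(ch <-chan interface{}) {
	for {
		select {
		case <-ch:
			// throw away a buffered message
		default:
			return
		}
	}
}
```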

* retry instead of panicking when a peer has no state in reactors other than consensus

in the /dump_consensus_state RPC endpoint, skip peers with no state

* rpc/core/mempool: simplify error messages

* rpc/core/mempool: use time.After instead of a timer

also, do not log the DeliverTx result (to be consistent with other methods)
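
A sketch of the time.After change, with illustrative channel, timeout, and error values:

```
select {
case res := <-deliverTxResCh:
	return res, nil // note: the DeliverTx result is no longer logged here
case <-time.After(timeout):
	// time.After needs no timer object to create, stop, or reset
	return nil, errors.New("timed out waiting for tx to be included in a block")
}
```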

* unlock before calling the callback in reqRes#SetCallback
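
A sketch of that change; the field names follow tendermint's abci/client ReqRes of that era (treat them as approximate):

```
func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {
	reqRes.mtx.Lock()

	if reqRes.done {
		reqRes.mtx.Unlock() // unlock first ...
		cb(reqRes.Response) // ... then invoke the callback outside the lock,
		return              // so it can safely re-enter reqRes
	}

	reqRes.cb = cb
	reqRes.mtx.Unlock()
}
```
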
2018-11-13 11:32:51 -05:00


package evidence

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/go-kit/kit/log/term"
	"github.com/stretchr/testify/assert"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/crypto/secp256k1"
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// evidenceLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func evidenceLogger() log.Logger {
	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
		for i := 0; i < len(keyvals)-1; i += 2 {
			if keyvals[i] == "validator" {
				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
			}
		}
		return term.FgBgColor{}
	})
}

// connect N evidence reactors through N switches
func makeAndConnectEvidenceReactors(config *cfg.Config, stateDBs []dbm.DB) []*EvidenceReactor {
	N := len(stateDBs)
	reactors := make([]*EvidenceReactor, N)
	logger := evidenceLogger()
	for i := 0; i < N; i++ {
		store := NewEvidenceStore(dbm.NewMemDB())
		pool := NewEvidencePool(stateDBs[i], store)
		reactors[i] = NewEvidenceReactor(pool)
		reactors[i].SetLogger(logger.With("validator", i))
	}

	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("EVIDENCE", reactors[i])
		return s
	}, p2p.Connect2Switches)

	return reactors
}

// wait for all evidence on all reactors
func waitForEvidence(t *testing.T, evs types.EvidenceList, reactors []*EvidenceReactor) {
	// wait for the evidence in all evpools
	wg := new(sync.WaitGroup)
	for i := 0; i < len(reactors); i++ {
		wg.Add(1)
		go _waitForEvidence(t, wg, evs, i, reactors)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	timer := time.After(TIMEOUT)
	select {
	case <-timer:
		t.Fatal("Timed out waiting for evidence")
	case <-done:
	}
}

// wait for all evidence on a single evpool
func _waitForEvidence(t *testing.T, wg *sync.WaitGroup, evs types.EvidenceList, reactorIdx int, reactors []*EvidenceReactor) {
	evpool := reactors[reactorIdx].evpool
	for len(evpool.PendingEvidence(-1)) != len(evs) {
		time.Sleep(time.Millisecond * 100)
	}

	reapedEv := evpool.PendingEvidence(-1)
	// put the reaped evidence in a map so we can quickly check we got everything
	evMap := make(map[string]types.Evidence)
	for _, e := range reapedEv {
		evMap[string(e.Hash())] = e
	}
	for i, expectedEv := range evs {
		gotEv := evMap[string(expectedEv.Hash())]
		assert.Equal(t, expectedEv, gotEv,
			fmt.Sprintf("evidence at index %d on reactor %d doesn't match: %v vs %v",
				i, reactorIdx, expectedEv, gotEv))
	}

	wg.Done()
}

// sendEvidence adds n pieces of mock evidence to the given evpool and
// returns them as a list.
func sendEvidence(t *testing.T, evpool *EvidencePool, valAddr []byte, n int) types.EvidenceList {
	evList := make([]types.Evidence, n)
	for i := 0; i < n; i++ {
		ev := types.NewMockGoodEvidence(int64(i+1), 0, valAddr)
		err := evpool.AddEvidence(ev)
		assert.Nil(t, err)
		evList[i] = ev
	}
	return evList
}

var (
	NUM_EVIDENCE = 10
	TIMEOUT      = 120 * time.Second // ridiculously high because CircleCI is slow
)

func TestReactorBroadcastEvidence(t *testing.T) {
	config := cfg.TestConfig()
	N := 7

	// create statedb for everyone
	stateDBs := make([]dbm.DB, N)
	valAddr := []byte("myval")
	// we need validators saved for heights at least as high as we have evidence for
	height := int64(NUM_EVIDENCE) + 10
	for i := 0; i < N; i++ {
		stateDBs[i] = initializeValidatorState(valAddr, height)
	}

	// make reactors from statedb
	reactors := makeAndConnectEvidenceReactors(config, stateDBs)

	// set the peer height on each reactor
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			ps := peerState{height}
			peer.Set(types.PeerStateKey, ps)
		}
	}

	// send a bunch of valid evidence to the first reactor's evpool
	// and wait for them all to be received in the others
	evList := sendEvidence(t, reactors[0].evpool, valAddr, NUM_EVIDENCE)
	waitForEvidence(t, evList, reactors)
}

type peerState struct {
	height int64
}

func (ps peerState) GetHeight() int64 {
	return ps.height
}

func TestReactorSelectiveBroadcast(t *testing.T) {
	config := cfg.TestConfig()

	valAddr := []byte("myval")
	height1 := int64(NUM_EVIDENCE) + 10
	height2 := int64(NUM_EVIDENCE) / 2

	// DB1 is ahead of DB2
	stateDB1 := initializeValidatorState(valAddr, height1)
	stateDB2 := initializeValidatorState(valAddr, height2)

	// make reactors from statedb
	reactors := makeAndConnectEvidenceReactors(config, []dbm.DB{stateDB1, stateDB2})

	// set the peer height on each reactor
	for _, r := range reactors {
		for _, peer := range r.Switch.Peers().List() {
			ps := peerState{height1}
			peer.Set(types.PeerStateKey, ps)
		}
	}

	// update the first reactor peer's height to be very small
	peer := reactors[0].Switch.Peers().List()[0]
	ps := peerState{height2}
	peer.Set(types.PeerStateKey, ps)

	// send a bunch of valid evidence to the first reactor's evpool
	evList := sendEvidence(t, reactors[0].evpool, valAddr, NUM_EVIDENCE)

	// only evidence at heights below the peer's height should make it through
	waitForEvidence(t, evList[:NUM_EVIDENCE/2], reactors[1:2])

	// peers should still be connected
	peers := reactors[1].Switch.Peers().List()
	assert.Equal(t, 1, len(peers))
}

func TestEvidenceListMessageValidationBasic(t *testing.T) {
	testCases := []struct {
		testName          string
		malleateEvListMsg func(*EvidenceListMessage)
		expectErr         bool
	}{
		{"Good EvidenceListMessage", func(evList *EvidenceListMessage) {}, false},
		{"Invalid EvidenceListMessage", func(evList *EvidenceListMessage) {
			evList.Evidence = append(evList.Evidence,
				&types.DuplicateVoteEvidence{PubKey: secp256k1.GenPrivKey().PubKey()})
		}, true},
	}
	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			evListMsg := &EvidenceListMessage{}
			n := 3
			valAddr := []byte("myval")
			evListMsg.Evidence = make([]types.Evidence, n)
			for i := 0; i < n; i++ {
				evListMsg.Evidence[i] = types.NewMockGoodEvidence(int64(i+1), 0, valAddr)
			}
			tc.malleateEvListMsg(evListMsg)
			assert.Equal(t, tc.expectErr, evListMsg.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}