2017-11-20 04:22:25 +00:00
|
|
|
package evidence
|
2017-11-02 12:06:48 -06:00
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"reflect"
|
2017-11-02 18:26:07 -06:00
|
|
|
"time"
|
2017-11-02 12:06:48 -06:00
|
|
|
|
2018-07-09 13:01:23 +04:00
|
|
|
amino "github.com/tendermint/go-amino"
|
|
|
|
|
2018-07-01 22:36:49 -04:00
|
|
|
clist "github.com/tendermint/tendermint/libs/clist"
|
|
|
|
"github.com/tendermint/tendermint/libs/log"
|
2017-11-02 12:06:48 -06:00
|
|
|
"github.com/tendermint/tendermint/p2p"
|
|
|
|
"github.com/tendermint/tendermint/types"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// EvidenceChannel is the p2p channel ID on which evidence is gossiped.
	EvidenceChannel = byte(0x38)

	// maxMsgSize bounds the size of any incoming evidence message.
	maxMsgSize = 1048576 // 1MB TODO make it configurable

	broadcastEvidenceIntervalS = 60  // broadcast uncommitted evidence this often
	peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
)
|
|
|
|
|
// EvidenceReactor handles evpool evidence broadcasting amongst peers.
type EvidenceReactor struct {
	p2p.BaseReactor
	evpool   *EvidencePool   // pool of evidence to validate, store and gossip
	eventBus *types.EventBus // set via SetEventBus; not read elsewhere in this file
}
|
|
|
|
|
2017-11-19 01:34:11 +00:00
|
|
|
// NewEvidenceReactor returns a new EvidenceReactor with the given config and evpool.
|
2017-12-26 20:34:57 -05:00
|
|
|
func NewEvidenceReactor(evpool *EvidencePool) *EvidenceReactor {
|
2017-11-19 01:34:11 +00:00
|
|
|
evR := &EvidenceReactor{
|
2017-11-02 18:26:07 -06:00
|
|
|
evpool: evpool,
|
2017-11-02 12:06:48 -06:00
|
|
|
}
|
2017-11-19 01:34:11 +00:00
|
|
|
evR.BaseReactor = *p2p.NewBaseReactor("EvidenceReactor", evR)
|
2017-11-02 12:06:48 -06:00
|
|
|
return evR
|
|
|
|
}
|
|
|
|
|
// SetLogger sets the Logger on the reactor and the underlying evidence pool.
func (evR *EvidenceReactor) SetLogger(l log.Logger) {
	evR.Logger = l
	// Propagate the logger so pool-level events are logged consistently.
	evR.evpool.SetLogger(l)
}
|
|
|
|
|
2017-11-02 12:06:48 -06:00
|
|
|
// GetChannels implements Reactor.
|
|
|
|
// It returns the list of channels for this reactor.
|
2017-11-19 01:34:11 +00:00
|
|
|
func (evR *EvidenceReactor) GetChannels() []*p2p.ChannelDescriptor {
|
2017-11-02 12:06:48 -06:00
|
|
|
return []*p2p.ChannelDescriptor{
|
2019-01-30 12:24:26 +04:00
|
|
|
{
|
2017-11-19 01:34:11 +00:00
|
|
|
ID: EvidenceChannel,
|
2017-11-02 12:06:48 -06:00
|
|
|
Priority: 5,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// AddPeer implements Reactor.
// It starts a long-lived goroutine that gossips evidence to the peer; the
// goroutine exits when either the peer or this reactor quits.
func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) {
	go evR.broadcastEvidenceRoutine(peer)
}
|
|
|
|
|
|
|
|
// Receive implements Reactor.
|
|
|
|
// It adds any received evidence to the evpool.
|
2017-11-19 01:34:11 +00:00
|
|
|
func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
2018-07-09 13:01:23 +04:00
|
|
|
msg, err := decodeMsg(msgBytes)
|
2017-11-02 12:06:48 -06:00
|
|
|
if err != nil {
|
2018-03-04 13:42:45 +04:00
|
|
|
evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
|
|
|
|
evR.Switch.StopPeerForError(src, err)
|
2017-11-02 12:06:48 -06:00
|
|
|
return
|
|
|
|
}
|
2018-11-01 07:07:18 +01:00
|
|
|
|
|
|
|
if err = msg.ValidateBasic(); err != nil {
|
|
|
|
evR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
|
|
|
evR.Switch.StopPeerForError(src, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-11-02 12:06:48 -06:00
|
|
|
evR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
|
|
|
|
|
|
|
|
switch msg := msg.(type) {
|
2017-11-19 01:34:11 +00:00
|
|
|
case *EvidenceListMessage:
|
2017-11-02 12:06:48 -06:00
|
|
|
for _, ev := range msg.Evidence {
|
2017-11-02 18:26:07 -06:00
|
|
|
err := evR.evpool.AddEvidence(ev)
|
2017-11-02 12:06:48 -06:00
|
|
|
if err != nil {
|
|
|
|
evR.Logger.Info("Evidence is not valid", "evidence", msg.Evidence, "err", err)
|
2018-03-04 14:22:58 +04:00
|
|
|
// punish peer
|
|
|
|
evR.Switch.StopPeerForError(src, err)
|
2017-11-02 12:06:48 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
evR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetEventBus implements events.Eventable.
// (The previous comment referred to SetEventSwitch, which was the old name.)
func (evR *EvidenceReactor) SetEventBus(b *types.EventBus) {
	evR.eventBus = b
}
|
|
|
|
|
2018-06-04 17:38:44 -07:00
|
|
|
// Modeled after the mempool routine.
// - Evidence accumulates in a clist.
// - Each peer has a routine that iterates through the clist,
//   sending available evidence to the peer.
// - If we're waiting for new evidence and the list is not empty,
//   start iterating from the beginning again.
func (evR *EvidenceReactor) broadcastEvidenceRoutine(peer p2p.Peer) {
	var next *clist.CElement
	for {
		// This happens because the CElement we were looking at got garbage
		// collected (removed). That is, .NextWait() returned nil. Go ahead and
		// start from the beginning.
		if next == nil {
			select {
			case <-evR.evpool.EvidenceWaitChan(): // Wait until evidence is available
				if next = evR.evpool.EvidenceFront(); next == nil {
					continue
				}
			case <-peer.Quit():
				return
			case <-evR.Quit():
				return
			}
		}

		ev := next.Value.(types.Evidence)
		// msg is nil when the evidence shouldn't be sent to this peer;
		// retry tells us to sleep briefly and try the same element again.
		msg, retry := evR.checkSendEvidenceMessage(peer, ev)
		if msg != nil {
			success := peer.Send(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
			// A failed Send (e.g. the peer's queue is full) also means retry.
			retry = !success
		}

		if retry {
			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
			continue
		}

		afterCh := time.After(time.Second * broadcastEvidenceIntervalS)
		select {
		case <-afterCh:
			// start from the beginning every tick.
			// TODO: only do this if we're at the end of the list!
			next = nil
		case <-next.NextWaitChan():
			// see the start of the for loop for nil check
			next = next.Next()
		case <-peer.Quit():
			return
		case <-evR.Quit():
			return
		}
	}
}
|
|
|
|
|
2018-06-04 21:50:42 -07:00
|
|
|
// Returns the message to send the peer, or nil if the evidence is invalid for the peer.
|
|
|
|
// If message is nil, return true if we should sleep and try again.
|
|
|
|
func (evR EvidenceReactor) checkSendEvidenceMessage(peer p2p.Peer, ev types.Evidence) (msg EvidenceMessage, retry bool) {
|
|
|
|
// make sure the peer is up to date
|
|
|
|
evHeight := ev.Height()
|
|
|
|
peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
|
2018-11-26 15:31:11 -05:00
|
|
|
if !ok {
|
abci: localClient improvements & bugfixes & pubsub Unsubscribe issues (#2748)
* use READ lock/unlock in ConsensusState#GetLastHeight
Refs #2721
* do not use defers when there's no need
* fix peer formatting (output its address instead of the pointer)
```
[54310]: E[11-02|11:59:39.851] Connection failed @ sendRoutine module=p2p peer=0xb78f00 conn=MConn{74.207.236.148:26656} err="pong timeout"
```
https://github.com/tendermint/tendermint/issues/2721#issuecomment-435326581
* panic if peer has no state
https://github.com/tendermint/tendermint/issues/2721#issuecomment-435347165
It's confusing that sometimes we check if peer has a state, but most of
the times we expect it to be there
1. https://github.com/tendermint/tendermint/blob/add79700b5fe84417538202b6c927c8cc5383672/mempool/reactor.go#L138
2. https://github.com/tendermint/tendermint/blob/add79700b5fe84417538202b6c927c8cc5383672/rpc/core/consensus.go#L196 (edited)
I will change everything to always assume peer has a state and panic
otherwise
that should help identify issues earlier
* abci/localclient: extend lock on app callback
App callback should be protected by lock as well (note this was already
done for InitChainAsync, why not for others???). Otherwise, when we
execute the block, tx might come in and call the callback in the same
time we're updating it in execBlockOnProxyApp => DATA RACE
Fixes #2721
Consensus state is locked
```
goroutine 113333 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00180009c, 0xc0000c7e00)
/usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*RWMutex).RLock(0xc001800090)
/usr/local/go/src/sync/rwmutex.go:50 +0x4e
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).GetRoundState(0xc001800000, 0x0)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:218 +0x46
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusReactor).queryMaj23Routine(0xc0017def80, 0x11104a0, 0xc0072488f0, 0xc007248
9c0)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/reactor.go:735 +0x16d
created by github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusReactor).AddPeer
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/reactor.go:172 +0x236
```
because localClient is locked
```
goroutine 1899 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00003363c, 0xc0000cb500)
/usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*Mutex).Lock(0xc000033638)
/usr/local/go/src/sync/mutex.go:134 +0xff
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client.(*localClient).SetResponseCallback(0xc0001fb560, 0xc007868540)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client/local_client.go:32 +0x33
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy.(*appConnConsensus).SetResponseCallback(0xc00002f750, 0xc007868540)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy/app_conn.go:57 +0x40
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state.execBlockOnProxyApp(0x1104e20, 0xc002ca0ba0, 0x11092a0, 0xc00002f750, 0xc0001fe960, 0xc000bfc660, 0x110cfe0, 0xc000090330, 0xc9d12, 0xc000d9d5a0, ...)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state/execution.go:230 +0x1fd
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state.(*BlockExecutor).ApplyBlock(0xc002c2a230, 0x7, 0x0, 0xc000eae880, 0x6, 0xc002e52c60, 0x16, 0x1f927, 0xc9d12, 0xc000d9d5a0, ...)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state/execution.go:96 +0x142
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).finalizeCommit(0xc001800000, 0x1f928)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1339 +0xa3e
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).tryFinalizeCommit(0xc001800000, 0x1f928)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1270 +0x451
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit.func1(0xc001800000, 0x0, 0x1f928)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1218 +0x90
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit(0xc001800000, 0x1f928, 0x0)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1247 +0x6b8
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote(0xc001800000, 0xc003d8dea0, 0xc000cf4cc0, 0x28, 0xf1, 0xc003bc7ad0, 0xc003bc7b10)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1659 +0xbad
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote(0xc001800000, 0xc003d8dea0, 0xc000cf4cc0, 0x28, 0xf1, 0xf1, 0xf1)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1517 +0x59
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg(0xc001800000, 0xd98200, 0xc0070dbed0, 0xc000cf4cc0, 0x28)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:660 +0x64b
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine(0xc001800000, 0x0)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:617 +0x670
created by github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).OnStart
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:311 +0x132
```
tx comes in and CheckTx is executed right when we execute the block
```
goroutine 111044 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00003363c, 0x0)
/usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*Mutex).Lock(0xc000033638)
/usr/local/go/src/sync/mutex.go:134 +0xff
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client.(*localClient).CheckTxAsync(0xc0001fb0e0, 0xc002d94500, 0x13f, 0x280, 0x0)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client/local_client.go:85 +0x47
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy.(*appConnMempool).CheckTxAsync(0xc00002f720, 0xc002d94500, 0x13f, 0x280, 0x1)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy/app_conn.go:114 +0x51
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/mempool.(*Mempool).CheckTx(0xc002d3a320, 0xc002d94500, 0x13f, 0x280, 0xc0072355f0, 0x0, 0x0)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/mempool/mempool.go:316 +0x17b
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/core.BroadcastTxSync(0xc002d94500, 0x13f, 0x280, 0x0, 0x0, 0x0)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go:93 +0xb8
reflect.Value.call(0xd85560, 0x10326c0, 0x13, 0xec7b8b, 0x4, 0xc00663f180, 0x1, 0x1, 0xc00663f180, 0xc00663f188, ...)
/usr/local/go/src/reflect/value.go:447 +0x449
reflect.Value.Call(0xd85560, 0x10326c0, 0x13, 0xc00663f180, 0x1, 0x1, 0x0, 0x0, 0xc005cc9344)
/usr/local/go/src/reflect/value.go:308 +0xa4
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.makeHTTPHandler.func2(0x1102060, 0xc00663f100, 0xc0082d7900)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go:269 +0x188
net/http.HandlerFunc.ServeHTTP(0xc002c81f20, 0x1102060, 0xc00663f100, 0xc0082d7900)
/usr/local/go/src/net/http/server.go:1964 +0x44
net/http.(*ServeMux).ServeHTTP(0xc002c81b60, 0x1102060, 0xc00663f100, 0xc0082d7900)
/usr/local/go/src/net/http/server.go:2361 +0x127
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.maxBytesHandler.ServeHTTP(0x10f8a40, 0xc002c81b60, 0xf4240, 0x1102060, 0xc00663f100, 0xc0082d7900)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:219 +0xcf
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.RecoverAndLogHandler.func1(0x1103220, 0xc00121e620, 0xc0082d7900)
/root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:192 +0x394
net/http.HandlerFunc.ServeHTTP(0xc002c06ea0, 0x1103220, 0xc00121e620, 0xc0082d7900)
/usr/local/go/src/net/http/server.go:1964 +0x44
net/http.serverHandler.ServeHTTP(0xc001a1aa90, 0x1103220, 0xc00121e620, 0xc0082d7900)
/usr/local/go/src/net/http/server.go:2741 +0xab
net/http.(*conn).serve(0xc00785a3c0, 0x11041a0, 0xc000f844c0)
/usr/local/go/src/net/http/server.go:1847 +0x646
created by net/http.(*Server).Serve
/usr/local/go/src/net/http/server.go:2851 +0x2f5
```
* consensus: use read lock in Receive#VoteMessage
* use defer to unlock mutex because application might panic
* use defer in every method of the localClient
* add a changelog entry
* drain channels before Unsubscribe(All)
Read https://github.com/tendermint/tendermint/blob/55362ed76630f3e1ebec159a598f6a9fb5892cb1/libs/pubsub/pubsub.go#L13
for the detailed explanation of the issue.
We'll need to fix it someday. Make sure to keep an eye on
https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md
* retry instead of panic when peer has no state in reactors other than consensus
in /dump_consensus_state RPC endpoint, skip a peer with no state
* rpc/core/mempool: simplify error messages
* rpc/core/mempool: use time.After instead of timer
also, do not log DeliverTx result (to be consistent with other memthods)
* unlock before calling the callback in reqRes#SetCallback
2018-11-13 20:32:51 +04:00
|
|
|
// Peer does not have a state yet. We set it in the consensus reactor, but
|
|
|
|
// when we add peer in Switch, the order we call reactors#AddPeer is
|
|
|
|
// different every time due to us using a map. Sometimes other reactors
|
|
|
|
// will be initialized before the consensus reactor. We should wait a few
|
|
|
|
// milliseconds and retry.
|
2018-06-04 21:50:42 -07:00
|
|
|
return nil, true
|
|
|
|
}
|
|
|
|
|
|
|
|
// NOTE: We only send evidence to peers where
|
|
|
|
// peerHeight - maxAge < evidenceHeight < peerHeight
|
2018-10-30 08:36:53 -07:00
|
|
|
maxAge := evR.evpool.State().ConsensusParams.Evidence.MaxAge
|
2018-06-04 21:50:42 -07:00
|
|
|
peerHeight := peerState.GetHeight()
|
|
|
|
if peerHeight < evHeight {
|
|
|
|
// peer is behind. sleep while he catches up
|
|
|
|
return nil, true
|
|
|
|
} else if peerHeight > evHeight+maxAge {
|
|
|
|
// evidence is too old, skip
|
|
|
|
// NOTE: if evidence is too old for an honest peer,
|
|
|
|
// then we're behind and either it already got committed or it never will!
|
|
|
|
evR.Logger.Info("Not sending peer old evidence", "peerHeight", peerHeight, "evHeight", evHeight, "maxAge", maxAge, "peer", peer)
|
|
|
|
return nil, false
|
|
|
|
}
|
|
|
|
|
|
|
|
// send evidence
|
|
|
|
msg = &EvidenceListMessage{[]types.Evidence{ev}}
|
|
|
|
return msg, false
|
|
|
|
}
|
|
|
|
|
2018-06-04 17:38:44 -07:00
|
|
|
// PeerState describes the state of a peer.
type PeerState interface {
	// GetHeight returns the peer's current height, used to decide
	// whether a piece of evidence is relevant to the peer.
	GetHeight() int64
}
|
|
|
|
|
2017-11-02 12:06:48 -06:00
|
|
|
//-----------------------------------------------------------------------------
|
|
|
|
// Messages
|
|
|
|
|
2017-11-19 01:34:11 +00:00
|
|
|
// EvidenceMessage is a message sent or received by the EvidenceReactor.
type EvidenceMessage interface {
	// ValidateBasic performs stateless validation of the message.
	ValidateBasic() error
}
|
2017-11-02 12:06:48 -06:00
|
|
|
|
2018-04-05 05:43:23 -07:00
|
|
|
// RegisterEvidenceMessages registers the evidence message types with the
// given amino codec so they can be (un)marshaled on the wire.
func RegisterEvidenceMessages(cdc *amino.Codec) {
	cdc.RegisterInterface((*EvidenceMessage)(nil), nil)
	cdc.RegisterConcrete(&EvidenceListMessage{},
		"tendermint/evidence/EvidenceListMessage", nil)
}
|
2017-11-02 12:06:48 -06:00
|
|
|
|
2018-07-09 13:01:23 +04:00
|
|
|
func decodeMsg(bz []byte) (msg EvidenceMessage, err error) {
|
2018-04-09 15:14:33 +03:00
|
|
|
if len(bz) > maxMsgSize {
|
2018-07-09 13:01:23 +04:00
|
|
|
return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
|
2018-04-09 15:14:33 +03:00
|
|
|
}
|
2018-04-05 05:43:23 -07:00
|
|
|
err = cdc.UnmarshalBinaryBare(bz, &msg)
|
2017-11-02 12:06:48 -06:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
//-------------------------------------
|
|
|
|
|
2018-11-01 07:07:18 +01:00
|
|
|
// EvidenceListMessage contains a list of evidence.
type EvidenceListMessage struct {
	Evidence []types.Evidence
}
|
|
|
|
|
2018-11-01 07:07:18 +01:00
|
|
|
// ValidateBasic performs basic validation.
|
|
|
|
func (m *EvidenceListMessage) ValidateBasic() error {
|
|
|
|
for i, ev := range m.Evidence {
|
|
|
|
if err := ev.ValidateBasic(); err != nil {
|
|
|
|
return fmt.Errorf("Invalid evidence (#%d): %v", i, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-11-19 01:34:11 +00:00
|
|
|
// String returns a string representation of the EvidenceListMessage.
func (m *EvidenceListMessage) String() string {
	return fmt.Sprintf("[EvidenceListMessage %v]", m.Evidence)
}
|