Mirror of https://github.com/fluencelabs/tendermint, synced 2025-06-22 01:11:32 +00:00

Commit: Merge branch 'develop' into jae/literefactor4
@@ -29,10 +29,10 @@ eg, L = latency = 0.1s
 */

 const (
-    requestIntervalMS         = 100
-    maxTotalRequesters        = 1000
+    requestIntervalMS         = 2
+    maxTotalRequesters        = 600
     maxPendingRequests        = maxTotalRequesters
-    maxPendingRequestsPerPeer = 50
+    maxPendingRequestsPerPeer = 20

     // Minimum recv rate to ensure we're receiving blocks from a peer fast
     // enough. If a peer is not sending us data at at least that rate, we
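For context, the comment above describes a liveness rule: a peer whose throughput falls below a floor rate is considered timed out and dropped. A minimal, self-contained sketch of such a check, with hypothetical names and an illustrative constant (not code from this diff):

package main

import (
	"fmt"
	"time"
)

// minRecvRate is a floor throughput in bytes/s below which a peer is
// considered too slow (illustrative value, not the one in the source).
const minRecvRate = 10240

// slowPeer reports whether a peer that delivered bytesReceived bytes
// over the elapsed window is below the minimum receive rate.
func slowPeer(bytesReceived int64, elapsed time.Duration) bool {
	if elapsed <= 0 {
		return false // not enough data to judge yet
	}
	rate := float64(bytesReceived) / elapsed.Seconds()
	return rate < minRecvRate
}

func main() {
	// 4 kB over 1s is below a 10 kB/s floor, so this peer would be dropped.
	fmt.Println(slowPeer(4096, time.Second)) // true
}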
@@ -219,14 +219,12 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
     defer pool.mtx.Unlock()

     request := pool.requesters[height]
-
-    if request.block == nil {
-        panic("Expected block to be non-nil")
-    }
-
-    // RemovePeer will redo all requesters associated with this peer.
-    pool.removePeer(request.peerID)
-    return request.peerID
+    peerID := request.getPeerID()
+    if peerID != p2p.ID("") {
+        // RemovePeer will redo all requesters associated with this peer.
+        pool.removePeer(peerID)
+    }
+    return peerID
 }

 // TODO: ensure that blocks come in order for each peer.
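The switch from reading request.peerID directly to request.getPeerID() suggests a data-race fix: the requester's peer ID is assigned by another goroutine, so reads go through a mutex-guarded accessor. A minimal sketch of that pattern, using hypothetical cut-down types:

package main

import (
	"fmt"
	"sync"
)

// bpRequester is a toy stand-in for the pool's requester type.
type bpRequester struct {
	mtx    sync.Mutex
	peerID string // written by the request goroutine, read by the pool
}

// getPeerID reads the peer ID under the lock, so a concurrent writer
// (e.g. a redo that reassigns the request) cannot race with readers.
func (r *bpRequester) getPeerID() string {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	return r.peerID
}

func (r *bpRequester) setPeer(id string) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	r.peerID = id
}

func main() {
	r := &bpRequester{}
	r.setPeer("peer-1")
	fmt.Println(r.getPeerID()) // "peer-1"
}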
@@ -1,7 +1,6 @@
 package blockchain

 import (
-    "math/rand"
     "testing"
     "time"

@@ -25,7 +24,7 @@ func makePeers(numPeers int, minHeight, maxHeight int64) map[p2p.ID]testPeer {
     peers := make(map[p2p.ID]testPeer, numPeers)
     for i := 0; i < numPeers; i++ {
         peerID := p2p.ID(cmn.RandStr(12))
-        height := minHeight + rand.Int63n(maxHeight-minHeight)
+        height := minHeight + cmn.RandInt63n(maxHeight-minHeight)
         peers[peerID] = testPeer{peerID, height}
     }
     return peers
@@ -80,7 +79,7 @@ func TestBasic(t *testing.T) {
         }
         // Request desired, pretend like we got the block immediately.
         go func() {
-            block := &types.Block{Header: &types.Header{Height: request.Height}}
+            block := &types.Block{Header: types.Header{Height: request.Height}}
             pool.AddBlock(request.PeerID, block, 123)
             t.Logf("Added block from peer %v (height: %v)", request.PeerID, request.Height)
         }()
@@ -5,19 +5,21 @@ import (
     "reflect"
     "time"

-    "github.com/tendermint/go-amino"
+    amino "github.com/tendermint/go-amino"
+
+    cmn "github.com/tendermint/tendermint/libs/common"
+    "github.com/tendermint/tendermint/libs/log"
     "github.com/tendermint/tendermint/p2p"
     sm "github.com/tendermint/tendermint/state"
     "github.com/tendermint/tendermint/types"
-    cmn "github.com/tendermint/tendermint/libs/common"
-    "github.com/tendermint/tendermint/libs/log"
 )

 const (
     // BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
     BlockchainChannel = byte(0x40)

-    trySyncIntervalMS = 50
+    trySyncIntervalMS = 10
+
     // stop syncing when last block's time is
     // within this much of the system time.
     // stopSyncingDurationMinutes = 10
@@ -75,8 +77,9 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
             store.Height()))
     }

-    const capacity = 1000 // must be bigger than peers count
-    requestsCh := make(chan BlockRequest, capacity)
+    requestsCh := make(chan BlockRequest, maxTotalRequesters)
+
+    const capacity = 1000 // must be bigger than peers count
     errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock

     pool := NewBlockPool(
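The capacity comments carry the rationale: requestsCh is now sized to the pool's own cap (maxTotalRequesters), while errorsCh keeps a fixed buffer "bigger than peers count" so the Receive path never blocks when reporting a peer error. A sketch of that non-blocking reporting idea with a hypothetical type; the commit itself relies on the buffer alone, and the select/default below only makes the no-block guarantee explicit:

package main

import "fmt"

type peerError struct {
	peerID string
	err    error
}

func main() {
	// Buffered so a burst of errors (up to one per peer) is absorbed
	// without stalling the goroutine that produces them.
	errorsCh := make(chan peerError, 1000)

	report := func(e peerError) {
		select {
		case errorsCh <- e:
		default:
			// Channel full: drop rather than block the hot path.
		}
	}

	report(peerError{peerID: "peer-1", err: fmt.Errorf("timeout")})
	e := <-errorsCh
	fmt.Println(e.peerID) // "peer-1"
}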
@@ -174,7 +177,7 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,

 // Receive implements Reactor by handling 4 types of messages (look below).
 func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-    msg, err := DecodeMessage(msgBytes)
+    msg, err := decodeMsg(msgBytes)
     if err != nil {
         bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
         bcR.Switch.StopPeerForError(src, err)
@@ -208,7 +211,6 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)

 // Handle messages from the poolReactor telling the reactor what to do.
 // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
-// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
 func (bcR *BlockchainReactor) poolRoutine() {

     trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
@@ -223,6 +225,8 @@ func (bcR *BlockchainReactor) poolRoutine() {
     lastHundred := time.Now()
     lastRate := 0.0

+    didProcessCh := make(chan struct{}, 1)
+
 FOR_LOOP:
     for {
         select {
@@ -238,14 +242,17 @@ FOR_LOOP:
             // The pool handles timeouts, just let it go.
             continue FOR_LOOP
         }
+
         case err := <-bcR.errorsCh:
             peer := bcR.Switch.Peers().Get(err.peerID)
             if peer != nil {
                 bcR.Switch.StopPeerForError(peer, err)
             }
+
         case <-statusUpdateTicker.C:
             // ask for status updates
             go bcR.BroadcastStatusRequest() // nolint: errcheck
+
         case <-switchToConsensusTicker.C:
             height, numPending, lenRequesters := bcR.pool.GetStatus()
             outbound, inbound, _ := bcR.Switch.NumPeers()
@@ -260,60 +267,78 @@ FOR_LOOP:
                 break FOR_LOOP
             }

         case <-trySyncTicker.C: // chan time
-            // This loop can be slow as long as it's doing syncing work.
-        SYNC_LOOP:
-            for i := 0; i < 10; i++ {
-                // See if there are any blocks to sync.
-                first, second := bcR.pool.PeekTwoBlocks()
-                //bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
-                if first == nil || second == nil {
-                    // We need both to sync the first block.
-                    break SYNC_LOOP
-                }
-                firstParts := first.MakePartSet(state.ConsensusParams.BlockPartSizeBytes)
-                firstPartsHeader := firstParts.Header()
-                firstID := types.BlockID{first.Hash(), firstPartsHeader}
-                // Finally, verify the first block using the second's commit
-                // NOTE: we can probably make this more efficient, but note that calling
-                // first.Hash() doesn't verify the tx contents, so MakePartSet() is
-                // currently necessary.
-                err := state.Validators.VerifyCommit(
-                    chainID, firstID, first.Height, second.LastCommit)
-                if err != nil {
-                    bcR.Logger.Error("Error in validation", "err", err)
-                    peerID := bcR.pool.RedoRequest(first.Height)
-                    peer := bcR.Switch.Peers().Get(peerID)
-                    if peer != nil {
-                        bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
-                    }
-                    break SYNC_LOOP
-                } else {
-                    bcR.pool.PopRequest()
-
-                    // TODO: batch saves so we dont persist to disk every block
-                    bcR.store.SaveBlock(first, firstParts, second.LastCommit)
-
-                    // TODO: same thing for app - but we would need a way to
-                    // get the hash without persisting the state
-                    var err error
-                    state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
-                    if err != nil {
-                        // TODO This is bad, are we zombie?
-                        cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v",
-                            first.Height, first.Hash(), err))
-                    }
-                    blocksSynced++
-
-                    if blocksSynced%100 == 0 {
-                        lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
-                        bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
-                            "max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
-                        lastHundred = time.Now()
-                    }
-                }
-            }
+            select {
+            case didProcessCh <- struct{}{}:
+            default:
+            }
+
+        case <-didProcessCh:
+            // NOTE: It is a subtle mistake to process more than a single block
+            // at a time (e.g. 10) here, because we only TrySend 1 request per
+            // loop. The ratio mismatch can result in starving of blocks, a
+            // sudden burst of requests and responses, and repeat.
+            // Consequently, it is better to split these routines rather than
+            // coupling them as it's written here. TODO uncouple from request
+            // routine.
+
+            // See if there are any blocks to sync.
+            first, second := bcR.pool.PeekTwoBlocks()
+            //bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
+            if first == nil || second == nil {
+                // We need both to sync the first block.
+                continue FOR_LOOP
+            } else {
+                // Try again quickly next loop.
+                didProcessCh <- struct{}{}
+            }
+
+            firstParts := first.MakePartSet(state.ConsensusParams.BlockPartSizeBytes)
+            firstPartsHeader := firstParts.Header()
+            firstID := types.BlockID{first.Hash(), firstPartsHeader}
+            // Finally, verify the first block using the second's commit
+            // NOTE: we can probably make this more efficient, but note that calling
+            // first.Hash() doesn't verify the tx contents, so MakePartSet() is
+            // currently necessary.
+            err := state.Validators.VerifyCommit(
+                chainID, firstID, first.Height, second.LastCommit)
+            if err != nil {
+                bcR.Logger.Error("Error in validation", "err", err)
+                peerID := bcR.pool.RedoRequest(first.Height)
+                peer := bcR.Switch.Peers().Get(peerID)
+                if peer != nil {
+                    // NOTE: we've already removed the peer's request, but we
+                    // still need to clean up the rest.
+                    bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
+                }
+                continue FOR_LOOP
+            } else {
+                bcR.pool.PopRequest()
+
+                // TODO: batch saves so we dont persist to disk every block
+                bcR.store.SaveBlock(first, firstParts, second.LastCommit)
+
+                // TODO: same thing for app - but we would need a way to
+                // get the hash without persisting the state
+                var err error
+                state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
+                if err != nil {
+                    // TODO This is bad, are we zombie?
+                    cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v",
+                        first.Height, first.Hash(), err))
+                }
+                blocksSynced++
+
+                if blocksSynced%100 == 0 {
+                    lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
+                    bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
+                        "max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
+                    lastHundred = time.Now()
+                }
+            }
             continue FOR_LOOP

         case <-bcR.Quit():
             break FOR_LOOP
         }
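The NOTE added in this hunk explains the new shape: the ticker no longer does the syncing work itself; it only nudges a one-slot trigger channel, and each wake-up processes exactly one block, re-arming the trigger if more work remains. A standalone sketch of that coalescing-trigger idiom (illustrative only, not code from this commit):

package main

import (
	"fmt"
	"time"
)

func main() {
	work := []string{"block-1", "block-2", "block-3"}

	// Capacity 1: pending triggers coalesce instead of piling up.
	didProcessCh := make(chan struct{}, 1)
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()

	for len(work) > 0 {
		select {
		case <-ticker.C:
			// Non-blocking nudge: if a trigger is already pending, skip.
			select {
			case didProcessCh <- struct{}{}:
			default:
			}
		case <-didProcessCh:
			// Process exactly one item per wake-up...
			fmt.Println("processed", work[0])
			work = work[1:]
			// ...and immediately re-arm so the next item doesn't have
			// to wait for the next tick.
			if len(work) > 0 {
				didProcessCh <- struct{}{}
			}
		}
	}
}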
@@ -342,17 +367,11 @@ func RegisterBlockchainMessages(cdc *amino.Codec) {
     cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
 }

-// DecodeMessage decodes BlockchainMessage.
-// TODO: ensure that bz is completely read.
-func DecodeMessage(bz []byte) (msg BlockchainMessage, err error) {
+func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
     if len(bz) > maxMsgSize {
-        return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-            len(bz), maxMsgSize)
+        return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
     }
     err = cdc.UnmarshalBinaryBare(bz, &msg)
-    if err != nil {
-        err = cmn.ErrorWrap(err, "DecodeMessage() had bytes left over")
-    }
     return
 }

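The renamed decodeMsg keeps the size guard but drops the extra ErrorWrap, leaving error reporting to cdc.UnmarshalBinaryBare itself. A hypothetical test sketch of the guard, not part of this commit; it assumes the package's unexported decodeMsg and maxMsgSize, so it would have to live in the same package:

package blockchain

import (
	"strings"
	"testing"
)

// Any payload over maxMsgSize must be rejected before decoding is attempted.
func TestDecodeMsgRejectsOversizedPayload(t *testing.T) {
	bz := make([]byte, maxMsgSize+1)
	_, err := decodeMsg(bz)
	if err == nil {
		t.Fatal("expected an error for a payload over maxMsgSize")
	}
	if !strings.Contains(err.Error(), "exceeds max size") {
		t.Fatalf("unexpected error: %v", err)
	}
}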
@@ -206,3 +206,4 @@ func (tp *bcrTestPeer) IsPersistent() bool { return true }
 func (tp *bcrTestPeer) Get(s string) interface{} { return s }
 func (tp *bcrTestPeer) Set(string, interface{})  {}
 func (tp *bcrTestPeer) RemoteIP() net.IP         { return []byte{127, 0, 0, 1} }
+func (tp *bcrTestPeer) OriginalAddr() *p2p.NetAddress { return nil }
@@ -126,7 +126,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
         eraseSeenCommitInDB bool
     }{
         {
-            block:      newBlock(&header1, commitAtH10),
+            block:      newBlock(header1, commitAtH10),
             parts:      validPartSet,
             seenCommit: seenCommit1,
         },
@@ -137,19 +137,19 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
         },

         {
-            block:     newBlock(&header2, commitAtH10),
+            block:     newBlock(header2, commitAtH10),
             parts:     uncontiguousPartSet,
             wantPanic: "only save contiguous blocks", // and incomplete and uncontiguous parts
         },

         {
-            block:     newBlock(&header1, commitAtH10),
+            block:     newBlock(header1, commitAtH10),
             parts:     incompletePartSet,
             wantPanic: "only save complete block", // incomplete parts
         },

         {
-            block:             newBlock(&header1, commitAtH10),
+            block:             newBlock(header1, commitAtH10),
             parts:             validPartSet,
             seenCommit:        seenCommit1,
             corruptCommitInDB: true, // Corrupt the DB's commit entry
@@ -157,7 +157,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
         },

         {
-            block:      newBlock(&header1, commitAtH10),
+            block:      newBlock(header1, commitAtH10),
             parts:      validPartSet,
             seenCommit: seenCommit1,
             wantPanic:  "unmarshal to types.BlockMeta failed",
@@ -165,7 +165,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
         },

         {
-            block:      newBlock(&header1, commitAtH10),
+            block:      newBlock(header1, commitAtH10),
             parts:      validPartSet,
             seenCommit: seenCommit1,
@@ -174,7 +174,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
         },

         {
-            block:      newBlock(&header1, commitAtH10),
+            block:      newBlock(header1, commitAtH10),
             parts:      validPartSet,
             seenCommit: seenCommit1,
@@ -183,7 +183,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
         },

         {
-            block:      newBlock(&header1, commitAtH10),
+            block:      newBlock(header1, commitAtH10),
             parts:      validPartSet,
             seenCommit: seenCommit1,
@@ -375,7 +375,7 @@ func doFn(fn func() (interface{}, error)) (res interface{}, err error, panicErr
     return res, err, panicErr
 }

-func newBlock(hdr *types.Header, lastCommit *types.Commit) *types.Block {
+func newBlock(hdr types.Header, lastCommit *types.Commit) *types.Block {
     return &types.Block{
         Header:     hdr,
         LastCommit: lastCommit,
@@ -2,12 +2,12 @@ package blockchain

 import (
     "github.com/tendermint/go-amino"
-    "github.com/tendermint/tendermint/crypto"
+    cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
 )

 var cdc = amino.NewCodec()

 func init() {
     RegisterBlockchainMessages(cdc)
-    crypto.RegisterAmino(cdc)
+    cryptoAmino.RegisterAmino(cdc)
 }
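The wire.go hunk swaps the broad crypto registration for the dedicated cryptoAmino helper; either way, every interface and concrete type decoded through the codec must be registered on it up front. A minimal sketch of the amino registration round-trip with toy types (hypothetical names, independent of this commit):

package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"
)

// Toy message hierarchy mirroring how BlockchainMessage is registered.
type Message interface{}

type statusRequest struct{ Height int64 }

func main() {
	cdc := amino.NewCodec()
	// Interface and every concrete type are registered up front, just as
	// RegisterBlockchainMessages and cryptoAmino.RegisterAmino do for the
	// real codec in wire.go.
	cdc.RegisterInterface((*Message)(nil), nil)
	cdc.RegisterConcrete(&statusRequest{}, "example/StatusRequest", nil)

	bz := cdc.MustMarshalBinaryBare(&statusRequest{Height: 7})
	var msg Message
	cdc.MustUnmarshalBinaryBare(bz, &msg)
	fmt.Printf("%#v\n", msg) // round-trips back to *statusRequest
}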