mirror of https://github.com/fluencelabs/tendermint, synced 2025-06-15 14:21:22 +00:00

consensus: fix for initializing block parts during catchup
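This change fixes how a peer's ProposalBlockParts bit array gets initialized when we help a peer on an earlier height catch up. Previously, gossipDataForCatchup lazily initialized the bit array on its local view of the peer's round state, so the initialization never reached the shared PeerState. The fix moves the initialization into the gossip loop, introduces a PeerState.InitProposalBlockParts method that writes it under the peer-state mutex, and documents that RoundState itself is not thread safe.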
@@ -488,6 +488,18 @@ OUTER_LOOP:
 		// If the peer is on a previous height, help catch up.
 		if (0 < prs.Height) && (prs.Height < rs.Height) {
 			heightLogger := logger.With("height", prs.Height)
+
+			// if we never received the commit message from the peer, the block parts wont be initialized
+			if prs.ProposalBlockParts == nil {
+				blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
+				if blockMeta == nil {
+					cmn.PanicCrisis(cmn.Fmt("Failed to load block %d when blockStore is at %d",
+						prs.Height, conR.conS.blockStore.Height()))
+				}
+				ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)
+				// continue the loop since prs is a copy and not effected by this initialization
+				continue OUTER_LOOP
+			}
 			conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer)
 			continue OUTER_LOOP
 		}
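The "continue OUTER_LOOP" above matters: prs is only a copy of the peer's round state, so the write performed by ps.InitProposalBlockParts is not visible through it, and the loop must restart to pick up a fresh copy before any parts are gossiped. Below is a minimal standalone sketch of that pitfall; PeerRoundState and PeerState here are simplified stand-ins, not the real tendermint types.

package main

import "fmt"

// PeerRoundState stands in for cstypes.PeerRoundState: callers receive a
// value snapshot, so later writes to the source of truth are invisible
// through it.
type PeerRoundState struct {
	ProposalBlockParts []bool // stand-in for the cmn.BitArray of received parts
}

// PeerState stands in for the reactor's PeerState: the shared source of
// truth (the real type guards this with a mutex, omitted here).
type PeerState struct {
	prs PeerRoundState
}

// GetRoundState returns a copy, mirroring how the gossip loop obtains prs.
func (ps *PeerState) GetRoundState() PeerRoundState { return ps.prs }

// InitProposalBlockParts writes through to the shared state.
func (ps *PeerState) InitProposalBlockParts(total int) {
	if ps.prs.ProposalBlockParts == nil {
		ps.prs.ProposalBlockParts = make([]bool, total)
	}
}

func main() {
	ps := &PeerState{}
	prs := ps.GetRoundState() // snapshot taken at the top of the loop

	ps.InitProposalBlockParts(4)

	// The snapshot is stale: it still sees nil, which is why the reactor
	// must continue OUTER_LOOP and take a fresh copy before gossiping.
	fmt.Println("snapshot:", prs.ProposalBlockParts)                // []
	fmt.Println("shared:  ", ps.GetRoundState().ProposalBlockParts) // [false false false false]
}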
@@ -539,20 +551,6 @@ OUTER_LOOP:
 func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState,
 	prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) {
 
-	// this might happen if we didn't receive the commit message from the peer
-	// NOTE: wouldn't it be better if the peer resubmitted his CommitStepMessage periodically if not progressing?
-	if prs.ProposalBlockParts == nil {
-		blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
-		if blockMeta == nil {
-			logger.Error("Failed to load block meta",
-				"ourHeight", rs.Height, "blockstoreHeight", conR.conS.blockStore.Height())
-			time.Sleep(conR.conS.config.PeerGossipSleep())
-			return
-		}
-		prs.ProposalBlockPartsHeader = blockMeta.BlockID.PartsHeader
-		prs.ProposalBlockParts = cmn.NewBitArray(blockMeta.BlockID.PartsHeader.Total)
-	}
-
 	if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
 		// Ensure that the peer's PartSetHeader is correct
 		blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
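With initialization now handled before gossipDataForCatchup is called, the lazy fallback above is removed. It only patched prs, the local view of the peer state, so the work was redone on every call. The error handling also changes: a missing block meta was previously logged and retried after PeerGossipSleep, while the new check in the gossip loop panics, treating a block that is below our own height but absent from our block store as a broken invariant.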
@@ -581,9 +579,11 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
 			Round: prs.Round, // Not our height, so it doesn't matter.
 			Part: part,
 		}
-		logger.Debug("Sending block part for catchup", "round", prs.Round)
+		logger.Debug("Sending block part for catchup", "height", prs.Height, "round", prs.Round, "index", index)
 		if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
 			ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
+		} else {
+			logger.Debug("Sending block part for catchup failed")
 		}
 		return
 	} else {
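The catchup debug log now includes the peer's height and the part index alongside the round, and a send that fails is logged instead of passing silently; SetHasProposalBlockPart is still only recorded when peer.Send succeeds.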
@@ -882,6 +882,19 @@ func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
 	ps.ProposalPOL = nil // Nil until ProposalPOLMessage received.
 }
 
+// InitProposalBlockParts initializes the peer's proposal block parts header and bit array.
+func (ps *PeerState) InitProposalBlockParts(partsHeader types.PartSetHeader) {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+
+	if ps.ProposalBlockParts != nil {
+		return
+	}
+
+	ps.ProposalBlockPartsHeader = partsHeader
+	ps.ProposalBlockParts = cmn.NewBitArray(partsHeader.Total)
+}
+
 // SetHasProposalBlockPart sets the given block part index as known for the peer.
 func (ps *PeerState) SetHasProposalBlockPart(height int, round int, index int) {
 	ps.mtx.Lock()
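InitProposalBlockParts is deliberately idempotent: it takes the peer-state mutex and returns early if the bit array already exists, so repeated or concurrent calls cannot clobber an initialized peer. A runnable sketch of the same guarded lazy-init pattern follows, with a plain bool slice standing in for cmn.BitArray.

package main

import (
	"fmt"
	"sync"
)

// peerState sketches the guarded lazy-init pattern used by
// InitProposalBlockParts: take the lock, bail out if another goroutine
// already initialized, otherwise set both fields together.
type peerState struct {
	mtx                sync.Mutex
	blockPartsTotal    int
	proposalBlockParts []bool
}

func (ps *peerState) initProposalBlockParts(total int) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.proposalBlockParts != nil {
		return // already initialized; keep the first writer's values
	}

	ps.blockPartsTotal = total
	ps.proposalBlockParts = make([]bool, total)
}

func main() {
	ps := &peerState{}
	var wg sync.WaitGroup
	// Concurrent callers are safe: exactly one initialization wins.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			ps.initProposalBlockParts(8)
		}()
	}
	wg.Wait()
	fmt.Println(len(ps.proposalBlockParts)) // 8
}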
@@ -55,6 +55,8 @@ func (rs RoundStepType) String() string {
 // It is Immutable when returned from ConsensusState.GetRoundState()
 // TODO: Actually, only the top pointer is copied,
 // so access to field pointers is still racey
+// NOTE: Not thread safe. Should only be manipulated by functions downstream
+// of the cs.receiveRoutine
 type RoundState struct {
 	Height int // Height we are working on
 	Round  int