uuid branch tidying

Jae Kwon 2015-07-13 16:00:01 -07:00
parent bdc2711f27
commit d91f073676
8 changed files with 94 additions and 52 deletions

View File

@@ -78,6 +78,7 @@ func GetConfig(rootDir string) cfg.Config {
 	mapConfig.SetDefault("db_dir", rootDir+"/data")
 	mapConfig.SetDefault("log_level", "debug")
 	mapConfig.SetDefault("rpc_laddr", "0.0.0.0:36657")
+	mapConfig.SetDefault("revisions_file", rootDir+"/revisions")
 	return mapConfig
 }
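The new revisions_file key follows the pattern of the existing defaults: it only takes effect when the key is absent from the loaded config. A hedged sketch of reading it back, assuming the returned cfg.Config exposes a GetString accessor for string-valued keys (the diff itself only shows SetDefault):

	config := GetConfig(rootDir)
	// Yields rootDir+"/revisions" unless overridden in the config file.
	// GetString is an assumption here, not shown in this commit.
	revisionsFile := config.GetString("revisions_file")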

View File

@@ -141,16 +141,16 @@ func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte

 	// Get round state
 	rs := conR.conS.GetRoundState()
 	ps := peer.Data.Get(PeerStateKey).(*PeerState)
-	_, msg_, err := DecodeMessage(msgBytes)
+	_, msg, err := DecodeMessage(msgBytes)
 	if err != nil {
-		log.Warn("Error decoding message", "channel", chId, "peer", peer, "msg", msg_, "error", err, "bytes", msgBytes)
+		log.Warn("Error decoding message", "channel", chId, "peer", peer, "msg", msg, "error", err, "bytes", msgBytes)
 		return
 	}
-	log.Debug("Receive", "channel", chId, "peer", peer, "msg", msg_, "rsHeight", rs.Height) //, "bytes", msgBytes)
+	log.Debug("Receive", "channel", chId, "peer", peer, "msg", msg, "rsHeight", rs.Height) //, "bytes", msgBytes)
 	switch chId {
 	case StateChannel:
-		switch msg := msg_.(type) {
+		switch msg := msg.(type) {
 		case *NewRoundStepMessage:
 			ps.ApplyNewRoundStepMessage(msg, rs)
 		case *CommitStepMessage:
@@ -163,10 +163,10 @@ func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte
 	case DataChannel:
 		if conR.fastSync {
-			log.Warn("Ignoring message received during fastSync", "msg", msg_)
+			log.Warn("Ignoring message received during fastSync", "msg", msg)
 			return
 		}
-		switch msg := msg_.(type) {
+		switch msg := msg.(type) {
 		case *ProposalMessage:
 			ps.SetHasProposal(msg.Proposal)
 			err = conR.conS.SetProposal(msg.Proposal)
@@ -181,10 +181,10 @@ func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte
 	case VoteChannel:
 		if conR.fastSync {
-			log.Warn("Ignoring message received during fastSync", "msg", msg_)
+			log.Warn("Ignoring message received during fastSync", "msg", msg)
 			return
 		}
-		switch msg := msg_.(type) {
+		switch msg := msg.(type) {
 		case *VoteMessage:
 			vote := msg.Vote
 			var validators *sm.ValidatorSet
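The msg_ to msg rename throughout Receive leans on Go scoping: switch msg := msg.(type) introduces a new msg bound to the concrete type inside each case, shadowing (not clobbering) the interface-typed msg returned by DecodeMessage, so the trailing underscore bought nothing. A self-contained sketch of the idiom, with a stand-in message type:

	package main

	import "fmt"

	type NewRoundStepMessage struct{ Height int }

	func main() {
		var msg interface{} = &NewRoundStepMessage{Height: 7}
		// Within each case, msg has the concrete type of that case;
		// outside the switch, msg is still the interface value.
		switch msg := msg.(type) {
		case *NewRoundStepMessage:
			fmt.Println("new round step at height", msg.Height)
		default:
			fmt.Println("unhandled message:", msg)
		}
	}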

View File

@@ -73,14 +73,14 @@ func (pexR *MempoolReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {

 // Implements Reactor
 func (memR *MempoolReactor) Receive(chId byte, src *p2p.Peer, msgBytes []byte) {
-	_, msg_, err := DecodeMessage(msgBytes)
+	_, msg, err := DecodeMessage(msgBytes)
 	if err != nil {
 		log.Warn("Error decoding message", "error", err)
 		return
 	}
-	log.Info("MempoolReactor received message", "msg", msg_)
+	log.Info("MempoolReactor received message", "msg", msg)

-	switch msg := msg_.(type) {
+	switch msg := msg.(type) {
 	case *TxMessage:
 		err := memR.Mempool.AddTx(msg.Tx)
 		if err != nil {

View File

@@ -1,6 +1,7 @@
 package p2p

 import (
+	"fmt"
 	"net"
 	"strings"
 	"sync"
@@ -54,7 +55,7 @@ func (ps *PeerSet) Add(peer *Peer) error {
 	// ensure we havent maxed out connections for the peer's IP range yet
 	// and update the IP range counters
-	if !ps.updateIPRangeCounts(peer.Host) {
+	if !ps.incrIPRangeCounts(peer.Host) {
 		return ErrSwitchMaxPeersPerIPRange
 	}
@@ -91,6 +92,10 @@ func (ps *PeerSet) Remove(peer *Peer) {
 	if item == nil {
 		return
 	}
+
+	// update the IP range counters
+	ps.decrIPRangeCounts(peer.Host)
+
 	index := item.index
 	// Copy the list but without the last element.
 	// (we must copy because we're mutating the list)
@@ -102,6 +107,7 @@ func (ps *PeerSet) Remove(peer *Peer) {
 		delete(ps.lookup, peer.Key)
 		return
 	}
+
 	// Move the last item from ps.list to "index" in list.
 	lastPeer := ps.list[len(ps.list)-1]
 	lastPeerKey := lastPeer.Key
@@ -110,6 +116,7 @@ func (ps *PeerSet) Remove(peer *Peer) {
 	lastPeerItem.index = index
 	ps.list = newList
 	delete(ps.lookup, peer.Key)
+
 }

 func (ps *PeerSet) Size() int {
@@ -147,50 +154,80 @@ func (ps *PeerSet) HasMaxForIPRange(conn net.Conn) (ok bool) {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
 	ip, _, _ := net.SplitHostPort(conn.RemoteAddr().String())
-	spl := strings.Split(ip, ".")
+	ipBytes := strings.Split(ip, ".")

 	c := ps.connectedIPs
-	for i, ipByte := range spl {
+	for i, ipByte := range ipBytes {
 		if c, ok = c.children[ipByte]; !ok {
 			return false
 		}
-		if c.count == maxPeersPerIPRange[i] {
+		if maxPeersPerIPRange[i] <= c.count {
 			return true
 		}
 	}
 	return false
 }

-// Update counts for this address' IP range
+// Increments counts for this address' IP range
 // Returns false if we already have enough connections
 // Not thread safe (only called by ps.Add())
-func (ps *PeerSet) updateIPRangeCounts(address string) bool {
-	spl := strings.Split(address, ".")
+func (ps *PeerSet) incrIPRangeCounts(address string) bool {
+	addrParts := strings.Split(address, ".")

 	c := ps.connectedIPs
-	return updateNestedCountRecursive(c, spl, 0)
+	return incrNestedCounters(c, addrParts, 0)
 }

-// recursively descend the IP hierarchy, checking if we have
-// max peers for each range and updating if not
-func updateNestedCountRecursive(c *nestedCounter, ipBytes []string, index int) bool {
-	if index == len(ipBytes) {
-		return true
-	}
+// Recursively descend the IP hierarchy, checking if we have
+// max peers for each range and incrementing if not.
+// Returns false if incr failed because max peers reached for some range counter.
+func incrNestedCounters(c *nestedCounter, ipBytes []string, index int) bool {
+	fmt.Println("incr:", c.count, ipBytes, index)
 	ipByte := ipBytes[index]
-	if c2, ok := c.children[ipByte]; !ok {
-		c2 = NewNestedCounter()
-		c.children[ipByte] = c2
-		c = c2
-	} else {
-		c = c2
-		if c.count == maxPeersPerIPRange[index] {
-			return false
-		}
+	child := c.children[ipByte]
+	if child == nil {
+		child = NewNestedCounter()
+		c.children[ipByte] = child
 	}
-	if !updateNestedCountRecursive(c, ipBytes, index+1) {
-		return false
+	fmt.Println("incr child:", child.count)
+	if index+1 < len(ipBytes) {
+		fmt.Println("1>>")
+		if !incrNestedCounters(child, ipBytes, index+1) {
+			return false
+		}
+	} else {
+		fmt.Println("2>>")
+	}
+	if maxPeersPerIPRange[index] <= child.count {
+		return false
+	} else {
+		child.count += 1
+		return true
+	}
+}
+
+// Decrement counts for this address' IP range
+func (ps *PeerSet) decrIPRangeCounts(address string) {
+	addrParts := strings.Split(address, ".")
+
+	c := ps.connectedIPs
+	decrNestedCounters(c, addrParts, 0)
+}
+
+// Recursively descend the IP hierarchy, decrementing by one.
+// If the counter is zero, deletes the child.
+func decrNestedCounters(c *nestedCounter, ipBytes []string, index int) {
+	ipByte := ipBytes[index]
+	child := c.children[ipByte]
+	if child == nil {
+		log.Error("p2p/peer_set decrNestedCounters encountered a missing child counter")
+		return
 	}
-	c.count += 1
-	return true
+	if index+1 < len(ipBytes) {
+		decrNestedCounters(child, ipBytes, index+1)
 	}
+	child.count -= 1
+	if child.count <= 0 {
+		delete(c.children, ipByte)
+	}
 }
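The incr/decr pair maintains a trie of counters keyed by successive IP bytes, so one connection counts toward its /8, /16, /24, and exact-address totals at once; HasMaxForIPRange then compares each level against maxPeersPerIPRange. A minimal sketch of the assumed nestedCounter shape (its real definition lives elsewhere in peer_set.go) and the counting semantics, with the limit checks omitted for brevity:

	package main

	import (
		"fmt"
		"strings"
	)

	// Assumed shape of nestedCounter: a count per node plus
	// children keyed by the next IP byte.
	type nestedCounter struct {
		count    int
		children map[string]*nestedCounter
	}

	// incr bumps the counter at every level of the address; unlike
	// incrNestedCounters above, it skips the maxPeersPerIPRange checks.
	func incr(root *nestedCounter, ip string) {
		c := root
		for _, b := range strings.Split(ip, ".") {
			child := c.children[b]
			if child == nil {
				child = &nestedCounter{children: map[string]*nestedCounter{}}
				c.children[b] = child
			}
			child.count++
			c = child
		}
	}

	// countsFor reports the per-level counts that HasMaxForIPRange
	// would compare against maxPeersPerIPRange[0..3].
	func countsFor(root *nestedCounter, ip string) (counts []int) {
		c := root
		for _, b := range strings.Split(ip, ".") {
			c = c.children[b]
			if c == nil {
				return counts
			}
			counts = append(counts, c.count)
		}
		return counts
	}

	func main() {
		root := &nestedCounter{children: map[string]*nestedCounter{}}
		for _, ip := range []string{"10.0.0.1", "10.0.0.2", "10.0.1.1"} {
			incr(root, ip)
		}
		fmt.Println(countsFor(root, "10.0.0.1")) // [3 3 2 1]: /8, /16, /24, exact
	}

The pruning in decrNestedCounters matters because otherwise the trie would grow with every address ever connected. The switch from == to <= comparisons also keeps the checks correct if the limit is lowered while counts already exceed it, which the test below exercises by reassigning maxPeersPerIPRange mid-run.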

View File

@@ -1,6 +1,7 @@
 package p2p

 import (
+	"fmt"
 	"math/rand"
 	"strings"
 	"testing"
@@ -24,8 +25,8 @@ func TestAddRemoveOne(t *testing.T) {
 	peerSet := NewPeerSet()

 	peer := randPeer()
-	added := peerSet.Add(peer)
-	if err := added; err != nil {
+	err := peerSet.Add(peer)
+	if err != nil {
 		t.Errorf("Failed to add new peer")
 	}
 	if peerSet.Size() != 1 {
@@ -92,6 +93,7 @@ func TestIPRanges(t *testing.T) {
 	peerSet := NewPeerSet()

 	// test /8
+	fmt.Println("---")
 	maxPeersPerIPRange = [4]int{2, 2, 2, 2}
 	peer := newPeerInIPRange("54")
 	if err := peerSet.Add(peer); err != nil {
@@ -109,6 +111,7 @@ func TestIPRanges(t *testing.T) {
 	if err := peerSet.Add(peer); err != nil {
 		t.Errorf("Failed to add new peer")
 	}
+	fmt.Println("---END")

 	// test /16
 	peerSet = NewPeerSet()

View File

@@ -20,7 +20,6 @@ const (
 	PexChannel = byte(0x00)
 	ensurePeersPeriodSeconds = 30
 	minNumOutboundPeers = 10
-	maxNumPeers = 50
 )

 /*
@@ -78,12 +77,14 @@ func (pexR *PEXReactor) GetChannels() []*ChannelDescriptor {

 func (pexR *PEXReactor) AddPeer(peer *Peer) {
 	// Add the peer to the address book
 	netAddr := NewNetAddressString(fmt.Sprintf("%s:%d", peer.Host, peer.P2PPort))
-	pexR.book.AddAddress(netAddr, netAddr) // the peer is its own source
 	if peer.IsOutbound() {
 		if pexR.book.NeedMoreAddrs() {
 			pexR.RequestPEX(peer)
 		}
+	} else {
+		// For inbound connections, the peer is its own source
+		// (For outbound peers, the address is already in the books)
+		pexR.book.AddAddress(netAddr, netAddr)
 	}
 }
@@ -104,7 +105,7 @@ func (pexR *PEXReactor) Receive(chId byte, src *Peer, msgBytes []byte) {
 	}
 	log.Info("Received message", "msg", msg)

-	switch msgT := msg.(type) {
+	switch msg := msg.(type) {
 	case *pexRequestMessage:
 		// src requested some peers.
 		// TODO: prevent abuse.
@@ -114,7 +115,7 @@ func (pexR *PEXReactor) Receive(chId byte, src *Peer, msgBytes []byte) {
 		// TODO: prevent abuse.
 		// (We don't want to get spammed with bad peers)
 		srcAddr := src.Connection().RemoteAddress
-		for _, addr := range msgT.Addrs {
+		for _, addr := range msg.Addrs {
 			pexR.book.AddAddress(addr, srcAddr)
 		}
 	default:
@@ -210,12 +211,13 @@ func (pexR *PEXReactor) ensurePeers() {
 		}(item.(*NetAddress))
 	}

-	// if no addresses to dial, pick a random connected peer and ask for more peers
-	if toDial.Size() == 0 {
+	// If we need more addresses, pick a random peer and ask for more.
+	if pexR.book.NeedMoreAddrs() {
 		if peers := pexR.sw.Peers().List(); len(peers) > 0 {
 			i := rand.Int() % len(peers)
-			log.Debug("No addresses to dial. Sending pexRequest to random peer", "peer", peers[i])
-			pexR.RequestPEX(peers[i])
+			peer := peers[i]
+			log.Debug("No addresses to dial. Sending pexRequest to random peer", "peer", peer)
+			pexR.RequestPEX(peer)
 		}
 	}
 }
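Two policy changes land in this file: AddPeer now records only inbound peers as their own source (outbound addresses are, by definition, already in the book), and ensurePeers asks a random peer for more addresses whenever the book runs low, not just when there is nothing left to dial. A toy sketch of the AddPeer rule, with stub types that stand in for the real Peer and AddrBook:

	package main

	import "fmt"

	// Stub types for illustration only.
	type peer struct{ outbound bool }

	func (p *peer) IsOutbound() bool { return p.outbound }

	type addrBook map[string]string // address -> source address

	// recordPeer mirrors the new address-book half of AddPeer: only
	// inbound peers are added, with themselves as source.
	func recordPeer(book addrBook, p *peer, addr string) {
		if !p.IsOutbound() {
			book[addr] = addr
		}
	}

	func main() {
		book := addrBook{}
		recordPeer(book, &peer{outbound: true}, "1.2.3.4:46656")  // we dialed it: already in book
		recordPeer(book, &peer{outbound: false}, "5.6.7.8:46656") // it dialed us: record it
		fmt.Println(book) // only the inbound peer's address was added
	}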

View File

@@ -57,7 +57,8 @@ var (
 )

 const (
-	peerDialTimeoutSeconds = 3
+	peerDialTimeoutSeconds = 3  // TODO make this configurable
+	maxNumPeers            = 50 // TODO make this configurable
 )

 func NewSwitch() *Switch {
@@ -308,9 +309,7 @@ func (sw *Switch) listenerRoutine(l Listener) {
 		}

 		// ignore connection if we already have enough
-		// note we might exceed the maxNumPeers in order to
-		// achieve minNumOutboundPeers
-		if sw.peers.Size() >= maxNumPeers {
+		if maxNumPeers <= sw.peers.Size() {
 			log.Debug("Ignoring inbound connection: already have enough peers", "conn", inConn, "numPeers", sw.peers.Size(), "max", maxNumPeers)
 			continue
 		}

View File

@@ -293,14 +293,14 @@ func adjustByOutputs(accounts map[string]*account.Account, outs []*types.TxOutpu
 // If the tx is invalid, an error will be returned.
 // Unlike ExecBlock(), state will not be altered.
-func ExecTx(blockCache *BlockCache, tx_ types.Tx, runCall bool, evc events.Fireable) (err error) {
+func ExecTx(blockCache *BlockCache, tx types.Tx, runCall bool, evc events.Fireable) (err error) {

 	// TODO: do something with fees
 	fees := int64(0)
 	_s := blockCache.State() // hack to access validators and block height

 	// Exec tx
-	switch tx := tx_.(type) {
+	switch tx := tx.(type) {
 	case *types.SendTx:
 		accounts, err := getInputs(blockCache, tx.Inputs)
 		if err != nil {