mirror of https://github.com/fluencelabs/tendermint, synced 2025-04-25 14:52:17 +00:00
* expose AuthEnc in the P2P config

  if AuthEnc is true, dialed peers must have a node ID in the address and it must match the persistent pubkey from the secret handshake.

  Refs #1157

* fixes after my own review
* fix docs
* fix build failure

```
p2p/pex/pex_reactor_test.go:288:88: cannot use seed.NodeInfo().NetAddress() (type *p2p.NetAddress) as type string in array or slice literal
```

* p2p: introduce peerConn to simplify peer creation
  * Introduce `peerConn` containing the known fields of `peer`
  * `peer` only created in `sw.addPeer` once handshake is complete and NodeInfo is checked
  * Eliminates some mutable variables and makes the code flow better
  * Simplifies the `newXxxPeer` funcs
* Use ID instead of PubKey where possible.
  * SetPubKeyFilter -> SetIDFilter
  * nodeInfo.Validate takes ID
  * remove peer.PubKey()
  * persistent node ids
* fixes from review
* test: use ip_plus_id.sh more
* fix invalid memory panic during fast_sync test

```
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: panic: runtime error: invalid memory address or nil pointer dereference
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: [signal SIGSEGV: segmentation violation code=0x1 addr=0x20 pc=0x98dd3e]
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]:
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: goroutine 3432 [running]:
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: github.com/tendermint/tendermint/p2p.newOutboundPeerConn(0xc423fd1380, 0xc420933e00, 0x1, 0x1239a60, 0xc420128c40, 0x2, 0x42caf6, 0xc42001f300, 0xc422831d98, 0xc4227951c0, ...)
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: #011/go/src/github.com/tendermint/tendermint/p2p/peer.go:123 +0x31e
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: github.com/tendermint/tendermint/p2p.(*Switch).addOutboundPeerWithConfig(0xc4200ad040, 0xc423fd1380, 0xc420933e00, 0xc423f48801, 0x28, 0x2)
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: #011/go/src/github.com/tendermint/tendermint/p2p/switch.go:455 +0x12b
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: github.com/tendermint/tendermint/p2p.(*Switch).DialPeerWithAddress(0xc4200ad040, 0xc423fd1380, 0x1, 0x0, 0x0)
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: #011/go/src/github.com/tendermint/tendermint/p2p/switch.go:371 +0xdc
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: github.com/tendermint/tendermint/p2p.(*Switch).reconnectToPeer(0xc4200ad040, 0x123e000, 0xc42007bb00)
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: #011/go/src/github.com/tendermint/tendermint/p2p/switch.go:290 +0x25f
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: created by github.com/tendermint/tendermint/p2p.(*Switch).StopPeerForError
2018-02-21T06:30:05Z box887.localdomain docker/local_testnet_4[14907]: #011/go/src/github.com/tendermint/tendermint/p2p/switch.go:256 +0x1b7
```
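The ID-in-address rule above is easy to picture with a small, self-contained sketch. This is only an illustration of the check described in the commit message (the dialed address carries a node ID, and after the secret handshake it must match the ID derived from the peer's pubkey); the names here (NodeID, splitAddress, checkDialedID) are hypothetical stand-ins, not the tendermint p2p API.

```
package main

import (
	"errors"
	"fmt"
	"strings"
)

// NodeID stands in for the hex-encoded peer ID derived from a node's
// public key. Illustrative only.
type NodeID string

// splitAddress parses an "id@host:port" dial string into its ID and
// network-address parts, rejecting addresses without an ID.
func splitAddress(addr string) (NodeID, string, error) {
	parts := strings.SplitN(addr, "@", 2)
	if len(parts) != 2 || parts[0] == "" {
		return "", "", errors.New("address must contain a node ID: id@host:port")
	}
	return NodeID(parts[0]), parts[1], nil
}

// checkDialedID models the post-handshake check: the ID we dialed must
// match the ID derived from the pubkey learned in the secret handshake.
func checkDialedID(dialed, authenticated NodeID) error {
	if dialed != authenticated {
		return fmt.Errorf("dialed ID %s, but handshake authenticated ID %s", dialed, authenticated)
	}
	return nil
}

func main() {
	id, netAddr, err := splitAddress("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@10.0.0.5:46656")
	if err != nil {
		panic(err)
	}
	fmt.Println("dialing", netAddr, "expecting peer", id)

	// After the handshake, compare against the authenticated ID.
	fmt.Println(checkDialedID(id, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef")) // <nil>
}
```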
153 lines
3.8 KiB
Go
package p2p

import (
	"math/rand"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"

	crypto "github.com/tendermint/go-crypto"
	cmn "github.com/tendermint/tmlibs/common"
)

// Returns an empty dummy peer
func randPeer() *peer {
	pubKey := crypto.GenPrivKeyEd25519().Wrap().PubKey()
	return &peer{
		nodeInfo: NodeInfo{
			ListenAddr: cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256),
			PubKey:     pubKey,
		},
	}
}

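// TestPeerSetAddRemoveOne adds a handful of peers, then exercises removal
// from the front and from the back, checking that Has and Size stay
// consistent and that removing an already-removed peer is harmless.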
func TestPeerSetAddRemoveOne(t *testing.T) {
	t.Parallel()
	peerSet := NewPeerSet()

	var peerList []Peer
	for i := 0; i < 5; i++ {
		p := randPeer()
		if err := peerSet.Add(p); err != nil {
			t.Error(err)
		}
		peerList = append(peerList, p)
	}

	n := len(peerList)
	// 1. Test removing from the front
	for i, peerAtFront := range peerList {
		peerSet.Remove(peerAtFront)
		wantSize := n - i - 1
		for j := 0; j < 2; j++ {
			assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j)
			assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j)
			// Test the route of removing the now non-existent element
			peerSet.Remove(peerAtFront)
		}
	}

	// 2. Next we are testing removing the peer at the end
	// a) Replenish the peerSet
	for _, peer := range peerList {
		if err := peerSet.Add(peer); err != nil {
			t.Error(err)
		}
	}

	// b) In reverse, remove each element
	for i := n - 1; i >= 0; i-- {
		peerAtEnd := peerList[i]
		peerSet.Remove(peerAtEnd)
		assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i)
		assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i)
	}
}

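// TestPeerSetAddRemoveMany adds 100 peers one by one and then removes them,
// checking Size after each add and Has/Size after each remove.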
func TestPeerSetAddRemoveMany(t *testing.T) {
	t.Parallel()
	peerSet := NewPeerSet()

	peers := []Peer{}
	N := 100
	for i := 0; i < N; i++ {
		peer := randPeer()
		if err := peerSet.Add(peer); err != nil {
			t.Errorf("Failed to add new peer")
		}
		if peerSet.Size() != i+1 {
			t.Errorf("Failed to add new peer and increment size")
		}
		peers = append(peers, peer)
	}

	for i, peer := range peers {
		peerSet.Remove(peer)
		if peerSet.Has(peer.ID()) {
			t.Errorf("Failed to remove peer")
		}
		if peerSet.Size() != len(peers)-i-1 {
			t.Errorf("Failed to remove peer and decrement size")
		}
	}
}

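// TestPeerSetAddDuplicate adds the same peer from many goroutines at once
// and expects exactly one Add to succeed while all the others return
// ErrSwitchDuplicatePeer.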
func TestPeerSetAddDuplicate(t *testing.T) {
	t.Parallel()
	peerSet := NewPeerSet()
	peer := randPeer()

	n := 20
	errsChan := make(chan error)
	// Add the same asynchronously to test the
	// concurrent guarantees of our APIs, and
	// our expectation in the end is that only
	// one addition succeeded, but the rest are
	// instances of ErrSwitchDuplicatePeer.
	for i := 0; i < n; i++ {
		go func() {
			errsChan <- peerSet.Add(peer)
		}()
	}

	// Now collect and tally the results
	errsTally := make(map[error]int)
	for i := 0; i < n; i++ {
		err := <-errsChan
		errsTally[err]++
	}

	// Our next procedure is to ensure that only one addition
	// succeeded and that the rest are each ErrSwitchDuplicatePeer.
	wantErrCount, gotErrCount := n-1, errsTally[ErrSwitchDuplicatePeer]
	assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count")

	wantNilErrCount, gotNilErrCount := 1, errsTally[nil]
	assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount")
}

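// TestPeerSetGet checks that Get returns nil before Add and that concurrent
// lookups after Add all return the same peer.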
func TestPeerSetGet(t *testing.T) {
	t.Parallel()
	peerSet := NewPeerSet()
	peer := randPeer()
	assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add")

	if err := peerSet.Add(peer); err != nil {
		t.Fatalf("Failed to add new peer: %v", err)
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		// Look up the peer asynchronously to test the
		// concurrent guarantees of our APIs.
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			got, want := peerSet.Get(peer.ID()), peer
			assert.Equal(t, got, want, "#%d: got=%v want=%v", i, got, want)
		}(i)
	}
	wg.Wait()
}