New node methods for tools and otherwise (#265)

* Add IpfsDHT.{Bootstrap{Random,Self},Peer{Id,Key}}
* Include client mode tests, support for Ping, and uncruft setupDHTs
* Include xerrors in deps
Matt Joiner 2019-03-06 08:10:38 +11:00 committed by GitHub
parent bbf33de897
commit 08c34b4d83
6 changed files with 93 additions and 27 deletions
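
The exported methods added in dht.go below are thin accessors over state that was previously package-private. A minimal sketch of how an external tool might use them, assuming an already-constructed *IpfsDHT (host and DHT construction omitted); the package name dhttool, the helper inspectAndPing, and the 5-second timeout are illustrative, and the import paths match the dependencies pinned by this commit, so they may differ on newer releases:

package dhttool // hypothetical package name

import (
    "context"
    "fmt"
    "time"

    dht "github.com/libp2p/go-libp2p-kad-dht"
    peer "github.com/libp2p/go-libp2p-peer"
)

// inspectAndPing exercises the new accessors: PeerID, PeerKey, Host and Ping.
// It assumes d was created elsewhere with dht.New.
func inspectAndPing(ctx context.Context, d *dht.IpfsDHT, target peer.ID) error {
    fmt.Println("local peer:", d.PeerID())         // the node's own peer.ID
    fmt.Printf("kademlia key: %x\n", d.PeerKey())  // kb.ConvertPeerID(self), the key used in the XOR metric
    fmt.Println("listen addrs:", d.Host().Addrs()) // underlying libp2p host

    pingCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
    defer cancel()
    return d.Ping(pingCtx, target) // round-trips a pb.Message_PING request
}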

dht.go

@@ -8,6 +8,8 @@ import (
"sync"
"time"
"golang.org/x/xerrors"
opts "github.com/libp2p/go-libp2p-kad-dht/opts"
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
providers "github.com/libp2p/go-libp2p-kad-dht/providers"
@@ -400,3 +402,27 @@ func (dht *IpfsDHT) protocolStrs() []string {
func mkDsKey(s string) ds.Key {
return ds.NewKey(base32.RawStdEncoding.EncodeToString([]byte(s)))
}
func (dht *IpfsDHT) PeerID() peer.ID {
return dht.self
}
func (dht *IpfsDHT) PeerKey() []byte {
return kb.ConvertPeerID(dht.self)
}
func (dht *IpfsDHT) Host() host.Host {
return dht.host
}
func (dht *IpfsDHT) Ping(ctx context.Context, p peer.ID) error {
req := pb.NewMessage(pb.Message_PING, nil, 0)
resp, err := dht.sendRequest(ctx, p, req)
if err != nil {
return xerrors.Errorf("sending request: %w", err)
}
if resp.Type != pb.Message_PING {
return xerrors.Errorf("got unexpected response type: %v", resp.Type)
}
return nil
}
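
Because Ping wraps the underlying failure with xerrors' %w, a caller can use xerrors.Is to distinguish a peer that is reachable but not serving the DHT protocol (for example, one running in client mode) from an ordinary transport error; the new TestClientModeAtInit in dht_test.go below relies on exactly that. A hedged sketch, where dhttool and probeDHTSupport are illustrative names and not part of this change:

package dhttool // hypothetical package name

import (
    "context"

    dht "github.com/libp2p/go-libp2p-kad-dht"
    peer "github.com/libp2p/go-libp2p-peer"
    multistream "github.com/multiformats/go-multistream"
    "golang.org/x/xerrors"
)

// probeDHTSupport reports whether p answers DHT pings. A multistream
// "protocol not supported" error means the peer was reached but is not
// serving the DHT protocol; anything else is returned as a genuine failure.
func probeDHTSupport(ctx context.Context, d *dht.IpfsDHT, p peer.ID) (bool, error) {
    err := d.Ping(ctx, p)
    switch {
    case err == nil:
        return true, nil
    case xerrors.Is(err, multistream.ErrNotSupported):
        return false, nil
    default:
        return false, err
    }
}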

dht_bootstrap.go

@@ -170,3 +170,12 @@ func (dht *IpfsDHT) runBootstrap(ctx context.Context, cfg BootstrapConfig) error
return err
})
}
func (dht *IpfsDHT) BootstrapRandom(ctx context.Context) error {
return dht.randomWalk(ctx)
}
func (dht *IpfsDHT) BootstrapSelf(ctx context.Context) error {
_, err := dht.walk(ctx, dht.self)
return err
}
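
BootstrapRandom and BootstrapSelf expose the two walks the built-in bootstrap ticker already performs (a lookup of a random target and a lookup of the node's own ID), so a tool can drive routing-table refreshes on its own schedule. A sketch of such a driver, where driveBootstrap and the fixed interval are illustrative rather than part of the API:

package dhttool // hypothetical package name

import (
    "context"
    "log"
    "time"

    dht "github.com/libp2p/go-libp2p-kad-dht"
)

// driveBootstrap refreshes the routing table on every tick: one random walk
// plus one walk towards our own ID. Errors are logged rather than treated as
// fatal, since individual walks can fail while the node remains healthy.
func driveBootstrap(ctx context.Context, d *dht.IpfsDHT, interval time.Duration) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-ticker.C:
            if err := d.BootstrapRandom(ctx); err != nil {
                log.Printf("bootstrap random walk: %v", err)
            }
            if err := d.BootstrapSelf(ctx); err != nil {
                log.Printf("bootstrap self walk: %v", err)
            }
        }
    }
}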

dht_test.go

@@ -5,8 +5,6 @@ import (
"context"
"errors"
"fmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"math/rand"
"sort"
"strings"
@@ -14,6 +12,13 @@ import (
"testing"
"time"
multistream "github.com/multiformats/go-multistream"
"golang.org/x/xerrors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
opts "github.com/libp2p/go-libp2p-kad-dht/opts"
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
@@ -80,14 +85,13 @@ func setupDHT(ctx context.Context, t *testing.T, client bool) *IpfsDHT {
opts.Client(client),
opts.NamespacedValidator("v", blankValidator{}),
)
if err != nil {
t.Fatal(err)
}
return d
}
func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer.ID, []*IpfsDHT) {
func setupDHTS(t *testing.T, ctx context.Context, n int) []*IpfsDHT {
addrs := make([]ma.Multiaddr, n)
dhts := make([]*IpfsDHT, n)
peers := make([]peer.ID, n)
@@ -97,8 +101,8 @@ func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer
for i := 0; i < n; i++ {
dhts[i] = setupDHT(ctx, t, false)
peers[i] = dhts[i].self
addrs[i] = dhts[i].peerstore.Addrs(dhts[i].self)[0]
peers[i] = dhts[i].PeerID()
addrs[i] = dhts[i].host.Addrs()[0]
if _, lol := sanityAddrsMap[addrs[i].String()]; lol {
t.Fatal("While setting up DHTs address got duplicated.")
@@ -112,7 +116,7 @@ func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer
}
}
return addrs, peers, dhts
return dhts
}
func connectNoSync(t *testing.T, ctx context.Context, a, b *IpfsDHT) {
@@ -481,7 +485,7 @@ func TestProvides(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, _, dhts := setupDHTS(ctx, 4, t)
dhts := setupDHTS(t, ctx, 4)
defer func() {
for i := 0; i < 4; i++ {
dhts[i].Close()
@@ -531,7 +535,7 @@ func TestLocalProvides(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, _, dhts := setupDHTS(ctx, 4, t)
dhts := setupDHTS(t, ctx, 4)
defer func() {
for i := 0; i < 4; i++ {
dhts[i].Close()
@@ -618,7 +622,7 @@ func TestBootstrap(t *testing.T) {
defer cancel()
nDHTs := 30
_, _, dhts := setupDHTS(ctx, nDHTs, t)
dhts := setupDHTS(t, ctx, nDHTs)
defer func() {
for i := 0; i < nDHTs; i++ {
dhts[i].Close()
@@ -671,7 +675,7 @@ func TestPeriodicBootstrap(t *testing.T) {
defer cancel()
nDHTs := 30
_, _, dhts := setupDHTS(ctx, nDHTs, t)
dhts := setupDHTS(t, ctx, nDHTs)
defer func() {
for i := 0; i < nDHTs; i++ {
dhts[i].Close()
@@ -727,7 +731,7 @@ func TestProvidesMany(t *testing.T) {
defer cancel()
nDHTs := 40
_, _, dhts := setupDHTS(ctx, nDHTs, t)
dhts := setupDHTS(t, ctx, nDHTs)
defer func() {
for i := 0; i < nDHTs; i++ {
dhts[i].Close()
@@ -828,7 +832,7 @@ func TestProvidesAsync(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, _, dhts := setupDHTS(ctx, 4, t)
dhts := setupDHTS(t, ctx, 4)
defer func() {
for i := 0; i < 4; i++ {
dhts[i].Close()
@@ -870,7 +874,7 @@ func TestLayeredGet(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, _, dhts := setupDHTS(ctx, 4, t)
dhts := setupDHTS(t, ctx, 4)
defer func() {
for i := 0; i < 4; i++ {
dhts[i].Close()
@@ -909,11 +913,11 @@ func TestUnfindablePeer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
maddrs, peers, dhts := setupDHTS(ctx, 4, t)
dhts := setupDHTS(t, ctx, 4)
defer func() {
for i := 0; i < 4; i++ {
dhts[i].Close()
dhts[i].host.Close()
dhts[i].Host().Close()
}
}()
@@ -922,12 +926,12 @@ func TestUnfindablePeer(t *testing.T) {
connect(t, ctx, dhts[2], dhts[3])
// Give DHT 1 a bad addr for DHT 2.
dhts[1].host.Peerstore().ClearAddrs(peers[2])
dhts[1].host.Peerstore().AddAddr(peers[2], maddrs[0], time.Minute)
dhts[1].host.Peerstore().ClearAddrs(dhts[2].PeerID())
dhts[1].host.Peerstore().AddAddr(dhts[2].PeerID(), dhts[0].Host().Addrs()[0], time.Minute)
ctxT, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
_, err := dhts[0].FindPeer(ctxT, peers[3])
_, err := dhts[0].FindPeer(ctxT, dhts[3].PeerID())
if err == nil {
t.Error("should have failed to find peer")
}
@@ -945,7 +949,7 @@ func TestFindPeer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, peers, dhts := setupDHTS(ctx, 4, t)
dhts := setupDHTS(t, ctx, 4)
defer func() {
for i := 0; i < 4; i++ {
dhts[i].Close()
@@ -959,7 +963,7 @@ func TestFindPeer(t *testing.T) {
ctxT, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
p, err := dhts[0].FindPeer(ctxT, peers[2])
p, err := dhts[0].FindPeer(ctxT, dhts[2].PeerID())
if err != nil {
t.Fatal(err)
}
@@ -968,7 +972,7 @@ func TestFindPeer(t *testing.T) {
t.Fatal("Failed to find peer.")
}
if p.ID != peers[2] {
if p.ID != dhts[2].PeerID() {
t.Fatal("Didnt find expected peer.")
}
}
@@ -983,7 +987,7 @@ func TestFindPeersConnectedToPeer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, peers, dhts := setupDHTS(ctx, 4, t)
dhts := setupDHTS(t, ctx, 4)
defer func() {
for i := 0; i < 4; i++ {
dhts[i].Close()
@@ -1005,7 +1009,7 @@ func TestFindPeersConnectedToPeer(t *testing.T) {
ctxT, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
pchan, err := dhts[0].FindPeersConnectedToPeer(ctxT, peers[2])
pchan, err := dhts[0].FindPeersConnectedToPeer(ctxT, dhts[2].PeerID())
if err != nil {
t.Fatal(err)
}
@@ -1225,7 +1229,7 @@ func testFindPeerQuery(t *testing.T,
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, allpeers, dhts := setupDHTS(ctx, 1+bootstrappers+leafs, t)
dhts := setupDHTS(t, ctx, 1+bootstrappers+leafs)
defer func() {
for _, d := range dhts {
d.Close()
@@ -1252,7 +1256,7 @@ func testFindPeerQuery(t *testing.T,
lp := len(d.host.Network().Peers())
//t.Log(i, lp)
if i != 0 && lp > 0 {
reachableIds = append(reachableIds, allpeers[i])
reachableIds = append(reachableIds, d.PeerID())
}
}
t.Logf("%d reachable ids", len(reachableIds))
@@ -1287,7 +1291,7 @@ func TestFindClosestPeers(t *testing.T) {
defer cancel()
nDHTs := 30
_, _, dhts := setupDHTS(ctx, nDHTs, t)
dhts := setupDHTS(t, ctx, nDHTs)
defer func() {
for i := 0; i < nDHTs; i++ {
dhts[i].Close()
@@ -1394,3 +1398,21 @@ func TestGetSetPluggedProtocol(t *testing.T) {
}
})
}
func TestPing(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ds := setupDHTS(t, ctx, 2)
ds[0].Host().Peerstore().AddAddrs(ds[1].PeerID(), ds[1].Host().Addrs(), pstore.AddressTTL)
assert.NoError(t, ds[0].Ping(context.Background(), ds[1].PeerID()))
}
func TestClientModeAtInit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pinger := setupDHT(ctx, t, false)
client := setupDHT(ctx, t, true)
pinger.Host().Peerstore().AddAddrs(client.PeerID(), client.Host().Addrs(), pstore.AddressTTL)
err := pinger.Ping(context.Background(), client.PeerID())
assert.True(t, xerrors.Is(err, multistream.ErrNotSupported))
}

go.mod

@@ -28,4 +28,5 @@ require (
github.com/multiformats/go-multistream v0.0.1
github.com/stretchr/testify v1.3.0
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc
golang.org/x/xerrors v0.0.0-20190212162355-a5947ffaace3
)

go.sum

@@ -237,6 +237,8 @@ golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20190212162355-a5947ffaace3 h1:P6iTFmrTQqWrqLZPX1VMzCUbCRCAUXSUsSpkEOvWzJ0=
golang.org/x/xerrors v0.0.0-20190212162355-a5947ffaace3/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=

package.json

@@ -165,6 +165,12 @@
"hash": "QmPVkJMTeRC6iBByPWdrRkD3BE5UXsj5HPzb4kPqL186mS",
"name": "testify",
"version": "1.0.0"
},
{
"author": "anacrolix",
"hash": "QmW1UQYrsBotoorzgBcQ3HVj5RzZUzB6g1vQwnNfUuCVdN",
"name": "xerrors",
"version": "0.0.2"
}
],
"gxVersion": "0.4.0",