package dht

import (
	"context"
	"fmt"
	"math/rand"
	"sort"
	"sync"
	"testing"
	"time"

	pb "github.com/libp2p/go-libp2p-kad-dht/pb"

	cid "github.com/ipfs/go-cid"
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	u "github.com/ipfs/go-ipfs-util"
	kb "github.com/libp2p/go-libp2p-kbucket"
	netutil "github.com/libp2p/go-libp2p-netutil"
	peer "github.com/libp2p/go-libp2p-peer"
	pstore "github.com/libp2p/go-libp2p-peerstore"
	record "github.com/libp2p/go-libp2p-record"
	bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
	ci "github.com/libp2p/go-testutil/ci"
	travisci "github.com/libp2p/go-testutil/ci/travis"
	ma "github.com/multiformats/go-multiaddr"
)

var testCaseValues = map[string][]byte{}
var testCaseCids []*cid.Cid

func init() {
	for i := 0; i < 100; i++ {
		v := fmt.Sprintf("%d -- value", i)

		mhv := u.Hash([]byte(v))
		testCaseCids = append(testCaseCids, cid.NewCidV0(mhv))
	}
}

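// setupDHT creates a single test DHT on an in-process swarm host, backed by an
// in-memory datastore. When client is true it is built with NewDHTClient,
// otherwise with NewDHT; a pass-through validator and selector are installed
// for the "/v" namespace used by these tests.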
func setupDHT(ctx context.Context, t *testing.T, client bool) *IpfsDHT {
	h := bhost.New(netutil.GenSwarmNetwork(t, ctx))

	dss := dssync.MutexWrap(ds.NewMapDatastore())
	var d *IpfsDHT
	if client {
		d = NewDHTClient(ctx, h, dss)
	} else {
		d = NewDHT(ctx, h, dss)
	}

	d.Validator["v"] = func(*record.ValidationRecord) error { return nil }
	d.Selector["v"] = func(_ string, bs [][]byte) (int, error) { return 0, nil }
	return d
}

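// setupDHTS creates n DHTs in server mode and returns their first listen
// addresses, their peer IDs, and the DHT instances, failing the test if any
// address or peer ID shows up twice.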
func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer.ID, []*IpfsDHT) {
	addrs := make([]ma.Multiaddr, n)
	dhts := make([]*IpfsDHT, n)
	peers := make([]peer.ID, n)

	sanityAddrsMap := make(map[string]struct{})
	sanityPeersMap := make(map[string]struct{})

	for i := 0; i < n; i++ {
		dhts[i] = setupDHT(ctx, t, false)
		peers[i] = dhts[i].self
		addrs[i] = dhts[i].peerstore.Addrs(dhts[i].self)[0]

		if _, dup := sanityAddrsMap[addrs[i].String()]; dup {
			t.Fatal("duplicate address while setting up DHTs")
		} else {
			sanityAddrsMap[addrs[i].String()] = struct{}{}
		}
		if _, dup := sanityPeersMap[peers[i].String()]; dup {
			t.Fatal("duplicate peer ID while setting up DHTs")
		} else {
			sanityPeersMap[peers[i].String()] = struct{}{}
		}
	}

	return addrs, peers, dhts
}

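// connectNoSync adds b's addresses to a's peerstore and dials b, without
// waiting for the routing tables to pick up the new connection.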
func connectNoSync(t *testing.T, ctx context.Context, a, b *IpfsDHT) {
	idB := b.self
	addrB := b.peerstore.Addrs(idB)
	if len(addrB) == 0 {
		t.Fatal("peers setup incorrectly: no local address")
	}

	a.peerstore.AddAddrs(idB, addrB, pstore.TempAddrTTL)
	pi := pstore.PeerInfo{ID: idB}
	if err := a.host.Connect(ctx, pi); err != nil {
		t.Fatal(err)
	}
}

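// connect dials b from a and then waits until both routing tables contain the
// other peer; under load the connection notification can lag behind Connect.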
func connect(t *testing.T, ctx context.Context, a, b *IpfsDHT) {
	connectNoSync(t, ctx, a, b)

	// loop until connection notification has been received.
	// under high load, this may not happen as immediately as we would like.
	for a.routingTable.Find(b.self) == "" {
		time.Sleep(time.Millisecond * 5)
	}

	for b.routingTable.Find(a.self) == "" {
		time.Sleep(time.Millisecond * 5)
	}
}

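// bootstrap runs one bounded bootstrap round (3 queries) on every DHT,
// sequentially, starting at a random index to reduce bias.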
func bootstrap(t *testing.T, ctx context.Context, dhts []*IpfsDHT) {
	ctx, cancel := context.WithCancel(ctx)
	log.Debugf("Bootstrapping DHTs...")

	// tried async. sequential fares much better. compare:
	// 100 async https://gist.github.com/jbenet/56d12f0578d5f34810b2
	// 100 sync https://gist.github.com/jbenet/6c59e7c15426e48aaedd
	// probably because results compound

	cfg := DefaultBootstrapConfig
	cfg.Queries = 3

	start := rand.Intn(len(dhts)) // randomize to decrease bias.
	for i := range dhts {
		dht := dhts[(start+i)%len(dhts)]
		dht.runBootstrap(ctx, cfg)
	}
	cancel()
}

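// TestValueGetSet stores a value under /v/hello on one node and reads it back
// through a directly connected peer.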
func TestValueGetSet(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dhtA := setupDHT(ctx, t, false)
	dhtB := setupDHT(ctx, t, false)

	defer dhtA.Close()
	defer dhtB.Close()
	defer dhtA.host.Close()
	defer dhtB.host.Close()

	vf := func(*record.ValidationRecord) error { return nil }
	nulsel := func(_ string, bs [][]byte) (int, error) { return 0, nil }

	dhtA.Validator["v"] = vf
	dhtB.Validator["v"] = vf
	dhtA.Selector["v"] = nulsel
	dhtB.Selector["v"] = nulsel

	connect(t, ctx, dhtA, dhtB)

	log.Debug("adding value on: ", dhtA.self)
	ctxT, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	err := dhtA.PutValue(ctxT, "/v/hello", []byte("world"))
	if err != nil {
		t.Fatal(err)
	}

	/*
		ctxT, _ = context.WithTimeout(ctx, time.Second*2)
		val, err := dhtA.GetValue(ctxT, "/v/hello")
		if err != nil {
			t.Fatal(err)
		}

		if string(val) != "world" {
			t.Fatalf("Expected 'world' got '%s'", string(val))
		}
	*/

	log.Debug("requesting value on dht: ", dhtB.self)
	ctxT, cancel = context.WithTimeout(ctx, time.Second*2)
	defer cancel()
	valb, err := dhtB.GetValue(ctxT, "/v/hello")
	if err != nil {
		t.Fatal(err)
	}

	if string(valb) != "world" {
		t.Fatalf("Expected 'world' got '%s'", string(valb))
	}
}

func TestInvalidMessageSenderTracking(t *testing.T) {
	ctx := context.Background()
	dht := setupDHT(ctx, t, false)
	foo := peer.ID("asdasd")
	_, err := dht.messageSenderForPeer(foo)
	if err == nil {
		t.Fatal("that shouldn't have succeeded")
	}

	dht.smlk.Lock()
	defer dht.smlk.Unlock()
	if len(dht.strmap) > 0 {
		t.Fatal("should have no message senders in map")
	}
}

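// TestProvides announces the test CIDs from dhts[3] and checks that each of
// the other nodes can discover dhts[3] as a provider.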
func TestProvides(t *testing.T) {
	// t.Skip("skipping test to debug another")
	ctx := context.Background()

	_, _, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	for _, k := range testCaseCids {
		log.Debugf("announcing provider for %s", k)
		if err := dhts[3].Provide(ctx, k, true); err != nil {
			t.Fatal(err)
		}
	}

	// what is this timeout for? was 60ms before.
	time.Sleep(time.Millisecond * 6)

	n := 0
	for _, c := range testCaseCids {
		n = (n + 1) % 3

		log.Debugf("getting providers for %s from %d", c, n)
		ctxT, cancel := context.WithTimeout(ctx, time.Second)
		defer cancel()
		provchan := dhts[n].FindProvidersAsync(ctxT, c, 1)

		select {
		case prov := <-provchan:
			if prov.ID == "" {
				t.Fatal("Got back nil provider")
			}
			if prov.ID != dhts[3].self {
				t.Fatal("Got back wrong provider")
			}
		case <-ctxT.Done():
			t.Fatal("Did not get a provider back.")
		}
	}
}

func TestLocalProvides(t *testing.T) {
	// t.Skip("skipping test to debug another")
	ctx := context.Background()

	_, _, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	for _, k := range testCaseCids {
		log.Debugf("announcing provider for %s", k)
		if err := dhts[3].Provide(ctx, k, false); err != nil {
			t.Fatal(err)
		}
	}

	time.Sleep(time.Millisecond * 10)

	for _, c := range testCaseCids {
		for i := 0; i < 3; i++ {
			provs := dhts[i].providers.GetProviders(ctx, c)
			if len(provs) > 0 {
				t.Fatal("shouldn't know this")
			}
		}
	}
}

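// waitForWellFormedTables polls every routing table until each has at least
// minPeers entries and the average size reaches avgPeers, or until timeout.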
// if minPeers or avgPeers is 0, don't test for it.
func waitForWellFormedTables(t *testing.T, dhts []*IpfsDHT, minPeers, avgPeers int, timeout time.Duration) bool {
	// test "well-formed-ness" (>= minPeers peers in every routing table)

	checkTables := func() bool {
		totalPeers := 0
		for _, dht := range dhts {
			rtlen := dht.routingTable.Size()
			totalPeers += rtlen
			if minPeers > 0 && rtlen < minPeers {
				t.Logf("routing table for %s only has %d peers (should have >%d)", dht.self, rtlen, minPeers)
				return false
			}
		}
		actualAvgPeers := totalPeers / len(dhts)
		t.Logf("avg rt size: %d", actualAvgPeers)
		if avgPeers > 0 && actualAvgPeers < avgPeers {
			t.Logf("avg rt size: %d < %d", actualAvgPeers, avgPeers)
			return false
		}
		return true
	}

	timeoutA := time.After(timeout)
	for {
		select {
		case <-timeoutA:
			log.Debugf("did not reach well-formed routing tables by %s", timeout)
			return false // failed
		case <-time.After(5 * time.Millisecond):
			if checkTables() {
				return true // succeeded
			}
		}
	}
}

func printRoutingTables(dhts []*IpfsDHT) {
	// the routing tables should be full now. let's inspect them.
	fmt.Printf("checking routing tables of %d dhts\n", len(dhts))
	for _, dht := range dhts {
		fmt.Printf("checking routing table of %s\n", dht.self)
		dht.routingTable.Print()
		fmt.Println("")
	}
}

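// TestBootstrap connects 30 DHTs in a ring and keeps bootstrapping them in the
// background until their routing tables are well formed.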
func TestBootstrap(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	nDHTs := 30
	_, _, dhts := setupDHTS(ctx, nDHTs, t)
	defer func() {
		for i := 0; i < nDHTs; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	t.Logf("connecting %d dhts in a ring", nDHTs)
	for i := 0; i < nDHTs; i++ {
		connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)])
	}

	<-time.After(100 * time.Millisecond)
	// bootstrap a few times until we get good tables.
	stop := make(chan struct{})
	go func() {
		for {
			t.Logf("bootstrapping them so they find each other %d", nDHTs)
			ctxT, cancel := context.WithTimeout(ctx, 5*time.Second)
			defer cancel()
			bootstrap(t, ctxT, dhts)

			select {
			case <-time.After(50 * time.Millisecond):
				continue // being explicit
			case <-stop:
				return
			}
		}
	}()

	waitForWellFormedTables(t, dhts, 7, 10, 20*time.Second)
	close(stop)

	if u.Debug {
		// the routing tables should be full now. let's inspect them.
		printRoutingTables(dhts)
	}
}

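// TestPeriodicBootstrap hooks every DHT's bootstrapper up to an instrumented
// signal channel, checks that the ring topology alone leaves at most 2 peers
// per routing table, then fires the signal once and waits for the tables to
// fill out.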
func TestPeriodicBootstrap(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if ci.IsRunning() {
		t.Skip("skipping on CI. highly timing dependent")
	}
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	nDHTs := 30
	_, _, dhts := setupDHTS(ctx, nDHTs, t)
	defer func() {
		for i := 0; i < nDHTs; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	// signal amplifier
	amplify := func(signal chan time.Time, other []chan time.Time) {
		for t := range signal {
			for _, s := range other {
				s <- t
			}
		}
		for _, s := range other {
			close(s)
		}
	}

	signal := make(chan time.Time)
	allSignals := []chan time.Time{}

	cfg := DefaultBootstrapConfig
	cfg.Queries = 5

	// kick off periodic bootstrappers with instrumented signals.
	for _, dht := range dhts {
		s := make(chan time.Time)
		allSignals = append(allSignals, s)
		dht.BootstrapOnSignal(cfg, s)
	}
	go amplify(signal, allSignals)

	t.Logf("dhts are not connected. %d", nDHTs)
	for _, dht := range dhts {
		rtlen := dht.routingTable.Size()
		if rtlen > 0 {
			t.Errorf("routing table for %s should have 0 peers. has %d", dht.self, rtlen)
		}
	}

	for i := 0; i < nDHTs; i++ {
		connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)])
	}

	t.Logf("DHTs are now connected to 1-2 others. %d", nDHTs)
	for _, dht := range dhts {
		rtlen := dht.routingTable.Size()
		if rtlen > 2 {
			t.Errorf("routing table for %s should have at most 2 peers. has %d", dht.self, rtlen)
		}
	}

	if u.Debug {
		printRoutingTables(dhts)
	}

	t.Logf("bootstrapping them so they find each other. %d", nDHTs)
	signal <- time.Now()

	// this is async, and we don't know when it's finished with one cycle, so keep checking
	// until the routing tables look better, or some long timeout for the failure case.
	waitForWellFormedTables(t, dhts, 7, 10, 20*time.Second)

	if u.Debug {
		printRoutingTables(dhts)
	}
}

func TestProvidesMany(t *testing.T) {
	t.Skip("this test doesn't work")
	// t.Skip("skipping test to debug another")
	ctx := context.Background()

	nDHTs := 40
	_, _, dhts := setupDHTS(ctx, nDHTs, t)
	defer func() {
		for i := 0; i < nDHTs; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	t.Logf("connecting %d dhts in a ring", nDHTs)
	for i := 0; i < nDHTs; i++ {
		connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)])
	}

	<-time.After(100 * time.Millisecond)
	t.Logf("bootstrapping them so they find each other. %d", nDHTs)
	ctxT, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()
	bootstrap(t, ctxT, dhts)

	if u.Debug {
		// the routing tables should be full now. let's inspect them.
		t.Logf("checking routing table of %d", nDHTs)
		for _, dht := range dhts {
			fmt.Printf("checking routing table of %s\n", dht.self)
			dht.routingTable.Print()
			fmt.Println("")
		}
	}

	providers := make(map[string]peer.ID)

	d := 0
	for _, c := range testCaseCids {
		d = (d + 1) % len(dhts)
		dht := dhts[d]
		providers[c.KeyString()] = dht.self

		t.Logf("announcing provider for %s", c)
		if err := dht.Provide(ctx, c, true); err != nil {
			t.Fatal(err)
		}
	}

	// what is this timeout for? was 60ms before.
	time.Sleep(time.Millisecond * 6)

	errchan := make(chan error)

	ctxT, cancel = context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	var wg sync.WaitGroup
	getProvider := func(dht *IpfsDHT, k *cid.Cid) {
		defer wg.Done()

		expected := providers[k.KeyString()]

		provchan := dht.FindProvidersAsync(ctxT, k, 1)
		select {
		case prov := <-provchan:
			actual := prov.ID
			if actual == "" {
				errchan <- fmt.Errorf("Got back nil provider (%s at %s)", k, dht.self)
			} else if actual != expected {
				errchan <- fmt.Errorf("Got back wrong provider (%s != %s) (%s at %s)",
					expected, actual, k, dht.self)
			}
		case <-ctxT.Done():
			errchan <- fmt.Errorf("Did not get a provider back (%s at %s)", k, dht.self)
		}
	}

	for _, c := range testCaseCids {
		// everyone should be able to find it...
		for _, dht := range dhts {
			log.Debugf("getting providers for %s at %s", c, dht.self)
			wg.Add(1)
			go getProvider(dht, c)
		}
	}

	// we need this because of printing errors
	go func() {
		wg.Wait()
		close(errchan)
	}()

	for err := range errchan {
		t.Error(err)
	}
}

func TestProvidesAsync(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, _, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	err := dhts[3].Provide(ctx, testCaseCids[0], true)
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 60)

	ctxT, cancel := context.WithTimeout(ctx, time.Millisecond*300)
	defer cancel()
	provs := dhts[0].FindProvidersAsync(ctxT, testCaseCids[0], 5)
	select {
	case p, ok := <-provs:
		if !ok {
			t.Fatal("Provider channel was closed...")
		}
		if p.ID == "" {
			t.Fatal("Got back nil provider!")
		}
		if p.ID != dhts[3].self {
			t.Fatalf("got a provider, but not the right one. %s", p)
		}
	case <-ctxT.Done():
		t.Fatal("Didn't get back providers")
	}
}

func TestLayeredGet(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, _, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[2], dhts[3])

	err := dhts[3].PutValue(ctx, "/v/hello", []byte("world"))
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 6)

	ctxT, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	val, err := dhts[0].GetValue(ctxT, "/v/hello")
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Error("got wrong value")
	}
}

func TestFindPeer(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	ctxT, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	p, err := dhts[0].FindPeer(ctxT, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	if p.ID == "" {
		t.Fatal("Failed to find peer.")
	}

	if p.ID != peers[2] {
		t.Fatal("Didn't find expected peer.")
	}
}

func TestFindPeersConnectedToPeer(t *testing.T) {
	t.Skip("not quite correct (see note)")

	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			dhts[i].host.Close()
		}
	}()

	// topology:
	// 0-1, 1-2, 1-3, 2-3
	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])
	connect(t, ctx, dhts[2], dhts[3])

	// fmt.Println("0 is", peers[0])
	// fmt.Println("1 is", peers[1])
	// fmt.Println("2 is", peers[2])
	// fmt.Println("3 is", peers[3])

	ctxT, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	pchan, err := dhts[0].FindPeersConnectedToPeer(ctxT, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	// shouldFind := []peer.ID{peers[1], peers[3]}
	var found []*pstore.PeerInfo
	for nextp := range pchan {
		found = append(found, nextp)
	}

	// fmt.Printf("querying 0 (%s) FindPeersConnectedToPeer 2 (%s)\n", peers[0], peers[2])
	// fmt.Println("should find 1, 3", shouldFind)
	// fmt.Println("found", found)

	// testPeerListsMatch(t, shouldFind, found)

	log.Warning("TestFindPeersConnectedToPeer is not quite correct")
	if len(found) == 0 {
		t.Fatal("didn't find any peers.")
	}
}

func testPeerListsMatch(t *testing.T, p1, p2 []peer.ID) {
	if len(p1) != len(p2) {
		t.Fatal("did not find as many peers as should have", p1, p2)
	}

	ids1 := make([]string, len(p1))
	ids2 := make([]string, len(p2))

	for i, p := range p1 {
		ids1[i] = string(p)
	}

	for i, p := range p2 {
		ids2[i] = string(p)
	}

	sort.Sort(sort.StringSlice(ids1))
	sort.Sort(sort.StringSlice(ids2))

	for i := range ids1 {
		if ids1[i] != ids2[i] {
			t.Fatal("Didn't find expected peer", ids1[i], ids2)
		}
	}
}

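// TestConnectCollision has two fresh nodes dial each other at the same time
// and verifies that both Connect calls complete without error.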
func TestConnectCollision(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}
	if travisci.IsRunning() {
		t.Skip("Skipping on Travis-CI.")
	}

	runTimes := 10

	for rtime := 0; rtime < runTimes; rtime++ {
		log.Info("Running Time: ", rtime)

		ctx := context.Background()

		dhtA := setupDHT(ctx, t, false)
		dhtB := setupDHT(ctx, t, false)

		addrA := dhtA.peerstore.Addrs(dhtA.self)[0]
		addrB := dhtB.peerstore.Addrs(dhtB.self)[0]

		peerA := dhtA.self
		peerB := dhtB.self

		errs := make(chan error)
		go func() {
			dhtA.peerstore.AddAddr(peerB, addrB, pstore.TempAddrTTL)
			pi := pstore.PeerInfo{ID: peerB}
			err := dhtA.host.Connect(ctx, pi)
			errs <- err
		}()
		go func() {
			dhtB.peerstore.AddAddr(peerA, addrA, pstore.TempAddrTTL)
			pi := pstore.PeerInfo{ID: peerA}
			err := dhtB.host.Connect(ctx, pi)
			errs <- err
		}()

		timeout := time.After(5 * time.Second)
		select {
		case e := <-errs:
			if e != nil {
				t.Fatal(e)
			}
		case <-timeout:
			t.Fatal("Timeout received!")
		}
		select {
		case e := <-errs:
			if e != nil {
				t.Fatal(e)
			}
		case <-timeout:
			t.Fatal("Timeout received!")
		}

		dhtA.Close()
		dhtB.Close()
		dhtA.host.Close()
		dhtB.host.Close()
	}
}

func TestBadProtoMessages(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	d := setupDHT(ctx, t, false)

	nilrec := new(pb.Message)
	if _, err := d.handlePutValue(ctx, "testpeer", nilrec); err == nil {
		t.Fatal("should have errored on nil record")
	}
}

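// TestClientModeConnect checks that a client-mode node can still query a
// server node it is connected to for providers.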
func TestClientModeConnect(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	a := setupDHT(ctx, t, false)
	b := setupDHT(ctx, t, true)

	connectNoSync(t, ctx, a, b)

	c := testCaseCids[0]
	p := peer.ID("TestPeer")
	a.providers.AddProvider(ctx, c, p)
	time.Sleep(time.Millisecond * 5) // just in case...

	provs, err := b.FindProviders(ctx, c)
	if err != nil {
		t.Fatal(err)
	}

	if len(provs) == 0 {
		t.Fatal("Expected to get a provider back")
	}

	if provs[0].ID != p {
		t.Fatal("expected it to be our test peer")
	}
}

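// TestFindPeerQuery builds a 101-node network: one querier connected to 20
// others, which are in turn randomly linked to the remaining 80. It then
// checks that GetClosestPeers returns 20 peers, reaches beyond the querier's
// own routing table, and matches the actual closest peers to the key.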
func TestFindPeerQuery(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	nDHTs := 101
	_, allpeers, dhts := setupDHTS(ctx, nDHTs, t)
	defer func() {
		for i := 0; i < nDHTs; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	mrand := rand.New(rand.NewSource(42))
	guy := dhts[0]
	others := dhts[1:]
	for i := 0; i < 20; i++ {
		for j := 0; j < 16; j++ { // 16, high enough to probably not have any partitions
			v := mrand.Intn(80)
			connect(t, ctx, others[i], others[20+v])
		}
	}

	for i := 0; i < 20; i++ {
		connect(t, ctx, guy, others[i])
	}

	val := "foobar"
	rtval := kb.ConvertKey(val)

	rtablePeers := guy.routingTable.NearestPeers(rtval, AlphaValue)
	if len(rtablePeers) != 3 {
		t.Fatalf("expected 3 peers back from routing table, got %d", len(rtablePeers))
	}

	netpeers := guy.host.Network().Peers()
	if len(netpeers) != 20 {
		t.Fatalf("expected 20 peers to be connected, got %d", len(netpeers))
	}

	rtableset := make(map[peer.ID]bool)
	for _, p := range rtablePeers {
		rtableset[p] = true
	}

	out, err := guy.GetClosestPeers(ctx, val)
	if err != nil {
		t.Fatal(err)
	}

	var notfromrtable int
	var count int
	var outpeers []peer.ID
	for p := range out {
		count++
		if !rtableset[p] {
			notfromrtable++
		}
		outpeers = append(outpeers, p)
	}

	if notfromrtable == 0 {
		t.Fatal("got only peers from our routing table")
	}

	if count != 20 {
		t.Fatal("should have only gotten 20 peers from getclosestpeers call")
	}

	sort.Sort(peer.IDSlice(allpeers[1:]))
	sort.Sort(peer.IDSlice(outpeers))
	fmt.Println("counts: ", count, notfromrtable)
	actualclosest := kb.SortClosestPeers(allpeers[1:], rtval)
	exp := actualclosest[:20]
	got := kb.SortClosestPeers(outpeers, rtval)

	diffp := countDiffPeers(exp, got)
	if diffp > 0 {
		// could be a partition created during setup
		t.Fatal("didn't get expected closest peers")
	}
}

func countDiffPeers(a, b []peer.ID) int {
	s := make(map[peer.ID]bool)
	for _, p := range a {
		s[p] = true
	}
	var out int
	for _, p := range b {
		if !s[p] {
			out++
		}
	}
	return out
}

func TestFindClosestPeers(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	nDHTs := 30
	_, _, dhts := setupDHTS(ctx, nDHTs, t)
	defer func() {
		for i := 0; i < nDHTs; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	t.Logf("connecting %d dhts in a ring", nDHTs)
	for i := 0; i < nDHTs; i++ {
		connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)])
	}

	peers, err := dhts[1].GetClosestPeers(ctx, "foo")
	if err != nil {
		t.Fatal(err)
	}

	var out []peer.ID
	for p := range peers {
		out = append(out, p)
	}

	if len(out) != KValue {
		t.Fatalf("got wrong number of peers (got %d, expected %d)", len(out), KValue)
	}
}