AddrManager: use addr manager with smarter TTLs

This addr manager should seriously help with the addrsplosion
problem (the peerstore accumulating peer addresses without ever expiring them).
Juan Batiz-Benet 2015-02-02 11:30:00 -08:00
parent 202d58a73a
commit dacbe3cf89
4 changed files with 10 additions and 10 deletions
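For context, the change is easiest to see at the API level: every address added to the peerstore now carries a TTL (peer.TempAddrTTL for addresses learned in passing, peer.ProviderAddrTTL for provider records), so entries expire instead of piling up. Below is a minimal sketch of that idea only; the type names, TTL durations, and map-based storage are assumptions made up for illustration and are not the real AddrManager.

// Minimal sketch of the TTL-based idea behind the new addr manager.
// The Addrs/AddAddr call shapes mirror the call sites in this diff;
// everything else here is a hypothetical stand-in for illustration.
package main

import (
	"fmt"
	"time"
)

// Assumed stand-ins for the real peer.ID / ma.Multiaddr types.
type ID string
type Multiaddr string

// Assumed TTL classes, analogous to peer.TempAddrTTL and peer.ProviderAddrTTL.
const (
	TempAddrTTL     = 2 * time.Minute
	ProviderAddrTTL = 30 * time.Minute
)

type expiringAddr struct {
	addr    Multiaddr
	expires time.Time
}

// addrSet is a toy addr manager: each address carries an expiry,
// so stale entries age out instead of accumulating forever.
type addrSet map[ID][]expiringAddr

// AddAddr records addr for p and forgets it after ttl.
func (m addrSet) AddAddr(p ID, addr Multiaddr, ttl time.Duration) {
	m[p] = append(m[p], expiringAddr{addr: addr, expires: time.Now().Add(ttl)})
}

// Addrs returns only the addresses for p that have not yet expired.
func (m addrSet) Addrs(p ID) []Multiaddr {
	var live []Multiaddr
	for _, ea := range m[p] {
		if time.Now().Before(ea.expires) {
			live = append(live, ea.addr)
		}
	}
	return live
}

func main() {
	m := addrSet{}
	m.AddAddr("QmPeer", "/ip4/1.2.3.4/tcp/4001", TempAddrTTL)
	fmt.Println(m.Addrs("QmPeer")) // visible now, gone once the TTL elapses
}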


@@ -55,7 +55,7 @@ func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer
 	for i := 0; i < n; i++ {
 		dhts[i] = setupDHT(ctx, t)
 		peers[i] = dhts[i].self
-		addrs[i] = dhts[i].peerstore.Addresses(dhts[i].self)[0]
+		addrs[i] = dhts[i].peerstore.Addrs(dhts[i].self)[0]
 	}
 	return addrs, peers, dhts
@@ -64,12 +64,12 @@ func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer
 func connect(t *testing.T, ctx context.Context, a, b *IpfsDHT) {
 	idB := b.self
-	addrB := b.peerstore.Addresses(idB)
+	addrB := b.peerstore.Addrs(idB)
 	if len(addrB) == 0 {
 		t.Fatal("peers setup incorrectly: no local address")
 	}
-	a.peerstore.AddAddresses(idB, addrB)
+	a.peerstore.AddAddrs(idB, addrB, peer.TempAddrTTL)
 	if err := a.Connect(ctx, idB); err != nil {
 		t.Fatal(err)
 	}
@@ -754,20 +754,20 @@ func TestConnectCollision(t *testing.T) {
 	dhtA := setupDHT(ctx, t)
 	dhtB := setupDHT(ctx, t)
-	addrA := dhtA.peerstore.Addresses(dhtA.self)[0]
-	addrB := dhtB.peerstore.Addresses(dhtB.self)[0]
+	addrA := dhtA.peerstore.Addrs(dhtA.self)[0]
+	addrB := dhtB.peerstore.Addrs(dhtB.self)[0]
 	peerA := dhtA.self
 	peerB := dhtB.self
 	errs := make(chan error)
 	go func() {
-		dhtA.peerstore.AddAddress(peerB, addrB)
+		dhtA.peerstore.AddAddr(peerB, addrB, peer.TempAddrTTL)
 		err := dhtA.Connect(ctx, peerB)
 		errs <- err
 	}()
 	go func() {
-		dhtB.peerstore.AddAddress(peerA, addrA)
+		dhtB.peerstore.AddAddr(peerA, addrA, peer.TempAddrTTL)
		err := dhtB.Connect(ctx, peerA)
 		errs <- err
 	}()


@@ -238,7 +238,7 @@ func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.M
 		log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
 		if pi.ID != dht.self { // dont add own addrs.
 			// add the received addresses to our peerstore.
-			dht.peerstore.AddPeerInfo(pi)
+			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peer.ProviderAddrTTL)
 		}
 		dht.providers.AddProvider(key, p)
 	}


@@ -100,7 +100,7 @@ func (dht *IpfsDHT) closerPeersSingle(ctx context.Context, key u.Key, p peer.ID)
 	for _, pbp := range pmes.GetCloserPeers() {
 		pid := peer.ID(pbp.GetId())
 		if pid != dht.self { // dont add self
-			dht.peerstore.AddAddresses(pid, pbp.Addresses())
+			dht.peerstore.AddAddrs(pid, pbp.Addresses(), peer.TempAddrTTL)
 			out = append(out, pid)
 		}
 	}


@@ -253,7 +253,7 @@ func (r *dhtQueryRunner) queryPeer(cg ctxgroup.ContextGroup, p peer.ID) {
 		}
 		// add their addresses to the dialer's peerstore
-		r.query.dht.peerstore.AddPeerInfo(next)
+		r.query.dht.peerstore.AddAddrs(next.ID, next.Addrs, peer.TempAddrTTL)
 		r.addPeerToQuery(cg.Context(), next.ID)
 		log.Debugf("PEERS CLOSER -- worker for: %v added %v (%v)", p, next.ID, next.Addrs)
 	}