Mirror of https://github.com/fluencelabs/go-libp2p-kad-dht (synced 2025-04-25 06:42:13 +00:00)
- updated go-ctxgroup and goprocess
  ctxgroup: AddChildGroup was changed to AddChild. Used in two files:
  - p2p/net/mock/mock_net.go
  - routing/dht/dht.go
- updated context from hg repo to git
  prev. commit in hg was ad01a6fcc8a19d3a4478c836895ffe883bd2ceab
  (context: make parentCancelCtx iterative)
  represents commit 84f8955a887232b6308d79c68b8db44f64df455c in git repo
- updated context to master (b6fdb7d8a4ccefede406f8fe0f017fb58265054c)

  Aaron Jacobs (2):
    net/context: Don't accept a context in the DoSomethingSlow example.
    context: Be clear that users must cancel the result of WithCancel.

  Andrew Gerrand (1):
    go.net: use golang.org/x/... import paths

  Bryan C. Mills (1):
    net/context: Don't leak goroutines in Done example.

  Damien Neil (1):
    context: fix removal of cancelled timer contexts from parent

  David Symonds (2):
    context: Fix WithValue example code.
    net: add import comments.

  Sameer Ajmani (1):
    context: fix TestAllocs to account for ints in interfaces
394 lines
10 KiB
Go
package dht

import (
	"math"
	"sync"
	"time"

	context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	notif "github.com/jbenet/go-ipfs/notifications"
	inet "github.com/jbenet/go-ipfs/p2p/net"
	peer "github.com/jbenet/go-ipfs/p2p/peer"
	"github.com/jbenet/go-ipfs/routing"
	pb "github.com/jbenet/go-ipfs/routing/dht/pb"
	kb "github.com/jbenet/go-ipfs/routing/kbucket"
	record "github.com/jbenet/go-ipfs/routing/record"
	u "github.com/jbenet/go-ipfs/util"
	errors "github.com/jbenet/go-ipfs/util/debugerror"
	pset "github.com/jbenet/go-ipfs/util/peerset"
)

// asyncQueryBuffer is the size of buffered channels in async queries. This
// buffer allows multiple queries to execute simultaneously, return their
// results and continue querying closer peers. Note that different query
// results will wait for the channel to drain.
var asyncQueryBuffer = 10

// This file implements the Routing interface for the IpfsDHT struct.

// Basic Put/Get

// PutValue adds value corresponding to given Key.
// This is the top level "Store" operation of the DHT
func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error {
	log.Debugf("PutValue %s", key)
	err := dht.putLocal(key, value)
	if err != nil {
		return err
	}

	sk, err := dht.getOwnPrivateKey()
	if err != nil {
		return err
	}

	rec, err := record.MakePutRecord(sk, key, value)
	if err != nil {
		log.Debug("Creation of record failed!")
		return err
	}

	pchan, err := dht.GetClosestPeers(ctx, key)
	if err != nil {
		return err
	}

	wg := sync.WaitGroup{}
	for p := range pchan {
		wg.Add(1)
		go func(p peer.ID) {
			defer wg.Done()
			err := dht.putValueToPeer(ctx, p, key, rec)
			if err != nil {
				log.Debugf("failed putting value to peer: %s", err)
			}
		}(p)
	}
	wg.Wait()
	return nil
}
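
// examplePutValue is a hypothetical caller sketch, not an API of this package:
// it shows one way to bound the time spent on the store RPCs. The one-minute
// timeout is an arbitrary assumption.
func examplePutValue(ctx context.Context, dht *IpfsDHT, key u.Key, value []byte) error {
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	return dht.PutValue(ctx, key, value)
}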

// GetValue searches for the value corresponding to given Key.
// If the search does not succeed, routing.ErrNotFound is returned.
func (dht *IpfsDHT) GetValue(ctx context.Context, key u.Key) ([]byte, error) {
	// If we have it locally, don't bother doing an RPC!
	val, err := dht.getLocal(key)
	if err == nil {
		log.Debug("have it locally")
		return val, nil
	}
	log.Debugf("failed to get value locally: %s", err)

	// get closest peers in the routing table
	rtp := dht.routingTable.ListPeers()
	log.Debugf("peers in rt: %d %s", len(rtp), rtp)
	if len(rtp) == 0 {
		log.Warning("No peers from routing table!")
		return nil, errors.Wrap(kb.ErrLookupFailure)
	}

	// set up the Query
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		val, peers, err := dht.getValueOrPeers(ctx, p, key)
		if err != nil {
			return nil, err
		}

		res := &dhtQueryResult{value: val, closerPeers: peers}
		if val != nil {
			res.success = true
		}

		return res, nil
	})

	// run it!
	result, err := query.Run(ctx, rtp)
	if err != nil {
		return nil, err
	}

	log.Debugf("GetValue %v %v", key, result.value)
	if result.value == nil {
		return nil, routing.ErrNotFound
	}

	return result.value, nil
}
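
// exampleGetValue is a hypothetical caller sketch, not an API of this package:
// it resolves a key and treats routing.ErrNotFound as "no record anywhere on
// the network" rather than a hard failure.
func exampleGetValue(ctx context.Context, dht *IpfsDHT, key u.Key) ([]byte, bool, error) {
	val, err := dht.GetValue(ctx, key)
	switch err {
	case nil:
		return val, true, nil
	case routing.ErrNotFound:
		return nil, false, nil
	default:
		return nil, false, err
	}
}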

// Value provider layer of indirection.
// This is what DSHTs (Coral and MainlineDHT) do to store large values in a DHT.

// Provide makes this node announce that it can provide a value for the given key
func (dht *IpfsDHT) Provide(ctx context.Context, key u.Key) error {
	defer log.EventBegin(ctx, "provide", &key).Done()

	// add self locally
	dht.providers.AddProvider(key, dht.self)

	peers, err := dht.GetClosestPeers(ctx, key)
	if err != nil {
		return err
	}

	wg := sync.WaitGroup{}
	for p := range peers {
		wg.Add(1)
		go func(p peer.ID) {
			defer wg.Done()
			log.Debugf("putProvider(%s, %s)", key, p)
			err := dht.putProvider(ctx, p, string(key))
			if err != nil {
				log.Debug(err)
			}
		}(p)
	}
	wg.Wait()
	return nil
}
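
// exampleProvideAll is a hypothetical caller sketch, not an API of this
// package: it announces several keys and keeps going past individual
// failures, reporting only the first error it saw.
func exampleProvideAll(ctx context.Context, dht *IpfsDHT, keys []u.Key) error {
	var firstErr error
	for _, k := range keys {
		if err := dht.Provide(ctx, k); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}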

// FindProviders searches until the context expires.
func (dht *IpfsDHT) FindProviders(ctx context.Context, key u.Key) ([]peer.PeerInfo, error) {
	var providers []peer.PeerInfo
	for p := range dht.FindProvidersAsync(ctx, key, math.MaxInt32) {
		providers = append(providers, p)
	}
	return providers, nil
}

// FindProvidersAsync is the same thing as FindProviders, but returns a channel.
// Peers will be returned on the channel as soon as they are found, even before
// the search query completes.
func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key u.Key, count int) <-chan peer.PeerInfo {
	log.Event(ctx, "findProviders", &key)
	peerOut := make(chan peer.PeerInfo, count)
	go dht.findProvidersAsyncRoutine(ctx, key, count, peerOut)
	return peerOut
}
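
// exampleCollectProviders is a hypothetical caller sketch, not an API of this
// package: it gathers up to 'want' providers from the async channel. The
// channel is closed by findProvidersAsyncRoutine when the search finishes or
// the context expires, so the range loop always terminates.
func exampleCollectProviders(ctx context.Context, dht *IpfsDHT, key u.Key, want int) []peer.PeerInfo {
	out := make([]peer.PeerInfo, 0, want)
	for p := range dht.FindProvidersAsync(ctx, key, want) {
		out = append(out, p)
		if len(out) >= want {
			break
		}
	}
	return out
}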

func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key u.Key, count int, peerOut chan peer.PeerInfo) {
	defer log.EventBegin(ctx, "findProvidersAsync", &key).Done()
	defer close(peerOut)

	ps := pset.NewLimited(count)
	provs := dht.providers.GetProviders(ctx, key)
	for _, p := range provs {
		// NOTE: assuming that this list of peers is unique
		if ps.TryAdd(p) {
			select {
			case peerOut <- dht.peerstore.PeerInfo(p):
			case <-ctx.Done():
				return
			}
		}

		// If we have enough peers locally, don't bother with remote RPC
		if ps.Size() >= count {
			return
		}
	}

	// set up the Query
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})
		pmes, err := dht.findProvidersSingle(ctx, p, key)
		if err != nil {
			return nil, err
		}

		log.Debugf("%d provider entries", len(pmes.GetProviderPeers()))
		provs := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
		log.Debugf("%d provider entries decoded", len(provs))

		// Add unique providers from request, up to 'count'
		for _, prov := range provs {
			log.Debugf("got provider: %s", prov)
			if ps.TryAdd(prov.ID) {
				log.Debugf("using provider: %s", prov)
				select {
				case peerOut <- prov:
				case <-ctx.Done():
					log.Debug("Context timed out sending more providers")
					return nil, ctx.Err()
				}
			}
			if ps.Size() >= count {
				log.Debugf("got enough providers (%d/%d)", ps.Size(), count)
				return &dhtQueryResult{success: true}, nil
			}
		}

		// Give closer peers back to the query to be queried
		closer := pmes.GetCloserPeers()
		clpeers := pb.PBPeersToPeerInfos(closer)
		log.Debugf("got closer peers: %d %s", len(clpeers), clpeers)

		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(clpeers),
		})
		return &dhtQueryResult{closerPeers: clpeers}, nil
	})

	peers := dht.routingTable.ListPeers()
	_, err := query.Run(ctx, peers)
	if err != nil {
		log.Debugf("Query error: %s", err)
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type:  notif.QueryError,
			Extra: err.Error(),
		})
	}
}

// FindPeer searches for a peer with given ID.
func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.PeerInfo, error) {
	defer log.EventBegin(ctx, "FindPeer", id).Done()

	// Check if we're already connected to them
	if pi := dht.FindLocal(id); pi.ID != "" {
		return pi, nil
	}

	peers := dht.routingTable.ListPeers()
	if len(peers) == 0 {
		return peer.PeerInfo{}, errors.Wrap(kb.ErrLookupFailure)
	}

	// Sanity...
	for _, p := range peers {
		if p == id {
			log.Debug("Found target peer in list of closest peers...")
			return dht.peerstore.PeerInfo(p), nil
		}
	}

	// set up the Query
	query := dht.newQuery(u.Key(id), func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})

		pmes, err := dht.findPeerSingle(ctx, p, id)
		if err != nil {
			return nil, err
		}

		closer := pmes.GetCloserPeers()
		clpeerInfos := pb.PBPeersToPeerInfos(closer)

		// see if we got the peer here
		for _, npi := range clpeerInfos {
			if npi.ID == id {
				return &dhtQueryResult{
					peer:    npi,
					success: true,
				}, nil
			}
		}

		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			Responses: pointerizePeerInfos(clpeerInfos),
		})

		return &dhtQueryResult{closerPeers: clpeerInfos}, nil
	})

	// run it!
	result, err := query.Run(ctx, peers)
	if err != nil {
		return peer.PeerInfo{}, err
	}

	log.Debugf("FindPeer %v %v", id, result.success)
	if result.peer.ID == "" {
		return peer.PeerInfo{}, routing.ErrNotFound
	}

	return result.peer, nil
}
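
// exampleFindPeer is a hypothetical caller sketch, not an API of this package:
// it looks up a peer under a deadline and reports whether it was found. The
// 30-second timeout is an arbitrary assumption.
func exampleFindPeer(ctx context.Context, dht *IpfsDHT, id peer.ID) (peer.PeerInfo, bool, error) {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	pi, err := dht.FindPeer(ctx, id)
	if err == routing.ErrNotFound {
		return peer.PeerInfo{}, false, nil
	}
	if err != nil {
		return peer.PeerInfo{}, false, err
	}
	return pi, true, nil
}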

// FindPeersConnectedToPeer searches for peers directly connected to a given peer.
func (dht *IpfsDHT) FindPeersConnectedToPeer(ctx context.Context, id peer.ID) (<-chan peer.PeerInfo, error) {

	peerchan := make(chan peer.PeerInfo, asyncQueryBuffer)
	peersSeen := peer.Set{}

	peers := dht.routingTable.ListPeers()
	if len(peers) == 0 {
		return nil, errors.Wrap(kb.ErrLookupFailure)
	}

	// set up the Query
	query := dht.newQuery(u.Key(id), func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {

		pmes, err := dht.findPeerSingle(ctx, p, id)
		if err != nil {
			return nil, err
		}

		var clpeers []peer.PeerInfo
		closer := pmes.GetCloserPeers()
		for _, pbp := range closer {
			pi := pb.PBPeerToPeerInfo(pbp)

			// skip peers already seen
			if _, found := peersSeen[pi.ID]; found {
				continue
			}
			peersSeen[pi.ID] = struct{}{}

			// if peer is connected, send it to our client.
			if pb.Connectedness(*pbp.Connection) == inet.Connected {
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case peerchan <- pi:
				}
			}

			// if peer is the peer we're looking for, don't bother querying it.
			// TODO maybe query it?
			if pb.Connectedness(*pbp.Connection) != inet.Connected {
				clpeers = append(clpeers, pi)
			}
		}

		return &dhtQueryResult{closerPeers: clpeers}, nil
	})

	// run it! run it asynchronously to get peers as results are found.
	// this does no error checking
	go func() {
		if _, err := query.Run(ctx, peers); err != nil {
			log.Debug(err)
		}

		// close the peerchan channel when done.
		close(peerchan)
	}()

	return peerchan, nil
}
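
// exampleConnectedPeers is a hypothetical caller sketch, not an API of this
// package: it drains the channel returned by FindPeersConnectedToPeer into a
// slice. The channel is closed by the query goroutine, so the range loop ends
// when the search finishes or the context expires.
func exampleConnectedPeers(ctx context.Context, dht *IpfsDHT, id peer.ID) ([]peer.PeerInfo, error) {
	ch, err := dht.FindPeersConnectedToPeer(ctx, id)
	if err != nil {
		return nil, err
	}
	var found []peer.PeerInfo
	for pi := range ch {
		found = append(found, pi)
	}
	return found, nil
}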

// Ping a peer, log the time it took
func (dht *IpfsDHT) Ping(ctx context.Context, p peer.ID) (time.Duration, error) {
	// Thoughts: maybe this should accept an ID and do a peer lookup?
	log.Debugf("ping %s start", p)
	before := time.Now()

	pmes := pb.NewMessage(pb.Message_PING, "", 0)
	_, err := dht.sendRequest(ctx, p, pmes)
	log.Debugf("ping %s end (err = %s)", p, err)

	return time.Since(before), err
}
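
// examplePing is a hypothetical caller sketch, not an API of this package:
// it measures round-trip latency with a per-ping bound. The ten-second
// timeout is an arbitrary assumption.
func examplePing(ctx context.Context, dht *IpfsDHT, p peer.ID) (time.Duration, error) {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	return dht.Ping(ctx, p)
}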