// go-libp2p-kad-dht — routing.go
// (GitHub page residue — file stats, "Raw Normal View History", blame timestamps — removed.)

package dht
import (
"bytes"
"fmt"
"runtime"
2014-10-11 10:43:54 -07:00
"sync"
"time"
cid "github.com/ipfs/go-cid"
peer "github.com/ipfs/go-libp2p-peer"
2016-09-02 20:21:23 +01:00
pset "github.com/ipfs/go-libp2p-peer/peerset"
pstore "github.com/ipfs/go-libp2p-peerstore"
2016-09-02 20:21:23 +01:00
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
kb "github.com/libp2p/go-libp2p-kbucket"
record "github.com/libp2p/go-libp2p-record"
routing "github.com/libp2p/go-libp2p-routing"
notif "github.com/libp2p/go-libp2p-routing/notifications"
inet "github.com/libp2p/go-libp2p/p2p/net"
context "golang.org/x/net/context"
2014-07-23 04:48:30 -07:00
)
2014-12-08 21:55:51 -08:00
// asyncQueryBuffer is the size of buffered channels in async queries. This
// buffer allows multiple queries to execute simultaneously, return their
// results and continue querying closer peers. Note that different query
// results will wait for the channel to drain.
var asyncQueryBuffer = 10
2014-07-23 04:48:30 -07:00
// This file implements the Routing interface for the IpfsDHT struct.
// Basic Put/Get
// PutValue adds value corresponding to given Key.
// This is the top level "Store" operation of the DHT
func (dht *IpfsDHT) PutValue(ctx context.Context, key string, value []byte) error {
log.Debugf("PutValue %s", key)
2015-02-23 00:25:20 -08:00
sk, err := dht.getOwnPrivateKey()
2014-09-12 17:34:07 +00:00
if err != nil {
return err
}
2014-09-17 07:19:40 -07:00
sign, err := dht.Validator.IsSigned(key)
if err != nil {
return err
}
2015-02-23 00:25:20 -08:00
rec, err := record.MakePutRecord(sk, key, value, sign)
if err != nil {
log.Debug("creation of record failed!")
return err
}
2015-02-23 00:25:20 -08:00
err = dht.putLocal(key, rec)
if err != nil {
return err
}
pchan, err := dht.GetClosestPeers(ctx, key)
if err != nil {
return err
}
2014-09-17 07:19:40 -07:00
wg := sync.WaitGroup{}
for p := range pchan {
wg.Add(1)
go func(p peer.ID) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
defer wg.Done()
2015-02-21 16:20:28 -08:00
notif.PublishQueryEvent(ctx, &notif.QueryEvent{
Type: notif.Value,
ID: p,
})
2014-12-28 23:46:25 +00:00
err := dht.putValueToPeer(ctx, p, key, rec)
if err != nil {
2015-01-26 19:12:12 -08:00
log.Debugf("failed putting value to peer: %s", err)
}
}(p)
}
wg.Wait()
return nil
2014-07-23 04:48:30 -07:00
}
// GetValue searches for the value corresponding to given Key.
func (dht *IpfsDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
	// Bound the whole lookup so a quiet network cannot hang the caller forever.
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	// Collect up to 16 candidate records (local store plus network).
	vals, err := dht.GetValues(ctx, key, 16)
	if err != nil {
		return nil, err
	}

	// Drop entries that carried no value at all.
	var recs [][]byte
	for _, v := range vals {
		if v.Val != nil {
			recs = append(recs, v.Val)
		}
	}

	// Ask the configured selector which candidate is the "best" record.
	i, err := dht.Selector.BestRecord(key, recs)
	if err != nil {
		return nil, err
	}

	best := recs[i]
	log.Debugf("GetValue %v %v", key, best)
	if best == nil {
		log.Errorf("GetValue yielded correct record with nil value.")
		return nil, routing.ErrNotFound
	}

	// Re-sign the winning value under our own key so we can push it back to
	// any source that held a different (stale/less-valid) record.
	fixupRec, err := record.MakePutRecord(dht.peerstore.PrivKey(dht.self), key, best, true)
	if err != nil {
		// probably shouldnt actually 'error' here as we have found a value we like,
		// but this call failing probably isnt something we want to ignore
		return nil, err
	}

	for _, v := range vals {
		// if someone sent us a different 'less-valid' record, lets correct them
		if !bytes.Equal(v.Val, best) {
			// Best-effort, fire-and-forget correction per divergent source.
			go func(v routing.RecvdVal) {
				if v.From == dht.self {
					err := dht.putLocal(key, fixupRec)
					if err != nil {
						log.Error("Error correcting local dht entry:", err)
					}
					return
				}
				// Use a fresh context for the correction: the caller's ctx
				// (1-minute budget) may expire before this completes.
				ctx, cancel := context.WithTimeout(dht.Context(), time.Second*30)
				defer cancel()
				err := dht.putValueToPeer(ctx, v.From, key, fixupRec)
				if err != nil {
					log.Error("Error correcting DHT entry: ", err)
				}
			}(v)
		}
	}

	return best, nil
}
// GetValues collects up to nvals records for key, starting from the local
// store and then querying the network. It returns the raw received records
// without choosing among them; see GetValue for selection.
func (dht *IpfsDHT) GetValues(ctx context.Context, key string, nvals int) ([]routing.RecvdVal, error) {
	var vals []routing.RecvdVal
	// valslock guards vals; the query callback below runs concurrently.
	var valslock sync.Mutex

	// If we have it local, dont bother doing an RPC!
	lrec, err := dht.getLocal(key)
	if err == nil {
		// TODO: this is tricky, we dont always want to trust our own value
		// what if the authoritative source updated it?
		log.Debug("have it locally")
		vals = append(vals, routing.RecvdVal{
			Val:  lrec.GetValue(),
			From: dht.self,
		})

		if nvals <= 1 {
			return vals, nil
		}
	} else if nvals == 0 {
		// Caller wanted only the local record and we don't have one.
		return nil, err
	}

	// get closest peers in the routing table
	rtp := dht.routingTable.NearestPeers(kb.ConvertKey(key), KValue)
	log.Debugf("peers in rt: %d %s", len(rtp), rtp)
	if len(rtp) == 0 {
		log.Warning("No peers from routing table!")
		return nil, kb.ErrLookupFailure
	}

	// setup the Query
	// parent captures the caller's ctx so events reach the caller's
	// listeners even though the closure receives a per-peer ctx.
	parent := ctx
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})

		rec, peers, err := dht.getValueOrPeers(ctx, p, key)
		// NOTE: the default case deliberately sits between the named cases —
		// only nil and errInvalidRecord fall through to the code below;
		// every other error aborts this peer's step.
		switch err {
		case routing.ErrNotFound:
			// in this case, they responded with nothing,
			// still send a notification so listeners can know the
			// request has completed 'successfully'
			notif.PublishQueryEvent(parent, &notif.QueryEvent{
				Type: notif.PeerResponse,
				ID:   p,
			})
			return nil, err
		default:
			return nil, err
		case nil, errInvalidRecord:
			// in either of these cases, we want to keep going
		}

		res := &dhtQueryResult{closerPeers: peers}

		// Record the value even when it failed validation (errInvalidRecord):
		// the caller may still want to see it (e.g. to correct the peer).
		if rec.GetValue() != nil || err == errInvalidRecord {
			rv := routing.RecvdVal{
				Val:  rec.GetValue(),
				From: p,
			}
			valslock.Lock()
			vals = append(vals, rv)

			// If weve collected enough records, we're done
			if len(vals) >= nvals {
				res.success = true
			}
			valslock.Unlock()
		}

		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(peers),
		})

		return res, nil
	})

	// run it!
	_, err = query.Run(ctx, rtp)
	// A query error is only fatal if we collected no values at all.
	if len(vals) == 0 {
		if err != nil {
			return nil, err
		}
	}

	return vals, nil
}
// Value provider layer of indirection.
// This is what DSHTs (Coral and MainlineDHT) do to store large values in a DHT.
2014-08-16 23:03:36 -07:00
// Provide makes this node announce that it can provide a value for the given key
func (dht *IpfsDHT) Provide(ctx context.Context, key *cid.Cid) error {
defer log.EventBegin(ctx, "provide", key).Done()
2015-01-05 04:35:54 -08:00
// add self locally
dht.providers.AddProvider(ctx, key, dht.self)
peers, err := dht.GetClosestPeers(ctx, key.KeyString())
if err != nil {
return err
}
mes, err := dht.makeProvRecord(key)
if err != nil {
return err
}
2014-12-28 23:46:25 +00:00
wg := sync.WaitGroup{}
for p := range peers {
2014-12-28 23:46:25 +00:00
wg.Add(1)
go func(p peer.ID) {
defer wg.Done()
2015-01-05 04:35:54 -08:00
log.Debugf("putProvider(%s, %s)", key, p)
err := dht.sendMessage(ctx, p, mes)
2014-12-28 23:46:25 +00:00
if err != nil {
2015-01-26 19:12:12 -08:00
log.Debug(err)
2014-12-28 23:46:25 +00:00
}
}(p)
}
2014-12-28 23:46:25 +00:00
wg.Wait()
return nil
2014-07-23 04:48:30 -07:00
}
// makeProvRecord builds an ADD_PROVIDER message advertising this node as a
// provider for skey, carrying our currently-known listen addresses. It fails
// if we have no addresses, since an unreachable provider record is useless.
func (dht *IpfsDHT) makeProvRecord(skey *cid.Cid) (*pb.Message, error) {
	pi := pstore.PeerInfo{
		ID:    dht.self,
		Addrs: dht.host.Addrs(),
	}

	// // only share WAN-friendly addresses ??
	// pi.Addrs = addrutil.WANShareableAddrs(pi.Addrs)

	if len(pi.Addrs) == 0 {
		// Error string follows Go convention: lowercase, no trailing punctuation.
		return nil, fmt.Errorf("no known addresses for self, cannot put provider")
	}

	pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, skey.KeyString(), 0)
	pmes.ProviderPeers = pb.RawPeerInfosToPBPeers([]pstore.PeerInfo{pi})
	return pmes, nil
}
2014-07-23 04:48:30 -07:00
// FindProviders searches until the context expires.
// It is the synchronous form of FindProvidersAsync: it drains the async
// channel and returns every provider found as a single slice.
func (dht *IpfsDHT) FindProviders(ctx context.Context, c *cid.Cid) ([]pstore.PeerInfo, error) {
	var out []pstore.PeerInfo
	for info := range dht.FindProvidersAsync(ctx, c, KValue) {
		out = append(out, info)
	}
	return out, nil
}
2014-11-20 10:46:19 -08:00
// FindProvidersAsync is the same thing as FindProviders, but returns a channel.
// Peers will be returned on the channel as soon as they are found, even before
// the search query completes.
func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key *cid.Cid, count int) <-chan pstore.PeerInfo {
log.Event(ctx, "findProviders", key)
peerOut := make(chan pstore.PeerInfo, count)
2014-12-11 05:08:39 +00:00
go dht.findProvidersAsyncRoutine(ctx, key, count, peerOut)
return peerOut
}
// findProvidersAsyncRoutine feeds provider PeerInfos for key into peerOut,
// first from the local provider store and then via a DHT query, stopping once
// count unique providers have been emitted or ctx is done. It closes peerOut
// when it returns.
func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key *cid.Cid, count int, peerOut chan pstore.PeerInfo) {
	defer log.EventBegin(ctx, "findProvidersAsync", key).Done()
	defer close(peerOut)

	// ps deduplicates providers and caps how many we accept (count).
	ps := pset.NewLimited(count)
	provs := dht.providers.GetProviders(ctx, key)
	for _, p := range provs {
		// NOTE: Assuming that this list of peers is unique
		if ps.TryAdd(p) {
			select {
			case peerOut <- dht.peerstore.PeerInfo(p):
			case <-ctx.Done():
				return
			}
		}

		// If we have enough peers locally, dont bother with remote RPC
		if ps.Size() >= count {
			return
		}
	}

	// setup the Query
	// parent keeps the caller's ctx for event publication; the closure's ctx
	// is the per-peer query context.
	parent := ctx
	query := dht.newQuery(key.KeyString(), func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})
		pmes, err := dht.findProvidersSingle(ctx, p, key)
		if err != nil {
			return nil, err
		}

		log.Debugf("%d provider entries", len(pmes.GetProviderPeers()))
		provs := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
		log.Debugf("%d provider entries decoded", len(provs))

		// Add unique providers from request, up to 'count'
		for _, prov := range provs {
			log.Debugf("got provider: %s", prov)
			if ps.TryAdd(prov.ID) {
				log.Debugf("using provider: %s", prov)
				select {
				case peerOut <- prov:
				case <-ctx.Done():
					log.Debug("context timed out sending more providers")
					return nil, ctx.Err()
				}
			}
			if ps.Size() >= count {
				log.Debugf("got enough providers (%d/%d)", ps.Size(), count)
				return &dhtQueryResult{success: true}, nil
			}
		}

		// Give closer peers back to the query to be queried
		closer := pmes.GetCloserPeers()
		clpeers := pb.PBPeersToPeerInfos(closer)
		log.Debugf("got closer peers: %d %s", len(clpeers), clpeers)

		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(clpeers),
		})
		return &dhtQueryResult{closerPeers: clpeers}, nil
	})

	peers := dht.routingTable.NearestPeers(kb.ConvertKey(key.KeyString()), KValue)
	_, err := query.Run(ctx, peers)
	if err != nil {
		log.Debugf("Query error: %s", err)
		// Special handling for issue: https://github.com/ipfs/go-ipfs/issues/3032
		if fmt.Sprint(err) == "<nil>" {
			log.Error("reproduced bug 3032:")
			log.Errorf("Errors type information: %#v", err)
			log.Errorf("go version: %s", runtime.Version())
			log.Error("please report this information to: https://github.com/ipfs/go-ipfs/issues/3032")

			// replace problematic error with something that won't crash the daemon
			err = fmt.Errorf("<nil>")
		}
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type:  notif.QueryError,
			Extra: err.Error(),
		})
	}
}
2014-07-23 04:48:30 -07:00
// FindPeer searches for a peer with given ID.
func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (pstore.PeerInfo, error) {
2015-01-15 04:45:34 +00:00
defer log.EventBegin(ctx, "FindPeer", id).Done()
2014-09-17 07:19:40 -07:00
// Check if were already connected to them
2014-12-28 23:46:25 +00:00
if pi := dht.FindLocal(id); pi.ID != "" {
return pi, nil
}
peers := dht.routingTable.NearestPeers(kb.ConvertPeerID(id), KValue)
if len(peers) == 0 {
return pstore.PeerInfo{}, kb.ErrLookupFailure
}
2014-10-24 18:32:28 -07:00
// Sanity...
for _, p := range peers {
if p == id {
log.Debug("found target peer in list of closest peers...")
return dht.peerstore.PeerInfo(p), nil
}
}
2014-09-17 07:19:40 -07:00
2014-10-24 18:32:28 -07:00
// setup the Query
parent := ctx
query := dht.newQuery(string(id), func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
notif.PublishQueryEvent(parent, &notif.QueryEvent{
Type: notif.SendingQuery,
ID: p,
})
2014-10-24 18:32:28 -07:00
pmes, err := dht.findPeerSingle(ctx, p, id)
2014-09-17 07:19:40 -07:00
if err != nil {
2014-09-18 19:30:04 -07:00
return nil, err
2014-09-17 07:19:40 -07:00
}
2014-10-24 18:32:28 -07:00
closer := pmes.GetCloserPeers()
clpeerInfos := pb.PBPeersToPeerInfos(closer)
2014-11-21 08:03:11 -08:00
// see it we got the peer here
for _, npi := range clpeerInfos {
if npi.ID == id {
2014-10-24 18:32:28 -07:00
return &dhtQueryResult{
peer: npi,
2014-10-24 18:32:28 -07:00
success: true,
}, nil
}
2014-09-17 07:19:40 -07:00
}
notif.PublishQueryEvent(parent, &notif.QueryEvent{
Type: notif.PeerResponse,
Responses: pointerizePeerInfos(clpeerInfos),
})
return &dhtQueryResult{closerPeers: clpeerInfos}, nil
2014-09-18 19:30:04 -07:00
})
2014-10-24 18:32:28 -07:00
// run it!
result, err := query.Run(ctx, peers)
2014-09-17 07:19:40 -07:00
if err != nil {
return pstore.PeerInfo{}, err
2014-09-17 07:19:40 -07:00
}
2014-10-30 06:35:29 -07:00
log.Debugf("FindPeer %v %v", id, result.success)
if result.peer.ID == "" {
return pstore.PeerInfo{}, routing.ErrNotFound
}
2014-10-24 18:32:28 -07:00
2014-09-18 19:30:04 -07:00
return result.peer, nil
}
2014-11-24 14:58:51 -05:00
// FindPeersConnectedToPeer searches for peers directly connected to a given peer.
func (dht *IpfsDHT) FindPeersConnectedToPeer(ctx context.Context, id peer.ID) (<-chan pstore.PeerInfo, error) {
2014-11-24 14:58:51 -05:00
peerchan := make(chan pstore.PeerInfo, asyncQueryBuffer)
peersSeen := make(map[peer.ID]struct{})
2014-11-24 14:58:51 -05:00
peers := dht.routingTable.NearestPeers(kb.ConvertPeerID(id), KValue)
if len(peers) == 0 {
return nil, kb.ErrLookupFailure
2014-11-24 14:58:51 -05:00
}
// setup the Query
query := dht.newQuery(string(id), func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
2014-11-24 14:58:51 -05:00
pmes, err := dht.findPeerSingle(ctx, p, id)
2014-11-24 14:58:51 -05:00
if err != nil {
return nil, err
}
var clpeers []pstore.PeerInfo
2014-11-24 14:58:51 -05:00
closer := pmes.GetCloserPeers()
for _, pbp := range closer {
pi := pb.PBPeerToPeerInfo(pbp)
2014-11-24 14:58:51 -05:00
// skip peers already seen
if _, found := peersSeen[pi.ID]; found {
2014-11-24 14:58:51 -05:00
continue
}
peersSeen[pi.ID] = struct{}{}
2014-11-24 14:58:51 -05:00
// if peer is connected, send it to our client.
if pb.Connectedness(*pbp.Connection) == inet.Connected {
select {
case <-ctx.Done():
return nil, ctx.Err()
case peerchan <- pi:
2014-11-24 14:58:51 -05:00
}
}
// if peer is the peer we're looking for, don't bother querying it.
// TODO maybe query it?
2014-11-24 14:58:51 -05:00
if pb.Connectedness(*pbp.Connection) != inet.Connected {
clpeers = append(clpeers, pi)
2014-11-24 14:58:51 -05:00
}
}
return &dhtQueryResult{closerPeers: clpeers}, nil
})
// run it! run it asynchronously to gen peers as results are found.
// this does no error checking
go func() {
if _, err := query.Run(ctx, peers); err != nil {
2015-01-26 19:12:12 -08:00
log.Debug(err)
2014-11-24 14:58:51 -05:00
}
// close the peerchan channel when done.
close(peerchan)
}()
return peerchan, nil
}