// Package dht implements a distributed hash table that satisfies the ipfs routing
// interface. This DHT is modeled after Kademlia with Coral and S/Kademlia modifications.
package dht

import (
	"bytes"
	"crypto/rand"
	"errors"
	"fmt"
	"sync"
	"time"

	inet "github.com/jbenet/go-ipfs/net"
	msg "github.com/jbenet/go-ipfs/net/message"
	peer "github.com/jbenet/go-ipfs/peer"
	routing "github.com/jbenet/go-ipfs/routing"
	pb "github.com/jbenet/go-ipfs/routing/dht/pb"
	kb "github.com/jbenet/go-ipfs/routing/kbucket"
	u "github.com/jbenet/go-ipfs/util"
	ctxc "github.com/jbenet/go-ipfs/util/ctxcloser"
	"github.com/jbenet/go-ipfs/util/eventlog"

	context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
)

var log = eventlog.Logger("dht")

const doPinging = false

// TODO. SEE https://github.com/jbenet/node-ipfs/blob/master/submodules/ipfs-dht/index.js

// IpfsDHT is an implementation of Kademlia with Coral and S/Kademlia modifications.
// It is used to implement the base IpfsRouting module.
type IpfsDHT struct {
	// Array of routing tables for differently distanced nodes
	// NOTE: (currently, only a single table is used)
	routingTables []*kb.RoutingTable

	// the network services we need
	dialer inet.Dialer
	sender inet.Sender

	// Local peer (yourself)
	self peer.Peer

	// Other peers
	peerstore peer.Peerstore

	// Local data
	datastore ds.Datastore
	dslock    sync.Mutex

	providers *ProviderManager

	// When this peer started up
	birth time.Time

	// lock to make diagnostics work better
	diaglock sync.Mutex

	// record validator funcs
	Validators map[string]ValidatorFunc

	ctxc.ContextCloser
}
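
// The three routing tables built in NewDHT correspond to the message's
// "cluster level" (see nearestPeersToQuery below): a Coral-style hierarchy of
// tables with different latency tolerances, though per the NOTE above only a
// single table is effectively exercised today.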

// NewDHT creates a new DHT object with the given peer as the 'local' host.
func NewDHT(ctx context.Context, p peer.Peer, ps peer.Peerstore, dialer inet.Dialer, sender inet.Sender, dstore ds.Datastore) *IpfsDHT {
	dht := new(IpfsDHT)
	dht.dialer = dialer
	dht.sender = sender
	dht.datastore = dstore
	dht.self = p
	dht.peerstore = ps
	dht.ContextCloser = ctxc.NewContextCloser(ctx, nil)

	dht.providers = NewProviderManager(dht.Context(), p.ID())
	dht.AddCloserChild(dht.providers)

	dht.routingTables = make([]*kb.RoutingTable, 3)
	dht.routingTables[0] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Millisecond*1000)
	dht.routingTables[1] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Millisecond*1000)
	dht.routingTables[2] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Hour)
	dht.birth = time.Now()

	dht.Validators = make(map[string]ValidatorFunc)
	dht.Validators["pk"] = ValidatePublicKeyRecord

	if doPinging {
		dht.Children().Add(1)
		go dht.PingRoutine(time.Second * 10)
	}
	return dht
}
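
// A minimal construction sketch, assuming the caller already has concrete
// inet.Dialer/inet.Sender implementations and a datastore wired up (the names
// below are illustrative, not part of this package):
//
//	dht := NewDHT(ctx, self, peerstore, dialer, sender, dstore)
//	defer dht.Close() // provided by the embedded ctxc.ContextCloser
//	if _, err := dht.Connect(ctx, somePeer); err != nil {
//		log.Errorf("connect failed: %s", err)
//	}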

// Connect to a new peer at the given address, ping it, and add it to the
// routing table.
func (dht *IpfsDHT) Connect(ctx context.Context, npeer peer.Peer) (peer.Peer, error) {
	// TODO(jbenet,whyrusleeping)
	//
	// Connect should take in a Peer (with ID). In a sense, we shouldn't be
	// allowing connections to random multiaddrs without knowing who we're
	// speaking to (i.e. peer.ID). In terms of moving around simple addresses
	// -- instead of an (ID, Addr) pair -- we can use:
	//
	//   /ip4/10.20.30.40/tcp/1234/ipfs/Qxhxxchxzcncxnzcnxzcxzm
	//
	err := dht.dialer.DialPeer(ctx, npeer)
	if err != nil {
		return nil, err
	}

	// Ping new peer to register in their routing table
	// NOTE: this should be done better...
	err = dht.Ping(ctx, npeer)
	if err != nil {
		return nil, fmt.Errorf("failed to ping newly connected peer: %s", err)
	}
	log.Event(ctx, "connect", dht.self, npeer)

	dht.Update(ctx, npeer)

	return npeer, nil
}

// HandleMessage implements the inet.Handler interface.
func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.NetMessage {

	mData := mes.Data()
	if mData == nil {
		log.Error("Message contained nil data.")
		return nil
	}

	mPeer := mes.Peer()
	if mPeer == nil {
		log.Error("Message contained nil peer.")
		return nil
	}

	// deserialize msg
	pmes := new(pb.Message)
	err := proto.Unmarshal(mData, pmes)
	if err != nil {
		log.Error("Error unmarshaling data")
		return nil
	}

	// update the peer (on valid msgs only)
	dht.Update(ctx, mPeer)

	log.Event(ctx, "foo", dht.self, mPeer, pmes)

	// get handler for this msg type.
	handler := dht.handlerForMsgType(pmes.GetType())
	if handler == nil {
		log.Error("got back nil handler from handlerForMsgType")
		return nil
	}

	// dispatch handler.
	rpmes, err := handler(ctx, mPeer, pmes)
	if err != nil {
		log.Errorf("handle message error: %s", err)
		return nil
	}

	// if nil response, return it before serializing
	if rpmes == nil {
		log.Warning("Got back nil response from request.")
		return nil
	}

	// serialize response msg
	rmes, err := msg.FromObject(mPeer, rpmes)
	if err != nil {
		log.Errorf("serialize response error: %s", err)
		return nil
	}

	return rmes
}
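
// Handlers dispatched above take the sending peer and the decoded message,
// and return a response message (or nil). A hypothetical echo handler, for
// illustration only (the real ones are registered via handlerForMsgType):
//
//	func (dht *IpfsDHT) handleEcho(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
//		return pmes, nil // respond with the request itself
//	}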

// sendRequest sends out a request using dht.sender, but also makes sure to
// measure the RTT for latency measurements.
func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) {

	mes, err := msg.FromObject(p, pmes)
	if err != nil {
		return nil, err
	}

	start := time.Now()

	rmes, err := dht.sender.SendRequest(ctx, mes) // respect?
	if err != nil {
		return nil, err
	}
	if rmes == nil {
		return nil, errors.New("no response to request")
	}
	log.Event(ctx, "sentMessage", dht.self, p, pmes)

	rmes.Peer().SetLatency(time.Since(start))

	rpmes := new(pb.Message)
	if err := proto.Unmarshal(rmes.Data(), rpmes); err != nil {
		return nil, err
	}
	return rpmes, nil
}
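
// All the single-shot RPC helpers below (getValueSingle, findPeerSingle,
// findProvidersSingle) reduce to the same two lines over sendRequest:
//
//	pmes := pb.NewMessage(<message type>, <key or id>, level)
//	return dht.sendRequest(ctx, p, pmes)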

// putValueToNetwork stores the given key/value pair at the peer 'p'.
func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p peer.Peer,
	key string, rec *pb.Record) error {

	pmes := pb.NewMessage(pb.Message_PUT_VALUE, key, 0)
	pmes.Record = rec
	rpmes, err := dht.sendRequest(ctx, p, pmes)
	if err != nil {
		return err
	}

	if !bytes.Equal(rpmes.GetRecord().Value, pmes.GetRecord().Value) {
		return errors.New("value not put correctly")
	}
	return nil
}

// putProvider sends a message to peer 'p' announcing that the local node
// can provide the value for 'key'.
func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.Peer, key string) error {

	pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, key, 0)

	// add self as the provider
	pmes.ProviderPeers = pb.PeersToPBPeers([]peer.Peer{dht.self})

	rpmes, err := dht.sendRequest(ctx, p, pmes)
	if err != nil {
		return err
	}

	log.Debugf("%s putProvider: %s for %s", dht.self, p, u.Key(key))
	if rpmes.GetKey() != pmes.GetKey() {
		return errors.New("provider not added correctly")
	}

	return nil
}
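
// Both putValueToNetwork and putProvider treat the remote peer's echo as the
// ack: the response must repeat what was sent (the record value, or the key),
// otherwise the put is reported as failed. A caller typically just checks the
// error:
//
//	if err := dht.putProvider(ctx, p, string(key)); err != nil {
//		log.Errorf("provide failed: %s", err)
//	}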

// getValueOrPeers queries peer 'p' for the value of 'key'. It returns the
// (verified) value if the peer has it, or else a set of closer peers to try.
func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer,
	key u.Key, level int) ([]byte, []peer.Peer, error) {

	pmes, err := dht.getValueSingle(ctx, p, key, level)
	if err != nil {
		return nil, nil, err
	}

	if record := pmes.GetRecord(); record != nil {
		// Success! We were given the value
		log.Debug("getValueOrPeers: got value")

		// make sure record is still valid
		err = dht.verifyRecord(record)
		if err != nil {
			log.Error("Received invalid record!")
			return nil, nil, err
		}
		return record.GetValue(), nil, nil
	}

	// TODO decide on providers. This probably shouldn't be happening.
	if prv := pmes.GetProviderPeers(); len(prv) > 0 {
		val, err := dht.getFromPeerList(ctx, key, prv, level)
		if err != nil {
			return nil, nil, err
		}
		log.Debug("getValueOrPeers: got value from providers")
		return val, nil, nil
	}

	// Perhaps we were given closer peers
	var peers []peer.Peer
	for _, pinfo := range pmes.GetCloserPeers() {
		pr, err := dht.peerFromInfo(pinfo)
		if err != nil {
			log.Error(err)
			continue
		}
		peers = append(peers, pr)
	}

	if len(peers) > 0 {
		log.Debug("getValueOrPeers: returning closer peers")
		return nil, peers, nil
	}

	log.Warning("getValueOrPeers: routing.ErrNotFound")
	return nil, nil, routing.ErrNotFound
}
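
// One getValueOrPeers call is a single step of the iterative Kademlia lookup;
// the caller loops over it, roughly like this sketch (the real query logic
// lives elsewhere in this package; pickNext is a hypothetical helper):
//
//	for {
//		val, closer, err := dht.getValueOrPeers(ctx, p, key, level)
//		if val != nil || err != nil {
//			return val, err
//		}
//		p = pickNext(closer) // choose an as-yet-unqueried closer peer
//	}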

// getValueSingle simply performs the get value RPC with the given parameters.
func (dht *IpfsDHT) getValueSingle(ctx context.Context, p peer.Peer,
	key u.Key, level int) (*pb.Message, error) {

	pmes := pb.NewMessage(pb.Message_GET_VALUE, string(key), level)
	return dht.sendRequest(ctx, p, pmes)
}

// getFromPeerList attempts to retrieve the value for 'key' from one of the
// given peers/providers.
//
// TODO: I'm not certain about this implementation. We get a list of
// peers/providers from someone; what do we do with it? Connect to each of
// them? Randomly pick one to get the value from? Or just connect to one at a
// time until we get a successful connection and request the value from it?
func (dht *IpfsDHT) getFromPeerList(ctx context.Context, key u.Key,
	peerlist []*pb.Message_Peer, level int) ([]byte, error) {

	for _, pinfo := range peerlist {
		p, err := dht.ensureConnectedToPeer(ctx, pinfo)
		if err != nil {
			log.Errorf("getFromPeers error: %s", err)
			continue
		}

		pmes, err := dht.getValueSingle(ctx, p, key, level)
		if err != nil {
			log.Errorf("getFromPeers error: %s", err)
			continue
		}

		if record := pmes.GetRecord(); record != nil {
			// Success! We were given the value
			err := dht.verifyRecord(record)
			if err != nil {
				return nil, err
			}
			dht.providers.AddProvider(key, p)
			return record.GetValue(), nil
		}
	}
	return nil, routing.ErrNotFound
}

// getLocal attempts to retrieve the value from the datastore.
func (dht *IpfsDHT) getLocal(key u.Key) ([]byte, error) {
	dht.dslock.Lock()
	defer dht.dslock.Unlock()
	log.Debugf("getLocal %s", key)
	v, err := dht.datastore.Get(key.DsKey())
	if err != nil {
		return nil, err
	}
	log.Debug("found in db")

	byt, ok := v.([]byte)
	if !ok {
		return nil, errors.New("value stored in datastore not []byte")
	}
	rec := new(pb.Record)
	err = proto.Unmarshal(byt, rec)
	if err != nil {
		return nil, err
	}

	// TODO: 'if paranoid'
	if u.Debug {
		err = dht.verifyRecord(rec)
		if err != nil {
			log.Errorf("local record verify failed: %s", err)
			return nil, err
		}
	}

	return rec.GetValue(), nil
}

// putLocal stores the key/value pair in the datastore.
func (dht *IpfsDHT) putLocal(key u.Key, value []byte) error {
	rec, err := dht.makePutRecord(key, value)
	if err != nil {
		return err
	}
	data, err := proto.Marshal(rec)
	if err != nil {
		return err
	}

	return dht.datastore.Put(key.DsKey(), data)
}
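
// Values are stored locally as marshaled pb.Record messages rather than raw
// bytes, so a local put/get pair round-trips through protobuf:
//
//	if err := dht.putLocal(key, value); err != nil { ... }
//	got, err := dht.getLocal(key) // unmarshals the record, may verify it
//	// on success, got holds the original value bytes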

// Update signals all routingTables to update their last-seen status
// on the given peer.
func (dht *IpfsDHT) Update(ctx context.Context, p peer.Peer) {
	log.Event(ctx, "updatePeer", p)
	removedCount := 0
	for _, route := range dht.routingTables {
		removed := route.Update(p)
		if removed != nil {
			removedCount++
		}
	}

	// Only close the connection if no tables refer to this peer:
	// if removedCount == len(dht.routingTables) {
	// 	dht.network.ClosePeer(p)
	// }
	// ACTUALLY, no, let's not just close the connection. It may be connected
	// for other reasons. It seems that we just need connection timeouts
	// after some deadline of inactivity.
}

// FindLocal looks for a peer with a given ID connected to this dht and
// returns the peer and the table it was found in.
func (dht *IpfsDHT) FindLocal(id peer.ID) (peer.Peer, *kb.RoutingTable) {
	for _, table := range dht.routingTables {
		p := table.Find(id)
		if p != nil {
			return p, table
		}
	}
	return nil, nil
}

// findPeerSingle asks peer 'p' if they know where the peer with id 'id' is.
func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p peer.Peer, id peer.ID, level int) (*pb.Message, error) {
	pmes := pb.NewMessage(pb.Message_FIND_NODE, string(id), level)
	return dht.sendRequest(ctx, p, pmes)
}

// findProvidersSingle asks peer 'p' for any known providers of 'key'.
func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p peer.Peer, key u.Key, level int) (*pb.Message, error) {
	pmes := pb.NewMessage(pb.Message_GET_PROVIDERS, string(key), level)
	return dht.sendRequest(ctx, p, pmes)
}

// addProviders registers the given peers as providers for 'key' and returns
// the successfully resolved ones.
func (dht *IpfsDHT) addProviders(key u.Key, peers []*pb.Message_Peer) []peer.Peer {
	var provArr []peer.Peer
	for _, prov := range peers {
		p, err := dht.peerFromInfo(prov)
		if err != nil {
			log.Errorf("error getting peer from info: %v", err)
			continue
		}

		log.Debugf("%s adding provider: %s for %s", dht.self, p, key)

		// Don't add ourselves to the list
		if p.ID().Equal(dht.self.ID()) {
			continue
		}

		// TODO(jbenet) ensure providers is idempotent
		dht.providers.AddProvider(key, p)
		provArr = append(provArr, p)
	}
	return provArr
}

// nearestPeersToQuery returns the routing table's closest peers to the
// message's key.
func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.Peer {
	level := pmes.GetClusterLevel()
	cluster := dht.routingTables[level]

	key := u.Key(pmes.GetKey())
	closer := cluster.NearestPeers(kb.ConvertKey(key), count)
	return closer
}

// betterPeersToQuery returns nearestPeersToQuery's results, filtered to only
// those peers that are closer to the key than we are.
func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.Peer {
	closer := dht.nearestPeersToQuery(pmes, count)

	// no node? nil
	if closer == nil {
		return nil
	}

	// == to self? that's bad
	for _, p := range closer {
		if p.ID().Equal(dht.self.ID()) {
			log.Error("Attempted to return self! this shouldn't happen...")
			return nil
		}
	}

	var filtered []peer.Peer
	for _, p := range closer {
		// must all be closer than self
		key := u.Key(pmes.GetKey())
		if !kb.Closer(dht.self.ID(), p.ID(), key) {
			filtered = append(filtered, p)
		}
	}

	// ok, seems like closer nodes
	return filtered
}
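
// The filter above hinges on kb.Closer(a, b, key), which reports whether peer
// 'a' is closer to 'key' than peer 'b' under the Kademlia XOR metric. A rough
// sketch of that comparison (the real one lives in the kbucket package):
//
//	da := xor(convert(a), convert(key))
//	db := xor(convert(b), convert(key))
//	return bytes.Compare(da, db) == -1 // a is strictly closer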

// getPeer searches the peerstore for a peer with the given peer ID.
func (dht *IpfsDHT) getPeer(id peer.ID) (peer.Peer, error) {
	p, err := dht.peerstore.Get(id)
	if err != nil {
		err = fmt.Errorf("failed to get peer from peerstore: %s", err)
		log.Error(err)
		return nil, err
	}
	return p, nil
}

// peerFromInfo returns a peer using the info in the protobuf peer struct
// to look up or create a peer.
func (dht *IpfsDHT) peerFromInfo(pbp *pb.Message_Peer) (peer.Peer, error) {

	id := peer.ID(pbp.GetId())

	// bail out if it's ourselves
	// TODO(jbenet) not sure this should be an error _here_
	if id.Equal(dht.self.ID()) {
		return nil, errors.New("found self")
	}

	p, err := dht.getPeer(id)
	if err != nil {
		return nil, err
	}

	maddr, err := pbp.Address()
	if err != nil {
		return nil, err
	}
	p.AddAddress(maddr)
	return p, nil
}

// ensureConnectedToPeer resolves the given protobuf peer info and dials the
// resulting peer.
func (dht *IpfsDHT) ensureConnectedToPeer(ctx context.Context, pbp *pb.Message_Peer) (peer.Peer, error) {
	p, err := dht.peerFromInfo(pbp)
	if err != nil {
		return nil, err
	}

	// dial connection
	err = dht.dialer.DialPeer(ctx, p)
	return p, err
}

// loadProvidableKeys loads the keys in the local datastore into the
// provider manager.
// TODO: this should be smarter about which keys it selects.
func (dht *IpfsDHT) loadProvidableKeys() error {
	kl, err := dht.datastore.KeyList()
	if err != nil {
		return err
	}
	for _, dsk := range kl {
		k := u.KeyFromDsKey(dsk)
		if len(k) == 0 {
			log.Errorf("loadProvidableKeys error: %v", dsk)
			continue // skip keys that fail to convert
		}

		dht.providers.AddProvider(k, dht.self)
	}
	return nil
}

// PingRoutine periodically pings nearest neighbors.
func (dht *IpfsDHT) PingRoutine(t time.Duration) {
	defer dht.Children().Done()

	tick := time.Tick(t)
	for {
		select {
		case <-tick:
			id := make([]byte, 16)
			rand.Read(id)
			peers := dht.routingTables[0].NearestPeers(kb.ConvertKey(u.Key(id)), 5)
			for _, p := range peers {
				ctx, _ := context.WithTimeout(dht.Context(), time.Second*5)
				err := dht.Ping(ctx, p)
				if err != nil {
					log.Errorf("Ping error: %s", err)
				}
			}
		case <-dht.Closing():
			return
		}
	}
}
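
// Pinging the neighborhood of a fresh random key each tick spreads liveness
// checks across the keyspace instead of always hitting the same peers. When
// doPinging is enabled, NewDHT starts this loop as:
//
//	dht.Children().Add(1)
//	go dht.PingRoutine(time.Second * 10)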

// Bootstrap builds up a list of peers by requesting random peer IDs.
func (dht *IpfsDHT) Bootstrap(ctx context.Context) {
	id := make([]byte, 16)
	rand.Read(id)
	p, err := dht.FindPeer(ctx, peer.ID(id))
	if err != nil {
		log.Errorf("Bootstrap peer error: %s", err)
		return // no peer found; nothing to dial
	}
	err = dht.dialer.DialPeer(ctx, p)
	if err != nil {
		log.Errorf("Bootstrap peer error: %s", err)
	}
}
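
// Searching for a random ID is the usual Kademlia table-refresh trick: the
// lookup itself walks the network, and every peer touched along the way is
// added to the routing tables via Update. A hypothetical startup sequence:
//
//	if _, err := dht.Connect(ctx, seedPeer); err != nil { ... } // seed first
//	dht.Bootstrap(ctx)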