package dht

import (
	"bytes"
	"crypto/rand"
	"errors"
	"fmt"
	"sync"
	"time"

	inet "github.com/jbenet/go-ipfs/net"
	msg "github.com/jbenet/go-ipfs/net/message"
	peer "github.com/jbenet/go-ipfs/peer"
	kb "github.com/jbenet/go-ipfs/routing/kbucket"
	u "github.com/jbenet/go-ipfs/util"

	context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
	ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
)

// TODO. SEE https://github.com/jbenet/node-ipfs/blob/master/submodules/ipfs-dht/index.js

// IpfsDHT is an implementation of Kademlia with Coral and S/Kademlia modifications.
// It is used to implement the base IpfsRouting module.
type IpfsDHT struct {
	// Array of routing tables for differently distanced nodes
	// NOTE: (currently, only a single table is used)
	routingTables []*kb.RoutingTable

	// the network interface and the sender service used to send messages over it
	network inet.Network
	sender  inet.Sender

	// Local peer (yourself)
	self *peer.Peer

	// Local data
	datastore ds.Datastore
	dslock    sync.Mutex

	providers *ProviderManager

	// Signal to shutdown dht
	shutdown chan struct{}

	// When this peer started up
	birth time.Time

	// lock to make diagnostics work better
	diaglock sync.Mutex
}
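
// Typical construction and shutdown of a DHT (illustrative sketch only; the
// surrounding node is responsible for providing a configured inet.Network,
// inet.Sender and ds.Datastore):
//
//	dht := NewDHT(self, network, sender, dstore)
//	defer dht.Halt()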

// NewDHT creates a new DHT object with the given peer as the 'local' host
func NewDHT(p *peer.Peer, net inet.Network, sender inet.Sender, dstore ds.Datastore) *IpfsDHT {
	dht := new(IpfsDHT)
	dht.network = net
	dht.sender = sender
	dht.datastore = dstore
	dht.self = p

	dht.providers = NewProviderManager(p.ID)
	dht.shutdown = make(chan struct{})

	dht.routingTables = make([]*kb.RoutingTable, 3)
	dht.routingTables[0] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*30)
	dht.routingTables[1] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*100)
	dht.routingTables[2] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Hour)
	dht.birth = time.Now()
	return dht
}

// Start up background goroutines needed by the DHT
func (dht *IpfsDHT) Start() {
	panic("the service is already started. rmv this method")
}

// Connect to a new peer at the given address, ping and add to the routing table
func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) {
	maddrstr, _ := addr.String()
	u.DOut("Connect to new peer: %s\n", maddrstr)

	// TODO(jbenet,whyrusleeping)
	//
	// Connect should take in a Peer (with ID). In a sense, we shouldn't be
	// allowing connections to random multiaddrs without knowing who we're
	// speaking to (i.e. peer.ID). In terms of moving around simple addresses
	// -- instead of an (ID, Addr) pair -- we can use:
	//
	// /ip4/10.20.30.40/tcp/1234/ipfs/Qxhxxchxzcncxnzcnxzcxzm
	//
	npeer := &peer.Peer{}
	npeer.AddAddress(addr)
	err := dht.network.DialPeer(npeer)
	if err != nil {
		return nil, err
	}

	// Ping new peer to register in their routing table
	// NOTE: this should be done better...
	err = dht.Ping(npeer, time.Second*2)
	if err != nil {
		return nil, fmt.Errorf("failed to ping newly connected peer: %s\n", err)
	}

	dht.Update(npeer)

	return npeer, nil
}

// HandleMessage implements the inet.Handler interface.
func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) (msg.NetMessage, error) {

	mData := mes.Data()
	if mData == nil {
		return nil, errors.New("message did not include Data")
	}

	mPeer := mes.Peer()
	if mPeer == nil {
		return nil, errors.New("message did not include a Peer")
	}

	// deserialize msg
	pmes := new(Message)
	err := proto.Unmarshal(mData, pmes)
	if err != nil {
		return nil, fmt.Errorf("Failed to decode protobuf message: %v\n", err)
	}

	// update the peer (on valid msgs only)
	dht.Update(mPeer)

	// Print out diagnostic
	u.DOut("[peer: %s]\nGot message type: '%s' [from = %s]\n",
		dht.self.ID.Pretty(),
		Message_MessageType_name[int32(pmes.GetType())], mPeer.ID.Pretty())

	// get handler for this msg type.
	handler := dht.handlerForMsgType(pmes.GetType())
	if handler == nil {
		return nil, errors.New("Received invalid message type")
	}

	// dispatch handler.
	rpmes, err := handler(mPeer, pmes)
	if err != nil {
		return nil, err
	}

	// serialize response msg
	rmes, err := msg.FromObject(mPeer, rpmes)
	if err != nil {
		return nil, fmt.Errorf("Failed to encode protobuf message: %v\n", err)
	}

	return rmes, nil
}

// dhtHandler specifies the signature of functions that handle DHT messages.
type dhtHandler func(*peer.Peer, *Message) (*Message, error)
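
// handlerForMsgType returns the handler for the given message type, or nil
// if messages of that type are not handled yet.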
func (dht *IpfsDHT) handlerForMsgType(t Message_MessageType) dhtHandler {
	switch t {
	case Message_GET_VALUE:
		return dht.handleGetValue
	// case Message_PUT_VALUE:
	// 	return dht.handlePutValue
	// case Message_FIND_NODE:
	// 	return dht.handleFindPeer
	// case Message_ADD_PROVIDER:
	// 	return dht.handleAddProvider
	// case Message_GET_PROVIDERS:
	// 	return dht.handleGetProviders
	// case Message_PING:
	// 	return dht.handlePing
	// case Message_DIAGNOSTIC:
	// 	return dht.handleDiagnostic
	default:
		return nil
	}
}
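
// putValueToNetwork sends a PUT_VALUE message for the given key/value pair
// to a single peer, using the DHT's sender service.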
func (dht *IpfsDHT) putValueToNetwork(p *peer.Peer, key string, value []byte) error {
	typ := Message_PUT_VALUE
	pmes := &Message{
		Type:  &typ,
		Key:   &key,
		Value: value,
	}

	mes, err := msg.FromObject(p, pmes)
	if err != nil {
		return err
	}
	return dht.sender.SendMessage(context.TODO(), mes)
}
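
// handleGetValue answers a GET_VALUE request with, in order of preference:
// the value from the local datastore, any known providers for the key, or
// the closest peer we know of to the key.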
func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error) {
	u.DOut("handleGetValue for key: %s\n", pmes.GetKey())

	// setup response
	resp := &Message{
		Type: pmes.Type,
		Key:  pmes.Key,
	}

	// first, is the key even a key?
	key := pmes.GetKey()
	if key == "" {
		return nil, errors.New("handleGetValue but no key was provided")
	}

	// let's first check if we have the value locally.
	dskey := ds.NewKey(pmes.GetKey())
	iVal, err := dht.datastore.Get(dskey)

	// if we got an unexpected error, bail.
	if err != nil && err != ds.ErrNotFound {
		return nil, err
	}

	// if we have the value, respond with it!
	if err == nil {
		u.DOut("handleGetValue success!\n")

		byts, ok := iVal.([]byte)
		if !ok {
			return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
		}

		resp.Value = byts
		return resp, nil
	}

	// if we know any providers for the requested value, return those.
	provs := dht.providers.GetProviders(u.Key(pmes.GetKey()))
	if len(provs) > 0 {
		u.DOut("handleGetValue returning %d provider[s]\n", len(provs))
		resp.ProviderPeers = provs
		return resp, nil
	}

	// Find closest peer on given cluster to desired key and reply with that info
	// TODO: this should probably be decomposed.

	// stored levels are > 1, to distinguish missing levels.
	level := pmes.GetClusterLevel()
	if level < 0 {
		// TODO: maybe return an error? Defaulting isn't a good idea IMO
		u.PErr("handleGetValue: no routing level specified, assuming 0\n")
		level = 0
	}
	u.DOut("handleGetValue searching level %d clusters\n", level)

	ck := kb.ConvertKey(u.Key(pmes.GetKey()))
	closer := dht.routingTables[level].NearestPeer(ck)

	// if closer peer is self, return nil
	if closer.ID.Equal(dht.self.ID) {
		u.DOut("Attempted to return self! this shouldn't happen...\n")
		resp.CloserPeers = nil
		return resp, nil
	}

	// if self is closer than the one from the table, return nil
	if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) {
		u.DOut("handleGetValue could not find a closer node than myself.\n")
		resp.CloserPeers = nil
		return resp, nil
	}

	// we got a closer peer, it seems. return it.
	u.DOut("handleGetValue returning a closer peer: '%s'\n", closer.ID.Pretty())
	resp.CloserPeers = []*peer.Peer{closer}
	return resp, nil
}

// Store a value in this peer's local storage
func (dht *IpfsDHT) handlePutValue(p *peer.Peer, pmes *PBDHTMessage) {
	dht.dslock.Lock()
	defer dht.dslock.Unlock()
	dskey := ds.NewKey(pmes.GetKey())
	err := dht.datastore.Put(dskey, pmes.GetValue())
	if err != nil {
		// For now, just panic, handle this better later maybe
		panic(err)
	}
}
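
// handlePing replies to a ping request, echoing the message ID back to the sender.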
func (dht *IpfsDHT) handlePing(p *peer.Peer, pmes *PBDHTMessage) {
	u.DOut("[%s] Responding to ping from [%s]!\n", dht.self.ID.Pretty(), p.ID.Pretty())
	resp := Message{
		Type:     pmes.GetType(),
		Response: true,
		ID:       pmes.GetId(),
	}

	dht.netChan.Outgoing <- swarm.NewMessage(p, resp.ToProtobuf())
}
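
// handleFindPeer replies to a FIND_NODE request with the closest known peer
// to the requested key, unless we are closer to it ourselves.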
func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *PBDHTMessage) {
	resp := Message{
		Type:     pmes.GetType(),
		ID:       pmes.GetId(),
		Response: true,
	}
	defer func() {
		mes := swarm.NewMessage(p, resp.ToProtobuf())
		dht.netChan.Outgoing <- mes
	}()
	level := pmes.GetValue()[0]
	u.DOut("handleFindPeer: searching for '%s'\n", peer.ID(pmes.GetKey()).Pretty())
	closest := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
	if closest == nil {
		u.PErr("handleFindPeer: could not find anything.\n")
		return
	}

	if len(closest.Addresses) == 0 {
		u.PErr("handleFindPeer: no addresses for connected peer...\n")
		return
	}

	// If the found peer is further away than this peer...
	if kb.Closer(dht.self.ID, closest.ID, u.Key(pmes.GetKey())) {
		return
	}

	u.DOut("handleFindPeer: sending back '%s'\n", closest.ID.Pretty())
	resp.Peers = []*peer.Peer{closest}
	resp.Success = true
}
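
// handleGetProviders replies with every provider known for the requested key
// (including ourselves if we hold the value locally), or with a closer peer
// if no providers are known.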
func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *PBDHTMessage) {
	resp := Message{
		Type:     PBDHTMessage_GET_PROVIDERS,
		Key:      pmes.GetKey(),
		ID:       pmes.GetId(),
		Response: true,
	}

	has, err := dht.datastore.Has(ds.NewKey(pmes.GetKey()))
	if err != nil {
		dht.netChan.Errors <- err
	}

	providers := dht.providers.GetProviders(u.Key(pmes.GetKey()))
	if has {
		providers = append(providers, dht.self)
	}
	if providers == nil || len(providers) == 0 {
		level := 0
		if len(pmes.GetValue()) > 0 {
			level = int(pmes.GetValue()[0])
		}

		closer := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
		if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) {
			resp.Peers = nil
		} else {
			resp.Peers = []*peer.Peer{closer}
		}
	} else {
		resp.Peers = providers
		resp.Success = true
	}

	mes := swarm.NewMessage(p, resp.ToProtobuf())
	dht.netChan.Outgoing <- mes
}
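
// providerInfo records a peer that provides a value and when we learned of it.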
type providerInfo struct {
	Creation time.Time
	Value    *peer.Peer
}
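
// handleAddProvider records the sending peer as a provider for the given key.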
func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *PBDHTMessage) {
	key := u.Key(pmes.GetKey())
	u.DOut("[%s] Adding [%s] as a provider for '%s'\n", dht.self.ID.Pretty(), p.ID.Pretty(), peer.ID(key).Pretty())
	dht.providers.AddProvider(key, p)
}

// Halt stops all communications from this peer and shuts down the dht
func (dht *IpfsDHT) Halt() {
	dht.shutdown <- struct{}{}
	dht.network.Close()
	dht.providers.Halt()
	dht.listener.Halt()
}
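
// handleDiagnostic forwards a diagnostic request to our nearest peers,
// collects their responses, and includes our own diagnostic info in the reply.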
// NOTE: not yet finished, low priority
func (dht *IpfsDHT) handleDiagnostic(p *peer.Peer, pmes *PBDHTMessage) {
	seq := dht.routingTables[0].NearestPeers(kb.ConvertPeerID(dht.self.ID), 10)
	listenChan := dht.listener.Listen(pmes.GetId(), len(seq), time.Second*30)

	for _, ps := range seq {
		mes := swarm.NewMessage(ps, pmes)
		dht.netChan.Outgoing <- mes
	}

	buf := new(bytes.Buffer)
	di := dht.getDiagInfo()
	buf.Write(di.Marshal())

	// NOTE: this shouldn't be a hardcoded value
	after := time.After(time.Second * 20)
	count := len(seq)
	for count > 0 {
		select {
		case <-after:
			// Timeout, return what we have
			goto out
		case reqResp := <-listenChan:
			pmesOut := new(PBDHTMessage)
			err := proto.Unmarshal(reqResp.Data, pmesOut)
			if err != nil {
				// It broke? eh, whatever, keep going
				continue
			}
			buf.Write(reqResp.Data)
			count--
		}
	}

out:
	resp := Message{
		Type:     PBDHTMessage_DIAGNOSTIC,
		ID:       pmes.GetId(),
		Value:    buf.Bytes(),
		Response: true,
	}

	mes := swarm.NewMessage(p, resp.ToProtobuf())
	dht.netChan.Outgoing <- mes
}
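
// getValueOrPeers queries a single peer for key and returns either the value
// or a list of closer peers to continue the search with.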
func (dht *IpfsDHT) getValueOrPeers(p *peer.Peer, key u.Key, timeout time.Duration, level int) ([]byte, []*peer.Peer, error) {
	pmes, err := dht.getValueSingle(p, key, timeout, level)
	if err != nil {
		return nil, nil, err
	}

	if pmes.GetSuccess() {
		if pmes.Value == nil { // We were given provider[s]
			val, err := dht.getFromPeerList(key, timeout, pmes.GetPeers(), level)
			if err != nil {
				return nil, nil, err
			}
			return val, nil, nil
		}

		// Success! We were given the value
		return pmes.GetValue(), nil, nil
	}

	// We were given a closer node
	var peers []*peer.Peer
	for _, pb := range pmes.GetPeers() {
		if peer.ID(pb.GetId()).Equal(dht.self.ID) {
			continue
		}
		addr, err := ma.NewMultiaddr(pb.GetAddr())
		if err != nil {
			u.PErr("%v\n", err.Error())
			continue
		}

		np, err := dht.network.GetConnection(peer.ID(pb.GetId()), addr)
		if err != nil {
			u.PErr("%v\n", err.Error())
			continue
		}

		peers = append(peers, np)
	}
	return nil, peers, nil
}

// getValueSingle simply performs the get value RPC with the given parameters
func (dht *IpfsDHT) getValueSingle(p *peer.Peer, key u.Key, timeout time.Duration, level int) (*PBDHTMessage, error) {
	pmes := Message{
		Type:  PBDHTMessage_GET_VALUE,
		Key:   string(key),
		Value: []byte{byte(level)},
		ID:    swarm.GenerateMessageID(),
	}
	responseChan := dht.listener.Listen(pmes.ID, 1, time.Minute)

	mes := swarm.NewMessage(p, pmes.ToProtobuf())
	t := time.Now()
	dht.netChan.Outgoing <- mes

	// Wait for either the response or a timeout
	timeup := time.After(timeout)
	select {
	case <-timeup:
		dht.listener.Unlisten(pmes.ID)
		return nil, u.ErrTimeout
	case resp, ok := <-responseChan:
		if !ok {
			u.PErr("response channel closed before timeout, please investigate.\n")
			return nil, u.ErrTimeout
		}
		roundtrip := time.Since(t)
		resp.Peer.SetLatency(roundtrip)
		pmesOut := new(PBDHTMessage)
		err := proto.Unmarshal(resp.Data, pmesOut)
		if err != nil {
			return nil, err
		}
		return pmesOut, nil
	}
}
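
// getFromPeerList attempts to fetch the value for key from each of the given
// peers in turn, returning the first successful result.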
// TODO: I'm not certain about this implementation: we get a list of peers/providers
// from someone, what do we do with it? Connect to each of them? Randomly pick
// one to get the value from? Or just connect to one at a time until we get a
// successful connection and request the value from it?
func (dht *IpfsDHT) getFromPeerList(key u.Key, timeout time.Duration,
	peerlist []*PBDHTMessage_PBPeer, level int) ([]byte, error) {
	for _, pinfo := range peerlist {
		p, _ := dht.Find(peer.ID(pinfo.GetId()))
		if p == nil {
			maddr, err := ma.NewMultiaddr(pinfo.GetAddr())
			if err != nil {
				u.PErr("getValue error: %s\n", err)
				continue
			}

			p, err = dht.network.GetConnection(peer.ID(pinfo.GetId()), maddr)
			if err != nil {
				u.PErr("getValue error: %s\n", err)
				continue
			}
		}
		pmes, err := dht.getValueSingle(p, key, timeout, level)
		if err != nil {
			u.DErr("getFromPeers error: %s\n", err)
			continue
		}
		dht.providers.AddProvider(key, p)

		// Make sure it was a successful get
		if pmes.GetSuccess() && pmes.Value != nil {
			return pmes.GetValue(), nil
		}
	}
	return nil, u.ErrNotFound
}
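
// getLocal attempts to retrieve the value for key from the local datastore.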
func (dht *IpfsDHT) getLocal(key u.Key) ([]byte, error) {
	dht.dslock.Lock()
	defer dht.dslock.Unlock()
	v, err := dht.datastore.Get(ds.NewKey(string(key)))
	if err != nil {
		return nil, err
	}
	return v.([]byte), nil
}
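
// putLocal stores the key/value pair in the local datastore.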
func (dht *IpfsDHT) putLocal(key u.Key, value []byte) error {
	return dht.datastore.Put(ds.NewKey(string(key)), value)
}

// Update informs every routing table that the given peer was seen. If the
// update evicts another peer and no table still refers to that peer, its
// connection is closed.
func (dht *IpfsDHT) Update(p *peer.Peer) {
	for _, route := range dht.routingTables {
		removed := route.Update(p)
		// Only close the connection if no tables refer to this peer
		if removed != nil {
			found := false
			for _, r := range dht.routingTables {
				if r.Find(removed.ID) != nil {
					found = true
					break
				}
			}
			if !found {
				dht.network.CloseConnection(removed)
			}
		}
	}
}

// Find looks for a peer with a given ID connected to this dht and returns the peer and the table it was found in.
func (dht *IpfsDHT) Find(id peer.ID) (*peer.Peer, *kb.RoutingTable) {
	for _, table := range dht.routingTables {
		p := table.Find(id)
		if p != nil {
			return p, table
		}
	}
	return nil, nil
}
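
// findPeerSingle sends a FIND_NODE request for id to a single peer and waits
// for the response or a timeout.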
func (dht *IpfsDHT) findPeerSingle(p *peer.Peer, id peer.ID, timeout time.Duration, level int) (*PBDHTMessage, error) {
	pmes := Message{
		Type:  PBDHTMessage_FIND_NODE,
		Key:   string(id),
		ID:    swarm.GenerateMessageID(),
		Value: []byte{byte(level)},
	}

	mes := swarm.NewMessage(p, pmes.ToProtobuf())
	listenChan := dht.listener.Listen(pmes.ID, 1, time.Minute)
	t := time.Now()
	dht.netChan.Outgoing <- mes
	after := time.After(timeout)
	select {
	case <-after:
		dht.listener.Unlisten(pmes.ID)
		return nil, u.ErrTimeout
	case resp := <-listenChan:
		roundtrip := time.Since(t)
		resp.Peer.SetLatency(roundtrip)
		pmesOut := new(PBDHTMessage)
		err := proto.Unmarshal(resp.Data, pmesOut)
		if err != nil {
			return nil, err
		}

		return pmesOut, nil
	}
}
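
// printTables prints every routing table, for debugging.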
func (dht *IpfsDHT) printTables() {
	for _, route := range dht.routingTables {
		route.Print()
	}
}
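
// findProvidersSingle sends a GET_PROVIDERS request for key to a single peer
// and waits for the response or a timeout.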
func (dht *IpfsDHT) findProvidersSingle(p *peer.Peer, key u.Key, level int, timeout time.Duration) (*PBDHTMessage, error) {
	pmes := Message{
		Type:  PBDHTMessage_GET_PROVIDERS,
		Key:   string(key),
		ID:    swarm.GenerateMessageID(),
		Value: []byte{byte(level)},
	}

	mes := swarm.NewMessage(p, pmes.ToProtobuf())

	listenChan := dht.listener.Listen(pmes.ID, 1, time.Minute)
	dht.netChan.Outgoing <- mes
	after := time.After(timeout)
	select {
	case <-after:
		dht.listener.Unlisten(pmes.ID)
		return nil, u.ErrTimeout
	case resp := <-listenChan:
		u.DOut("FindProviders: got response.\n")
		pmesOut := new(PBDHTMessage)
		err := proto.Unmarshal(resp.Data, pmesOut)
		if err != nil {
			return nil, err
		}

		return pmesOut, nil
	}
}
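
// addPeerList registers the given peers as providers for key, connecting to
// any of them that are not already in our network, and returns the resulting
// list of peers.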
// TODO: Could be done async
func (dht *IpfsDHT) addPeerList(key u.Key, peers []*PBDHTMessage_PBPeer) []*peer.Peer {
	var provArr []*peer.Peer
	for _, prov := range peers {
		// Don't add ourselves to the list
		if peer.ID(prov.GetId()).Equal(dht.self.ID) {
			continue
		}
		// Don't add someone who is already on the list
		p := dht.network.GetPeer(u.Key(prov.GetId()))
		if p == nil {
			u.DOut("given provider %s was not in our network already.\n", peer.ID(prov.GetId()).Pretty())
			var err error
			p, err = dht.peerFromInfo(prov)
			if err != nil {
				u.PErr("error connecting to new peer: %s\n", err)
				continue
			}
		}
		dht.providers.AddProvider(key, p)
		provArr = append(provArr, p)
	}
	return provArr
}
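
// peerFromInfo turns a protobuf peer record into a *peer.Peer, using the
// network's GetConnection to reach it.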
func (dht *IpfsDHT) peerFromInfo(pbp *PBDHTMessage_PBPeer) (*peer.Peer, error) {
	maddr, err := ma.NewMultiaddr(pbp.GetAddr())
	if err != nil {
		return nil, err
	}

	return dht.network.GetConnection(peer.ID(pbp.GetId()), maddr)
}
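
// loadProvidableKeys registers ourselves as a provider for every key already
// present in the local datastore.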
func (dht *IpfsDHT) loadProvidableKeys() error {
	kl, err := dht.datastore.KeyList()
	if err != nil {
		return err
	}
	for _, k := range kl {
		dht.providers.AddProvider(u.Key(k.Bytes()), dht.self)
	}
	return nil
}

// Bootstrap builds up a list of peers by requesting random peer IDs
func (dht *IpfsDHT) Bootstrap() {
	id := make([]byte, 16)
	rand.Read(id)
	dht.FindPeer(peer.ID(id), time.Second*10)
}