package dht

import (
	"bytes"
	"encoding/json"
	"errors"
	"math/rand"
	"sync"
	"time"

	proto "code.google.com/p/goprotobuf/proto"

	ma "github.com/jbenet/go-multiaddr"

	peer "github.com/jbenet/go-ipfs/peer"
	kb "github.com/jbenet/go-ipfs/routing/kbucket"
	swarm "github.com/jbenet/go-ipfs/swarm"
	u "github.com/jbenet/go-ipfs/util"
)

// PoolSize is the number of nodes used for group find/set RPC calls.
var PoolSize = 6

// GenerateMessageID produces a random ID for a DHT message.
// TODO: determine a way of creating and managing message IDs
func GenerateMessageID() uint64 {
	//return (uint64(rand.Uint32()) << 32) & uint64(rand.Uint32())
	return uint64(rand.Uint32())
}
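
// Illustrative sketch only, not part of the original code: if a full 64-bit
// message ID is wanted later, the two random halves must be combined with |
// rather than &; the commented-out line above always evaluates to 0 because
// the shifted half and the unshifted half share no set bits.
func generateMessageID64() uint64 {
	return (uint64(rand.Uint32()) << 32) | uint64(rand.Uint32())
}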

// This file implements the Routing interface for the IpfsDHT struct.

// Basic Put/Get

// PutValue adds value corresponding to given Key.
// This is the top level "Store" operation of the DHT. Send failures are
// reported through s.network.Error rather than returned.
func (s *IpfsDHT) PutValue(key u.Key, value []byte) {
	complete := make(chan struct{})
	for _, route := range s.routes {
		p := route.NearestPeer(kb.ConvertKey(key))
		if p == nil {
			s.network.Error(kb.ErrLookupFailure)
			go func() {
				complete <- struct{}{}
			}()
			continue
		}
		go func() {
			err := s.putValueToNetwork(p, string(key), value)
			if err != nil {
				s.network.Error(err)
			}
			complete <- struct{}{}
		}()
	}

	// Wait for one completion signal per routing table.
	for range s.routes {
		<-complete
	}
}
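
// Usage sketch, illustrative only and not part of the original API: store a
// value and read it back. It assumes an *IpfsDHT constructed elsewhere and
// simply chains the PutValue and GetValue calls defined in this file.
func examplePutGet(dht *IpfsDHT, key u.Key, value []byte) ([]byte, error) {
	dht.PutValue(key, value)
	return dht.GetValue(key, time.Second*10)
}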

// counter is a mutex-guarded count that can be updated and read from
// multiple goroutines. GetValue uses it to track how many peer queries
// are currently in flight.
type counter struct {
	n   int
	mut sync.RWMutex
}

func (c *counter) Increment() {
	c.mut.Lock()
	c.n++
	c.mut.Unlock()
}

func (c *counter) Decrement() {
	c.mut.Lock()
	c.n--
	c.mut.Unlock()
}

func (c *counter) Size() int {
	c.mut.RLock()
	defer c.mut.RUnlock()
	return c.n
}

// GetValue searches for the value corresponding to given Key.
// It checks the local datastore first, then queries the closest known
// peers, following closer peers they return, until a value is found or
// the timeout expires.
func (s *IpfsDHT) GetValue(key u.Key, timeout time.Duration) ([]byte, error) {
	ll := startNewRpc("GET")
	defer func() {
		ll.EndLog()
		ll.Print()
	}()

	// If we have it locally, don't bother doing an RPC!
	// NOTE: this might not be what we want to do...
	val, err := s.GetLocal(key)
	if err == nil {
		ll.Success = true
		u.DOut("Found local, returning.")
		return val, nil
	}

	route_level := 0
	closest := s.routes[route_level].NearestPeers(kb.ConvertKey(key), PoolSize)
	if len(closest) == 0 {
		return nil, kb.ErrLookupFailure
	}

	val_chan := make(chan []byte)
	npeer_chan := make(chan *peer.Peer, 30)
	proc_peer := make(chan *peer.Peer, 30)
	err_chan := make(chan error)
	after := time.After(timeout)

	// Seed the query with the closest peers we already know about.
	for _, p := range closest {
		npeer_chan <- p
	}

	c := counter{}

	// This limit value is referred to as k in the kademlia paper.
	limit := 20
	count := 0

	// Dispatcher: hands candidate peers to the workers and reports
	// ErrNotFound once no queries remain in flight and no candidates
	// are waiting.
	go func() {
		for {
			select {
			case p := <-npeer_chan:
				count++
				if count >= limit {
					// Note: this break only exits the select, not the
					// loop; candidates beyond the k-peer limit are
					// simply dropped.
					break
				}
				c.Increment()
				proc_peer <- p
			default:
				if c.Size() == 0 {
					err_chan <- u.ErrNotFound
				}
			}
		}
	}()

	// Worker: query one peer at a time, delivering a found value on
	// val_chan or feeding the closer peers it returns back to the
	// dispatcher.
	process := func() {
		for {
			select {
			case p, ok := <-proc_peer:
				if !ok || p == nil {
					c.Decrement()
					return
				}
				val, peers, err := s.getValueOrPeers(p, key, timeout/4, route_level)
				if err != nil {
					u.DErr(err.Error())
					c.Decrement()
					continue
				}
				if val != nil {
					val_chan <- val
					c.Decrement()
					return
				}

				for _, np := range peers {
					// TODO: filter out peers that arent closer
					npeer_chan <- np
				}
				c.Decrement()
			}
		}
	}

	concurFactor := 3
	for i := 0; i < concurFactor; i++ {
		go process()
	}

	// Wait for the first result: a value, a not-found report, or the timeout.
	select {
	case val := <-val_chan:
		close(npeer_chan)
		return val, nil
	case err := <-err_chan:
		close(npeer_chan)
		return nil, err
	case <-after:
		close(npeer_chan)
		return nil, u.ErrTimeout
	}
}
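
// Usage sketch, illustrative only and not part of the original API: the
// distinct errors GetValue can return, as implemented above, and what each
// one means.
func exampleGetErrors(dht *IpfsDHT, key u.Key) {
	val, err := dht.GetValue(key, time.Second*10)
	switch err {
	case nil:
		u.DOut("got value of %d bytes", len(val))
	case kb.ErrLookupFailure:
		u.DOut("no peers in the routing table to ask")
	case u.ErrNotFound:
		u.DOut("queried peers, but none had the value")
	case u.ErrTimeout:
		u.DOut("lookup did not finish within the timeout")
	default:
		u.DErr(err.Error())
	}
}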

// Value provider layer of indirection.
// This is what DSHTs (Coral and MainlineDHT) do to store large values in a DHT.

// Provide announces that this node can provide a value for the given key.
func (s *IpfsDHT) Provide(key u.Key) error {
	peers := s.routes[0].NearestPeers(kb.ConvertKey(key), PoolSize)
	if len(peers) == 0 {
		return kb.ErrLookupFailure
	}

	pmes := DHTMessage{
		Type: PBDHTMessage_ADD_PROVIDER,
		Key:  string(key),
	}
	pbmes := pmes.ToProtobuf()

	for _, p := range peers {
		mes := swarm.NewMessage(p, pbmes)
		s.network.Send(mes)
	}
	return nil
}

// FindProviders searches for peers who can provide the value for the given key.
func (s *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer, error) {
	ll := startNewRpc("FindProviders")
	defer func() {
		ll.EndLog()
		ll.Print()
	}()
	u.DOut("Find providers for: '%s'", key)
	p := s.routes[0].NearestPeer(kb.ConvertKey(key))
	if p == nil {
		return nil, kb.ErrLookupFailure
	}

	for level := 0; level < len(s.routes); {
		pmes, err := s.findProvidersSingle(p, key, level, timeout)
		if err != nil {
			return nil, err
		}
		if pmes.GetSuccess() {
			provs := s.addPeerList(key, pmes.GetPeers())
			ll.Success = true
			return provs, nil
		}

		// No providers here; follow the closer peers we were given.
		closer := pmes.GetPeers()
		if len(closer) == 0 {
			level++
			continue
		}
		if peer.ID(closer[0].GetId()).Equal(s.self.ID) {
			u.DOut("Got myself back as a closer peer.")
			return nil, u.ErrNotFound
		}
		maddr, err := ma.NewMultiaddr(closer[0].GetAddr())
		if err != nil {
			// ??? Move up route level???
			panic("not yet implemented")
		}

		np, err := s.network.GetConnection(peer.ID(closer[0].GetId()), maddr)
		if err != nil {
			u.PErr("[%s] Failed to connect to: %s", s.self.ID.Pretty(), closer[0].GetAddr())
			level++
			continue
		}
		p = np
	}
	return nil, u.ErrNotFound
}
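
// Usage sketch, illustrative only and not part of the original API: announce
// this node as a provider for a key, then ask the DHT who provides it.
// Assumes an *IpfsDHT constructed elsewhere.
func exampleProvide(dht *IpfsDHT, key u.Key) ([]*peer.Peer, error) {
	if err := dht.Provide(key); err != nil {
		return nil, err
	}
	return dht.FindProviders(key, time.Second*10)
}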

// Find specific Peer

// FindPeer searches for a peer with given ID.
func (s *IpfsDHT) FindPeer(id peer.ID, timeout time.Duration) (*peer.Peer, error) {
	// Check if we're already connected to them
	p, _ := s.Find(id)
	if p != nil {
		return p, nil
	}

	route_level := 0
	p = s.routes[route_level].NearestPeer(kb.ConvertPeerID(id))
	if p == nil {
		return nil, kb.ErrLookupFailure
	}
	if p.ID.Equal(id) {
		return p, nil
	}

	for route_level < len(s.routes) {
		pmes, err := s.findPeerSingle(p, id, timeout, route_level)
		if err != nil {
			return nil, err
		}

		plist := pmes.GetPeers()
		if len(plist) == 0 {
			// No closer peers at this level; try the next routing table.
			route_level++
			continue
		}
		found := plist[0]

		addr, err := ma.NewMultiaddr(found.GetAddr())
		if err != nil {
			return nil, u.WrapError(err, "FindPeer received bad info")
		}

		nxtPeer, err := s.network.GetConnection(peer.ID(found.GetId()), addr)
		if err != nil {
			return nil, u.WrapError(err, "FindPeer failed to connect to new peer.")
		}

		if pmes.GetSuccess() {
			if !id.Equal(nxtPeer.ID) {
				return nil, errors.New("got back invalid peer from 'successful' response")
			}
			return nxtPeer, nil
		}

		// Not the peer we are after; keep searching from this closer peer.
		p = nxtPeer
	}
	return nil, u.ErrNotFound
}
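
// Usage sketch, illustrative only and not part of the original API: locate a
// peer by ID and ping it, which also records a latency sample via SetLatency
// in Ping below. Assumes an *IpfsDHT constructed elsewhere.
func exampleFindAndPing(dht *IpfsDHT, id peer.ID) error {
	p, err := dht.FindPeer(id, time.Second*10)
	if err != nil {
		return err
	}
	return dht.Ping(p, time.Second*5)
}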

// Ping a peer, log the time it took
func (dht *IpfsDHT) Ping(p *peer.Peer, timeout time.Duration) error {
	// Thoughts: maybe this should accept an ID and do a peer lookup?
	u.DOut("Enter Ping.")

	pmes := DHTMessage{Id: GenerateMessageID(), Type: PBDHTMessage_PING}
	mes := swarm.NewMessage(p, pmes.ToProtobuf())

	before := time.Now()
	response_chan := dht.ListenFor(pmes.Id, 1, time.Minute)
	dht.network.Send(mes)

	tout := time.After(timeout)
	select {
	case <-response_chan:
		roundtrip := time.Since(before)
		p.SetLatency(roundtrip)
		u.DOut("Ping took %s.", roundtrip.String())
		return nil
	case <-tout:
		// Timed out, think about removing peer from network
		u.DOut("Ping peer timed out.")
		dht.Unlisten(pmes.Id)
		return u.ErrTimeout
	}
}

// GetDiagnostic sends a diagnostic request to the peers closest to this node
// and collects the diagnostic records they return, until every target has
// answered or the timeout expires.
func (dht *IpfsDHT) GetDiagnostic(timeout time.Duration) ([]*diagInfo, error) {
	u.DOut("Begin Diagnostic")
	// Send to N closest peers
	targets := dht.routes[0].NearestPeers(kb.ConvertPeerID(dht.self.ID), 10)

	// TODO: Add timeout to this struct so nodes know when to return
	pmes := DHTMessage{
		Type: PBDHTMessage_DIAGNOSTIC,
		Id:   GenerateMessageID(),
	}

	listenChan := dht.ListenFor(pmes.Id, len(targets), time.Minute*2)

	pbmes := pmes.ToProtobuf()
	for _, p := range targets {
		mes := swarm.NewMessage(p, pbmes)
		dht.network.Send(mes)
	}

	var out []*diagInfo
	after := time.After(timeout)
	for count := len(targets); count > 0; {
		select {
		case <-after:
			u.DOut("Diagnostic request timed out.")
			return out, u.ErrTimeout
		case resp := <-listenChan:
			pmes_out := new(PBDHTMessage)
			err := proto.Unmarshal(resp.Data, pmes_out)
			if err != nil {
				// NOTE: here and elsewhere, need to audit error handling,
				// some errors should be continued on from
				return out, err
			}

			dec := json.NewDecoder(bytes.NewBuffer(pmes_out.GetValue()))
			for {
				di := new(diagInfo)
				err := dec.Decode(di)
				if err != nil {
					break
				}

				out = append(out, di)
			}
			// One target accounted for; the loop ends once all have replied.
			count--
		}
	}

	return out, nil
}
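
// Usage sketch, illustrative only and not part of the original API: run a
// diagnostic sweep and report how many records were decoded before the
// timeout. Assumes an *IpfsDHT constructed elsewhere.
func exampleDiagnostic(dht *IpfsDHT) {
	infos, err := dht.GetDiagnostic(time.Second * 30)
	if err != nil {
		u.DErr(err.Error())
	}
	u.DOut("received %d diagnostic records", len(infos))
}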