package dht

import (
	"bytes"
	"encoding/json"
	"sync"
	"time"

	"code.google.com/p/goprotobuf/proto"
	ds "github.com/jbenet/datastore.go"
	identify "github.com/jbenet/go-ipfs/identify"
	peer "github.com/jbenet/go-ipfs/peer"
	swarm "github.com/jbenet/go-ipfs/swarm"
	u "github.com/jbenet/go-ipfs/util"
	ma "github.com/jbenet/go-multiaddr"
)

// TODO: see https://github.com/jbenet/node-ipfs/blob/master/submodules/ipfs-dht/index.js

// IpfsDHT is an implementation of Kademlia with Coral and S/Kademlia modifications.
// It is used to implement the base IpfsRouting module.
type IpfsDHT struct {
	// Array of routing tables for differently distanced nodes
	// NOTE: currently, only a single table is used
	routes []*RoutingTable

	// The local swarm used to send and receive messages
	network *swarm.Swarm

	// Local peer (yourself)
	self *peer.Peer

	// Local data
	datastore ds.Datastore

	// Map of keys to peers that can provide their value
	// TODO: implement a TTL on each of these keys
	providers    map[u.Key][]*providerInfo
	providerLock sync.RWMutex

	// Map of channels waiting for reply messages
	listeners  map[uint64]*listenInfo
	listenLock sync.RWMutex

	// Signal to shutdown dht
	shutdown chan struct{}

	// When this peer started up
	birth time.Time

	// Lock to make diagnostics work better
	diaglock sync.Mutex
}
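
// Note on locking: providers and listeners each have a dedicated RWMutex;
// reads and writes of either map must hold the corresponding lock.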

// listenInfo holds the state of a registered reply listener (see ListenFor).
type listenInfo struct {
	// Channel that matching response messages are delivered on
	resp chan *swarm.Message

	// Number of responses still expected on this channel
	count int

	// End of life: after this time the listener may be cleaned up
	eol time.Time
}

// NewDHT creates a new DHT object with the given peer as the 'local' host.
func NewDHT(p *peer.Peer) (*IpfsDHT, error) {
	if p == nil {
		panic("Tried to create new dht with nil peer")
	}
	network := swarm.NewSwarm(p)
	err := network.Listen()
	if err != nil {
		return nil, err
	}

	dht := new(IpfsDHT)
	dht.network = network
	dht.datastore = ds.NewMapDatastore()
	dht.self = p
	dht.listeners = make(map[uint64]*listenInfo)
	dht.providers = make(map[u.Key][]*providerInfo)
	dht.shutdown = make(chan struct{})
	dht.routes = make([]*RoutingTable, 1)
	dht.routes[0] = NewRoutingTable(20, convertPeerID(p.ID))
	dht.birth = time.Now()
	return dht, nil
}

// Start launches the background goroutines needed by the DHT.
func (dht *IpfsDHT) Start() {
	go dht.handleMessages()
}
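
// A typical lifecycle, as a sketch (assumes "local" is an initialized
// *peer.Peer and that the bootstrap multiaddr below is reachable; both are
// placeholders):
//
//	dht, err := NewDHT(local)
//	if err != nil {
//		panic(err)
//	}
//	dht.Start()
//	maddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/4001")
//	if err != nil {
//		panic(err)
//	}
//	if _, err := dht.Connect(maddr); err != nil {
//		panic(err)
//	}
//	// ... issue queries ...
//	dht.Halt()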

// Connect to a new peer at the given address
// TODO: move this into swarm
func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) {
	if addr == nil {
		panic("addr was nil!")
	}
	npeer := new(peer.Peer)
	npeer.AddAddress(addr)

	conn, err := swarm.Dial("tcp", npeer)
	if err != nil {
		return nil, err
	}

	err = identify.Handshake(dht.self, npeer, conn.Incoming.MsgChan, conn.Outgoing.MsgChan)
	if err != nil {
		return nil, err
	}

	// Send the new peer an address that we can be reached on
	myaddr := dht.self.NetAddress("tcp")
	mastr, err := myaddr.String()
	if err != nil {
		panic("No local address to send")
	}

	conn.Outgoing.MsgChan <- []byte(mastr)

	dht.network.StartConn(conn)

	removed := dht.routes[0].Update(npeer)
	if removed != nil {
		panic("need to remove this peer.")
	}

	// Ping the new peer so we get registered in their routing table
	// NOTE: this should be done better...
	err = dht.Ping(npeer, time.Second*2)
	if err != nil {
		panic("Failed to ping new peer.")
	}

	return npeer, nil
}

// Read in all messages from swarm and handle them appropriately
// NOTE: this function is just a quick sketch
func (dht *IpfsDHT) handleMessages() {
	u.DOut("Begin message handling routine")

	checkTimeouts := time.NewTicker(time.Minute * 5)
	for {
		select {
		case mes, ok := <-dht.network.Chan.Incoming:
			if !ok {
				u.DOut("handleMessages closing, bad recv on incoming")
				return
			}
			pmes := new(DHTMessage)
			err := proto.Unmarshal(mes.Data, pmes)
			if err != nil {
				u.PErr("Failed to decode protobuf message: %s", err)
				continue
			}

			// Update the sender's latest visit in the routing table
			removed := dht.routes[0].Update(mes.Peer)
			if removed != nil {
				panic("Need to handle removed peer.")
			}

			// Note: not sure if this is the correct place for this
			if pmes.GetResponse() {
				dht.listenLock.RLock()
				list, ok := dht.listeners[pmes.GetId()]
				dht.listenLock.RUnlock()
				if ok && time.Now().After(list.eol) {
					// The listener has passed its end of life; drop it
					dht.Unlisten(pmes.GetId())
					ok = false
				}
				if ok {
					// Deliver the response (this send blocks until the
					// listener reads it)
					list.resp <- mes
					if list.count > 1 {
						list.count--
					} else {
						// That was the last response this listener expected
						dht.Unlisten(pmes.GetId())
					}
				} else {
					// this is expected behaviour during a timeout
					u.DOut("Received response with nobody listening...")
				}

				continue
			}

			u.DOut("[peer: %s]", dht.self.ID.Pretty())
			u.DOut("Got message type: '%s' [id = %x, from = %s]",
				DHTMessage_MessageType_name[int32(pmes.GetType())],
				pmes.GetId(), mes.Peer.ID.Pretty())
			switch pmes.GetType() {
			case DHTMessage_GET_VALUE:
				dht.handleGetValue(mes.Peer, pmes)
			case DHTMessage_PUT_VALUE:
				dht.handlePutValue(mes.Peer, pmes)
			case DHTMessage_FIND_NODE:
				dht.handleFindPeer(mes.Peer, pmes)
			case DHTMessage_ADD_PROVIDER:
				dht.handleAddProvider(mes.Peer, pmes)
			case DHTMessage_GET_PROVIDERS:
				dht.handleGetProviders(mes.Peer, pmes)
			case DHTMessage_PING:
				dht.handlePing(mes.Peer, pmes)
			case DHTMessage_DIAGNOSTIC:
				dht.handleDiagnostic(mes.Peer, pmes)
			}

		case err := <-dht.network.Chan.Errors:
			u.DErr("dht err: %s", err)
			panic(err)
		case <-dht.shutdown:
			checkTimeouts.Stop()
			return
		case <-checkTimeouts.C:
			// Drop provider entries older than an hour
			dht.providerLock.Lock()
			for k, parr := range dht.providers {
				var cleaned []*providerInfo
				for _, v := range parr {
					if time.Since(v.Creation) < time.Hour {
						cleaned = append(cleaned, v)
					}
				}
				dht.providers[k] = cleaned
			}
			dht.providerLock.Unlock()

			// Drop listeners that have passed their end of life
			dht.listenLock.Lock()
			var remove []uint64
			now := time.Now()
			for k, v := range dht.listeners {
				if now.After(v.eol) {
					remove = append(remove, k)
				}
			}
			for _, k := range remove {
				delete(dht.listeners, k)
			}
			dht.listenLock.Unlock()
		}
	}
}

// putValueToPeer sends a PUT_VALUE message for the given key/value pair to a
// single remote peer. It is fire-and-forget: no response is awaited.
func (dht *IpfsDHT) putValueToPeer(p *peer.Peer, key string, value []byte) error {
	pmes := pDHTMessage{
		Type:  DHTMessage_PUT_VALUE,
		Key:   key,
		Value: value,
		Id:    GenerateMessageID(),
	}

	mes := swarm.NewMessage(p, pmes.ToProtobuf())
	dht.network.Chan.Outgoing <- mes
	return nil
}

// handleGetValue looks the requested key up locally and replies with either
// the value or the closest peer we know to the key.
func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *DHTMessage) {
	dskey := ds.NewKey(pmes.GetKey())
	var resp *pDHTMessage
	iVal, err := dht.datastore.Get(dskey)
	if err == nil {
		resp = &pDHTMessage{
			Response: true,
			Id:       *pmes.Id,
			Key:      *pmes.Key,
			Value:    iVal.([]byte),
			Success:  true,
		}
	} else if err == ds.ErrNotFound {
		// Find closest peer(s) to desired key and reply with that info
		closer := dht.routes[0].NearestPeer(convertKey(u.Key(pmes.GetKey())))
		resp = &pDHTMessage{
			Response: true,
			Id:       *pmes.Id,
			Key:      *pmes.Key,
			Value:    closer.ID,
			Success:  false,
		}
	} else {
		// Any other datastore error would leave resp nil; fail loudly for now
		panic(err)
	}

	mes := swarm.NewMessage(p, resp.ToProtobuf())
	dht.network.Chan.Outgoing <- mes
}
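
// On the requesting side, Success distinguishes the two replies above: when
// it is set, Value holds the data itself; otherwise Value holds the ID of a
// closer peer to query next in an iterative lookup. (The requesting logic
// lives outside this file.)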

// handlePutValue stores the given value in this peer's local datastore.
func (dht *IpfsDHT) handlePutValue(p *peer.Peer, pmes *DHTMessage) {
	dskey := ds.NewKey(pmes.GetKey())
	err := dht.datastore.Put(dskey, pmes.GetValue())
	if err != nil {
		// For now, just panic; handle this better later maybe
		panic(err)
	}
}

// handlePing echoes a ping message back to its sender as a response.
func (dht *IpfsDHT) handlePing(p *peer.Peer, pmes *DHTMessage) {
	resp := pDHTMessage{
		Type:     pmes.GetType(),
		Response: true,
		Id:       pmes.GetId(),
	}

	dht.network.Chan.Outgoing <- swarm.NewMessage(p, resp.ToProtobuf())
}

// handleFindPeer replies with the address of the closest known peer to the
// requested key.
func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *DHTMessage) {
	u.POut("handleFindPeer: searching for '%s'", peer.ID(pmes.GetKey()).Pretty())
	closest := dht.routes[0].NearestPeer(convertKey(u.Key(pmes.GetKey())))
	if closest == nil {
		panic("could not find anything.")
	}

	if len(closest.Addresses) == 0 {
		panic("no addresses for connected peer...")
	}

	u.POut("handleFindPeer: sending back '%s'", closest.ID.Pretty())

	addr, err := closest.Addresses[0].String()
	if err != nil {
		panic(err)
	}

	resp := pDHTMessage{
		Type:     pmes.GetType(),
		Response: true,
		Id:       pmes.GetId(),
		Value:    []byte(addr),
	}

	mes := swarm.NewMessage(p, resp.ToProtobuf())
	dht.network.Chan.Outgoing <- mes
}

// handleGetProviders replies with a JSON map from provider keys to the
// addresses they can be reached at, for every known provider of the
// requested key.
func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *DHTMessage) {
	dht.providerLock.RLock()
	providers := dht.providers[u.Key(pmes.GetKey())]
	dht.providerLock.RUnlock()
	if len(providers) == 0 {
		// TODO: decide how an empty provider list should be signalled
		u.DOut("No known providers for requested key.")
	}

	// This is just a quick hack; formalize the method of sending addrs later
	addrs := make(map[u.Key]string)
	for _, prov := range providers {
		naddr := prov.Value.NetAddress("tcp")
		str, err := naddr.String()
		if err != nil {
			u.PErr("Error: %s", err)
			continue
		}

		addrs[prov.Value.Key()] = str
	}

	data, err := json.Marshal(addrs)
	if err != nil {
		panic(err)
	}

	resp := pDHTMessage{
		Type:     DHTMessage_GET_PROVIDERS,
		Key:      pmes.GetKey(),
		Value:    data,
		Id:       pmes.GetId(),
		Response: true,
	}

	mes := swarm.NewMessage(p, resp.ToProtobuf())
	dht.network.Chan.Outgoing <- mes
}
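
// A receiver can recover the provider map from the reply by decoding its
// Value field (a sketch; resp stands in for the received *DHTMessage):
//
//	provs := make(map[u.Key]string)
//	if err := json.Unmarshal(resp.GetValue(), &provs); err != nil {
//		// malformed reply
//	}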

// providerInfo records a peer that can provide a value, and when we learned
// of it (entries are expired after an hour by handleMessages).
type providerInfo struct {
	Creation time.Time
	Value    *peer.Peer
}

// handleAddProvider records the sender as a provider for the given key.
func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *DHTMessage) {
	//TODO: need to implement TTLs on providers
	key := u.Key(pmes.GetKey())
	dht.addProviderEntry(key, p)
}

// ListenFor registers a listener for a specific message ID, used for getting
// replies to messages we have sent (e.g. the response to a GET_VALUE
// message). count is the number of replies expected, and timeout is how long
// to wait before the listener expires.
func (dht *IpfsDHT) ListenFor(mesid uint64, count int, timeout time.Duration) <-chan *swarm.Message {
	lchan := make(chan *swarm.Message)
	dht.listenLock.Lock()
	dht.listeners[mesid] = &listenInfo{lchan, count, time.Now().Add(timeout)}
	dht.listenLock.Unlock()
	return lchan
}
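
// The request/reply pattern used throughout this file, as a sketch (the
// message mirrors handlePing; the peer p and the 10-second timeout are
// placeholders):
//
//	id := GenerateMessageID()
//	pmes := pDHTMessage{Type: DHTMessage_PING, Id: id}
//	listenChan := dht.ListenFor(id, 1, time.Second*10)
//	dht.network.Chan.Outgoing <- swarm.NewMessage(p, pmes.ToProtobuf())
//	select {
//	case <-listenChan:
//		// the peer answered; handleMessages has already unregistered the
//		// listener after delivering the final expected response
//	case <-time.After(time.Second * 10):
//		dht.Unlisten(id)
//	}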

// Unlisten removes the given message ID from the listener map and closes its
// response channel.
func (dht *IpfsDHT) Unlisten(mesid uint64) {
	dht.listenLock.Lock()
	list, ok := dht.listeners[mesid]
	if ok {
		delete(dht.listeners, mesid)
	}
	dht.listenLock.Unlock()
	if !ok {
		// Nothing was listening on this ID; closing a nil channel would panic
		return
	}
	close(list.resp)
}

// IsListening reports whether anything is still listening for the given
// message ID, expiring the listener if it has passed its end of life.
func (dht *IpfsDHT) IsListening(mesid uint64) bool {
	dht.listenLock.RLock()
	li, ok := dht.listeners[mesid]
	dht.listenLock.RUnlock()
	if !ok {
		// Guard against a nil dereference below
		return false
	}
	if time.Now().After(li.eol) {
		dht.listenLock.Lock()
		delete(dht.listeners, mesid)
		dht.listenLock.Unlock()
		return false
	}
	return true
}

// Halt stops all communications from this peer and shuts down the DHT.
func (dht *IpfsDHT) Halt() {
	dht.shutdown <- struct{}{}
	dht.network.Close()
}

// addProviderEntry records p as a provider for key, timestamped now.
func (dht *IpfsDHT) addProviderEntry(key u.Key, p *peer.Peer) {
	u.DOut("Adding %s as provider for '%s'", p.Key().Pretty(), key)
	dht.providerLock.Lock()
	provs := dht.providers[key]
	dht.providers[key] = append(provs, &providerInfo{time.Now(), p})
	dht.providerLock.Unlock()
}

// handleDiagnostic forwards a diagnostic request to our nearest peers,
// collects their replies (or times out), and sends the combined result back
// to the requester.
func (dht *IpfsDHT) handleDiagnostic(p *peer.Peer, pmes *DHTMessage) {
	dht.diaglock.Lock()
	if dht.IsListening(pmes.GetId()) {
		// A listener under this ID means we are already handling this request
		//TODO: ehhh..........
		dht.diaglock.Unlock()
		return
	}
	dht.diaglock.Unlock()

	seq := dht.routes[0].NearestPeers(convertPeerID(dht.self.ID), 10)
	listenChan := dht.ListenFor(pmes.GetId(), len(seq), time.Second*30)

	for _, ps := range seq {
		mes := swarm.NewMessage(ps, pmes)
		dht.network.Chan.Outgoing <- mes
	}

	buf := new(bytes.Buffer)
	di := dht.getDiagInfo()
	buf.Write(di.Marshal())

	// NOTE: this shouldnt be a hardcoded value
	after := time.After(time.Second * 20)
	count := len(seq)
	for count > 0 {
		select {
		case <-after:
			// Timeout; return what we have
			goto out
		case reqResp := <-listenChan:
			pmesOut := new(DHTMessage)
			err := proto.Unmarshal(reqResp.Data, pmesOut)
			if err != nil {
				// It broke? eh, whatever, keep going
				continue
			}
			buf.Write(reqResp.Data)
			count--
		}
	}

out:
	resp := pDHTMessage{
		Type:     DHTMessage_DIAGNOSTIC,
		Id:       pmes.GetId(),
		Value:    buf.Bytes(),
		Response: true,
	}

	mes := swarm.NewMessage(p, resp.ToProtobuf())
	dht.network.Chan.Outgoing <- mes
}