package dht

import (
	"errors"
	"fmt"
	"time"

	peer "github.com/jbenet/go-ipfs/peer"
	u "github.com/jbenet/go-ipfs/util"

	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
)
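
// CloserPeerCount is the number of closer peers to return in query responses.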
var CloserPeerCount = 4

// dhtHandler specifies the signature of functions that handle DHT messages.
type dhtHandler func(peer.Peer, *Message) (*Message, error)
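
// handlerForMsgType returns the dhtHandler for the given message type, or nil
// if the type is unknown; callers should check for nil before dispatching.
// For example (illustrative only):
//
//	handler := dht.handlerForMsgType(pmes.GetType())
//	if handler == nil {
//		// unknown message type; respond with an error
//	}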
func (dht *IpfsDHT) handlerForMsgType(t Message_MessageType) dhtHandler {
	switch t {
	case Message_GET_VALUE:
		return dht.handleGetValue
	case Message_PUT_VALUE:
		return dht.handlePutValue
	case Message_FIND_NODE:
		return dht.handleFindPeer
	case Message_ADD_PROVIDER:
		return dht.handleAddProvider
	case Message_GET_PROVIDERS:
		return dht.handleGetProviders
	case Message_PING:
		return dht.handlePing
	default:
		return nil
	}
}
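
// handleGetValue responds to a GET_VALUE request with as much information as
// we have: the value itself (if stored locally), any known providers, and
// closer peers to continue the search with.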
func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *Message) (*Message, error) {
	log.Debug("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey())

	// setup response
	resp := newMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// first, is the key even a key?
	key := pmes.GetKey()
	if key == "" {
		return nil, errors.New("handleGetValue but no key was provided")
	}

	// let's first check if we have the value locally.
	log.Debug("%s handleGetValue looking into ds\n", dht.self)
	dskey := u.Key(pmes.GetKey()).DsKey()
	iVal, err := dht.datastore.Get(dskey)
	log.Debug("%s handleGetValue looking into ds GOT %v\n", dht.self, iVal)

	// if we got an unexpected error, bail.
	if err != nil && err != ds.ErrNotFound {
		return nil, err
	}

	// Note: changed the behavior here to return _as much_ info as possible
	// (potentially all of {value, closer peers, providers})

	// if we have the value, send it back
	if err == nil {
		log.Debug("%s handleGetValue success!\n", dht.self)

		byts, ok := iVal.([]byte)
		if !ok {
			return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
		}

		resp.Value = byts
	}

	// if we know any providers for the requested value, return those.
	provs := dht.providers.GetProviders(u.Key(pmes.GetKey()))
	if len(provs) > 0 {
		log.Debug("handleGetValue returning %d provider[s]\n", len(provs))
		resp.ProviderPeers = peersToPBPeers(provs)
	}

	// Find the closest peers in the given cluster to the desired key and
	// reply with that info.
	closer := dht.betterPeersToQuery(pmes, CloserPeerCount)
	if closer != nil {
		for _, p := range closer {
			log.Debug("handleGetValue returning closer peer: '%s'", p)
		}
		resp.CloserPeers = peersToPBPeers(closer)
	}

	return resp, nil
}

// handlePutValue stores the value from the message in this peer's local
// datastore.
func (dht *IpfsDHT) handlePutValue(p peer.Peer, pmes *Message) (*Message, error) {
	dht.dslock.Lock()
	defer dht.dslock.Unlock()
	dskey := u.Key(pmes.GetKey()).DsKey()
	err := dht.datastore.Put(dskey, pmes.GetValue())
	log.Debug("%s handlePutValue %v %v\n", dht.self, dskey, pmes.GetValue())
	return pmes, err
}
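
// handlePing echoes the request message back to the sender as the response.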
func (dht *IpfsDHT) handlePing(p peer.Peer, pmes *Message) (*Message, error) {
	log.Debug("%s Responding to ping from %s!\n", dht.self, p)
	return pmes, nil
}
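
// handleFindPeer replies with the closest peers we know to the requested key,
// returning only peers for which we have addresses.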
func (dht *IpfsDHT) handleFindPeer(p peer.Peer, pmes *Message) (*Message, error) {
	resp := newMessage(pmes.GetType(), "", pmes.GetClusterLevel())
	var closest []peer.Peer

	// if looking for self... special case where we send it on CloserPeers.
	if peer.ID(pmes.GetKey()).Equal(dht.self.ID()) {
		closest = []peer.Peer{dht.self}
	} else {
		closest = dht.betterPeersToQuery(pmes, CloserPeerCount)
	}

	if closest == nil {
		log.Error("handleFindPeer: could not find anything.")
		return resp, nil
	}

	var withAddresses []peer.Peer
	for _, p := range closest {
		if len(p.Addresses()) > 0 {
			withAddresses = append(withAddresses, p)
		}
	}

	for _, p := range withAddresses {
		log.Debug("handleFindPeer: sending back '%s'", p)
	}
	resp.CloserPeers = peersToPBPeers(withAddresses)
	return resp, nil
}
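
// handleGetProviders replies with any providers we know for the requested key
// (including ourselves, if we hold the value locally) plus closer peers.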
func (dht *IpfsDHT) handleGetProviders(p peer.Peer, pmes *Message) (*Message, error) {
	resp := newMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// check if we have this value, to add ourselves as provider.
	log.Debug("handling GetProviders: '%s'", pmes.GetKey())
	dsk := u.Key(pmes.GetKey()).DsKey()
	has, err := dht.datastore.Has(dsk)
	if err != nil && err != ds.ErrNotFound {
		log.Error("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(u.Key(pmes.GetKey()))
	if has {
		providers = append(providers, dht.self)
	}

	// if we've got providers, send those.
	if len(providers) > 0 {
		resp.ProviderPeers = peersToPBPeers(providers)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, CloserPeerCount)
	if closer != nil {
		resp.CloserPeers = peersToPBPeers(closer)
	}

	return resp, nil
}
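
// providerInfo records a known provider and when we learned of it.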
type providerInfo struct {
	Creation time.Time
	Value    peer.Peer
}
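
// handleAddProvider records the sender as a provider for the given key, using
// the address supplied in the message. Provider entries for peers other than
// the sender are ignored and logged.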
func (dht *IpfsDHT) handleAddProvider(p peer.Peer, pmes *Message) (*Message, error) {
	key := u.Key(pmes.GetKey())

	log.Debug("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key))

	// add provider should use the address given in the message
	for _, pb := range pmes.GetProviderPeers() {
		pid := peer.ID(pb.GetId())
		if pid.Equal(p.ID()) {

			addr, err := pb.Address()
			if err != nil {
				log.Error("provider %s error with address %s", p, *pb.Addr)
				continue
			}

			log.Info("received provider %s %s for %s", p, addr, key)
			p.AddAddress(addr)
			dht.providers.AddProvider(key, p)

		} else {
			log.Error("handleAddProvider received provider %s from %s", pid, p)
		}
	}

	return pmes, nil // send back same msg as confirmation.
}

// Halt stops all communications from this peer and shuts it down.
// TODO -- remove this in favor of context
func (dht *IpfsDHT) Halt() {
	dht.providers.Halt()
}