package dht

import (
	"errors"
	"fmt"

	proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto"
	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"

	key "github.com/ipfs/go-ipfs/blocks/key"
	peer "github.com/ipfs/go-ipfs/p2p/peer"
	pb "github.com/ipfs/go-ipfs/routing/dht/pb"
	lgbl "github.com/ipfs/go-ipfs/util/eventlog/loggables"
)

// CloserPeerCount is the number of closer peers to send on requests.
var CloserPeerCount = KValue

// dhtHandler specifies the signature of functions that handle DHT messages.
type dhtHandler func(context.Context, peer.ID, *pb.Message) (*pb.Message, error)
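
// handlerForMsgType returns the handler for the given message type, or nil if
// the type is not recognized.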
func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler {
	switch t {
	case pb.Message_GET_VALUE:
		return dht.handleGetValue
	case pb.Message_PUT_VALUE:
		return dht.handlePutValue
	case pb.Message_FIND_NODE:
		return dht.handleFindPeer
	case pb.Message_ADD_PROVIDER:
		return dht.handleAddProvider
	case pb.Message_GET_PROVIDERS:
		return dht.handleGetProviders
	case pb.Message_PING:
		return dht.handlePing
	default:
		return nil
	}
}
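
// handleGetValue looks up the requested key in the local datastore and, if a
// record is found, returns it to the requester along with closer peers from
// the routing table.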
func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleGetValue", p).Done()
	log.Debugf("%s handleGetValue for key: %s", dht.self, pmes.GetKey())

	// setup response
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// first, is there even a key?
	k := pmes.GetKey()
	if k == "" {
		return nil, errors.New("handleGetValue but no key was provided")
		// TODO: send back an error response? could be bad, but the other node's hanging.
	}

	// let's first check if we have the value locally.
	log.Debugf("%s handleGetValue looking into ds", dht.self)
	dskey := key.Key(k).DsKey()
	iVal, err := dht.datastore.Get(dskey)
	log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)

	// if we got an unexpected error, bail.
	if err != nil && err != ds.ErrNotFound {
		return nil, err
	}

	// if we have the value, send it back
	if err == nil {
		log.Debugf("%s handleGetValue success!", dht.self)

		byts, ok := iVal.([]byte)
		if !ok {
			return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
		}

		rec := new(pb.Record)
		err := proto.Unmarshal(byts, rec)
		if err != nil {
			log.Debug("Failed to unmarshal dht record from datastore")
			return nil, err
		}

		resp.Record = rec
	}

	// Find closest peer on given cluster to desired key and reply with that info
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if len(closer) > 0 {
		closerinfos := peer.PeerInfos(dht.peerstore, closer)
		for _, pi := range closerinfos {
			log.Debugf("handleGetValue returning closer peer: '%s'", pi.ID)
			if len(pi.Addrs) < 1 {
				log.Errorf(`no addresses on peer being sent!
					[local:%s]
					[sending:%s]
					[remote:%s]`, dht.self, pi.ID, p)
			}
		}

		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
	}

	return resp, nil
}

// handlePutValue stores a value in this peer's local storage.
func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handlePutValue", p).Done()
	dskey := key.Key(pmes.GetKey()).DsKey()

	if err := dht.verifyRecordLocally(pmes.GetRecord()); err != nil {
		log.Warningf("Bad dht record in PUT from: %s. %s", key.Key(pmes.GetRecord().GetAuthor()), err)
		return nil, err
	}

	data, err := proto.Marshal(pmes.GetRecord())
	if err != nil {
		return nil, err
	}

	err = dht.datastore.Put(dskey, data)
	log.Debugf("%s handlePutValue %v", dht.self, dskey)
	return pmes, err
}
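
// handlePing responds to a ping by echoing the message back to the sender.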
func (dht *IpfsDHT) handlePing(_ context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("%s Responding to ping from %s!\n", dht.self, p)
	return pmes, nil
}
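
// handleFindPeer returns the closest peers this node knows to the requested
// key (or this node itself, when the request is for our own ID), filtered to
// peers we have addresses for.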
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleFindPeer", p).Done()
	resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
	var closest []peer.ID

	// if looking for self... special case where we send it on CloserPeers.
	if peer.ID(pmes.GetKey()) == dht.self {
		closest = []peer.ID{dht.self}
	} else {
		closest = dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	}

	if closest == nil {
		log.Infof("%s handleFindPeer %s: could not find anything.", dht.self, p)
		return resp, nil
	}

	var withAddresses []peer.PeerInfo
	closestinfos := peer.PeerInfos(dht.peerstore, closest)
	for _, pi := range closestinfos {
		if len(pi.Addrs) > 0 {
			withAddresses = append(withAddresses, pi)
			log.Debugf("handleFindPeer: sending back '%s'", pi.ID)
		}
	}

	resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
	return resp, nil
}
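
// handleGetProviders returns the known providers for the requested key, adding
// this node if it holds the value locally, along with closer peers from the
// routing table.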
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleGetProviders", lm).Done()

	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	// debug logging niceness.
	reqDesc := fmt.Sprintf("%s handleGetProviders(%s, %s): ", dht.self, p, key)
	log.Debugf("%s begin", reqDesc)
	defer log.Debugf("%s end", reqDesc)

	// check if we have this value, to add ourselves as provider.
	has, err := dht.datastore.Has(key.DsKey())
	if err != nil && err != ds.ErrNotFound {
		log.Debugf("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(ctx, key)
	if has {
		providers = append(providers, dht.self)
		log.Debugf("%s have the value. added self as provider", reqDesc)
	}

	if len(providers) > 0 {
		infos := peer.PeerInfos(dht.peerstore, providers)
		resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d providers: %s", reqDesc, len(providers), infos)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if closer != nil {
		infos := peer.PeerInfos(dht.peerstore, closer)
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d closer peers: %s", reqDesc, len(closer), infos)
	}

	return resp, nil
}
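
// handleAddProvider records the sender as a provider for the given key,
// ignoring provider records that did not originate from the sender or that
// carry no addresses.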
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }

	defer log.EventBegin(ctx, "handleAddProvider", lm).Done()
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, key)

	// add provider should use the address given in the message
	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
	for _, pi := range pinfos {
		if pi.ID != p {
			// we should ignore this provider record! not from originator.
			// (we could sign them and check the signature later...)
			log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
			continue
		}

		if len(pi.Addrs) < 1 {
			log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
			continue
		}

		log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
		if pi.ID != dht.self { // don't add own addrs.
			// add the received addresses to our peerstore.
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peer.ProviderAddrTTL)
		}
		dht.providers.AddProvider(ctx, key, p)
	}

	return nil, nil
}