go-libp2p-kad-dht/handlers.go

232 lines
6.6 KiB
Go
Raw Normal View History

2014-09-16 02:07:59 -07:00
package dht
import (
"errors"
"fmt"
"time"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
2014-09-16 02:07:59 -07:00
peer "github.com/jbenet/go-ipfs/peer"
pb "github.com/jbenet/go-ipfs/routing/dht/pb"
2014-09-16 02:07:59 -07:00
u "github.com/jbenet/go-ipfs/util"
)
2014-11-08 22:44:37 -08:00
// CloserPeerCount is the number of closer peers to send back on requests
// (FIND_NODE, GET_VALUE, GET_PROVIDERS). Exported as a var so it can be
// tuned by callers.
var CloserPeerCount = 4
2014-09-16 02:07:59 -07:00
// dhtHandler specifies the signature of functions that handle DHT messages:
// given a context, the remote peer, and the incoming message, they produce
// a response message (or an error).
type dhtHandler func(context.Context, peer.Peer, *pb.Message) (*pb.Message, error)
2014-09-16 02:07:59 -07:00
func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler {
2014-09-16 02:07:59 -07:00
switch t {
case pb.Message_GET_VALUE:
2014-09-16 02:07:59 -07:00
return dht.handleGetValue
case pb.Message_PUT_VALUE:
2014-09-16 02:16:57 -07:00
return dht.handlePutValue
case pb.Message_FIND_NODE:
2014-09-16 02:07:59 -07:00
return dht.handleFindPeer
case pb.Message_ADD_PROVIDER:
2014-09-16 02:16:57 -07:00
return dht.handleAddProvider
case pb.Message_GET_PROVIDERS:
2014-09-16 02:16:57 -07:00
return dht.handleGetProviders
case pb.Message_PING:
2014-09-16 02:07:59 -07:00
return dht.handlePing
default:
return nil
}
}
func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
log.Debugf("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey())
2014-09-16 02:07:59 -07:00
// setup response
resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
2014-09-16 02:07:59 -07:00
// first, is the key even a key?
key := pmes.GetKey()
if key == "" {
return nil, errors.New("handleGetValue but no key was provided")
}
// let's first check if we have the value locally.
log.Debugf("%s handleGetValue looking into ds", dht.self)
2014-10-03 15:34:08 -07:00
dskey := u.Key(pmes.GetKey()).DsKey()
2014-09-16 02:07:59 -07:00
iVal, err := dht.datastore.Get(dskey)
log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)
2014-09-16 02:07:59 -07:00
// if we got an unexpected error, bail.
2014-09-19 08:07:56 -07:00
if err != nil && err != ds.ErrNotFound {
2014-09-16 02:07:59 -07:00
return nil, err
}
2014-09-18 19:30:04 -07:00
// Note: changed the behavior here to return _as much_ info as possible
// (potentially all of {value, closer peers, provider})
// if we have the value, send it back
2014-09-16 02:07:59 -07:00
if err == nil {
log.Debugf("%s handleGetValue success!", dht.self)
2014-09-16 02:07:59 -07:00
byts, ok := iVal.([]byte)
if !ok {
return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
}
rec := new(pb.Record)
err := proto.Unmarshal(byts, rec)
if err != nil {
log.Error("Failed to unmarshal dht record from datastore")
return nil, err
}
resp.Record = rec
2014-09-16 02:07:59 -07:00
}
// if we know any providers for the requested value, return those.
provs := dht.providers.GetProviders(ctx, u.Key(pmes.GetKey()))
2014-09-16 02:07:59 -07:00
if len(provs) > 0 {
log.Debugf("handleGetValue returning %d provider[s]", len(provs))
2014-12-16 08:55:46 -08:00
resp.ProviderPeers = pb.PeersToPBPeers(dht.network, provs)
2014-09-16 02:07:59 -07:00
}
// Find closest peer on given cluster to desired key and reply with that info
closer := dht.betterPeersToQuery(pmes, CloserPeerCount)
2014-09-18 19:30:04 -07:00
if closer != nil {
for _, p := range closer {
log.Debugf("handleGetValue returning closer peer: '%s'", p)
2014-10-20 22:49:13 -07:00
if len(p.Addresses()) < 1 {
2014-10-21 03:02:31 -07:00
log.Critical("no addresses on peer being sent!")
2014-10-20 22:49:13 -07:00
}
}
2014-12-16 08:55:46 -08:00
resp.CloserPeers = pb.PeersToPBPeers(dht.network, closer)
2014-09-16 02:07:59 -07:00
}
return resp, nil
}
// Store a value in this peer local storage
func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
2014-09-16 02:07:59 -07:00
dht.dslock.Lock()
defer dht.dslock.Unlock()
2014-10-03 15:34:08 -07:00
dskey := u.Key(pmes.GetKey()).DsKey()
err := dht.verifyRecord(pmes.GetRecord())
if err != nil {
2014-11-10 14:22:56 -08:00
fmt.Println(u.Key(pmes.GetRecord().GetAuthor()))
log.Error("Bad dht record in put request")
return nil, err
}
data, err := proto.Marshal(pmes.GetRecord())
if err != nil {
return nil, err
}
err = dht.datastore.Put(dskey, data)
log.Debugf("%s handlePutValue %v\n", dht.self, dskey)
2014-09-19 08:07:56 -07:00
return pmes, err
2014-09-16 02:07:59 -07:00
}
// handlePing responds to a PING request by echoing the same message back,
// demonstrating liveness to the requester. The context is unused.
func (dht *IpfsDHT) handlePing(_ context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("%s Responding to ping from %s!\n", dht.self, p)
	return pmes, nil
}
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
var closest []peer.Peer
2014-09-16 02:07:59 -07:00
// if looking for self... special case where we send it on CloserPeers.
if peer.ID(pmes.GetKey()).Equal(dht.self.ID()) {
closest = []peer.Peer{dht.self}
2014-09-16 02:07:59 -07:00
} else {
closest = dht.betterPeersToQuery(pmes, CloserPeerCount)
2014-09-16 02:07:59 -07:00
}
if closest == nil {
log.Errorf("handleFindPeer: could not find anything.")
2014-09-16 02:07:59 -07:00
return resp, nil
}
var withAddresses []peer.Peer
for _, p := range closest {
if len(p.Addresses()) > 0 {
withAddresses = append(withAddresses, p)
}
2014-09-16 02:07:59 -07:00
}
for _, p := range withAddresses {
log.Debugf("handleFindPeer: sending back '%s'", p)
}
2014-11-24 14:58:51 -05:00
2014-12-16 08:55:46 -08:00
resp.CloserPeers = pb.PeersToPBPeers(dht.network, withAddresses)
2014-09-16 02:07:59 -07:00
return resp, nil
}
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
2014-09-16 02:07:59 -07:00
// check if we have this value, to add ourselves as provider.
2014-10-26 00:45:40 +00:00
log.Debugf("handling GetProviders: '%s'", u.Key(pmes.GetKey()))
2014-10-03 15:34:08 -07:00
dsk := u.Key(pmes.GetKey()).DsKey()
has, err := dht.datastore.Has(dsk)
2014-09-16 02:07:59 -07:00
if err != nil && err != ds.ErrNotFound {
log.Errorf("unexpected datastore error: %v\n", err)
2014-09-16 02:07:59 -07:00
has = false
}
// setup providers
providers := dht.providers.GetProviders(ctx, u.Key(pmes.GetKey()))
2014-09-16 02:07:59 -07:00
if has {
providers = append(providers, dht.self)
}
if providers != nil && len(providers) > 0 {
2014-12-16 08:55:46 -08:00
resp.ProviderPeers = pb.PeersToPBPeers(dht.network, providers)
2014-09-16 02:07:59 -07:00
}
// Also send closer peers.
closer := dht.betterPeersToQuery(pmes, CloserPeerCount)
2014-09-16 02:07:59 -07:00
if closer != nil {
2014-12-16 08:55:46 -08:00
resp.CloserPeers = pb.PeersToPBPeers(dht.network, closer)
2014-09-16 02:07:59 -07:00
}
return resp, nil
}
// providerInfo pairs a providing peer with the time the entry was created.
// NOTE(review): not referenced anywhere in this file — confirm it is still
// used elsewhere before removing.
type providerInfo struct {
	Creation time.Time // when this provider entry was recorded
	Value    peer.Peer // the peer providing the value
}
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
2014-09-16 02:07:59 -07:00
key := u.Key(pmes.GetKey())
2014-09-16 07:17:55 -07:00
log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key))
2014-09-16 07:17:55 -07:00
// add provider should use the address given in the message
2014-10-13 01:31:51 -07:00
for _, pb := range pmes.GetProviderPeers() {
pid := peer.ID(pb.GetId())
if pid.Equal(p.ID()) {
maddrs, err := pb.Addresses()
if err != nil {
log.Errorf("provider %s error with addresses %s", p, pb.Addrs)
continue
}
2014-10-13 01:31:51 -07:00
log.Infof("received provider %s %s for %s", p, maddrs, key)
for _, maddr := range maddrs {
p.AddAddress(maddr)
}
2014-10-13 01:31:51 -07:00
dht.providers.AddProvider(key, p)
} else {
log.Errorf("handleAddProvider received provider %s from %s", pid, p)
}
}
2014-09-19 18:11:05 -07:00
return pmes, nil // send back same msg as confirmation.
2014-09-16 02:07:59 -07:00
}