go-libp2p-kad-dht/handlers.go

388 lines
11 KiB
Go
Raw Normal View History

2014-09-16 02:07:59 -07:00
package dht
import (
2016-09-30 10:24:03 -07:00
"context"
2014-09-16 02:07:59 -07:00
"errors"
"fmt"
"time"
2014-09-16 02:07:59 -07:00
proto "github.com/gogo/protobuf/proto"
cid "github.com/ipfs/go-cid"
2016-09-02 20:21:23 +01:00
ds "github.com/ipfs/go-datastore"
u "github.com/ipfs/go-ipfs-util"
2016-09-02 20:21:23 +01:00
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
lgbl "github.com/libp2p/go-libp2p-loggables"
peer "github.com/libp2p/go-libp2p-peer"
pstore "github.com/libp2p/go-libp2p-peerstore"
2016-09-03 20:35:59 +01:00
recpb "github.com/libp2p/go-libp2p-record/pb"
base32 "github.com/whyrusleeping/base32"
2014-09-16 02:07:59 -07:00
)
2014-11-08 22:44:37 -08:00
// CloserPeerCount is the number of closer peers to send on requests.
var CloserPeerCount = KValue
2014-09-16 02:07:59 -07:00
// dhtHandler specifies the signature of functions that handle DHT messages.
type dhtHandler func(context.Context, peer.ID, *pb.Message) (*pb.Message, error)
2014-09-16 02:07:59 -07:00
func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler {
2014-09-16 02:07:59 -07:00
switch t {
case pb.Message_GET_VALUE:
2014-09-16 02:07:59 -07:00
return dht.handleGetValue
case pb.Message_PUT_VALUE:
2014-09-16 02:16:57 -07:00
return dht.handlePutValue
case pb.Message_FIND_NODE:
2014-09-16 02:07:59 -07:00
return dht.handleFindPeer
case pb.Message_ADD_PROVIDER:
2014-09-16 02:16:57 -07:00
return dht.handleAddProvider
case pb.Message_GET_PROVIDERS:
2014-09-16 02:16:57 -07:00
return dht.handleGetProviders
case pb.Message_PING:
2014-09-16 02:07:59 -07:00
return dht.handlePing
default:
return nil
}
}
func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, err error) {
eip := log.EventBegin(ctx, "handleGetValue", p)
defer func() {
if err != nil {
eip.SetError(err)
}
eip.Done()
}()
2014-12-28 23:46:25 +00:00
log.Debugf("%s handleGetValue for key: %s", dht.self, pmes.GetKey())
2014-09-16 02:07:59 -07:00
// setup response
resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
2014-09-16 02:07:59 -07:00
// first, is there even a key?
k := pmes.GetKey()
if k == "" {
2014-09-16 02:07:59 -07:00
return nil, errors.New("handleGetValue but no key was provided")
// TODO: send back an error response? could be bad, but the other node's hanging.
2014-09-16 02:07:59 -07:00
}
rec, err := dht.checkLocalDatastore(k)
if err != nil {
2014-09-16 02:07:59 -07:00
return nil, err
}
resp.Record = rec
2014-09-16 02:07:59 -07:00
// Find closest peer on given cluster to desired key and reply with that info
closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
2015-02-21 16:20:28 -08:00
if len(closer) > 0 {
closerinfos := pstore.PeerInfos(dht.peerstore, closer)
for _, pi := range closerinfos {
log.Debugf("handleGetValue returning closer peer: '%s'", pi.ID)
if len(pi.Addrs) < 1 {
log.Warningf(`no addresses on peer being sent!
[local:%s]
[sending:%s]
[remote:%s]`, dht.self, pi.ID, p)
2014-10-20 22:49:13 -07:00
}
}
2015-01-01 12:45:39 -08:00
resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
2014-09-16 02:07:59 -07:00
}
return resp, nil
}
func (dht *IpfsDHT) checkLocalDatastore(k string) (*recpb.Record, error) {
log.Debugf("%s handleGetValue looking into ds", dht.self)
dskey := convertToDsKey(k)
iVal, err := dht.datastore.Get(dskey)
log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)
if err == ds.ErrNotFound {
return nil, nil
}
// if we got an unexpected error, bail.
if err != nil {
return nil, err
}
// if we have the value, send it back
log.Debugf("%s handleGetValue success!", dht.self)
byts, ok := iVal.([]byte)
if !ok {
return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
}
2016-09-03 20:35:59 +01:00
rec := new(recpb.Record)
err = proto.Unmarshal(byts, rec)
if err != nil {
log.Debug("failed to unmarshal DHT record from datastore")
return nil, err
}
var recordIsBad bool
recvtime, err := u.ParseRFC3339(rec.GetTimeReceived())
if err != nil {
log.Info("either no receive time set on record, or it was invalid: ", err)
recordIsBad = true
}
if time.Now().Sub(recvtime) > MaxRecordAge {
log.Debug("old record found, tossing.")
recordIsBad = true
}
// NOTE: We do not verify the record here beyond checking these timestamps.
// we put the burden of checking the records on the requester as checking a record
// may be computationally expensive
if recordIsBad {
err := dht.datastore.Delete(dskey)
if err != nil {
log.Error("Failed to delete bad record from datastore: ", err)
}
return nil, nil // can treat this as not having the record at all
}
return rec, nil
}
// cleanRecord strips fields we never want to persist from a remote record:
// unrecognized protobuf bytes (arbitrary attacker-controlled data) and the
// sender-supplied receive timestamp (we stamp our own on store).
func cleanRecord(rec *recpb.Record) {
	rec.TimeReceived = nil
	rec.XXX_unrecognized = nil
}
2014-09-16 02:07:59 -07:00
// Store a value in this peer local storage
func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, err error) {
eip := log.EventBegin(ctx, "handlePutValue", p)
defer func() {
if err != nil {
eip.SetError(err)
}
eip.Done()
}()
rec := pmes.GetRecord()
if rec == nil {
log.Infof("Got nil record from: %s", p.Pretty())
return nil, errors.New("nil record")
}
if pmes.GetKey() != rec.GetKey() {
return nil, errors.New("put key doesn't match record key")
}
cleanRecord(rec)
// Make sure the record is valid (not expired, valid signature etc)
if err = dht.Validator.Validate(rec.GetKey(), rec.GetValue()); err != nil {
2018-02-01 15:09:57 -05:00
log.Warningf("Bad dht record in PUT from: %s. %s", p.Pretty(), err)
return nil, err
}
dskey := convertToDsKey(rec.GetKey())
// Make sure the new record is "better" than the record we have locally.
// This prevents a record with for example a lower sequence number from
// overwriting a record with a higher sequence number.
existing, err := dht.getRecordFromDatastore(dskey)
if err != nil {
return nil, err
}
if existing != nil {
recs := [][]byte{rec.GetValue(), existing.GetValue()}
i, err := dht.Validator.Select(rec.GetKey(), recs)
if err != nil {
log.Warningf("Bad dht record in PUT from %s: %s", p.Pretty(), err)
return nil, err
}
if i != 0 {
log.Infof("DHT record in PUT from %s is older than existing record. Ignoring", p.Pretty())
return nil, errors.New("old record")
}
}
// record the time we receive every record
rec.TimeReceived = proto.String(u.FormatRFC3339(time.Now()))
data, err := proto.Marshal(rec)
if err != nil {
return nil, err
}
err = dht.datastore.Put(dskey, data)
2014-12-28 23:46:25 +00:00
log.Debugf("%s handlePutValue %v", dht.self, dskey)
2014-09-19 08:07:56 -07:00
return pmes, err
2014-09-16 02:07:59 -07:00
}
// getRecordFromDatastore loads and validates the record stored under dskey.
// Corrupt, unparsable, or invalid entries are reported as absent (nil, nil)
// rather than as errors, so callers will simply overwrite them; only a real
// datastore failure yields a non-nil error.
func (dht *IpfsDHT) getRecordFromDatastore(dskey ds.Key) (*recpb.Record, error) {
	stored, err := dht.datastore.Get(dskey)
	if err == ds.ErrNotFound {
		return nil, nil
	}
	if err != nil {
		log.Errorf("Got error retrieving record with key %s from datastore: %s", dskey, err)
		return nil, err
	}

	raw, ok := stored.([]byte)
	if !ok {
		// Bad data in datastore, log it but don't return an error, we'll just overwrite it
		log.Errorf("Value stored in datastore with key %s is not []byte", dskey)
		return nil, nil
	}

	rec := new(recpb.Record)
	if err = proto.Unmarshal(raw, rec); err != nil {
		// Bad data in datastore, log it but don't return an error, we'll just overwrite it
		log.Errorf("Bad record data stored in datastore with key %s: could not unmarshal record", dskey)
		return nil, nil
	}

	if err = dht.Validator.Validate(rec.GetKey(), rec.GetValue()); err != nil {
		// Invalid record in datastore, probably expired but don't return an error,
		// we'll just overwrite it
		log.Debugf("Local record verify failed: %s (discarded)", err)
		return nil, nil
	}

	return rec, nil
}
func (dht *IpfsDHT) handlePing(_ context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
log.Debugf("%s Responding to ping from %s!\n", dht.self, p)
return pmes, nil
2014-09-16 02:07:59 -07:00
}
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
2015-01-15 17:47:36 +00:00
defer log.EventBegin(ctx, "handleFindPeer", p).Done()
resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
var closest []peer.ID
2014-09-16 02:07:59 -07:00
// if looking for self... special case where we send it on CloserPeers.
if peer.ID(pmes.GetKey()) == dht.self {
closest = []peer.ID{dht.self}
2014-09-16 02:07:59 -07:00
} else {
closest = dht.betterPeersToQuery(pmes, p, CloserPeerCount)
2014-09-16 02:07:59 -07:00
}
if closest == nil {
2015-04-13 19:48:55 -07:00
log.Infof("%s handleFindPeer %s: could not find anything.", dht.self, p)
2014-09-16 02:07:59 -07:00
return resp, nil
}
closestinfos := pstore.PeerInfos(dht.peerstore, closest)
// possibly an over-allocation but this array is temporary anyways.
withAddresses := make([]pstore.PeerInfo, 0, len(closestinfos))
for _, pi := range closestinfos {
if len(pi.Addrs) > 0 {
withAddresses = append(withAddresses, pi)
log.Debugf("handleFindPeer: sending back '%s'", pi.ID)
}
2014-09-16 02:07:59 -07:00
}
2015-01-01 12:45:39 -08:00
resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
2014-09-16 02:07:59 -07:00
return resp, nil
}
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
2015-04-02 03:02:12 -07:00
lm := make(lgbl.DeferredMap)
lm["peer"] = func() interface{} { return p.Pretty() }
eip := log.EventBegin(ctx, "handleGetProviders", lm)
defer eip.Done()
2015-04-02 03:02:12 -07:00
resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
c, err := cid.Cast([]byte(pmes.GetKey()))
if err != nil {
eip.SetError(err)
return nil, err
}
lm["key"] = func() interface{} { return c.String() }
2015-01-03 00:56:27 -08:00
// debug logging niceness.
reqDesc := fmt.Sprintf("%s handleGetProviders(%s, %s): ", dht.self, p, c)
2015-01-03 00:56:27 -08:00
log.Debugf("%s begin", reqDesc)
defer log.Debugf("%s end", reqDesc)
2014-09-16 02:07:59 -07:00
// check if we have this value, to add ourselves as provider.
has, err := dht.datastore.Has(convertToDsKey(c.KeyString()))
2014-09-16 02:07:59 -07:00
if err != nil && err != ds.ErrNotFound {
2015-01-26 19:12:12 -08:00
log.Debugf("unexpected datastore error: %v\n", err)
2014-09-16 02:07:59 -07:00
has = false
}
// setup providers
providers := dht.providers.GetProviders(ctx, c)
2014-09-16 02:07:59 -07:00
if has {
providers = append(providers, dht.self)
2015-01-03 00:56:27 -08:00
log.Debugf("%s have the value. added self as provider", reqDesc)
2014-09-16 02:07:59 -07:00
}
if providers != nil && len(providers) > 0 {
infos := pstore.PeerInfos(dht.peerstore, providers)
2015-01-01 12:45:39 -08:00
resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
2015-01-03 00:56:27 -08:00
log.Debugf("%s have %d providers: %s", reqDesc, len(providers), infos)
2014-09-16 02:07:59 -07:00
}
// Also send closer peers.
closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
2014-09-16 02:07:59 -07:00
if closer != nil {
infos := pstore.PeerInfos(dht.peerstore, closer)
2015-01-01 12:45:39 -08:00
resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
2015-01-03 00:56:27 -08:00
log.Debugf("%s have %d closer peers: %s", reqDesc, len(closer), infos)
2014-09-16 02:07:59 -07:00
}
return resp, nil
}
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
2015-04-02 03:02:12 -07:00
lm := make(lgbl.DeferredMap)
lm["peer"] = func() interface{} { return p.Pretty() }
eip := log.EventBegin(ctx, "handleAddProvider", lm)
defer eip.Done()
2015-04-02 03:02:12 -07:00
c, err := cid.Cast([]byte(pmes.GetKey()))
if err != nil {
eip.SetError(err)
return nil, err
}
lm["key"] = func() interface{} { return c.String() }
2014-09-16 07:17:55 -07:00
log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, c)
2014-09-16 07:17:55 -07:00
// add provider should use the address given in the message
pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
for _, pi := range pinfos {
if pi.ID != p {
2018-03-25 11:38:08 +02:00
// we should ignore this provider record! not from originator.
// (we should sign them and check signature later...)
2015-01-26 19:12:12 -08:00
log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
continue
}
2014-10-13 01:31:51 -07:00
if len(pi.Addrs) < 1 {
2015-01-26 19:12:12 -08:00
log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
continue
}
2014-10-13 01:31:51 -07:00
log.Infof("received provider %s for %s (addrs: %s)", p, c, pi.Addrs)
2018-03-25 11:38:08 +02:00
if pi.ID != dht.self { // don't add own addrs.
// add the received addresses to our peerstore.
dht.peerstore.AddAddrs(pi.ID, pi.Addrs, pstore.ProviderAddrTTL)
}
dht.providers.AddProvider(ctx, c, p)
}
return nil, nil
2014-09-16 02:07:59 -07:00
}
// convertToDsKey maps an arbitrary binary key onto a datastore key by
// base32-encoding it, since datastore keys must be path-safe strings.
func convertToDsKey(s string) ds.Key {
	encoded := base32.RawStdEncoding.EncodeToString([]byte(s))
	return ds.NewKey(encoded)
}