// Package dht implements a distributed hash table that satisfies the ipfs
// routing interface. This DHT is modeled after Kademlia with S/Kademlia
// modifications.
package dht

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	proto "github.com/gogo/protobuf/proto"
	cid "github.com/ipfs/go-cid"
	ds "github.com/ipfs/go-datastore"
	logging "github.com/ipfs/go-log"
	goprocess "github.com/jbenet/goprocess"
	goprocessctx "github.com/jbenet/goprocess/context"
	ci "github.com/libp2p/go-libp2p-crypto"
	host "github.com/libp2p/go-libp2p-host"
	pb "github.com/libp2p/go-libp2p-kad-dht/pb"
	providers "github.com/libp2p/go-libp2p-kad-dht/providers"
	kb "github.com/libp2p/go-libp2p-kbucket"
	peer "github.com/libp2p/go-libp2p-peer"
	pstore "github.com/libp2p/go-libp2p-peerstore"
	protocol "github.com/libp2p/go-libp2p-protocol"
	record "github.com/libp2p/go-libp2p-record"
	recpb "github.com/libp2p/go-libp2p-record/pb"
	routing "github.com/libp2p/go-libp2p-routing"
	base32 "github.com/whyrusleeping/base32"
)
var log = logging.Logger("dht")

// ProtocolDHT is the protocol ID for the current version of the DHT protocol.
var ProtocolDHT protocol.ID = "/ipfs/kad/1.0.0"

// ProtocolDHTOld is the protocol ID for the old DHT protocol, kept for
// backwards compatibility.
var ProtocolDHTOld protocol.ID = "/ipfs/dht"
// NumBootstrapQueries defines the number of random DHT queries to run in
// order to collect members of the routing table.
const NumBootstrapQueries = 5
// IpfsDHT is an implementation of Kademlia with S/Kademlia modifications.
// It is used to implement the base IpfsRouting module.
type IpfsDHT struct {
	host      host.Host        // the network services we need
	self      peer.ID          // Local peer (yourself)
	peerstore pstore.Peerstore // Peer Registry

	datastore ds.Datastore // Local data

	routingTable *kb.RoutingTable // Routing table of peers, bucketed by XOR distance from us
	providers    *providers.ProviderManager

	birth time.Time // When this peer started up

	Validator record.Validator // record validator funcs
	Selector  record.Selector  // record selection funcs

	ctx  context.Context
	proc goprocess.Process

	strmap map[peer.ID]*messageSender
	smlk   sync.Mutex

	plk sync.Mutex
}
// NewDHT creates a new DHT object with the given peer as the 'local' host.
// IpfsDHTs initialized with this function will respond to DHT requests,
// whereas IpfsDHTs initialized with NewDHTClient will not.
func NewDHT(ctx context.Context, h host.Host, dstore ds.Batching) *IpfsDHT {
	dht := NewDHTClient(ctx, h, dstore)

	h.SetStreamHandler(ProtocolDHT, dht.handleNewStream)
	h.SetStreamHandler(ProtocolDHTOld, dht.handleNewStream)

	return dht
}
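
// A minimal usage sketch (illustrative only, not part of this package's API):
// it assumes a pre-built libp2p host.Host `h` and some ds.Batching datastore
// `dstore` constructed elsewhere.
//
//	dht := NewDHT(ctx, h, dstore)
//	defer dht.Close()
//	dht.Update(ctx, connectedPeer) // seed the routing table with a known peer
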
// NewDHTClient creates a new DHT object with the given peer as the 'local'
// host. IpfsDHT clients initialized with this function will not respond to DHT
// requests. If you need a peer to respond to DHT requests, use NewDHT instead.
func NewDHTClient(ctx context.Context, h host.Host, dstore ds.Batching) *IpfsDHT {
dht := makeDHT(ctx, h, dstore)
// register for network notifs.
dht.host.Network().Notify((*netNotifiee)(dht))
dht.proc = goprocessctx.WithContextAndTeardown(ctx, func() error {
// remove ourselves from network notifs.
dht.host.Network().StopNotify((*netNotifiee)(dht))
return nil
})
dht.proc.AddChild(dht.providers.Process())
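
	// Register the default validator and selector for public key ("pk") records.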
dht.Validator["pk"] = record.PublicKeyValidator
dht.Selector["pk"] = record.PublicKeySelector
return dht
}
func makeDHT(ctx context.Context, h host.Host, dstore ds.Batching) *IpfsDHT {
rt := kb.NewRoutingTable(KValue, kb.ConvertPeerID(h.ID()), time.Minute, h.Peerstore())
cmgr := h.ConnManager()
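
	// Tag routing table peers in the connection manager so their connections
	// are less likely to be pruned.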
rt.PeerAdded = func(p peer.ID) {
cmgr.TagPeer(p, "kbucket", 5)
}
rt.PeerRemoved = func(p peer.ID) {
cmgr.UntagPeer(p, "kbucket")
}
return &IpfsDHT{
datastore: dstore,
self: h.ID(),
peerstore: h.Peerstore(),
host: h,
strmap: make(map[peer.ID]*messageSender),
ctx: ctx,
providers: providers.NewProviderManager(ctx, h.ID(), dstore),
birth: time.Now(),
routingTable: rt,
Validator: make(record.Validator),
Selector: make(record.Selector),
}
}
// putValueToPeer stores the given key/value pair at the peer 'p'
func (dht *IpfsDHT) putValueToPeer(ctx context.Context, p peer.ID,
key string, rec *recpb.Record) error {
	pmes := pb.NewMessage(pb.Message_PUT_VALUE, key, 0)
	pmes.Record = rec

	rpmes, err := dht.sendRequest(ctx, p, pmes)
	switch err {
	case ErrReadTimeout:
		log.Warningf("read timeout: %s %s", p.Pretty(), key)
		fallthrough
	default:
		return err
	case nil:
		break
	}

	if !bytes.Equal(rpmes.GetRecord().Value, pmes.GetRecord().Value) {
		return errors.New("value not put correctly")
	}

	return nil
}
var errInvalidRecord = errors.New("received invalid record")
// getValueOrPeers queries a particular peer p for the value for
// key. It returns either the value or a list of closer peers.
// NOTE: It will update the dht's peerstore with any new addresses
// it finds for the given peer.
func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.ID, key string) (*recpb.Record, []*pstore.PeerInfo, error) {
pmes, err := dht.getValueSingle(ctx, p, key)
if err != nil {
return nil, nil, err
}
	// Perhaps we were given closer peers
	peers := pb.PBPeersToPeerInfos(pmes.GetCloserPeers())

	if record := pmes.GetRecord(); record != nil {
		// Success! We were given the value
		log.Debug("getValueOrPeers: got value")

		// make sure record is valid.
		err = dht.Validator.VerifyRecord(record)
		if err != nil {
			log.Info("Received invalid record! (discarded)")
			// return a sentinel to signify an invalid record was received
			err = errInvalidRecord
			record = new(recpb.Record)
		}
		return record, peers, err
	}

	if len(peers) > 0 {
		log.Debug("getValueOrPeers: peers")
		return nil, peers, nil
	}

	log.Warning("getValueOrPeers: routing.ErrNotFound")
	return nil, nil, routing.ErrNotFound
}
// getValueSingle simply performs the get value RPC with the given parameters
func (dht *IpfsDHT) getValueSingle(ctx context.Context, p peer.ID, key string) (*pb.Message, error) {
meta := logging.LoggableMap{
"key": key,
"peer": p,
}
eip := log.EventBegin(ctx, "getValueSingle", meta)
defer eip.Done()
pmes := pb.NewMessage(pb.Message_GET_VALUE, key, 0)
resp, err := dht.sendRequest(ctx, p, pmes)
switch err {
case nil:
return resp, nil
case ErrReadTimeout:
log.Warningf("read timeout: %s %s", p.Pretty(), key)
fallthrough
default:
eip.SetError(err)
return nil, err
}
}
// getLocal attempts to retrieve the value from the datastore
func (dht *IpfsDHT) getLocal(key string) (*recpb.Record, error) {
log.Debugf("getLocal %s", key)
v, err := dht.datastore.Get(mkDsKey(key))
if err != nil {
return nil, err
}
log.Debugf("found %s in local datastore")
byt, ok := v.([]byte)
if !ok {
return nil, errors.New("value stored in datastore not []byte")
}
rec := new(recpb.Record)
err = proto.Unmarshal(byt, rec)
if err != nil {
return nil, err
}
err = dht.Validator.VerifyRecord(rec)
if err != nil {
log.Debugf("local record verify failed: %s (discarded)", err)
return nil, err
}
return rec, nil
}
// getOwnPrivateKey attempts to load the local peer's private
// key from the peerstore.
func (dht *IpfsDHT) getOwnPrivateKey() (ci.PrivKey, error) {
sk := dht.peerstore.PrivKey(dht.self)
if sk == nil {
log.Warningf("%s dht cannot get own private key!", dht.self)
		return nil, fmt.Errorf("cannot get private key to sign record")
}
return sk, nil
}
// putLocal stores the key value pair in the datastore
func (dht *IpfsDHT) putLocal(key string, rec *recpb.Record) error {
data, err := proto.Marshal(rec)
if err != nil {
return err
}
return dht.datastore.Put(mkDsKey(key), data)
}
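
// A record round-trip through the local datastore, for illustration (this
// sketch assumes a *recpb.Record `rec` that passes the configured Validator):
//
//	if err := dht.putLocal(key, rec); err != nil {
//		return err
//	}
//	got, err := dht.getLocal(key) // re-verifies the record before returning it
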
// Update signals the routingTable to Update its last-seen status
// on the given peer.
func (dht *IpfsDHT) Update(ctx context.Context, p peer.ID) {
log.Event(ctx, "updatePeer", p)
dht.routingTable.Update(p)
}
// FindLocal looks for a peer with a given ID in the routing table and
// returns its PeerInfo, or an empty PeerInfo if the peer is not known.
func (dht *IpfsDHT) FindLocal(id peer.ID) pstore.PeerInfo {
	p := dht.routingTable.Find(id)
	if p != "" {
		return dht.peerstore.PeerInfo(p)
	}
	return pstore.PeerInfo{}
}
// findPeerSingle asks peer 'p' if they know where the peer with id 'id' is
func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p peer.ID, id peer.ID) (*pb.Message, error) {
eip := log.EventBegin(ctx, "findPeerSingle",
logging.LoggableMap{
"peer": p,
"target": id,
})
defer eip.Done()
pmes := pb.NewMessage(pb.Message_FIND_NODE, string(id), 0)
resp, err := dht.sendRequest(ctx, p, pmes)
switch err {
case nil:
return resp, nil
case ErrReadTimeout:
log.Warningf("read timeout: %s %s", p.Pretty(), id)
fallthrough
default:
eip.SetError(err)
return nil, err
}
}
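
// findProvidersSingle asks the peer 'p' for providers of the given key.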
func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p peer.ID, key *cid.Cid) (*pb.Message, error) {
eip := log.EventBegin(ctx, "findProvidersSingle", p, key)
defer eip.Done()
pmes := pb.NewMessage(pb.Message_GET_PROVIDERS, key.KeyString(), 0)
resp, err := dht.sendRequest(ctx, p, pmes)
switch err {
case nil:
return resp, nil
case ErrReadTimeout:
log.Warningf("read timeout: %s %s", p.Pretty(), key)
fallthrough
default:
eip.SetError(err)
return nil, err
}
}
// nearestPeersToQuery returns the routing table's closest peers.
func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.ID {
closer := dht.routingTable.NearestPeers(kb.ConvertKey(pmes.GetKey()), count)
return closer
}
// betterPeersToQuery returns nearestPeersToQuery's results, filtered to
// exclude ourselves and the requesting peer.
func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, p peer.ID, count int) []peer.ID {
closer := dht.nearestPeersToQuery(pmes, count)
	// no node? nil
	if closer == nil {
		log.Warning("no closer peers to send:", p)
		return nil
	}

	filtered := make([]peer.ID, 0, len(closer))
	for _, clp := range closer {

		// == to self? that's bad
		if clp == dht.self {
			log.Warning("attempted to return self! this shouldn't happen...")
			return nil
		}

		// Don't send a peer back to itself
		if clp == p {
			continue
		}

		filtered = append(filtered, clp)
	}

	// ok, these seem like closer nodes
	return filtered
}
// Context returns the dht's context.
func (dht *IpfsDHT) Context() context.Context {
return dht.ctx
}
// Process returns the dht's process.
func (dht *IpfsDHT) Process() goprocess.Process {
return dht.proc
}
// Close calls Process Close.
func (dht *IpfsDHT) Close() error {
return dht.proc.Close()
}
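
// mkDsKey creates a datastore key from the given string by base32-encoding
// it, so that arbitrary binary DHT keys are safe to use as datastore paths.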
func mkDsKey(s string) ds.Key {
return ds.NewKey(base32.RawStdEncoding.EncodeToString([]byte(s)))
}