// Package dht implements a distributed hash table that satisfies the ipfs routing
// interface. This DHT is modeled after Kademlia with S/Kademlia modifications.
package dht

import (
	"context"
	"crypto/rand"
	"fmt"
	"time"

	u "github.com/ipfs/go-ipfs-util"
	goprocess "github.com/jbenet/goprocess"
	periodicproc "github.com/jbenet/goprocess/periodic"
	peer "github.com/libp2p/go-libp2p-peer"
	routing "github.com/libp2p/go-libp2p-routing"
)

// BootstrapConfig specifies parameters used when bootstrapping the DHT.
//
// Note there is a tradeoff between the bootstrap period and the
// number of queries. We could support a longer period with fewer
// queries.
type BootstrapConfig struct {
	Queries int           // how many queries to run per period
	Period  time.Duration // how often to run periodic bootstrap
	Timeout time.Duration // how long to wait for a bootstrap query to run
}

// DefaultBootstrapConfig is the bootstrap configuration used by Bootstrap.
var DefaultBootstrapConfig = BootstrapConfig{
	// For now, this is set to 1 query.
	// We are currently more interested in ensuring we have a properly formed
	// DHT than making sure our dht minimizes traffic. Once we are more certain
	// of our implementation's robustness, we should lower this down to 8 or 4.
	Queries: 1,

	// For now, this is set to 5 minutes, which is a medium period. We are
	// currently more interested in ensuring we have a properly formed
	// DHT than making sure our dht minimizes traffic.
	Period: time.Duration(5 * time.Minute),

	Timeout: time.Duration(10 * time.Second),
}

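// Purely illustrative, not part of this file's API: a custom configuration is
// just a value of the struct above. A hypothetical caller wanting a faster
// refresh might start from the default and override a couple of fields:
//
//	cfg := DefaultBootstrapConfig
//	cfg.Queries = 3              // assumed value: more lookups per period
//	cfg.Period = 1 * time.Minute // assumed value: shorter refresh interval
//
// and then hand cfg to BootstrapWithConfig below.
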
// Bootstrap ensures the dht routing table remains healthy as peers come and go.
// It builds up a list of peers by requesting random peer IDs. The Bootstrap
// process will run a number of queries each time, and run every time the signal
// fires. These parameters are configurable.
//
// As opposed to BootstrapWithConfig, Bootstrap satisfies the routing interface.
func (dht *IpfsDHT) Bootstrap(ctx context.Context) error {
	proc, err := dht.BootstrapWithConfig(DefaultBootstrapConfig)
	if err != nil {
		return err
	}

	// Wait till ctx or dht.Context exits.
	// We have to do it this way to satisfy the Routing interface (contexts).
	go func() {
		defer proc.Close()
		select {
		case <-ctx.Done():
		case <-dht.Context().Done():
		}
	}()

	return nil
}

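// Illustrative only (d is an assumed *IpfsDHT variable): since Bootstrap has
// the routing-interface shape, a caller can simply start it and rely on the
// contexts for shutdown:
//
//	if err := d.Bootstrap(ctx); err != nil {
//		// handle the error (only possible if the default config were invalid)
//	}
//	// bootstrapping stops once ctx or the DHT's own context is done
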
// BootstrapWithConfig ensures the dht routing table remains healthy as peers come and go.
// It builds up a list of peers by requesting random peer IDs. The Bootstrap
// process will run a number of queries each time, and run every time the signal
// fires. These parameters are configurable.
//
// BootstrapWithConfig returns a process, so the user can stop it.
func (dht *IpfsDHT) BootstrapWithConfig(cfg BootstrapConfig) (goprocess.Process, error) {
	if cfg.Queries <= 0 {
		return nil, fmt.Errorf("invalid number of queries: %d", cfg.Queries)
	}

	tickch := make(chan struct{}, 1)
	proc := periodicproc.OnSignal(tickch, dht.bootstrapWorker(cfg))
	go func() {
		tickch <- struct{}{}
		for {
			select {
			case <-time.After(cfg.Period):
				select {
				case tickch <- struct{}{}:
				default:
					// Don't queue ticks, like Tickers.
					log.Warning("Previous bootstrapping attempt not completed within bootstrapping period")
				}
			case <-dht.Context().Done():
				return
			}
		}
	}()

	return proc, nil
}

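// Sketch of the intended lifecycle (d is an assumed *IpfsDHT variable): the
// returned goprocess can be stopped independently of the DHT itself:
//
//	proc, err := d.BootstrapWithConfig(DefaultBootstrapConfig)
//	if err != nil {
//		return err
//	}
//	// ... later, stop periodic bootstrapping without closing the DHT:
//	proc.Close()
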
// BootstrapOnSignal ensures the dht routing table remains healthy as peers come and go.
// It builds up a list of peers by requesting random peer IDs. The Bootstrap
// process will run a number of queries each time, and run every time the signal
// fires. These parameters are configurable.
//
// BootstrapOnSignal returns a process, so the user can stop it.
func (dht *IpfsDHT) BootstrapOnSignal(cfg BootstrapConfig, signal <-chan time.Time) (goprocess.Process, error) {
	if cfg.Queries <= 0 {
		return nil, fmt.Errorf("invalid number of queries: %d", cfg.Queries)
	}

	if signal == nil {
		return nil, fmt.Errorf("invalid signal: %v", signal)
	}

	proc := periodicproc.Ticker(signal, dht.bootstrapWorker(cfg))

	return proc, nil
}

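// Illustrative only (d is an assumed *IpfsDHT variable): a caller that wants
// full control over scheduling can drive bootstrapping from its own ticker:
//
//	ticker := time.NewTicker(90 * time.Second) // assumed interval
//	proc, err := d.BootstrapOnSignal(DefaultBootstrapConfig, ticker.C)
//	if err != nil {
//		return err
//	}
//	// to stop: halt the ticker, then close the returned process
//	ticker.Stop()
//	proc.Close()
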
func (dht *IpfsDHT) bootstrapWorker(cfg BootstrapConfig) func(worker goprocess.Process) {
	return func(worker goprocess.Process) {
		// it would be useful to be able to send out signals of when we bootstrap, too...
		// maybe this is a good case for whole module event pub/sub?

		ctx := dht.Context()
		if err := dht.runBootstrap(ctx, cfg); err != nil {
			log.Warning(err)
			// A bootstrapping error is important to notice but not fatal.
		}
	}
}

// runBootstrap builds up a list of peers by requesting random peer IDs.
func (dht *IpfsDHT) runBootstrap(ctx context.Context, cfg BootstrapConfig) error {
	bslog := func(msg string) {
		log.Debugf("DHT %s dhtRunBootstrap %s -- routing table size: %d", dht.self, msg, dht.routingTable.Size())
	}
	bslog("start")
	defer bslog("end")
	defer log.EventBegin(ctx, "dhtRunBootstrap").Done()

	var merr u.MultiErr

	randomID := func() peer.ID {
		// 16 random bytes is not a valid peer id. it may be fine because
		// the dht will rehash to its own keyspace anyway.
		id := make([]byte, 16)
		rand.Read(id)
		id = u.Hash(id)
		return peer.ID(id)
	}

	// bootstrap sequentially, as results will compound
	runQuery := func(ctx context.Context, id peer.ID) {
		ctx, cancel := context.WithTimeout(ctx, cfg.Timeout)
		defer cancel()

		p, err := dht.FindPeer(ctx, id)
		if err == routing.ErrNotFound {
			// this isn't an error. this is precisely what we expect.
		} else if err != nil {
			merr = append(merr, err)
		} else {
			// woah, actually found a peer with that ID? this shouldn't happen normally
			// (as the ID we use is not a real ID). this is an odd error worth logging.
			err := fmt.Errorf("Bootstrap peer error: Actually FOUND peer. (%s, %s)", id, p)
			log.Warningf("%s", err)
			merr = append(merr, err)
		}
	}

	// these should be parallel normally. but can make them sequential for debugging.
	// note that the core/bootstrap context deadline should be extended too for that.
	for i := 0; i < cfg.Queries; i++ {
		id := randomID()
		log.Debugf("Bootstrapping query (%d/%d) to random ID: %s", i+1, cfg.Queries, id)
		runQuery(ctx, id)
	}

	// Find self to distribute peer info to our neighbors.
	// Do this after bootstrapping.
	log.Debugf("Bootstrapping query to self: %s", dht.self)
	runQuery(ctx, dht.self)

	if len(merr) > 0 {
		return merr
	}
	return nil
}