Mirror of https://github.com/fluencelabs/rust-libp2p, synced 2025-06-24 07:11:38 +00:00.
Kademlia: Address some TODOs - Refactoring - API updates. (#1174)
* Address some TODOs, refactor queries and public API.

The following left-over issues are addressed:

* The key for FIND_NODE requests is generalised to any Multihash, instead of just peer IDs.
* All queries get a (configurable) timeout.
* Finishing queries as soon as enough results have been received is simplified to avoid code duplication.
* No more panics in provider-API-related code paths. The provider API is however still untested and (I think) still incomplete (e.g. expiration of provider records).
* Numerous smaller TODOs encountered in the code.

The following public API changes / additions are made:

* Introduce a `KademliaConfig` with new configuration options for the replication factor and query timeouts.
* Rename `find_node` to `get_closest_peers`.
* Rename `get_value` to `get_record` and `put_value` to `put_record`, introducing a `Quorum` parameter for both functions, replacing the existing `num_results` parameter with clearer semantics.
* Rename `add_providing` to `start_providing` and `remove_providing` to `stop_providing`.
* Add a `bootstrap` function that implements an (almost) standard Kademlia bootstrapping procedure.
* Rename `KademliaOut` to `KademliaEvent` with an updated list of constructors (some renamed). All events that report query results now report a `Result` to uniformly permit reporting of errors.

The following refactorings are made:

* Introduce some constants.
* Consolidate `query.rs` and `write.rs` behind a common query interface to reduce duplication and facilitate better code reuse, introducing the notion of a query peer iterator. `query/peers/closest.rs` contains the code that was formerly in `query.rs`. `query/peers/fixed.rs` contains a modified variant of `write.rs` (which is removed). The new `query.rs` provides an interface for working with a collection of queries, taking over some code from `behaviour.rs`.
* Reduce code duplication in tests and use the current_thread runtime for polling swarms to avoid spurious errors in the test output due to aborted connections when a test finishes prematurely (e.g. because a quorum of results has been collected).
* Some additions / improvements to the existing tests.

* Fix test.
* Fix rebase.
* Tweak kad-ipfs example.
* Incorporate some feedback.
* Provide easy access and conversion to keys in error results.
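For orientation, a minimal sketch of how the renamed API might be driven from a `Swarm` wrapping the `Kademlia` behaviour, following the names listed above. The exact key and value types, and how the key is constructed, are assumptions, not verified against this commit:

    // Sketch only: `key` stands for some Multihash-valued key and `peer_id`
    // for a PeerId; how they are built is an assumption, as are the precise
    // method signatures.
    swarm.put_record(key.clone(), b"my-value".to_vec(), Quorum::Majority); // was `put_value`
    swarm.get_record(&key, Quorum::One);                                   // was `get_value`
    swarm.start_providing(key);                                            // was `add_providing`
    swarm.get_closest_peers(peer_id);                                      // was `find_node`
    swarm.bootstrap();                                                     // new in this change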
@@ -25,18 +25,24 @@
 use futures::prelude::*;
 use libp2p::{
+    Swarm,
     PeerId,
-    identity
+    identity,
+    build_development_transport
 };
-use libp2p::kad::Kademlia;
+use libp2p::kad::{Kademlia, KademliaConfig, KademliaEvent};
+use std::env;
+use std::time::Duration;

 fn main() {
     env_logger::init();

     // Create a random key for ourselves.
     let local_key = identity::Keypair::generate_ed25519();
     let local_peer_id = PeerId::from(local_key.public());

     // Set up an encrypted DNS-enabled TCP transport over the Mplex protocol.
-    let transport = libp2p::build_development_transport(local_key);
+    let transport = build_development_transport(local_key);

     // Create a swarm to manage peers and events.
     let mut swarm = {
@@ -45,42 +51,61 @@ fn main() {
         // to insert our local node in the DHT. However here we use `without_init` because this
         // example is very ephemeral and we don't want to pollute the DHT. In a real world
         // application, you want to use `new` instead.
-        let mut behaviour: Kademlia<_> = libp2p::kad::Kademlia::new(local_peer_id.clone());
+        let mut cfg = KademliaConfig::new(local_peer_id.clone());
+        cfg.set_query_timeout(Duration::from_secs(5 * 60));
+        let mut behaviour: Kademlia<_> = Kademlia::new(cfg);

         // TODO: the /dnsaddr/ scheme is not supported (https://github.com/libp2p/rust-libp2p/issues/967)
-        /*behaviour.add_address(&"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());
-        behaviour.add_address(&"QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());
-        behaviour.add_address(&"QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());
-        behaviour.add_address(&"QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());*/
-        behaviour.add_address(&"QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ".parse().unwrap(), "/ip4/104.131.131.82/tcp/4001".parse().unwrap());
-        behaviour.add_address(&"QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM".parse().unwrap(), "/ip4/104.236.179.241/tcp/4001".parse().unwrap());
-        behaviour.add_address(&"QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu".parse().unwrap(), "/ip4/128.199.219.111/tcp/4001".parse().unwrap());
-        behaviour.add_address(&"QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64".parse().unwrap(), "/ip4/104.236.76.40/tcp/4001".parse().unwrap());
-        behaviour.add_address(&"QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd".parse().unwrap(), "/ip4/178.62.158.247/tcp/4001".parse().unwrap());
-        behaviour.add_address(&"QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM".parse().unwrap(), "/ip6/2604:a880:1:20::203:d001/tcp/4001".parse().unwrap());
-        behaviour.add_address(&"QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu".parse().unwrap(), "/ip6/2400:6180:0:d0::151:6001/tcp/4001".parse().unwrap());
-        behaviour.add_address(&"QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64".parse().unwrap(), "/ip6/2604:a880:800:10::4a:5001/tcp/4001".parse().unwrap());
-        behaviour.add_address(&"QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd".parse().unwrap(), "/ip6/2a03:b0c0:0:1010::23:1001/tcp/4001".parse().unwrap());
-        libp2p::core::Swarm::new(transport, behaviour, local_peer_id)
+        /*behaviour.add_address(&"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());
+        behaviour.add_address(&"QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());
+        behaviour.add_address(&"QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());
+        behaviour.add_address(&"QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());*/
+
+        // The only address that currently works.
+        behaviour.add_address(&"QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ".parse().unwrap(), "/ip4/104.131.131.82/tcp/4001".parse().unwrap());
+
+        // The following addresses always fail signature verification, possibly due to
+        // RSA keys with < 2048 bits.
+        // behaviour.add_address(&"QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM".parse().unwrap(), "/ip4/104.236.179.241/tcp/4001".parse().unwrap());
+        // behaviour.add_address(&"QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu".parse().unwrap(), "/ip4/128.199.219.111/tcp/4001".parse().unwrap());
+        // behaviour.add_address(&"QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64".parse().unwrap(), "/ip4/104.236.76.40/tcp/4001".parse().unwrap());
+        // behaviour.add_address(&"QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd".parse().unwrap(), "/ip4/178.62.158.247/tcp/4001".parse().unwrap());
+
+        // The following addresses are permanently unreachable:
+        // Other(Other(A(Transport(A(Underlying(Os { code: 101, kind: Other, message: "Network is unreachable" }))))))
+        // behaviour.add_address(&"QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM".parse().unwrap(), "/ip6/2604:a880:1:20::203:d001/tcp/4001".parse().unwrap());
+        // behaviour.add_address(&"QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu".parse().unwrap(), "/ip6/2400:6180:0:d0::151:6001/tcp/4001".parse().unwrap());
+        // behaviour.add_address(&"QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64".parse().unwrap(), "/ip6/2604:a880:800:10::4a:5001/tcp/4001".parse().unwrap());
+        // behaviour.add_address(&"QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd".parse().unwrap(), "/ip6/2a03:b0c0:0:1010::23:1001/tcp/4001".parse().unwrap());
+        Swarm::new(transport, behaviour, local_peer_id)
     };

     // Order Kademlia to search for a peer.
-    let to_search: PeerId = if let Some(peer_id) = std::env::args().nth(1) {
+    let to_search: PeerId = if let Some(peer_id) = env::args().nth(1) {
         peer_id.parse().expect("Failed to parse peer ID to find")
     } else {
         identity::Keypair::generate_ed25519().public().into()
     };
-    println!("Searching for {:?}", to_search);
-    swarm.find_node(to_search);
+
+    println!("Searching for the closest peers to {:?}", to_search);
+    swarm.get_closest_peers(to_search);

     // Kick it off!
     tokio::run(futures::future::poll_fn(move || -> Result<_, ()> {
         loop {
             match swarm.poll().expect("Error while polling swarm") {
-                Async::Ready(Some(ev @ libp2p::kad::KademliaOut::FindNodeResult { .. })) => {
-                    println!("Result: {:#?}", ev);
-                    return Ok(Async::Ready(()));
-                }
-                Async::Ready(Some(_)) => (),
+                Async::Ready(Some(KademliaEvent::GetClosestPeersResult(res))) => {
+                    match res {
+                        Ok(ok) => {
+                            println!("Closest peers: {:#?}", ok.peers);
+                            return Ok(Async::Ready(()));
+                        }
+                        Err(err) => {
+                            println!("The search for closest peers failed: {:?}", err);
+                        }
+                    }
+                },
+                Async::Ready(Some(_)) => {},
                 Async::Ready(None) | Async::NotReady => break,
             }
         }
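Besides the query timeout shown in the second hunk, the commit message names the replication factor as a new `KademliaConfig` option. A minimal sketch of what configuring it might look like, assuming a `set_replication_factor(NonZeroUsize)` setter (the setter name and signature are an assumption, not taken from this diff):

    use std::num::NonZeroUsize;

    let mut cfg = KademliaConfig::new(local_peer_id.clone());
    cfg.set_query_timeout(Duration::from_secs(5 * 60));         // as in the diff above
    cfg.set_replication_factor(NonZeroUsize::new(20).unwrap()); // assumed setter
    let behaviour: Kademlia<_> = Kademlia::new(cfg);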
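The commit also adds a `bootstrap` function implementing an (almost) standard Kademlia bootstrapping procedure. After seeding the routing table with the `add_address` calls above, the example could start it; a sketch, assuming `bootstrap` takes no arguments (not verified against this commit):

    // Assumed call shape: refresh the routing table starting from the
    // bootstrap addresses added above.
    swarm.bootstrap();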
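Because every query-result event now carries a `Result`, other query types can be handled with the same shape as the `GetClosestPeersResult` arm in the polling loop above. A sketch of an additional arm for record lookups that could slot into the same `match`; the `GetRecordResult` variant and the `ok.records` field are assumptions by analogy, not taken from this diff:

    // Hypothetical arm, mirroring GetClosestPeersResult above.
    Async::Ready(Some(KademliaEvent::GetRecordResult(res))) => {
        match res {
            Ok(ok) => println!("Records: {:#?}", ok.records), // assumed field
            Err(err) => println!("The record lookup failed: {:?}", err),
        }
    }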