Mirror of https://github.com/fluencelabs/rust-libp2p (synced 2025-06-18 20:41:25 +00:00)
Add a KademliaHandler (#580)
* Rework Kademlia for the new design
* Minor work on protocol.rs
* More work
* Remove QueryTarget::FindValue
* Finish work on query
* Query timeout test
* Work on topology
* More work
* Update protocols/kad/src/topology.rs
  Co-Authored-By: tomaka <pierre.krieger1708@gmail.com>
* Fix trailing whitespaces
* Use if let
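For orientation, the sketch below condenses how the pieces introduced by this commit fit together. It is distilled from the examples/ipfs-kad.rs file added further down; `transport`, `local_peer_id`, `bootstrap_peer`, `bootstrap_addr` and `peer_to_find` are assumed to already be in scope, so treat it as a sketch of this commit's API rather than a verbatim excerpt.

    // A topology is the swarm's address book; Kademlia reads it and feeds discoveries back into it.
    let mut topology = libp2p::core::topology::MemoryTopology::empty();
    topology.add_address(bootstrap_peer, bootstrap_addr);

    // The Kademlia behaviour implements the new `NetworkBehaviour<TTopology>` trait;
    // `without_init` skips the initial self-insertion queries (see the example's comments).
    let behaviour = libp2p::kad::Kademlia::without_init(local_peer_id);

    // The swarm now takes the topology as a third component and hands it to the
    // behaviour on every call to `poll`.
    let mut swarm = libp2p::core::Swarm::new(transport, behaviour, topology);
    swarm.find_node(peer_to_find);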
@@ -147,6 +147,13 @@ impl PartialEq<PeerId> for multihash::Multihash {
    }
}

impl AsRef<multihash::Multihash> for PeerId {
    #[inline]
    fn as_ref(&self) -> &multihash::Multihash {
        &self.multihash
    }
}

impl Into<multihash::Multihash> for PeerId {
    #[inline]
    fn into(self) -> multihash::Multihash {
@@ -37,12 +37,12 @@ pub use crate::nodes::raw_swarm::ConnectedPoint;
/// Contains the state of the network, plus the way it should behave.
pub struct Swarm<TTransport, TBehaviour, TTopology>
where TTransport: Transport,
-     TBehaviour: NetworkBehaviour,
+     TBehaviour: NetworkBehaviour<TTopology>,
{
    raw_swarm: RawSwarm<
        TTransport,
-       <<TBehaviour as NetworkBehaviour>::ProtocolsHandler as ProtocolsHandler>::InEvent,
-       <<TBehaviour as NetworkBehaviour>::ProtocolsHandler as ProtocolsHandler>::OutEvent,
+       <<TBehaviour as NetworkBehaviour<TTopology>>::ProtocolsHandler as ProtocolsHandler>::InEvent,
+       <<TBehaviour as NetworkBehaviour<TTopology>>::ProtocolsHandler as ProtocolsHandler>::OutEvent,
        NodeHandlerWrapper<TBehaviour::ProtocolsHandler>,
    >,
@@ -57,7 +57,7 @@ where TTransport: Transport,

impl<TTransport, TBehaviour, TTopology> Deref for Swarm<TTransport, TBehaviour, TTopology>
where TTransport: Transport,
-     TBehaviour: NetworkBehaviour,
+     TBehaviour: NetworkBehaviour<TTopology>,
{
    type Target = TBehaviour;
@@ -69,7 +69,7 @@ where TTransport: Transport,

impl<TTransport, TBehaviour, TTopology> DerefMut for Swarm<TTransport, TBehaviour, TTopology>
where TTransport: Transport,
-     TBehaviour: NetworkBehaviour,
+     TBehaviour: NetworkBehaviour<TTopology>,
{
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
@@ -78,7 +78,7 @@ where TTransport: Transport,
}

impl<TTransport, TBehaviour, TMuxer, TTopology> Swarm<TTransport, TBehaviour, TTopology>
-where TBehaviour: NetworkBehaviour,
+where TBehaviour: NetworkBehaviour<TTopology>,
      TMuxer: StreamMuxer + Send + Sync + 'static,
      <TMuxer as StreamMuxer>::OutboundSubstream: Send + 'static,
      <TMuxer as StreamMuxer>::Substream: Send + 'static,
@@ -171,7 +171,7 @@ where TBehaviour: NetworkBehaviour,
}

impl<TTransport, TBehaviour, TMuxer, TTopology> Stream for Swarm<TTransport, TBehaviour, TTopology>
-where TBehaviour: NetworkBehaviour,
+where TBehaviour: NetworkBehaviour<TTopology>,
      TMuxer: StreamMuxer + Send + Sync + 'static,
      <TMuxer as StreamMuxer>::OutboundSubstream: Send + 'static,
      <TMuxer as StreamMuxer>::Substream: Send + 'static,
@@ -230,7 +230,7 @@ where TBehaviour: NetworkBehaviour,
                Async::Ready(RawSwarmEvent::UnknownPeerDialError { .. }) => {},
            }

-           match self.behaviour.poll() {
+           match self.behaviour.poll(&mut self.topology) {
                Async::NotReady if raw_swarm_not_ready => return Ok(Async::NotReady),
                Async::NotReady => (),
                Async::Ready(NetworkBehaviourAction::GenerateEvent(event)) => {
@@ -256,7 +256,7 @@ where TBehaviour: NetworkBehaviour,
///
/// This trait has been designed to be composable. Multiple implementations can be combined into
/// one that handles all the behaviours at once.
-pub trait NetworkBehaviour {
+pub trait NetworkBehaviour<TTopology> {
    /// Handler for all the protocols the network supports.
    type ProtocolsHandler: ProtocolsHandler;
    /// Event generated by the swarm.
@@ -286,7 +286,7 @@ pub trait NetworkBehaviour {
    /// Polls for things that swarm should do.
    ///
    /// This API mimics the API of the `Stream` trait.
-   fn poll(&mut self) -> Async<NetworkBehaviourAction<<Self::ProtocolsHandler as ProtocolsHandler>::InEvent, Self::OutEvent>>;
+   fn poll(&mut self, topology: &mut TTopology) -> Async<NetworkBehaviourAction<<Self::ProtocolsHandler as ProtocolsHandler>::InEvent, Self::OutEvent>>;
}

/// Action to perform.
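A behaviour that has no use for the topology simply ignores the new argument; this is exactly what the floodsub and identify hunks further down do. A minimal sketch of such a `poll` implementation, assuming the trait shape shown above:

    fn poll(
        &mut self,
        _: &mut TTopology,
    ) -> Async<NetworkBehaviourAction<<Self::ProtocolsHandler as ProtocolsHandler>::InEvent, Self::OutEvent>> {
        // Nothing to report; a real behaviour would drain its internal event queues here.
        Async::NotReady
    }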
@@ -23,8 +23,6 @@ use {Multiaddr, PeerId};

/// Storage for the network topology.
pub trait Topology {
    /// Adds a discovered address to the topology.
    fn add_discovered_address(&mut self, peer: &PeerId, addr: Multiaddr);
    /// Returns the addresses to try use to reach the given peer.
    fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec<Multiaddr>;
}
@@ -42,6 +40,30 @@ impl MemoryTopology {
            list: Default::default()
        }
    }

    /// Returns true if the topology is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.list.is_empty()
    }

    /// Adds an address to the topology.
    #[inline]
    pub fn add_address(&mut self, peer: PeerId, addr: Multiaddr) {
        self.list.entry(peer).or_insert_with(|| Vec::new()).push(addr);
    }

    /// Returns a list of all the known peers in the topology.
    #[inline]
    pub fn peers(&self) -> impl Iterator<Item = &PeerId> {
        self.list.keys()
    }

    /// Returns an iterator to all the entries in the topology.
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = (&PeerId, &Multiaddr)> {
        self.list.iter().flat_map(|(p, l)| l.iter().map(move |ma| (p, ma)))
    }
}

impl Default for MemoryTopology {
@@ -52,10 +74,6 @@ impl Default for MemoryTopology {
}

impl Topology for MemoryTopology {
    fn add_discovered_address(&mut self, peer: &PeerId, addr: Multiaddr) {
        self.list.entry(peer.clone()).or_insert_with(|| Vec::new()).push(addr);
    }

    fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec<Multiaddr> {
        self.list.get(peer).map(|v| v.clone()).unwrap_or(Vec::new())
    }
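Taken together, the new `MemoryTopology` helpers behave like a small multi-map from peer IDs to addresses. A short usage sketch (the peer ID and address literals are placeholders taken from the example below):

    let mut topology = MemoryTopology::empty();
    let peer: PeerId = "QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ".parse().unwrap();
    topology.add_address(peer.clone(), "/ip4/104.131.131.82/tcp/4001".parse().unwrap());
    assert!(!topology.is_empty());
    assert_eq!(topology.peers().count(), 1);
    // `Topology::addresses_of_peer` returns every address recorded for that peer.
    assert_eq!(topology.addresses_of_peer(&peer).len(), 1);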
examples/ipfs-kad.rs (new file, 100 lines)
@@ -0,0 +1,100 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! Demonstrates how to perform Kademlia queries on the IPFS network.
//!
//! You can pass as parameter a base58 peer ID to search for. If you don't pass any parameter, a
//! peer ID will be generated randomly.

extern crate futures;
extern crate libp2p;
extern crate rand;
extern crate tokio;

use futures::prelude::*;
use libp2p::{
    Transport,
    core::PublicKey,
    core::upgrade::{self, OutboundUpgradeExt},
    secio,
    mplex,
};

fn main() {
    // Create a random key for ourselves.
    let local_key = secio::SecioKeyPair::ed25519_generated().unwrap();
    let local_peer_id = local_key.to_peer_id();

    // Set up a an encrypted DNS-enabled TCP Transport over the Mplex protocol
    let transport = libp2p::CommonTransport::new()
        .with_upgrade(secio::SecioConfig::new(local_key))
        .and_then(move |out, _| {
            let peer_id = out.remote_key.into_peer_id();
            let upgrade = mplex::MplexConfig::new().map_outbound(move |muxer| (peer_id, muxer) );
            upgrade::apply_outbound(out.stream, upgrade).map_err(|e| e.into_io_error())
        });

    // Create the topology of the network with the IPFS bootstrap nodes.
    let mut topology = libp2p::core::topology::MemoryTopology::empty();
    topology.add_address("QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ".parse().unwrap(), "/ip4/104.131.131.82/tcp/4001".parse().unwrap());
    topology.add_address("QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM".parse().unwrap(), "/ip4/104.236.179.241/tcp/4001".parse().unwrap());
    topology.add_address("QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64".parse().unwrap(), "/ip4/104.236.76.40/tcp/4001".parse().unwrap());
    topology.add_address("QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu".parse().unwrap(), "/ip4/128.199.219.111/tcp/4001".parse().unwrap());
    topology.add_address("QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd".parse().unwrap(), "/ip4/178.62.158.247/tcp/4001".parse().unwrap());
    topology.add_address("QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu".parse().unwrap(), "/ip6/2400:6180:0:d0::151:6001/tcp/4001".parse().unwrap());
    topology.add_address("QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM".parse().unwrap(), "/ip6/2604:a880:1:20::203:d001/tcp/4001".parse().unwrap());
    topology.add_address("QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64".parse().unwrap(), "/ip6/2604:a880:800:10::4a:5001/tcp/4001".parse().unwrap());
    topology.add_address("QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd".parse().unwrap(), "/ip6/2a03:b0c0:0:1010::23:1001/tcp/4001".parse().unwrap());

    // Create a swarm to manage peers and events.
    let mut swarm = {
        // Create a Kademlia behaviour.
        // Note that normally the Kademlia process starts by performing lots of request in order
        // to insert our local node in the DHT. However here we use `without_init` because this
        // example is very ephemeral and we don't want to pollute the DHT. In a real world
        // application, you want to use `new` instead.
        let mut behaviour = libp2p::kad::Kademlia::without_init(local_peer_id);
        libp2p::core::Swarm::new(transport, behaviour, topology)
    };

    // Order Kademlia to search for a peer.
    let to_search = if let Some(peer_id) = std::env::args().nth(1) {
        peer_id.parse().expect("Failed to parse peer ID to find")
    } else {
        PublicKey::Secp256k1((0..32).map(|_| -> u8 { rand::random() }).collect()).into_peer_id()
    };
    println!("Searching for {:?}", to_search);
    swarm.find_node(to_search);

    // Kick it off!
    tokio::run(futures::future::poll_fn(move || -> Result<_, ()> {
        loop {
            match swarm.poll().expect("Error while polling swarm") {
                Async::Ready(Some(event)) => {
                    println!("Result: {:#?}", event);
                    return Ok(Async::Ready(()));
                },
                Async::Ready(None) | Async::NotReady => break,
            }
        }

        Ok(Async::NotReady)
    }));
}
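The `event` that the example prints is the behaviour's output event, `KademliaOut`, defined in protocols/kad/src/behaviour.rs further down. Matching on it explicitly might look like the following sketch, assuming `KademliaOut` is re-exported from `libp2p::kad` alongside `Kademlia`:

    match event {
        libp2p::kad::KademliaOut::FindNodeResult { key, closer_peers } => {
            println!("closest peers to {:?}: {:?}", key, closer_peers);
        }
        libp2p::kad::KademliaOut::GetProvidersResult { key, provider_peers, .. } => {
            println!("providers of {:?}: {:?}", key, provider_peers);
        }
    }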
@@ -68,12 +68,23 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
        quote!{#n}
    };

    // Name of the type parameter that represents the topology.
    let topology_generic = {
        let mut n = "TTopology".to_string();
        // Avoid collisions.
        while ast.generics.type_params().any(|tp| tp.ident.to_string() == n) {
            n.push('1');
        }
        let n = Ident::new(&n, name.span());
        quote!{#n}
    };

    // Build the generics.
    let impl_generics = {
        let tp = ast.generics.type_params();
        let lf = ast.generics.lifetimes();
        let cst = ast.generics.const_params();
-       quote!{<#(#lf,)* #(#tp,)* #(#cst,)* #substream_generic>}
+       quote!{<#(#lf,)* #(#tp,)* #(#cst,)* #topology_generic, #substream_generic>}
    };

    // Build the `where ...` clause of the trait implementation.
@@ -83,11 +94,11 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
        .flat_map(|field| {
            let ty = &field.ty;
            vec![
-               quote!{#ty: #trait_to_impl},
-               quote!{<#ty as #trait_to_impl>::ProtocolsHandler: #protocols_handler<Substream = #substream_generic>},
+               quote!{#ty: #trait_to_impl<#topology_generic>},
+               quote!{<#ty as #trait_to_impl<#topology_generic>>::ProtocolsHandler: #protocols_handler<Substream = #substream_generic>},
                // Note: this bound is required because of https://github.com/rust-lang/rust/issues/55697
-               quote!{<<#ty as #trait_to_impl>::ProtocolsHandler as #protocols_handler>::InboundProtocol: ::libp2p::core::InboundUpgrade<#substream_generic>},
-               quote!{<<#ty as #trait_to_impl>::ProtocolsHandler as #protocols_handler>::OutboundProtocol: ::libp2p::core::OutboundUpgrade<#substream_generic>},
+               quote!{<<#ty as #trait_to_impl<#topology_generic>>::ProtocolsHandler as #protocols_handler>::InboundProtocol: ::libp2p::core::InboundUpgrade<#substream_generic>},
+               quote!{<<#ty as #trait_to_impl<#topology_generic>>::ProtocolsHandler as #protocols_handler>::OutboundProtocol: ::libp2p::core::OutboundUpgrade<#substream_generic>},
            ]
        })
        .collect::<Vec<_>>();
@@ -196,7 +207,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
            continue;
        }
        let ty = &field.ty;
-       let field_info = quote!{ <#ty as #trait_to_impl>::ProtocolsHandler };
+       let field_info = quote!{ <#ty as #trait_to_impl<#topology_generic>>::ProtocolsHandler };
        match ph_ty {
            Some(ev) => ph_ty = Some(quote!{ #proto_select_ident<#ev, #field_info> }),
            ref mut ev @ None => *ev = Some(field_info),
@@ -295,7 +306,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {

        Some(quote!{
            loop {
-               match #field_name.poll() {
+               match #field_name.poll(topology) {
                    Async::Ready(#network_behaviour_action::GenerateEvent(event)) => {
                        #handling
                    }
@@ -319,7 +330,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {

    // Now the magic happens.
    let final_quote = quote!{
-       impl #impl_generics #trait_to_impl for #name #ty_generics
+       impl #impl_generics #trait_to_impl<#topology_generic> for #name #ty_generics
        #where_clause
        {
            type ProtocolsHandler = #protocols_handler_ty;
@@ -352,7 +363,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
            }
        }

-       fn poll(&mut self) -> ::libp2p::futures::Async<#network_behaviour_action<<Self::ProtocolsHandler as #protocols_handler>::InEvent, Self::OutEvent>> {
+       fn poll(&mut self, topology: &mut #topology_generic) -> ::libp2p::futures::Async<#network_behaviour_action<<Self::ProtocolsHandler as #protocols_handler>::InEvent, Self::OutEvent>> {
            use libp2p::futures::prelude::*;
            #(#poll_stmts)*
            let f: ::libp2p::futures::Async<#network_behaviour_action<<Self::ProtocolsHandler as #protocols_handler>::InEvent, Self::OutEvent>> = #poll_method;
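For a concrete feel of what these `quote!` fragments expand to, here is roughly the where-clause the macro would now emit for a single field of type `A` in a derived behaviour. The names `A`, `TTopology` and `TSubstream` are placeholders for `#ty`, `#topology_generic` and `#substream_generic`; the last two bounds are the rust-lang/rust#55697 workaround mentioned in the hunk above. This is an illustrative expansion, not code from the commit:

    where
        A: NetworkBehaviour<TTopology>,
        <A as NetworkBehaviour<TTopology>>::ProtocolsHandler: ProtocolsHandler<Substream = TSubstream>,
        <<A as NetworkBehaviour<TTopology>>::ProtocolsHandler as ProtocolsHandler>::InboundProtocol: ::libp2p::core::InboundUpgrade<TSubstream>,
        <<A as NetworkBehaviour<TTopology>>::ProtocolsHandler as ProtocolsHandler>::OutboundProtocol: ::libp2p::core::OutboundUpgrade<TSubstream>,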
@@ -23,7 +23,7 @@ extern crate libp2p;

/// Small utility to check that a type implements `NetworkBehaviour`.
#[allow(dead_code)]
-fn require_net_behaviour<T: libp2p::core::swarm::NetworkBehaviour>() {}
+fn require_net_behaviour<T: libp2p::core::swarm::NetworkBehaviour<libp2p::core::topology::MemoryTopology>>() {}

// TODO: doesn't compile
/*#[test]
@@ -73,7 +73,8 @@ fn three_fields() {
    }
}

#[test]
// TODO: fix this example ; a Rust bug prevent us from doing so
/*#[test]
fn event_handler() {
    #[allow(dead_code)]
    #[derive(NetworkBehaviour)]
@@ -93,7 +94,7 @@ fn event_handler() {
    fn foo<TSubstream: libp2p::tokio_io::AsyncRead + libp2p::tokio_io::AsyncWrite>() {
        require_net_behaviour::<Foo<TSubstream>>();
    }
}
}*/

#[test]
fn custom_polling() {
@@ -172,7 +172,7 @@ impl<TSubstream> FloodsubBehaviour<TSubstream> {
    }
}

-impl<TSubstream> NetworkBehaviour for FloodsubBehaviour<TSubstream>
+impl<TSubstream, TTopology> NetworkBehaviour<TTopology> for FloodsubBehaviour<TSubstream>
where
    TSubstream: AsyncRead + AsyncWrite,
{
@@ -276,6 +276,7 @@ where

    fn poll(
        &mut self,
+       _: &mut TTopology,
    ) -> Async<
        NetworkBehaviourAction<
            <Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
@@ -61,7 +61,7 @@ impl<TSubstream> IdentifyListen<TSubstream> {
    }
}

-impl<TSubstream> NetworkBehaviour for IdentifyListen<TSubstream>
+impl<TSubstream, TTopology> NetworkBehaviour<TTopology> for IdentifyListen<TSubstream>
where
    TSubstream: AsyncRead + AsyncWrite,
{
@@ -99,6 +99,7 @@ where

    fn poll(
        &mut self,
+       _: &mut TTopology,
    ) -> Async<
        NetworkBehaviourAction<
            <Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
@@ -44,7 +44,7 @@ impl<TSubstream> PeriodicIdentifyBehaviour<TSubstream> {
    }
}

-impl<TSubstream> NetworkBehaviour for PeriodicIdentifyBehaviour<TSubstream>
+impl<TSubstream, TTopology> NetworkBehaviour<TTopology> for PeriodicIdentifyBehaviour<TSubstream>
where
    TSubstream: AsyncRead + AsyncWrite,
{
@@ -79,6 +79,7 @@ where

    fn poll(
        &mut self,
+       _: &mut TTopology,
    ) -> Async<
        NetworkBehaviourAction<
            <Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
@@ -26,6 +26,7 @@ tokio-codec = "0.1"
tokio-io = "0.1"
tokio-timer = "0.2.6"
unsigned-varint = { version = "0.2.1", features = ["codec"] }
void = "1.0"

[dev-dependencies]
libp2p-tcp-transport = { path = "../../transports/tcp" }
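Before the full listing of behaviour.rs, its public surface boils down to a handful of calls. A hedged usage sketch, assuming a `kad: Kademlia<TSubstream>` already wired into a swarm as in the example above, with `peer_id: PeerId`, `key_hash: Multihash` and `provider_key: PeerId` in scope:

    // Ask the DHT for the nodes closest to a peer ID; completion is reported as a
    // `KademliaOut::FindNodeResult` event.
    kad.find_node(peer_id.clone());

    // Ask for the providers of a key; reported as `KademliaOut::GetProvidersResult`.
    kad.get_providers(key_hash.clone());

    // Start, and later stop, advertising ourselves as a provider for a key
    // (note that `add_providing` takes a `PeerId`-typed key, as explained in the struct docs).
    kad.add_providing(provider_key.clone());
    kad.remove_providing(&key_hash);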
protocols/kad/src/behaviour.rs (new file, 594 lines)
@@ -0,0 +1,594 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use fnv::{FnvHashMap, FnvHashSet};
use futures::{prelude::*, stream};
use handler::{KademliaHandler, KademliaHandlerEvent, KademliaHandlerIn, KademliaRequestId};
use libp2p_core::swarm::{ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction};
use libp2p_core::{protocols_handler::ProtocolsHandler, topology::Topology, Multiaddr, PeerId};
use multihash::Multihash;
use protocol::{KadConnectionType, KadPeer};
use query::{QueryConfig, QueryState, QueryStatePollOut, QueryTarget};
use rand;
use smallvec::SmallVec;
use std::{cmp::Ordering, marker::PhantomData, time::Duration, time::Instant};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_timer::Interval;
use topology::KademliaTopology;

/// Network behaviour that handles Kademlia.
pub struct Kademlia<TSubstream> {
    /// Peer ID of the local node.
    local_peer_id: PeerId,

    /// All the iterative queries we are currently performing, with their ID. The last parameter
    /// is the list of accumulated providers for `GET_PROVIDERS` queries.
    active_queries: FnvHashMap<QueryId, (QueryState, QueryPurpose, Vec<PeerId>)>,

    /// List of queries to start once we are inside `poll()`.
    queries_to_starts: SmallVec<[(QueryId, QueryTarget, QueryPurpose); 8]>,

    /// List of peers the swarm is connected to.
    connected_peers: FnvHashSet<PeerId>,

    /// Contains a list of peer IDs which we are not connected to, and an RPC query to send to them
    /// once they connect.
    pending_rpcs: SmallVec<[(PeerId, KademliaHandlerIn<QueryId>); 8]>,

    /// Identifier for the next query that we start.
    next_query_id: QueryId,

    /// Requests received by a remote that we should fulfill as soon as possible.
    remote_requests: SmallVec<[(PeerId, KademliaRequestId, QueryTarget); 4]>,

    /// List of multihashes that we're providing.
    ///
    /// Note that we use a `PeerId` so that we know that it uses SHA-256. The question as to how to
    /// handle more hashes should eventually be resolved.
    providing_keys: SmallVec<[PeerId; 8]>,

    /// Interval to send `ADD_PROVIDER` messages to everyone.
    refresh_add_providers: stream::Fuse<Interval>,

    /// `α` in the Kademlia reference papers. Designates the maximum number of queries that we
    /// perform in parallel.
    parallelism: usize,

    /// `k` in the Kademlia reference papers. Number of results in a find node query.
    num_results: usize,

    /// Timeout for each individual RPC query.
    rpc_timeout: Duration,

    /// Events to return when polling.
    queued_events: SmallVec<[NetworkBehaviourAction<KademliaHandlerIn<QueryId>, KademliaOut>; 32]>,

    /// List of addresses to add to the topology as soon as we are in `poll()`.
    add_to_topology: SmallVec<[(PeerId, Multiaddr, KadConnectionType); 32]>,

    /// List of providers to add to the topology as soon as we are in `poll()`.
    add_provider: SmallVec<[(Multihash, PeerId); 32]>,

    /// Marker to pin the generics.
    marker: PhantomData<TSubstream>,
}
/// Opaque type. Each query that we start gets a unique number.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct QueryId(usize);

/// Reason why we have this query in the list of queries.
#[derive(Debug, Clone, PartialEq, Eq)]
enum QueryPurpose {
    /// The query was created for the Kademlia initialization process.
    Initialization,
    /// The user requested this query to be performed. It should be reported when finished.
    UserRequest,
    /// We should add an `ADD_PROVIDER` message to the peers of the outcome.
    AddProvider(Multihash),
}

impl<TSubstream> Kademlia<TSubstream> {
    /// Creates a `Kademlia`.
    #[inline]
    pub fn new(local_peer_id: PeerId) -> Self {
        Self::new_inner(local_peer_id, true)
    }

    /// Creates a `Kademlia`.
    ///
    /// Contrary to `new`, doesn't perform the initialization queries that store our local ID into
    /// the DHT.
    #[inline]
    pub fn without_init(local_peer_id: PeerId) -> Self {
        Self::new_inner(local_peer_id, false)
    }

    /// Inner implementation of the constructors.
    fn new_inner(local_peer_id: PeerId, initialize: bool) -> Self {
        let parallelism = 3;

        let mut behaviour = Kademlia {
            local_peer_id: local_peer_id.clone(),
            queued_events: SmallVec::new(),
            queries_to_starts: SmallVec::new(),
            active_queries: Default::default(),
            connected_peers: Default::default(),
            pending_rpcs: SmallVec::with_capacity(parallelism),
            next_query_id: QueryId(0),
            remote_requests: SmallVec::new(),
            providing_keys: SmallVec::new(),
            refresh_add_providers: Interval::new_interval(Duration::from_secs(60)).fuse(), // TODO: constant
            parallelism,
            num_results: 20,
            rpc_timeout: Duration::from_secs(8),
            add_to_topology: SmallVec::new(),
            add_provider: SmallVec::new(),
            marker: PhantomData,
        };

        if initialize {
            // As part of the initialization process, we start one `FIND_NODE` for each bit of the
            // possible range of peer IDs.
            for n in 0..256 {
                let peer_id = match gen_random_id(&local_peer_id, n) {
                    Ok(p) => p,
                    Err(()) => continue,
                };

                behaviour.start_query(QueryTarget::FindPeer(peer_id), QueryPurpose::Initialization);
            }
        }

        behaviour
    }

    /// Builds a `KadPeer` structure corresponding to the local node.
    fn build_local_kad_peer(&self) -> KadPeer {
        KadPeer {
            node_id: self.local_peer_id.clone(),
            multiaddrs: Vec::new(), // FIXME: return the addresses we're listening on
            connection_ty: KadConnectionType::Connected,
        }
    }

    /// Builds the answer to a request.
    fn build_result<TUserData, TTopology>(&self, query: QueryTarget, request_id: KademliaRequestId, topology: &mut TTopology)
        -> KademliaHandlerIn<TUserData>
    where TTopology: KademliaTopology
    {
        match query {
            QueryTarget::FindPeer(key) => {
                let closer_peers = topology
                    .closest_peers(key.as_ref(), self.num_results)
                    .map(|peer_id| build_kad_peer(peer_id, topology, &self.connected_peers))
                    .collect();

                KademliaHandlerIn::FindNodeRes {
                    closer_peers,
                    request_id,
                }
            },
            QueryTarget::GetProviders(key) => {
                let closer_peers = topology
                    .closest_peers(&key, self.num_results)
                    .map(|peer_id| build_kad_peer(peer_id, topology, &self.connected_peers))
                    .collect();

                let local_node_is_providing = self.providing_keys.iter().any(|k| k.as_ref() == &key);

                let provider_peers = topology
                    .get_providers(&key)
                    .map(|peer_id| build_kad_peer(peer_id, topology, &self.connected_peers))
                    .chain(if local_node_is_providing {
                        Some(self.build_local_kad_peer())
                    } else {
                        None
                    }.into_iter())
                    .collect();

                KademliaHandlerIn::GetProvidersRes {
                    closer_peers,
                    provider_peers,
                    request_id,
                }
            },
        }
    }
}
impl<TSubstream> Kademlia<TSubstream> {
    /// Starts an iterative `FIND_NODE` request.
    ///
    /// This will eventually produce an event containing the nodes of the DHT closest to the
    /// requested `PeerId`.
    #[inline]
    pub fn find_node(&mut self, peer_id: PeerId) {
        self.start_query(QueryTarget::FindPeer(peer_id), QueryPurpose::UserRequest);
    }

    /// Starts an iterative `GET_PROVIDERS` request.
    #[inline]
    pub fn get_providers(&mut self, key: Multihash) {
        self.start_query(QueryTarget::GetProviders(key), QueryPurpose::UserRequest);
    }

    /// Register the local node as the provider for the given key.
    ///
    /// This will periodically send `ADD_PROVIDER` messages to the nodes closest to the key. When
    /// someone performs a `GET_PROVIDERS` iterative request on the DHT, our local node will be
    /// returned as part of the results.
    ///
    /// The actual meaning of *providing* the value of a key is not defined, and is specific to
    /// the value whose key is the hash.
    pub fn add_providing(&mut self, key: PeerId) {
        if !self.providing_keys.iter().any(|k| k == &key) {
            self.providing_keys.push(key);
        }

        // Trigger the next refresh now.
        self.refresh_add_providers = Interval::new(Instant::now(), Duration::from_secs(60)).fuse();
    }

    /// Cancels a registration done with `add_providing`.
    ///
    /// There doesn't exist any "remove provider" message to broadcast on the network, therefore we
    /// will still be registered as a provider in the DHT for as long as the timeout doesn't expire.
    pub fn remove_providing(&mut self, key: &Multihash) {
        if let Some(position) = self.providing_keys.iter().position(|k| k.as_ref() == key) {
            self.providing_keys.remove(position);
        }
    }

    /// Internal function that starts a query.
    fn start_query(&mut self, target: QueryTarget, purpose: QueryPurpose) {
        let query_id = self.next_query_id.clone();
        self.next_query_id.0 += 1;
        self.queries_to_starts.push((query_id, target, purpose));
    }
}
impl<TSubstream, TTopology> NetworkBehaviour<TTopology> for Kademlia<TSubstream>
where
    TSubstream: AsyncRead + AsyncWrite,
    TTopology: KademliaTopology,
{
    type ProtocolsHandler = KademliaHandler<TSubstream, QueryId>;
    type OutEvent = KademliaOut;

    fn new_handler(&mut self) -> Self::ProtocolsHandler {
        KademliaHandler::dial_and_listen()
    }

    fn inject_connected(&mut self, id: PeerId, _: ConnectedPoint) {
        if let Some(pos) = self.pending_rpcs.iter().position(|(p, _)| p == &id) {
            let (_, rpc) = self.pending_rpcs.remove(pos);
            self.queued_events.push(NetworkBehaviourAction::SendEvent {
                peer_id: id.clone(),
                event: rpc,
            });
        }

        self.connected_peers.insert(id);
    }

    fn inject_disconnected(&mut self, id: &PeerId, _: ConnectedPoint) {
        let was_in = self.connected_peers.remove(id);
        debug_assert!(was_in);

        for (query, _, _) in self.active_queries.values_mut() {
            query.inject_rpc_error(id);
        }
    }

    fn inject_node_event(&mut self, source: PeerId, event: KademliaHandlerEvent<QueryId>) {
        match event {
            KademliaHandlerEvent::FindNodeReq { key, request_id } => {
                self.remote_requests.push((source, request_id, QueryTarget::FindPeer(key)));
                return;
            }
            KademliaHandlerEvent::FindNodeRes {
                closer_peers,
                user_data,
            } => {
                // It is possible that we obtain a response for a query that has finished, which is
                // why we may not find an entry in `self.active_queries`.
                for peer in closer_peers.iter() {
                    for addr in peer.multiaddrs.iter() {
                        self.add_to_topology
                            .push((peer.node_id.clone(), addr.clone(), peer.connection_ty));
                    }
                }
                if let Some((query, _, _)) = self.active_queries.get_mut(&user_data) {
                    query.inject_rpc_result(&source, closer_peers.into_iter().map(|kp| kp.node_id))
                }
            }
            KademliaHandlerEvent::GetProvidersReq { key, request_id } => {
                self.remote_requests.push((source, request_id, QueryTarget::GetProviders(key)));
                return;
            }
            KademliaHandlerEvent::GetProvidersRes {
                closer_peers,
                provider_peers,
                user_data,
            } => {
                for peer in closer_peers.iter().chain(provider_peers.iter()) {
                    for addr in peer.multiaddrs.iter() {
                        self.add_to_topology
                            .push((peer.node_id.clone(), addr.clone(), peer.connection_ty));
                    }
                }
                // It is possible that we obtain a response for a query that has finished, which is
                // why we may not find an entry in `self.active_queries`.
                if let Some((query, _, providers)) = self.active_queries.get_mut(&user_data) {
                    for peer in provider_peers {
                        providers.push(peer.node_id);
                    }
                    query.inject_rpc_result(&source, closer_peers.into_iter().map(|kp| kp.node_id))
                }
            }
            KademliaHandlerEvent::QueryError { user_data, .. } => {
                // It is possible that we obtain a response for a query that has finished, which is
                // why we may not find an entry in `self.active_queries`.
                if let Some((query, _, _)) = self.active_queries.get_mut(&user_data) {
                    query.inject_rpc_error(&source)
                }
            }
            KademliaHandlerEvent::AddProvider { key, provider_peer } => {
                for addr in provider_peer.multiaddrs.iter() {
                    self.add_to_topology
                        .push((provider_peer.node_id.clone(), addr.clone(), provider_peer.connection_ty));
                }
                self.add_provider.push((key, provider_peer.node_id));
                return;
            }
        };
    }

    fn poll(
        &mut self,
        topology: &mut TTopology,
    ) -> Async<
        NetworkBehaviourAction<
            <Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
            Self::OutEvent,
        >,
    > {
        // Flush the changes to the topology that we want to make.
        for (peer_id, addr, connection_ty) in self.add_to_topology.drain() {
            topology.add_kad_discovered_address(peer_id, addr, connection_ty);
        }
        self.add_to_topology.shrink_to_fit();
        for (key, provider) in self.add_provider.drain() {
            topology.add_provider(key, provider);
        }
        self.add_provider.shrink_to_fit();

        // Handle `refresh_add_providers`.
        match self.refresh_add_providers.poll() {
            Ok(Async::NotReady) => {},
            Ok(Async::Ready(Some(_))) => {
                for provided in self.providing_keys.clone().into_iter() {
                    let purpose = QueryPurpose::AddProvider(provided.as_ref().clone());
                    self.start_query(QueryTarget::FindPeer(provided), purpose);
                }
            },
            // Ignore errors.
            Ok(Async::Ready(None)) | Err(_) => {},
        }

        // Start queries that are waiting to start.
        for (query_id, query_target, query_purpose) in self.queries_to_starts.drain() {
            let known_closest_peers = topology
                .closest_peers(query_target.as_hash(), self.num_results);
            self.active_queries.insert(
                query_id,
                (
                    QueryState::new(QueryConfig {
                        target: query_target,
                        parallelism: self.parallelism,
                        num_results: self.num_results,
                        rpc_timeout: self.rpc_timeout,
                        known_closest_peers,
                    }),
                    query_purpose,
                    Vec::new() // TODO: insert ourselves if we provide the data?
                )
            );
        }
        self.queries_to_starts.shrink_to_fit();

        // Handle remote queries.
        if !self.remote_requests.is_empty() {
            let (peer_id, request_id, query) = self.remote_requests.remove(0);
            let result = self.build_result(query, request_id, topology);
            return Async::Ready(NetworkBehaviourAction::SendEvent {
                peer_id,
                event: result,
            });
        }

        loop {
            // Handle events queued by other parts of this struct
            if !self.queued_events.is_empty() {
                return Async::Ready(self.queued_events.remove(0));
            }
            self.queued_events.shrink_to_fit();

            // If iterating finds a query that is finished, stores it here and stops looping.
            let mut finished_query = None;

            'queries_iter: for (&query_id, (query, _, _)) in self.active_queries.iter_mut() {
                loop {
                    match query.poll() {
                        Async::Ready(QueryStatePollOut::Finished) => {
                            finished_query = Some(query_id);
                            break 'queries_iter;
                        }
                        Async::Ready(QueryStatePollOut::SendRpc {
                            peer_id,
                            query_target,
                        }) => {
                            let rpc = query_target.to_rpc_request(query_id);
                            if self.connected_peers.contains(&peer_id) {
                                return Async::Ready(NetworkBehaviourAction::SendEvent {
                                    peer_id: peer_id.clone(),
                                    event: rpc,
                                });
                            } else {
                                self.pending_rpcs.push((peer_id.clone(), rpc));
                                return Async::Ready(NetworkBehaviourAction::DialPeer {
                                    peer_id: peer_id.clone(),
                                });
                            }
                        }
                        Async::Ready(QueryStatePollOut::CancelRpc { peer_id }) => {
                            // We don't cancel if the RPC has already been sent out.
                            self.pending_rpcs.retain(|(id, _)| id != peer_id);
                        }
                        Async::NotReady => break,
                    }
                }
            }

            if let Some(finished_query) = finished_query {
                let (query, purpose, provider_peers) = self
                    .active_queries
                    .remove(&finished_query)
                    .expect("finished_query was gathered when iterating active_queries ; qed");
                match purpose {
                    QueryPurpose::Initialization => {},
                    QueryPurpose::UserRequest => {
                        let event = match query.target().clone() {
                            QueryTarget::FindPeer(key) => {
                                debug_assert!(provider_peers.is_empty());
                                KademliaOut::FindNodeResult {
                                    key,
                                    closer_peers: query.into_closest_peers().collect(),
                                }
                            },
                            QueryTarget::GetProviders(key) => {
                                KademliaOut::GetProvidersResult {
                                    key,
                                    closer_peers: query.into_closest_peers().collect(),
                                    provider_peers,
                                }
                            },
                        };

                        break Async::Ready(NetworkBehaviourAction::GenerateEvent(event));
                    },
                    QueryPurpose::AddProvider(key) => {
                        for closest in query.into_closest_peers() {
                            let event = NetworkBehaviourAction::SendEvent {
                                peer_id: closest,
                                event: KademliaHandlerIn::AddProvider {
                                    key: key.clone(),
                                    provider_peer: self.build_local_kad_peer(),
                                },
                            };

                            self.queued_events.push(event);
                        }
                    },
                }
            } else {
                break Async::NotReady;
            }
        }
    }
}
/// Output event of the `Kademlia` behaviour.
#[derive(Debug, Clone)]
pub enum KademliaOut {
    /// Result of a `FIND_NODE` iterative query.
    FindNodeResult {
        /// The key that we looked for in the query.
        key: PeerId,
        /// List of peers ordered from closest to furthest away.
        closer_peers: Vec<PeerId>,
    },

    /// Result of a `GET_PROVIDERS` iterative query.
    GetProvidersResult {
        /// The key that we looked for in the query.
        key: Multihash,
        /// The peers that are providing the requested key.
        provider_peers: Vec<PeerId>,
        /// List of peers ordered from closest to furthest away.
        closer_peers: Vec<PeerId>,
    },
}

// Generates a random `PeerId` that belongs to the given bucket.
//
// Returns an error if `bucket_num` is out of range.
fn gen_random_id(my_id: &PeerId, bucket_num: usize) -> Result<PeerId, ()> {
    let my_id_len = my_id.as_bytes().len();

    // TODO: this 2 is magic here; it is the length of the hash of the multihash
    let bits_diff = bucket_num + 1;
    if bits_diff > 8 * (my_id_len - 2) {
        return Err(());
    }

    let mut random_id = [0; 64];
    for byte in 0..my_id_len {
        match byte.cmp(&(my_id_len - bits_diff / 8 - 1)) {
            Ordering::Less => {
                random_id[byte] = my_id.as_bytes()[byte];
            }
            Ordering::Equal => {
                let mask: u8 = (1 << (bits_diff % 8)) - 1;
                random_id[byte] = (my_id.as_bytes()[byte] & !mask) | (rand::random::<u8>() & mask);
            }
            Ordering::Greater => {
                random_id[byte] = rand::random();
            }
        }
    }

    let peer_id = PeerId::from_bytes(random_id[..my_id_len].to_owned())
        .expect("randomly-generated peer ID should always be valid");
    Ok(peer_id)
}

/// Builds a `KadPeer` struct corresponding to the given `PeerId`.
///
/// > **Note**: This is just a convenience function that doesn't do anything note-worthy.
fn build_kad_peer<TTopology>(peer_id: PeerId, topology: &mut TTopology, connected_peers: &FnvHashSet<PeerId>) -> KadPeer
where TTopology: Topology
{
    let multiaddrs = topology.addresses_of_peer(&peer_id);

    // TODO: implement the other possibilities correctly
    let connection_ty = if connected_peers.contains(&peer_id) {
        KadConnectionType::Connected
    } else {
        KadConnectionType::NotConnected
    };

    KadPeer {
        node_id: peer_id,
        multiaddrs,
        connection_ty,
    }
}
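To make the bit arithmetic in `gen_random_id` concrete, here is a worked instance for `bucket_num = 10` on a 34-byte peer ID (assuming the usual 2-byte multihash prefix plus a 32-byte SHA-256 digest). This is an illustrative check, not part of the commit:

    fn main() {
        let my_id_len = 34usize;
        let bucket_num = 10usize;
        let bits_diff = bucket_num + 1;               // 11 trailing bits may be randomized
        assert!(bits_diff <= 8 * (my_id_len - 2));    // passes the range check above
        let boundary = my_id_len - bits_diff / 8 - 1; // = 32: the partially-masked byte
        let mask: u8 = (1 << (bits_diff % 8)) - 1;    // = 0b0000_0111
        assert_eq!((boundary, mask), (32, 0b0000_0111));
        // Bytes 0..=31 are copied verbatim from `my_id`, the low three bits of byte 32 are
        // randomized, and byte 33 is fully random: 3 + 8 = 11 = bits_diff bits in total.
    }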
protocols/kad/src/handler.rs (new file, 747 lines)
@@ -0,0 +1,747 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use futures::prelude::*;
use libp2p_core::protocols_handler::{ProtocolsHandler, ProtocolsHandlerEvent};
use libp2p_core::{upgrade, either::EitherOutput, InboundUpgrade, OutboundUpgrade, PeerId};
use multihash::Multihash;
use protocol::{
    KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg,
    KademliaProtocolConfig,
};
use std::io;
use tokio_io::{AsyncRead, AsyncWrite};

/// Protocol handler that handles Kademlia communications with the remote.
///
/// The handler will automatically open a Kademlia substream with the remote for each request we
/// make.
///
/// It also handles requests made by the remote.
pub struct KademliaHandler<TSubstream, TUserData>
where
    TSubstream: AsyncRead + AsyncWrite,
{
    /// Configuration for the Kademlia protocol.
    config: KademliaProtocolConfig,

    /// If true, we are trying to shut down the existing Kademlia substream and should refuse any
    /// incoming connection.
    shutting_down: bool,

    /// If false, we always refuse incoming Kademlia substreams.
    allow_listening: bool,

    /// Next unique ID of a connection.
    next_connec_unique_id: UniqueConnecId,

    /// List of active substreams with the state they are in.
    substreams: Vec<SubstreamState<TSubstream, TUserData>>,
}

/// State of an active substream, opened either by us or by the remote.
enum SubstreamState<TSubstream, TUserData>
where
    TSubstream: AsyncRead + AsyncWrite,
{
    /// We haven't started opening the outgoing substream yet.
    /// Contains the request we want to send, and the user data if we expect an answer.
    OutPendingOpen(KadRequestMsg, Option<TUserData>),
    /// We are waiting for the outgoing substream to be upgraded.
    /// Contains the request we want to send, and the user data if we expect an answer.
    OutPendingUpgrade(KadRequestMsg, Option<TUserData>),
    /// Waiting to send a message to the remote.
    OutPendingSend(
        KadOutStreamSink<TSubstream>,
        KadRequestMsg,
        Option<TUserData>,
    ),
    /// Waiting to send a message to the remote.
    /// Waiting to flush the substream so that the data arrives to the remote.
    OutPendingFlush(KadOutStreamSink<TSubstream>, Option<TUserData>),
    /// Waiting for an answer back from the remote.
    // TODO: add timeout
    OutWaitingAnswer(KadOutStreamSink<TSubstream>, TUserData),
    /// An error happened on the substream and we should report the error to the user.
    OutReportError(io::Error, TUserData),
    /// The substream is being closed.
    OutClosing(KadOutStreamSink<TSubstream>),
    /// Waiting for a request from the remote.
    InWaitingMessage(UniqueConnecId, KadInStreamSink<TSubstream>),
    /// Waiting for the user to send a `KademliaHandlerIn` event containing the response.
    InWaitingUser(UniqueConnecId, KadInStreamSink<TSubstream>),
    /// Waiting to send an answer back to the remote.
    InPendingSend(UniqueConnecId, KadInStreamSink<TSubstream>, KadResponseMsg),
    /// Waiting to flush an answer back to the remote.
    InPendingFlush(UniqueConnecId, KadInStreamSink<TSubstream>),
    /// The substream is being closed.
    InClosing(KadInStreamSink<TSubstream>),
}
impl<TSubstream, TUserData> SubstreamState<TSubstream, TUserData>
where
    TSubstream: AsyncRead + AsyncWrite,
{
    /// Consumes this state and tries to close the substream.
    ///
    /// If the substream is not ready to be closed, returns it back.
    fn try_close(self) -> AsyncSink<Self> {
        match self {
            SubstreamState::OutPendingOpen(_, _)
            | SubstreamState::OutPendingUpgrade(_, _)
            | SubstreamState::OutReportError(_, _) => AsyncSink::Ready,
            SubstreamState::OutPendingSend(mut stream, _, _)
            | SubstreamState::OutPendingFlush(mut stream, _)
            | SubstreamState::OutWaitingAnswer(mut stream, _)
            | SubstreamState::OutClosing(mut stream) => match stream.close() {
                Ok(Async::Ready(())) | Err(_) => AsyncSink::Ready,
                Ok(Async::NotReady) => AsyncSink::NotReady(SubstreamState::OutClosing(stream)),
            },
            SubstreamState::InWaitingMessage(_, mut stream)
            | SubstreamState::InWaitingUser(_, mut stream)
            | SubstreamState::InPendingSend(_, mut stream, _)
            | SubstreamState::InPendingFlush(_, mut stream)
            | SubstreamState::InClosing(mut stream) => match stream.close() {
                Ok(Async::Ready(())) | Err(_) => AsyncSink::Ready,
                Ok(Async::NotReady) => AsyncSink::NotReady(SubstreamState::InClosing(stream)),
            },
        }
    }
}

/// Event produced by the Kademlia handler.
#[derive(Debug)]
pub enum KademliaHandlerEvent<TUserData> {
    /// Request for the list of nodes whose IDs are the closest to `key`. The number of nodes
    /// returned is not specified, but should be around 20.
    FindNodeReq {
        /// Identifier of the node.
        key: PeerId,
        /// Identifier of the request. Needs to be passed back when answering.
        request_id: KademliaRequestId,
    },

    /// Response to an `KademliaHandlerIn::FindNodeReq`.
    FindNodeRes {
        /// Results of the request.
        closer_peers: Vec<KadPeer>,
        /// The user data passed to the `FindNodeReq`.
        user_data: TUserData,
    },

    /// Same as `FindNodeReq`, but should also return the entries of the local providers list for
    /// this key.
    GetProvidersReq {
        /// Identifier being searched.
        key: Multihash,
        /// Identifier of the request. Needs to be passed back when answering.
        request_id: KademliaRequestId,
    },

    /// Response to an `KademliaHandlerIn::GetProvidersReq`.
    GetProvidersRes {
        /// Nodes closest to the key.
        closer_peers: Vec<KadPeer>,
        /// Known providers for this key.
        provider_peers: Vec<KadPeer>,
        /// The user data passed to the `GetProvidersReq`.
        user_data: TUserData,
    },

    /// An error happened when performing a query.
    QueryError {
        /// The error that happened.
        error: io::Error,
        /// The user data passed to the query.
        user_data: TUserData,
    },

    /// The remote indicates that this list of providers is known for this key.
    AddProvider {
        /// Key for which we should add providers.
        key: Multihash,
        /// Known provider for this key.
        provider_peer: KadPeer,
    },
}
/// Event to send to the handler.
pub enum KademliaHandlerIn<TUserData> {
    /// Request for the list of nodes whose IDs are the closest to `key`. The number of nodes
    /// returned is not specified, but should be around 20.
    FindNodeReq {
        /// Identifier of the node.
        key: PeerId,
        /// Custom user data. Passed back in the out event when the results arrive.
        user_data: TUserData,
    },

    /// Response to a `FindNodeReq`.
    FindNodeRes {
        /// Results of the request.
        closer_peers: Vec<KadPeer>,
        /// Identifier of the request that was made by the remote.
        ///
        /// It is a logic error to use an id of the handler of a different node.
        request_id: KademliaRequestId,
    },

    /// Same as `FindNodeReq`, but should also return the entries of the local providers list for
    /// this key.
    GetProvidersReq {
        /// Identifier being searched.
        key: Multihash,
        /// Custom user data. Passed back in the out event when the results arrive.
        user_data: TUserData,
    },

    /// Response to a `GetProvidersReq`.
    GetProvidersRes {
        /// Nodes closest to the key.
        closer_peers: Vec<KadPeer>,
        /// Known providers for this key.
        provider_peers: Vec<KadPeer>,
        /// Identifier of the request that was made by the remote.
        ///
        /// It is a logic error to use an id of the handler of a different node.
        request_id: KademliaRequestId,
    },

    /// Indicates that this provider is known for this key.
    ///
    /// The API of the handler doesn't expose any event that allows you to know whether this
    /// succeeded.
    AddProvider {
        /// Key for which we should add providers.
        key: Multihash,
        /// Known provider for this key.
        provider_peer: KadPeer,
    },
}

/// Unique identifier for a request. Must be passed back in order to answer a request from
/// the remote.
///
/// We don't implement `Clone` on purpose, in order to prevent users from answering the same
/// request twice.
#[derive(Debug, PartialEq, Eq)]
pub struct KademliaRequestId {
    /// Unique identifier for an incoming connection.
    connec_unique_id: UniqueConnecId,
}

/// Unique identifier for a connection.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct UniqueConnecId(u64);
impl<TSubstream, TUserData> KademliaHandler<TSubstream, TUserData>
where
    TSubstream: AsyncRead + AsyncWrite,
{
    /// Create a `KademliaHandler` that only allows sending messages to the remote but denying
    /// incoming connections.
    #[inline]
    pub fn dial_only() -> Self {
        KademliaHandler::with_allow_listening(false)
    }

    /// Create a `KademliaHandler` that only allows sending messages but also receive incoming
    /// requests.
    ///
    /// The `Default` trait implementation wraps around this function.
    #[inline]
    pub fn dial_and_listen() -> Self {
        KademliaHandler::with_allow_listening(true)
    }

    fn with_allow_listening(allow_listening: bool) -> Self {
        KademliaHandler {
            config: Default::default(),
            shutting_down: false,
            allow_listening,
            next_connec_unique_id: UniqueConnecId(0),
            substreams: Vec::new(),
        }
    }
}

impl<TSubstream, TUserData> Default for KademliaHandler<TSubstream, TUserData>
where
    TSubstream: AsyncRead + AsyncWrite,
{
    #[inline]
    fn default() -> Self {
        KademliaHandler::dial_and_listen()
    }
}
impl<TSubstream, TUserData> ProtocolsHandler for KademliaHandler<TSubstream, TUserData>
where
    TSubstream: AsyncRead + AsyncWrite,
    TUserData: Clone,
{
    type InEvent = KademliaHandlerIn<TUserData>;
    type OutEvent = KademliaHandlerEvent<TUserData>;
    type Substream = TSubstream;
    type InboundProtocol = upgrade::EitherUpgrade<KademliaProtocolConfig, upgrade::DeniedUpgrade>;
    type OutboundProtocol = KademliaProtocolConfig;
    // Message of the request to send to the remote, and user data if we expect an answer.
    type OutboundOpenInfo = (KadRequestMsg, Option<TUserData>);

    #[inline]
    fn listen_protocol(&self) -> Self::InboundProtocol {
        if self.allow_listening {
            upgrade::EitherUpgrade::A(self.config)
        } else {
            upgrade::EitherUpgrade::B(upgrade::DeniedUpgrade)
        }
    }

    fn inject_fully_negotiated_outbound(
        &mut self,
        protocol: <Self::OutboundProtocol as OutboundUpgrade<TSubstream>>::Output,
        (msg, user_data): Self::OutboundOpenInfo,
    ) {
        if self.shutting_down {
            return;
        }

        self.substreams
            .push(SubstreamState::OutPendingSend(protocol, msg, user_data));
    }

    fn inject_fully_negotiated_inbound(
        &mut self,
        protocol: <Self::InboundProtocol as InboundUpgrade<TSubstream>>::Output,
    ) {
        // If `self.allow_listening` is false, then we produced a `DeniedUpgrade` and `protocol`
        // is a `Void`.
        let protocol = match protocol {
            EitherOutput::First(p) => p,
            EitherOutput::Second(p) => void::unreachable(p),
        };

        if self.shutting_down {
            return;
        }

        debug_assert!(self.allow_listening);
        let connec_unique_id = self.next_connec_unique_id;
        self.next_connec_unique_id.0 += 1;
        self.substreams
            .push(SubstreamState::InWaitingMessage(connec_unique_id, protocol));
    }

    #[inline]
    fn inject_event(&mut self, message: KademliaHandlerIn<TUserData>) {
        match message {
            KademliaHandlerIn::FindNodeReq { key, user_data } => {
                let msg = KadRequestMsg::FindNode { key: key.clone() };
                self.substreams
                    .push(SubstreamState::OutPendingOpen(msg, Some(user_data.clone())));
            }
            KademliaHandlerIn::FindNodeRes {
                closer_peers,
                request_id,
            } => {
                let pos = self.substreams.iter().position(|state| match state {
                    SubstreamState::InWaitingUser(ref conn_id, _)
                        if conn_id == &request_id.connec_unique_id =>
                    {
                        true
                    }
                    _ => false,
                });

                if let Some(pos) = pos {
                    let (conn_id, substream) = match self.substreams.remove(pos) {
                        SubstreamState::InWaitingUser(conn_id, substream) => (conn_id, substream),
                        _ => unreachable!(),
                    };

                    let msg = KadResponseMsg::FindNode {
                        closer_peers: closer_peers.clone(),
                    };
                    self.substreams
                        .push(SubstreamState::InPendingSend(conn_id, substream, msg));
                }
            }
            KademliaHandlerIn::GetProvidersReq { key, user_data } => {
                let msg = KadRequestMsg::GetProviders { key: key.clone() };
                self.substreams
                    .push(SubstreamState::OutPendingOpen(msg, Some(user_data.clone())));
            }
            KademliaHandlerIn::GetProvidersRes {
                closer_peers,
                provider_peers,
                request_id,
            } => {
                let pos = self.substreams.iter().position(|state| match state {
                    SubstreamState::InWaitingUser(ref conn_id, _)
                        if conn_id == &request_id.connec_unique_id =>
                    {
                        true
                    }
                    _ => false,
                });

                if let Some(pos) = pos {
                    let (conn_id, substream) = match self.substreams.remove(pos) {
                        SubstreamState::InWaitingUser(conn_id, substream) => (conn_id, substream),
                        _ => unreachable!(),
                    };

                    let msg = KadResponseMsg::GetProviders {
                        closer_peers: closer_peers.clone(),
                        provider_peers: provider_peers.clone(),
                    };
                    self.substreams
                        .push(SubstreamState::InPendingSend(conn_id, substream, msg));
                }
            }
            KademliaHandlerIn::AddProvider { key, provider_peer } => {
                let msg = KadRequestMsg::AddProvider {
                    key: key.clone(),
                    provider_peer: provider_peer.clone(),
                };
                self.substreams
                    .push(SubstreamState::OutPendingOpen(msg, None));
            }
        }
    }
#[inline]
|
||||
fn inject_inbound_closed(&mut self) {}
|
||||
|
||||
#[inline]
|
||||
fn inject_dial_upgrade_error(
|
||||
&mut self,
|
||||
(_, user_data): Self::OutboundOpenInfo,
|
||||
error: io::Error,
|
||||
) {
|
||||
// TODO: cache the fact that the remote doesn't support kademlia at all, so that we don't
|
||||
// continue trying
|
||||
if let Some(user_data) = user_data {
|
||||
self.substreams
|
||||
.push(SubstreamState::OutReportError(error, user_data));
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn shutdown(&mut self) {
|
||||
self.shutting_down = true;
|
||||
}
|
||||
|
||||
fn poll(
|
||||
&mut self,
|
||||
) -> Poll<
|
||||
Option<
|
||||
ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent>,
|
||||
>,
|
||||
io::Error,
|
||||
> {
|
||||
// Special case if shutting down.
|
||||
if self.shutting_down {
|
||||
for n in (0..self.substreams.len()).rev() {
|
||||
match self.substreams.swap_remove(n).try_close() {
|
||||
AsyncSink::Ready => (),
|
||||
AsyncSink::NotReady(stream) => self.substreams.push(stream),
|
||||
}
|
||||
}
|
||||
|
||||
if self.substreams.is_empty() {
|
||||
return Ok(Async::Ready(None));
|
||||
} else {
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
}
|
||||
|
||||
// We remove each element from `substreams` one by one and add them back.
|
||||
for n in (0..self.substreams.len()).rev() {
|
||||
let mut substream = self.substreams.swap_remove(n);
|
||||
|
||||
loop {
|
||||
match advance_substream(substream, self.config) {
|
||||
(Some(new_state), Some(event), _) => {
|
||||
self.substreams.push(new_state);
|
||||
return Ok(Async::Ready(Some(event)));
|
||||
}
|
||||
(None, Some(event), _) => {
|
||||
return Ok(Async::Ready(Some(event)));
|
||||
}
|
||||
(Some(new_state), None, false) => {
|
||||
self.substreams.push(new_state);
|
||||
break;
|
||||
}
|
||||
(Some(new_state), None, true) => {
|
||||
substream = new_state;
|
||||
continue;
|
||||
}
|
||||
(None, None, _) => {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
|
||||
/// Advances one substream.
///
/// Returns the new state for that substream, an event to generate, and whether the substream
/// should be polled again.
fn advance_substream<TSubstream, TUserData>(
state: SubstreamState<TSubstream, TUserData>,
upgrade: KademliaProtocolConfig,
) -> (
Option<SubstreamState<TSubstream, TUserData>>,
Option<
ProtocolsHandlerEvent<
KademliaProtocolConfig,
(KadRequestMsg, Option<TUserData>),
KademliaHandlerEvent<TUserData>,
>,
>,
bool,
)
where
TSubstream: AsyncRead + AsyncWrite,
{
|
||||
match state {
|
||||
SubstreamState::OutPendingOpen(msg, user_data) => {
|
||||
let ev = ProtocolsHandlerEvent::OutboundSubstreamRequest {
|
||||
upgrade,
|
||||
info: (msg, user_data),
|
||||
};
|
||||
(None, Some(ev), false)
|
||||
}
|
||||
SubstreamState::OutPendingUpgrade(msg, user_data) => (
|
||||
Some(SubstreamState::OutPendingUpgrade(msg, user_data)),
|
||||
None,
|
||||
false,
|
||||
),
|
||||
SubstreamState::OutPendingSend(mut substream, msg, user_data) => {
|
||||
match substream.start_send(msg) {
|
||||
Ok(AsyncSink::Ready) => (
|
||||
Some(SubstreamState::OutPendingFlush(substream, user_data)),
|
||||
None,
|
||||
true,
|
||||
),
|
||||
Ok(AsyncSink::NotReady(msg)) => (
|
||||
Some(SubstreamState::OutPendingSend(substream, msg, user_data)),
|
||||
None,
|
||||
false,
|
||||
),
|
||||
Err(error) => {
|
||||
let event = if let Some(user_data) = user_data {
|
||||
let ev = KademliaHandlerEvent::QueryError { error, user_data };
|
||||
Some(ProtocolsHandlerEvent::Custom(ev))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
(None, event, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
SubstreamState::OutPendingFlush(mut substream, user_data) => {
|
||||
match substream.poll_complete() {
|
||||
Ok(Async::Ready(())) => {
|
||||
if let Some(user_data) = user_data {
|
||||
(
|
||||
Some(SubstreamState::OutWaitingAnswer(substream, user_data)),
|
||||
None,
|
||||
true,
|
||||
)
|
||||
} else {
|
||||
(Some(SubstreamState::OutClosing(substream)), None, true)
|
||||
}
|
||||
}
|
||||
Ok(Async::NotReady) => (
|
||||
Some(SubstreamState::OutPendingFlush(substream, user_data)),
|
||||
None,
|
||||
false,
|
||||
),
|
||||
Err(error) => {
|
||||
let event = if let Some(user_data) = user_data {
|
||||
let ev = KademliaHandlerEvent::QueryError { error, user_data };
|
||||
Some(ProtocolsHandlerEvent::Custom(ev))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
(None, event, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
SubstreamState::OutWaitingAnswer(mut substream, user_data) => match substream.poll() {
|
||||
Ok(Async::Ready(Some(msg))) => {
|
||||
let new_state = SubstreamState::OutClosing(substream);
|
||||
let event = process_kad_response(msg, user_data);
|
||||
(
|
||||
Some(new_state),
|
||||
Some(ProtocolsHandlerEvent::Custom(event)),
|
||||
true,
|
||||
)
|
||||
}
|
||||
Ok(Async::NotReady) => (
|
||||
Some(SubstreamState::OutWaitingAnswer(substream, user_data)),
|
||||
None,
|
||||
false,
|
||||
),
|
||||
Err(error) => {
|
||||
let event = KademliaHandlerEvent::QueryError { error, user_data };
|
||||
(None, Some(ProtocolsHandlerEvent::Custom(event)), false)
|
||||
}
|
||||
Ok(Async::Ready(None)) => {
|
||||
let error = io::Error::new(io::ErrorKind::Other, "unexpected EOF");
|
||||
let event = KademliaHandlerEvent::QueryError { error, user_data };
|
||||
(None, Some(ProtocolsHandlerEvent::Custom(event)), false)
|
||||
}
|
||||
},
|
||||
SubstreamState::OutReportError(error, user_data) => {
|
||||
let event = KademliaHandlerEvent::QueryError { error, user_data };
|
||||
(None, Some(ProtocolsHandlerEvent::Custom(event)), false)
|
||||
}
|
||||
SubstreamState::OutClosing(mut stream) => match stream.close() {
|
||||
Ok(Async::Ready(())) => (None, None, false),
|
||||
Ok(Async::NotReady) => (Some(SubstreamState::OutClosing(stream)), None, false),
|
||||
Err(_) => (None, None, false),
|
||||
},
|
||||
SubstreamState::InWaitingMessage(id, mut substream) => match substream.poll() {
|
||||
Ok(Async::Ready(Some(msg))) => {
|
||||
if let Ok(ev) = process_kad_request(msg, id) {
|
||||
(
|
||||
Some(SubstreamState::InWaitingUser(id, substream)),
|
||||
Some(ProtocolsHandlerEvent::Custom(ev)),
|
||||
false,
|
||||
)
|
||||
} else {
|
||||
(Some(SubstreamState::InClosing(substream)), None, true)
|
||||
}
|
||||
}
|
||||
Ok(Async::NotReady) => (
|
||||
Some(SubstreamState::InWaitingMessage(id, substream)),
|
||||
None,
|
||||
false,
|
||||
),
|
||||
Ok(Async::Ready(None)) | Err(_) => (None, None, false),
|
||||
},
|
||||
SubstreamState::InWaitingUser(id, substream) => (
|
||||
Some(SubstreamState::InWaitingUser(id, substream)),
|
||||
None,
|
||||
false,
|
||||
),
|
||||
SubstreamState::InPendingSend(id, mut substream, msg) => match substream.start_send(msg) {
|
||||
Ok(AsyncSink::Ready) => (
|
||||
Some(SubstreamState::InPendingFlush(id, substream)),
|
||||
None,
|
||||
true,
|
||||
),
|
||||
Ok(AsyncSink::NotReady(msg)) => (
|
||||
Some(SubstreamState::InPendingSend(id, substream, msg)),
|
||||
None,
|
||||
false,
|
||||
),
|
||||
Err(_) => (None, None, false),
|
||||
},
|
||||
SubstreamState::InPendingFlush(id, mut substream) => match substream.poll_complete() {
|
||||
Ok(Async::Ready(())) => (
|
||||
Some(SubstreamState::InWaitingMessage(id, substream)),
|
||||
None,
|
||||
true,
|
||||
),
|
||||
Ok(Async::NotReady) => (
|
||||
Some(SubstreamState::InPendingFlush(id, substream)),
|
||||
None,
|
||||
false,
|
||||
),
|
||||
Err(_) => (None, None, false),
|
||||
},
|
||||
SubstreamState::InClosing(mut stream) => match stream.close() {
|
||||
Ok(Async::Ready(())) => (None, None, false),
|
||||
Ok(Async::NotReady) => (Some(SubstreamState::InClosing(stream)), None, false),
|
||||
Err(_) => (None, None, false),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Processes a Kademlia message that's expected to be a request from a remote.
fn process_kad_request<TUserData>(
event: KadRequestMsg,
connec_unique_id: UniqueConnecId,
) -> Result<KademliaHandlerEvent<TUserData>, io::Error> {
match event {
KadRequestMsg::Ping => {
// TODO: implement; in practice, the PING message is never used, so we may consider
// removing it altogether
Err(io::Error::new(
io::ErrorKind::InvalidData,
"the PING Kademlia message is not implemented",
))
}
|
||||
KadRequestMsg::FindNode { key } => Ok(KademliaHandlerEvent::FindNodeReq {
|
||||
key,
|
||||
request_id: KademliaRequestId { connec_unique_id },
|
||||
}),
|
||||
KadRequestMsg::GetProviders { key } => Ok(KademliaHandlerEvent::GetProvidersReq {
|
||||
key,
|
||||
request_id: KademliaRequestId { connec_unique_id },
|
||||
}),
|
||||
KadRequestMsg::AddProvider { key, provider_peer } => {
|
||||
Ok(KademliaHandlerEvent::AddProvider { key, provider_peer })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Processes a Kademlia message that's supposed to be a response to one of our requests.
fn process_kad_response<TUserData>(
event: KadResponseMsg,
user_data: TUserData,
) -> KademliaHandlerEvent<TUserData> {
// TODO: must check that the response corresponds to the request
match event {
|
||||
KadResponseMsg::Pong => {
|
||||
// We never send out pings.
|
||||
let err = io::Error::new(
|
||||
io::ErrorKind::InvalidData,
|
||||
"received unexpected PONG message",
|
||||
);
|
||||
KademliaHandlerEvent::QueryError {
|
||||
error: err,
|
||||
user_data,
|
||||
}
|
||||
}
|
||||
KadResponseMsg::FindNode { closer_peers } => KademliaHandlerEvent::FindNodeRes {
|
||||
closer_peers,
|
||||
user_data,
|
||||
},
|
||||
KadResponseMsg::GetProviders {
|
||||
closer_peers,
|
||||
provider_peers,
|
||||
} => KademliaHandlerEvent::GetProvidersRes {
|
||||
closer_peers,
|
||||
provider_peers,
|
||||
user_data,
|
||||
},
|
||||
}
|
||||
}
|
@ -1,466 +0,0 @@
|
||||
// Copyright 2018 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use fnv::FnvHashSet;
|
||||
use futures::{future, Future, IntoFuture, stream, Stream};
|
||||
use kad_server::KadConnecController;
|
||||
use kbucket::{KBucketsTable, KBucketsPeerId};
|
||||
use libp2p_core::PeerId;
|
||||
use log::{debug, trace};
|
||||
use protocol;
|
||||
use rand;
|
||||
use smallvec::SmallVec;
|
||||
use std::cmp::Ordering;
|
||||
use std::io::{Error as IoError, ErrorKind as IoErrorKind};
|
||||
use std::mem;
|
||||
use std::time::Duration;
|
||||
use tokio_timer::Timeout;
|
||||
|
||||
/// Prototype for a future Kademlia protocol running on a socket.
#[derive(Debug, Clone)]
pub struct KadSystemConfig<I> {
/// Degree of parallelism on the network. Often called `alpha` in technical papers.
/// No more than this number of remotes will be used at a given time for any given operation.
// TODO: ^ share this number between operations? or does each operation use `alpha` remotes?
pub parallelism: u32,
/// Id of the local peer.
pub local_peer_id: PeerId,
/// List of peers initially known.
pub known_initial_peers: I,
/// Duration after which a node in the k-buckets needs to be pinged again.
pub kbuckets_timeout: Duration,
/// When contacting a node, duration after which we consider it unresponsive.
pub request_timeout: Duration,
}
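
// A minimal construction sketch for the configuration above (not part of the original commit;
// the timeout values and the `bootstrap_peers: Vec<PeerId>` variable are illustrative assumptions):
//
//     let config = KadSystemConfig {
//         parallelism: 3,
//         local_peer_id: PeerId::random(),
//         known_initial_peers: bootstrap_peers.into_iter(),
//         kbuckets_timeout: Duration::from_secs(600),
//         request_timeout: Duration::from_secs(10),
//     };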
|
||||
|
||||
/// System that drives the whole Kademlia process.
|
||||
pub struct KadSystem {
|
||||
// The actual DHT.
|
||||
kbuckets: KBucketsTable<PeerId, ()>,
|
||||
// Same as in the config.
|
||||
parallelism: u32,
|
||||
// Same as in the config.
|
||||
request_timeout: Duration,
|
||||
}
|
||||
|
||||
/// Event that happens during a query.
#[derive(Debug, Clone)]
pub enum KadQueryEvent<TOut> {
/// Learned about new multiaddresses for the given peers.
PeersReported(Vec<protocol::KadPeer>),
/// Finished the processing of the query. Contains the result.
Finished(TOut),
}
|
||||
|
||||
impl KadSystem {
/// Starts a new Kademlia system.
///
/// Also produces a `Future` that drives a Kademlia initialization process.
/// This future should be driven to completion by the caller.
pub fn start<'a, F, Fut>(config: KadSystemConfig<impl Iterator<Item = PeerId>>, access: F)
-> (KadSystem, impl Future<Item = (), Error = IoError> + 'a)
where F: FnMut(&PeerId) -> Fut + Send + Clone + 'a,
Fut: IntoFuture<Item = KadConnecController, Error = IoError> + 'a,
Fut::Future: Send,
{
let system = KadSystem::without_init(config);
let init_future = system.perform_initialization(access);
(system, init_future)
}
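
// Usage sketch for `start` (illustrative; `dial_kad` stands in for whatever closure the caller
// uses to obtain a `KadConnecController` for a peer, and the tokio wiring is an assumption):
//
//     let (system, init_future) = KadSystem::start(config, dial_kad);
//     tokio::spawn(init_future.map_err(|_| ()));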
|
||||
|
||||
/// Same as `start`, but doesn't perform the initialization process.
|
||||
pub fn without_init(config: KadSystemConfig<impl Iterator<Item = PeerId>>) -> KadSystem {
|
||||
let kbuckets = KBucketsTable::new(config.local_peer_id.clone(), config.kbuckets_timeout);
|
||||
for peer in config.known_initial_peers {
|
||||
let _ = kbuckets.update(peer, ());
|
||||
}
|
||||
|
||||
let system = KadSystem {
|
||||
kbuckets: kbuckets,
|
||||
parallelism: config.parallelism,
|
||||
request_timeout: config.request_timeout,
|
||||
};
|
||||
|
||||
system
|
||||
}
|
||||
|
||||
/// Starts an initialization process.
|
||||
pub fn perform_initialization<'a, F, Fut>(&self, access: F) -> impl Future<Item = (), Error = IoError> + 'a
|
||||
where F: FnMut(&PeerId) -> Fut + Send + Clone + 'a,
|
||||
Fut: IntoFuture<Item = KadConnecController, Error = IoError> + 'a,
|
||||
Fut::Future: Send,
|
||||
{
|
||||
let futures: Vec<_> = (0..256) // TODO: 256 is arbitrary
|
||||
.map(|n| {
|
||||
refresh(n, access.clone(), &self.kbuckets,
|
||||
self.parallelism as usize, self.request_timeout)
|
||||
})
|
||||
.map(|stream| stream.for_each(|_| Ok(())))
|
||||
.collect();
|
||||
|
||||
future::loop_fn(futures, |futures| {
|
||||
if futures.is_empty() {
|
||||
let fut = future::ok(future::Loop::Break(()));
|
||||
return future::Either::A(fut);
|
||||
}
|
||||
|
||||
let fut = future::select_all(futures)
|
||||
.map_err(|(err, _, _)| err)
|
||||
.map(|(_, _, rest)| future::Loop::Continue(rest));
|
||||
future::Either::B(fut)
|
||||
})
|
||||
}
|
||||
|
||||
/// Updates the k-buckets with the specific peer.
|
||||
///
|
||||
/// Should be called whenever we receive a message from a peer.
|
||||
pub fn update_kbuckets(&self, peer: PeerId) {
|
||||
// TODO: ping system
|
||||
let _ = self.kbuckets.update(peer, ());
|
||||
}
|
||||
|
||||
/// Returns the local peer ID, as passed in the configuration.
|
||||
pub fn local_peer_id(&self) -> &PeerId {
|
||||
self.kbuckets.my_id()
|
||||
}
|
||||
|
||||
/// Finds the known nodes closest to `id`, ordered by distance.
pub fn known_closest_peers(&self, id: &PeerId) -> impl Iterator<Item = PeerId> {
self.kbuckets.find_closest_with_self(id)
}

/// Starts a query for an iterative `FIND_NODE` request.
pub fn find_node<'a, F, Fut>(&self, searched_key: PeerId, access: F)
-> impl Stream<Item = KadQueryEvent<Vec<PeerId>>, Error = IoError> + 'a
where F: FnMut(&PeerId) -> Fut + Send + 'a,
Fut: IntoFuture<Item = KadConnecController, Error = IoError> + 'a,
Fut::Future: Send,
{
query(access, &self.kbuckets, searched_key, self.parallelism as usize,
20, self.request_timeout) // TODO: arbitrary const
}
}
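
// Sketch of consuming the query stream returned by `find_node` (illustrative; `system`,
// `dial_kad`, `target_peer_id` and the blocking `wait()` are assumptions made for the example):
//
//     let closest = system
//         .find_node(target_peer_id, dial_kad)
//         .filter_map(|event| match event {
//             KadQueryEvent::PeersReported(_) => None, // addresses would be fed to a peer store here
//             KadQueryEvent::Finished(result) => Some(result),
//         })
//         .into_future()
//         .map(|(result, _)| result)
//         .wait();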
|
||||
|
||||
// Refreshes a specific bucket by performing an iterative `FIND_NODE` on a random ID of this
|
||||
// bucket.
|
||||
//
|
||||
// Returns a dummy no-op future if `bucket_num` is out of range.
|
||||
fn refresh<'a, F, Fut>(bucket_num: usize, access: F, kbuckets: &KBucketsTable<PeerId, ()>,
|
||||
parallelism: usize, request_timeout: Duration)
|
||||
-> impl Stream<Item = KadQueryEvent<()>, Error = IoError> + 'a
|
||||
where F: FnMut(&PeerId) -> Fut + Send + 'a,
|
||||
Fut: IntoFuture<Item = KadConnecController, Error = IoError> + 'a,
|
||||
Fut::Future: Send,
|
||||
{
|
||||
let peer_id = match gen_random_id(kbuckets.my_id(), bucket_num) {
|
||||
Ok(p) => p,
|
||||
Err(()) => {
|
||||
let stream = stream::once(Ok(KadQueryEvent::Finished(())));
|
||||
return Box::new(stream) as Box<Stream<Item = _, Error = _> + Send>;
|
||||
},
|
||||
};
|
||||
|
||||
let stream = query(access, kbuckets, peer_id, parallelism, 20, request_timeout) // TODO: 20 is arbitrary
|
||||
.map(|event| {
|
||||
match event {
|
||||
KadQueryEvent::PeersReported(peers) => KadQueryEvent::PeersReported(peers),
|
||||
KadQueryEvent::Finished(_) => KadQueryEvent::Finished(()),
|
||||
}
|
||||
});
|
||||
Box::new(stream) as Box<Stream<Item = _, Error = _> + Send>
|
||||
}
|
||||
|
||||
// Generates a random `PeerId` that belongs to the given bucket.
//
// Returns an error if `bucket_num` is out of range.
fn gen_random_id(my_id: &PeerId, bucket_num: usize) -> Result<PeerId, ()> {
let my_id_len = my_id.as_bytes().len();
|
||||
|
||||
// TODO: this 2 is magic here; it is the length of the hash of the multihash
|
||||
let bits_diff = bucket_num + 1;
|
||||
if bits_diff > 8 * (my_id_len - 2) {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
let mut random_id = [0; 64];
|
||||
for byte in 0..my_id_len {
|
||||
match byte.cmp(&(my_id_len - bits_diff / 8 - 1)) {
|
||||
Ordering::Less => {
|
||||
random_id[byte] = my_id.as_bytes()[byte];
|
||||
}
|
||||
Ordering::Equal => {
|
||||
let mask: u8 = (1 << (bits_diff % 8)) - 1;
|
||||
random_id[byte] = (my_id.as_bytes()[byte] & !mask) | (rand::random::<u8>() & mask);
|
||||
}
|
||||
Ordering::Greater => {
|
||||
random_id[byte] = rand::random();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let peer_id = PeerId::from_bytes(random_id[..my_id_len].to_owned())
|
||||
.expect("randomly-generated peer ID should always be valid");
|
||||
Ok(peer_id)
|
||||
}
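
// Worked example of the masking above (illustrative, assuming a 34-byte multihash ID as used in
// the tests): for `bucket_num = 10`, `bits_diff = 11`. Bytes `0..=31` are copied from `my_id`,
// byte `32` keeps its upper 5 bits and randomizes its lowest `11 % 8 = 3` bits (mask `0b0000_0111`),
// and byte `33` is fully random, so at most the 11 lowest bits differ from `my_id`.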
|
||||
|
||||
// Generic query-performing function.
|
||||
fn query<'a, F, Fut>(
|
||||
access: F,
|
||||
kbuckets: &KBucketsTable<PeerId, ()>,
|
||||
searched_key: PeerId,
|
||||
parallelism: usize,
|
||||
num_results: usize,
|
||||
request_timeout: Duration,
|
||||
) -> impl Stream<Item = KadQueryEvent<Vec<PeerId>>, Error = IoError> + 'a
|
||||
where F: FnMut(&PeerId) -> Fut + 'a,
|
||||
Fut: IntoFuture<Item = KadConnecController, Error = IoError> + 'a,
|
||||
Fut::Future: Send,
|
||||
{
|
||||
debug!("Start query for {:?}; num results = {}", searched_key, num_results);
|
||||
|
||||
// State of the current iterative process.
|
||||
struct State<'a, F> {
|
||||
// At which stage we are.
|
||||
stage: Stage,
|
||||
// The `access` parameter.
|
||||
access: F,
|
||||
// Final output of the iteration.
|
||||
result: Vec<PeerId>,
|
||||
// For each open connection, a future with the response of the remote.
|
||||
// Note that we don't use a `SmallVec` here because `select_all` produces a `Vec`.
|
||||
current_attempts_fut: Vec<Box<Future<Item = Vec<protocol::KadPeer>, Error = IoError> + Send + 'a>>,
|
||||
// For each open connection, the peer ID that we are connected to.
|
||||
// Must always have the same length as `current_attempts_fut`.
|
||||
current_attempts_addrs: SmallVec<[PeerId; 32]>,
|
||||
// Nodes that need to be attempted.
|
||||
pending_nodes: Vec<PeerId>,
|
||||
// Peers that we tried to contact but failed.
|
||||
failed_to_contact: FnvHashSet<PeerId>,
|
||||
}
|
||||
|
||||
// General stage of the state.
|
||||
#[derive(Copy, Clone, PartialEq, Eq)]
|
||||
enum Stage {
|
||||
// We are still in the first step of the algorithm where we try to find the closest node.
|
||||
FirstStep,
|
||||
// We are contacting the k closest nodes in order to fill the list with enough results.
|
||||
SecondStep,
|
||||
// The results are complete, and the next stream iteration will produce the outcome.
|
||||
FinishingNextIter,
|
||||
// We are finished and the stream shouldn't return anything anymore.
|
||||
Finished,
|
||||
}
|
||||
|
||||
let initial_state = State {
|
||||
stage: Stage::FirstStep,
|
||||
access: access,
|
||||
result: Vec::with_capacity(num_results),
|
||||
current_attempts_fut: Vec::new(),
|
||||
current_attempts_addrs: SmallVec::new(),
|
||||
pending_nodes: kbuckets.find_closest(&searched_key).collect(),
|
||||
failed_to_contact: Default::default(),
|
||||
};
|
||||
|
||||
// Start of the iterative process.
|
||||
let stream = stream::unfold(initial_state, move |mut state| -> Option<_> {
|
||||
match state.stage {
|
||||
Stage::FinishingNextIter => {
|
||||
let result = mem::replace(&mut state.result, Vec::new());
|
||||
debug!("Query finished with {} results", result.len());
|
||||
state.stage = Stage::Finished;
|
||||
let future = future::ok((Some(KadQueryEvent::Finished(result)), state));
|
||||
return Some(future::Either::A(future));
|
||||
},
|
||||
Stage::Finished => {
|
||||
return None;
|
||||
},
|
||||
_ => ()
|
||||
};
|
||||
|
||||
let searched_key = searched_key.clone();
|
||||
|
||||
// Find out which nodes to contact at this iteration.
|
||||
let to_contact = {
|
||||
let wanted_len = if state.stage == Stage::FirstStep {
|
||||
parallelism.saturating_sub(state.current_attempts_fut.len())
|
||||
} else {
|
||||
num_results.saturating_sub(state.current_attempts_fut.len())
|
||||
};
|
||||
let mut to_contact = SmallVec::<[_; 16]>::new();
|
||||
while to_contact.len() < wanted_len && !state.pending_nodes.is_empty() {
|
||||
// Move the first element of `pending_nodes` to `to_contact`, but ignore nodes that
// are already part of the results, that are part of a current attempt, or that we
// previously failed to contact.
|
||||
let peer = state.pending_nodes.remove(0);
|
||||
if state.result.iter().any(|p| p == &peer) {
|
||||
continue;
|
||||
}
|
||||
if state.current_attempts_addrs.iter().any(|p| p == &peer) {
|
||||
continue;
|
||||
}
|
||||
if state.failed_to_contact.iter().any(|p| p == &peer) {
|
||||
continue;
|
||||
}
|
||||
to_contact.push(peer);
|
||||
}
|
||||
to_contact
|
||||
};
|
||||
|
||||
debug!("New query round; {} queries in progress; contacting {} new peers",
|
||||
state.current_attempts_fut.len(),
|
||||
to_contact.len());
|
||||
|
||||
// For each node in `to_contact`, start an RPC query and a corresponding entry in the two
|
||||
// `state.current_attempts_*` fields.
|
||||
for peer in to_contact {
|
||||
let searched_key2 = searched_key.clone();
|
||||
let current_attempt = (state.access)(&peer)
|
||||
.into_future()
|
||||
.and_then(move |controller| {
|
||||
controller.find_node(&searched_key2)
|
||||
});
|
||||
let with_deadline = Timeout::new(current_attempt, request_timeout)
|
||||
.map_err(|err| {
|
||||
if let Some(err) = err.into_inner() {
|
||||
err
|
||||
} else {
|
||||
IoError::new(IoErrorKind::ConnectionAborted, "kademlia request timeout")
|
||||
}
|
||||
});
|
||||
state.current_attempts_addrs.push(peer.clone());
|
||||
state
|
||||
.current_attempts_fut
|
||||
.push(Box::new(with_deadline) as Box<_>);
|
||||
}
|
||||
debug_assert_eq!(
|
||||
state.current_attempts_addrs.len(),
|
||||
state.current_attempts_fut.len()
|
||||
);
|
||||
|
||||
// Extract `current_attempts_fut` so that we can pass it to `select_all`. We will push the
|
||||
// values back when inside the loop.
|
||||
let current_attempts_fut = mem::replace(&mut state.current_attempts_fut, Vec::new());
|
||||
if current_attempts_fut.is_empty() {
|
||||
// If `current_attempts_fut` is empty, then `select_all` would panic. It happens
|
||||
// when we have no additional node to query.
|
||||
debug!("Finishing query early because no additional node available");
|
||||
state.stage = Stage::FinishingNextIter;
|
||||
let future = future::ok((None, state));
|
||||
return Some(future::Either::A(future));
|
||||
}
|
||||
|
||||
// This is the future that continues or breaks the `loop_fn`.
|
||||
let future = future::select_all(current_attempts_fut.into_iter()).then(move |result| {
|
||||
let (message, trigger_idx, other_current_attempts) = match result {
|
||||
Err((err, trigger_idx, other_current_attempts)) => {
|
||||
(Err(err), trigger_idx, other_current_attempts)
|
||||
}
|
||||
Ok((message, trigger_idx, other_current_attempts)) => {
|
||||
(Ok(message), trigger_idx, other_current_attempts)
|
||||
}
|
||||
};
|
||||
|
||||
// Putting back the extracted elements in `state`.
|
||||
let remote_id = state.current_attempts_addrs.remove(trigger_idx);
|
||||
debug_assert!(state.current_attempts_fut.is_empty());
|
||||
state.current_attempts_fut = other_current_attempts;
|
||||
|
||||
// `message` contains the reason why the current future was woken up.
|
||||
let closer_peers = match message {
|
||||
Ok(msg) => msg,
|
||||
Err(err) => {
|
||||
trace!("RPC query failed for {:?}: {:?}", remote_id, err);
|
||||
state.failed_to_contact.insert(remote_id);
|
||||
return future::ok((None, state));
|
||||
}
|
||||
};
|
||||
|
||||
// Inserting the node we received a response from into `state.result`.
|
||||
// The code is non-trivial because `state.result` is ordered by distance and is limited
// to `num_results` elements.
|
||||
if let Some(insert_pos) = state.result.iter().position(|e| {
|
||||
e.distance_with(&searched_key) >= remote_id.distance_with(&searched_key)
|
||||
}) {
|
||||
if state.result[insert_pos] != remote_id {
|
||||
if state.result.len() >= num_results {
|
||||
state.result.pop();
|
||||
}
|
||||
state.result.insert(insert_pos, remote_id);
|
||||
}
|
||||
} else if state.result.len() < num_results {
|
||||
state.result.push(remote_id);
|
||||
}
|
||||
|
||||
// The loop below will set this variable to `true` if we find a new element to put at
|
||||
// the top of the result. This would mean that we have to continue looping.
|
||||
let mut local_nearest_node_updated = false;
|
||||
|
||||
// Update `state` with the actual content of the message.
|
||||
let mut peers_reported = Vec::with_capacity(closer_peers.len());
|
||||
for mut peer in closer_peers {
|
||||
// Report the information sent by the remote so that the caller can update its peerstore.
|
||||
trace!("Reporting multiaddresses for {:?}: {:?}", peer.node_id, peer.multiaddrs);
|
||||
peers_reported.push(peer.clone());
|
||||
|
||||
if peer.node_id.distance_with(&searched_key)
|
||||
<= state.result[0].distance_with(&searched_key)
|
||||
{
|
||||
local_nearest_node_updated = true;
|
||||
}
|
||||
|
||||
if state.result.iter().any(|ma| ma == &peer.node_id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Insert the node into `pending_nodes` at the right position, or do not
|
||||
// insert it if it is already in there.
|
||||
if let Some(insert_pos) = state.pending_nodes.iter().position(|e| {
|
||||
e.distance_with(&searched_key) >= peer.node_id.distance_with(&searched_key)
|
||||
}) {
|
||||
if state.pending_nodes[insert_pos] != peer.node_id {
|
||||
state.pending_nodes.insert(insert_pos, peer.node_id.clone());
|
||||
}
|
||||
} else {
|
||||
state.pending_nodes.push(peer.node_id.clone());
|
||||
}
|
||||
}
|
||||
|
||||
if state.result.len() >= num_results
|
||||
|| (state.stage != Stage::FirstStep && state.current_attempts_fut.is_empty())
|
||||
{
|
||||
state.stage = Stage::FinishingNextIter;
|
||||
|
||||
} else {
|
||||
if !local_nearest_node_updated {
|
||||
trace!("Loop didn't update closer node; jumping to step 2");
|
||||
state.stage = Stage::SecondStep;
|
||||
}
|
||||
}
|
||||
|
||||
future::ok((Some(KadQueryEvent::PeersReported(peers_reported)), state))
|
||||
});
|
||||
|
||||
Some(future::Either::B(future))
|
||||
}).filter_map(|val| val);
|
||||
|
||||
// Boxing the stream is not necessary, but we do it in order to improve compilation time.
|
||||
Box::new(stream) as Box<_>
|
||||
}
|
@ -1,620 +0,0 @@
|
||||
// Copyright 2018 Parity Technologies (UK) Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! Contains a `ConnectionUpgrade` that makes it possible to send requests and receive responses
//! from nodes after the upgrade.
//!
//! # Usage
//!
//! - Create a `KadConnecConfig` object. This struct implements `ConnectionUpgrade`.
//!
//! - Upgrade a connection through that `KadConnecConfig`. The output yields you a
//!   `KadConnecController` and a stream that must be driven to completion. The controller
//!   allows you to perform queries and receive responses. The stream produces incoming requests
//!   from the remote.
//!
//! This `KadConnecController` is usually extracted and stored in some sort of hash map in an
//! `Arc` in order to be available whenever we need to request something from a node.
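
// A usage sketch for the flow described above (illustrative; `socket`, `peer_controllers`,
// `remote_peer_id` and the executor wiring are assumptions, not part of the original code):
//
//     let (controller, incoming) = KadConnecConfig::new().upgrade_inbound(socket, ()).wait()?;
//     peer_controllers.lock().insert(remote_peer_id.clone(), controller.clone());
//     // The `incoming` stream must be driven for the connection to make progress.
//     tokio::spawn(incoming.for_each(|_request| Ok(())).map_err(|_| ()));
//     // Later, anywhere that holds the map:
//     let closer = peer_controllers.lock()[&remote_peer_id].find_node(&searched_peer_id);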
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures::sync::{mpsc, oneshot};
|
||||
use futures::{future, Future, Sink, stream, Stream};
|
||||
use libp2p_core::{PeerId, upgrade::{InboundUpgrade, UpgradeInfo}};
|
||||
use log::{debug, warn};
|
||||
use multihash::Multihash;
|
||||
use protocol::{self, KadMsg, KademliaProtocolConfig, KadPeer};
|
||||
use std::collections::VecDeque;
|
||||
use std::io::{Error as IoError, ErrorKind as IoErrorKind};
|
||||
use std::iter;
|
||||
use tokio_io::{AsyncRead, AsyncWrite};
|
||||
|
||||
/// Configuration for a Kademlia server.
|
||||
///
|
||||
/// Implements `ConnectionUpgrade`. On a successful upgrade, produces a `KadConnecController`
|
||||
/// and a `Future`. The controller lets you send queries to the remote and receive answers, while
|
||||
/// the `Future` must be driven to completion in order for things to work.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct KadConnecConfig {
|
||||
raw_proto: KademliaProtocolConfig,
|
||||
}
|
||||
|
||||
impl KadConnecConfig {
|
||||
/// Builds a configuration object for an upcoming Kademlia server.
|
||||
#[inline]
|
||||
pub fn new() -> Self {
|
||||
KadConnecConfig {
|
||||
raw_proto: KademliaProtocolConfig,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UpgradeInfo for KadConnecConfig {
|
||||
type NamesIter = iter::Once<(Bytes, Self::UpgradeId)>;
|
||||
type UpgradeId = ();
|
||||
|
||||
#[inline]
|
||||
fn protocol_names(&self) -> Self::NamesIter {
|
||||
self.raw_proto.protocol_names()
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> InboundUpgrade<C> for KadConnecConfig
|
||||
where
|
||||
C: AsyncRead + AsyncWrite + Send + 'static, // TODO: 'static :-/
|
||||
{
|
||||
type Output = (
|
||||
KadConnecController,
|
||||
Box<Stream<Item = KadIncomingRequest, Error = IoError> + Send>,
|
||||
);
|
||||
type Error = IoError;
|
||||
type Future = future::Map<<KademliaProtocolConfig as InboundUpgrade<C>>::Future, fn(<KademliaProtocolConfig as InboundUpgrade<C>>::Output) -> Self::Output>;
|
||||
|
||||
#[inline]
|
||||
fn upgrade_inbound(self, incoming: C, id: Self::UpgradeId) -> Self::Future {
|
||||
self.raw_proto
|
||||
.upgrade_inbound(incoming, id)
|
||||
.map(build_from_sink_stream)
|
||||
}
|
||||
}
|
||||
|
||||
/// Allows sending Kademlia requests and receiving responses.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct KadConnecController {
|
||||
// In order to send a request, we use this sender to send a tuple. The first element of the
|
||||
// tuple is the message to send to the remote, and the second element is what is used to
|
||||
// receive the response. If the query doesn't expect a response (e.g. `PUT_VALUE`), then the
|
||||
// one-shot sender will be dropped without being used.
|
||||
inner: mpsc::UnboundedSender<(KadMsg, oneshot::Sender<KadMsg>)>,
|
||||
}
|
||||
|
||||
impl KadConnecController {
|
||||
/// Sends a `FIND_NODE` query to the node and provides a future that will contain the response.
|
||||
// TODO: future item could be `impl Iterator` instead
|
||||
pub fn find_node(
|
||||
&self,
|
||||
searched_key: &PeerId,
|
||||
) -> impl Future<Item = Vec<KadPeer>, Error = IoError> {
|
||||
let message = protocol::KadMsg::FindNodeReq {
|
||||
key: searched_key.clone().into(),
|
||||
};
|
||||
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
match self.inner.unbounded_send((message, tx)) {
|
||||
Ok(()) => (),
|
||||
Err(_) => {
|
||||
let fut = future::err(IoError::new(
|
||||
IoErrorKind::ConnectionAborted,
|
||||
"connection to remote has aborted",
|
||||
));
|
||||
|
||||
return future::Either::B(fut);
|
||||
}
|
||||
};
|
||||
|
||||
let future = rx.map_err(|_| {
|
||||
IoError::new(
|
||||
IoErrorKind::ConnectionAborted,
|
||||
"connection to remote has aborted",
|
||||
)
|
||||
}).and_then(|msg| match msg {
|
||||
KadMsg::FindNodeRes { closer_peers, .. } => Ok(closer_peers),
|
||||
_ => Err(IoError::new(
|
||||
IoErrorKind::InvalidData,
|
||||
"invalid response type received from the remote",
|
||||
)),
|
||||
});
|
||||
|
||||
future::Either::A(future)
|
||||
}
|
||||
|
||||
/// Sends a `GET_PROVIDERS` query to the node and provides a future that will contain the response.
|
||||
// TODO: future item could be `impl Iterator` instead
|
||||
pub fn get_providers(
|
||||
&self,
|
||||
searched_key: &Multihash,
|
||||
) -> impl Future<Item = (Vec<KadPeer>, Vec<KadPeer>), Error = IoError> {
|
||||
let message = protocol::KadMsg::GetProvidersReq {
|
||||
key: searched_key.clone(),
|
||||
};
|
||||
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
match self.inner.unbounded_send((message, tx)) {
|
||||
Ok(()) => (),
|
||||
Err(_) => {
|
||||
let fut = future::err(IoError::new(
|
||||
IoErrorKind::ConnectionAborted,
|
||||
"connection to remote has aborted",
|
||||
));
|
||||
|
||||
return future::Either::B(fut);
|
||||
}
|
||||
};
|
||||
|
||||
let future = rx.map_err(|_| {
|
||||
IoError::new(
|
||||
IoErrorKind::ConnectionAborted,
|
||||
"connection to remote has aborted",
|
||||
)
|
||||
}).and_then(|msg| match msg {
|
||||
KadMsg::GetProvidersRes { closer_peers, provider_peers } => Ok((closer_peers, provider_peers)),
|
||||
_ => Err(IoError::new(
|
||||
IoErrorKind::InvalidData,
|
||||
"invalid response type received from the remote",
|
||||
)),
|
||||
});
|
||||
|
||||
future::Either::A(future)
|
||||
}
|
||||
|
||||
/// Sends an `ADD_PROVIDER` message to the node.
|
||||
pub fn add_provider(&self, key: Multihash, provider_peer: KadPeer) -> Result<(), IoError> {
|
||||
// Dummy channel, as the `tx` is going to be dropped anyway.
|
||||
let (tx, _rx) = oneshot::channel();
|
||||
let message = protocol::KadMsg::AddProvider {
|
||||
key,
|
||||
provider_peer,
|
||||
};
|
||||
match self.inner.unbounded_send((message, tx)) {
|
||||
Ok(()) => Ok(()),
|
||||
Err(_) => Err(IoError::new(
|
||||
IoErrorKind::ConnectionAborted,
|
||||
"connection to remote has aborted",
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Sends a `PING` query to the node. Because of the way the protocol is designed, there is
|
||||
/// no way to differentiate between a ping and a pong. Therefore this function doesn't return a
|
||||
/// future, and the only way to be notified of the result is through the stream.
|
||||
pub fn ping(&self) -> Result<(), IoError> {
|
||||
// Dummy channel, as the `tx` is going to be dropped anyway.
|
||||
let (tx, _rx) = oneshot::channel();
|
||||
match self.inner.unbounded_send((protocol::KadMsg::Ping, tx)) {
|
||||
Ok(()) => Ok(()),
|
||||
Err(_) => Err(IoError::new(
|
||||
IoErrorKind::ConnectionAborted,
|
||||
"connection to remote has aborted",
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
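
// A small sketch of the ping flow described above (illustrative only): the call itself is
// fire-and-forget, and the eventual PING/PONG surfaces on the incoming-request stream.
//
//     controller.ping()?;
//     // ... later, while driving the incoming-request stream:
//     // KadIncomingRequest::PingPong => { /* the remote answered us, or pinged us */ }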
|
||||
|
||||
/// Request received from the remote.
pub enum KadIncomingRequest {
/// Find the nodes closest to `searched`.
FindNode {
/// The value being searched.
searched: PeerId,
/// Object to use to respond to the request.
responder: KadFindNodeRespond,
},

/// Find the nodes closest to `searched` and return the known providers for `searched`.
GetProviders {
/// The value being searched.
searched: Multihash,
/// Object to use to respond to the request.
responder: KadGetProvidersRespond,
},

/// Registers a provider for the given key.
///
/// The local node is supposed to remember this and return the provider on a `GetProviders`
/// request for the given key.
AddProvider {
/// The key of the provider.
key: Multihash,
/// The provider to register.
provider_peer: KadPeer,
},

/// Received either a ping or a pong.
PingPong,

// TODO: PutValue and FindValue
}
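
// Sketch of answering the requests above while driving the stream (illustrative;
// `closest_known_peers`, `providers_for` and `remember_provider` are hypothetical helpers):
//
//     incoming.for_each(|request| {
//         match request {
//             KadIncomingRequest::FindNode { searched, responder } =>
//                 responder.respond(closest_known_peers(&searched)),
//             KadIncomingRequest::GetProviders { searched, responder } =>
//                 // A real node would also return its closest known peers here.
//                 responder.respond(Vec::new(), providers_for(&searched)),
//             KadIncomingRequest::AddProvider { key, provider_peer } =>
//                 remember_provider(key, provider_peer),
//             KadIncomingRequest::PingPong => {}
//         }
//         Ok(())
//     })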
|
||||
|
||||
/// Object used to respond to `FindNode` queries from remotes.
|
||||
pub struct KadFindNodeRespond {
|
||||
inner: oneshot::Sender<KadMsg>,
|
||||
}
|
||||
|
||||
impl KadFindNodeRespond {
|
||||
/// Respond to the `FindNode` request.
|
||||
pub fn respond<I>(self, peers: I)
|
||||
where I: IntoIterator<Item = protocol::KadPeer>
|
||||
{
|
||||
let _ = self.inner.send(KadMsg::FindNodeRes {
|
||||
closer_peers: peers.into_iter().collect()
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Object used to respond to `GetProviders` queries from remotes.
|
||||
pub struct KadGetProvidersRespond {
|
||||
inner: oneshot::Sender<KadMsg>,
|
||||
}
|
||||
|
||||
impl KadGetProvidersRespond {
|
||||
/// Respond to the `GetProviders` request.
|
||||
pub fn respond<Ic, Ip>(self, closest_peers: Ic, providers: Ip)
|
||||
where Ic: IntoIterator<Item = protocol::KadPeer>,
|
||||
Ip: IntoIterator<Item = protocol::KadPeer>,
|
||||
{
|
||||
let _ = self.inner.send(KadMsg::GetProvidersRes {
|
||||
closer_peers: closest_peers.into_iter().collect(),
|
||||
provider_peers: providers.into_iter().collect(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Builds a controller and stream from a stream/sink of raw messages.
|
||||
fn build_from_sink_stream<'a, S>(connec: S) -> (KadConnecController, Box<Stream<Item = KadIncomingRequest, Error = IoError> + Send + 'a>)
|
||||
where S: Sink<SinkItem = KadMsg, SinkError = IoError> + Stream<Item = KadMsg, Error = IoError> + Send + 'a
|
||||
{
|
||||
let (tx, rx) = mpsc::unbounded();
|
||||
let future = kademlia_handler(connec, rx);
|
||||
let controller = KadConnecController { inner: tx };
|
||||
(controller, future)
|
||||
}
|
||||
|
||||
// Handles a newly-opened Kademlia stream with a remote peer.
|
||||
//
|
||||
// Takes a `Stream` and `Sink` of Kademlia messages representing the connection to the client,
|
||||
// plus a `Receiver` that will receive messages to transmit to that connection.
|
||||
//
|
||||
// Returns a `Stream` that must be resolved in order for progress to work. The `Stream` will
|
||||
// produce objects that represent the requests sent by the remote. These requests must be answered
|
||||
// immediately before the stream continues to produce items.
|
||||
fn kademlia_handler<'a, S>(
|
||||
kad_bistream: S,
|
||||
rq_rx: mpsc::UnboundedReceiver<(KadMsg, oneshot::Sender<KadMsg>)>,
|
||||
) -> Box<Stream<Item = KadIncomingRequest, Error = IoError> + Send + 'a>
|
||||
where
|
||||
S: Stream<Item = KadMsg, Error = IoError> + Sink<SinkItem = KadMsg, SinkError = IoError> + Send + 'a,
|
||||
{
|
||||
let (kad_sink, kad_stream) = kad_bistream.split();
|
||||
|
||||
// This is a stream of futures containing local responses.
|
||||
// Every time we receive a request from the remote, we create a `oneshot::channel()` and send
|
||||
// the receiving end to `responders_tx`.
|
||||
// This way, if a future is available on `responders_rx`, we block until it produces the
|
||||
// response.
|
||||
let (responders_tx, responders_rx) = mpsc::unbounded();
|
||||
|
||||
// We combine all the streams into one so that the loop wakes up whenever any generates
|
||||
// something.
|
||||
enum EventSource {
|
||||
Remote(KadMsg),
|
||||
LocalRequest(KadMsg, oneshot::Sender<KadMsg>),
|
||||
LocalResponse(oneshot::Receiver<KadMsg>),
|
||||
Finished,
|
||||
}
|
||||
|
||||
let events = {
|
||||
let responders = responders_rx
|
||||
.map(|m| EventSource::LocalResponse(m))
|
||||
.map_err(|_| unreachable!());
|
||||
let rq_rx = rq_rx
|
||||
.map(|(m, o)| EventSource::LocalRequest(m, o))
|
||||
.map_err(|_| unreachable!());
|
||||
let kad_stream = kad_stream
|
||||
.map(|m| EventSource::Remote(m))
|
||||
.chain(future::ok(EventSource::Finished).into_stream());
|
||||
responders.select(rq_rx).select(kad_stream)
|
||||
};
|
||||
|
||||
let stream = stream::unfold((events, kad_sink, responders_tx, VecDeque::new(), 0u32, false),
|
||||
move |(events, kad_sink, responders_tx, mut send_back_queue, expected_pongs, finished)| {
|
||||
if finished {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(events
|
||||
.into_future()
|
||||
.map_err(|(err, _)| err)
|
||||
.and_then(move |(message, events)| -> Box<Future<Item = _, Error = _> + Send> {
|
||||
match message {
|
||||
Some(EventSource::Finished) | None => {
|
||||
let future = future::ok({
|
||||
let state = (events, kad_sink, responders_tx, send_back_queue, expected_pongs, true);
|
||||
(None, state)
|
||||
});
|
||||
Box::new(future)
|
||||
},
|
||||
Some(EventSource::LocalResponse(message)) => {
|
||||
let future = message
|
||||
.map_err(|err| {
|
||||
// The user destroyed the responder without responding.
|
||||
warn!("Kad responder object destroyed without responding");
|
||||
// TODO: what to do here? we have to close the connection
|
||||
IoError::new(IoErrorKind::Other, err)
|
||||
})
|
||||
.and_then(move |message| {
|
||||
kad_sink
|
||||
.send(message)
|
||||
.map(move |kad_sink| {
|
||||
let state = (events, kad_sink, responders_tx, send_back_queue, expected_pongs, finished);
|
||||
(None, state)
|
||||
})
|
||||
});
|
||||
Box::new(future)
|
||||
},
|
||||
Some(EventSource::LocalRequest(message @ KadMsg::PutValue { .. }, _)) |
|
||||
Some(EventSource::LocalRequest(message @ KadMsg::AddProvider { .. }, _)) => {
|
||||
// A `PutValue` or `AddProvider` request. Contrary to other types of
|
||||
// messages, these ones don't expect any answer and therefore we ignore
|
||||
// the sender.
|
||||
let future = kad_sink
|
||||
.send(message)
|
||||
.map(move |kad_sink| {
|
||||
let state = (events, kad_sink, responders_tx, send_back_queue, expected_pongs, finished);
|
||||
(None, state)
|
||||
});
|
||||
Box::new(future) as Box<_>
|
||||
}
|
||||
Some(EventSource::LocalRequest(message @ KadMsg::Ping { .. }, _)) => {
|
||||
// A local `Ping` request.
|
||||
let expected_pongs = expected_pongs.checked_add(1)
|
||||
.expect("overflow in number of simultaneous pings");
|
||||
let future = kad_sink
|
||||
.send(message)
|
||||
.map(move |kad_sink| {
|
||||
let state = (events, kad_sink, responders_tx, send_back_queue, expected_pongs, finished);
|
||||
(None, state)
|
||||
});
|
||||
Box::new(future) as Box<_>
|
||||
}
|
||||
Some(EventSource::LocalRequest(message, send_back)) => {
|
||||
// Any local request other than `PutValue` or `Ping`.
|
||||
send_back_queue.push_back(send_back);
|
||||
let future = kad_sink
|
||||
.send(message)
|
||||
.map(move |kad_sink| {
|
||||
let state = (events, kad_sink, responders_tx, send_back_queue, expected_pongs, finished);
|
||||
(None, state)
|
||||
});
|
||||
Box::new(future) as Box<_>
|
||||
}
|
||||
Some(EventSource::Remote(KadMsg::Ping)) => {
// The way the protocol was designed, there is no way to differentiate
// between a ping and a pong.
if let Some(expected_pongs) = expected_pongs.checked_sub(1) {
// Maybe we received a PING, or maybe we received a PONG; there is no way
// to tell. If it was a PING and we expected a PONG, then the
// remote will see its PING answered only when it PONGs us.
|
||||
let future = future::ok({
|
||||
let state = (events, kad_sink, responders_tx, send_back_queue, expected_pongs, finished);
|
||||
let rq = KadIncomingRequest::PingPong;
|
||||
(Some(rq), state)
|
||||
});
|
||||
Box::new(future) as Box<_>
|
||||
} else {
|
||||
let future = kad_sink
|
||||
.send(KadMsg::Ping)
|
||||
.map(move |kad_sink| {
|
||||
let state = (events, kad_sink, responders_tx, send_back_queue, expected_pongs, finished);
|
||||
let rq = KadIncomingRequest::PingPong;
|
||||
(Some(rq), state)
|
||||
});
|
||||
Box::new(future) as Box<_>
|
||||
}
|
||||
}
|
||||
Some(EventSource::Remote(message @ KadMsg::FindNodeRes { .. }))
|
||||
| Some(EventSource::Remote(message @ KadMsg::GetValueRes { .. }))
|
||||
| Some(EventSource::Remote(message @ KadMsg::GetProvidersRes { .. })) => {
|
||||
// `FindNodeRes`, `GetValueRes` or `GetProvidersRes` received on the socket.
|
||||
// Send it back through `send_back_queue`.
|
||||
if let Some(send_back) = send_back_queue.pop_front() {
|
||||
let _ = send_back.send(message);
|
||||
let future = future::ok({
|
||||
let state = (events, kad_sink, responders_tx, send_back_queue, expected_pongs, finished);
|
||||
(None, state)
|
||||
});
|
||||
Box::new(future)
|
||||
} else {
|
||||
debug!("Remote sent a Kad response but we didn't request anything");
|
||||
let future = future::err(IoErrorKind::InvalidData.into());
|
||||
Box::new(future)
|
||||
}
|
||||
}
|
||||
Some(EventSource::Remote(KadMsg::FindNodeReq { key })) => {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = responders_tx.unbounded_send(rx);
|
||||
let future = future::ok({
|
||||
let state = (events, kad_sink, responders_tx, send_back_queue, expected_pongs, finished);
|
||||
let rq = KadIncomingRequest::FindNode {
|
||||
searched: key,
|
||||
responder: KadFindNodeRespond {
|
||||
inner: tx
|
||||
}
|
||||
};
|
||||
(Some(rq), state)
|
||||
});
|
||||
|
||||
Box::new(future)
|
||||
}
|
||||
Some(EventSource::Remote(KadMsg::GetProvidersReq { key })) => {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = responders_tx.unbounded_send(rx);
|
||||
let future = future::ok({
|
||||
let state = (events, kad_sink, responders_tx, send_back_queue, expected_pongs, finished);
|
||||
let rq = KadIncomingRequest::GetProviders {
|
||||
searched: key,
|
||||
responder: KadGetProvidersRespond {
|
||||
inner: tx
|
||||
}
|
||||
};
|
||||
(Some(rq), state)
|
||||
});
|
||||
|
||||
Box::new(future)
|
||||
}
|
||||
Some(EventSource::Remote(KadMsg::AddProvider { key, provider_peer })) => {
|
||||
let future = future::ok({
|
||||
let state = (events, kad_sink, responders_tx, send_back_queue, expected_pongs, finished);
|
||||
let rq = KadIncomingRequest::AddProvider { key, provider_peer };
|
||||
(Some(rq), state)
|
||||
});
|
||||
Box::new(future) as Box<_>
|
||||
}
|
||||
Some(EventSource::Remote(KadMsg::GetValueReq { .. })) => {
|
||||
warn!("GET_VALUE requests are not implemented yet");
|
||||
let future = future::err(IoError::new(IoErrorKind::Other,
|
||||
"GET_VALUE requests are not implemented yet"));
|
||||
return Box::new(future);
|
||||
}
|
||||
Some(EventSource::Remote(KadMsg::PutValue { .. })) => {
|
||||
warn!("PUT_VALUE requests are not implemented yet");
|
||||
let state = (events, kad_sink, responders_tx, send_back_queue, expected_pongs, finished);
|
||||
let future = future::ok((None, state));
|
||||
return Box::new(future);
|
||||
}
|
||||
}
|
||||
}))
|
||||
}).filter_map(|val| val);
|
||||
|
||||
Box::new(stream) as Box<Stream<Item = _, Error = IoError> + Send>
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::io::Error as IoError;
|
||||
use std::iter;
|
||||
use futures::{Future, Poll, Sink, StartSend, Stream};
|
||||
use futures::sync::mpsc;
|
||||
use kad_server::{self, KadIncomingRequest, KadConnecController};
|
||||
use libp2p_core::PeerId;
|
||||
use protocol::{KadConnectionType, KadPeer};
|
||||
|
||||
// This struct merges a stream and a sink and is quite useful for tests.
|
||||
struct Wrapper<St, Si>(St, Si);
|
||||
impl<St, Si> Stream for Wrapper<St, Si>
|
||||
where
|
||||
St: Stream,
|
||||
{
|
||||
type Item = St::Item;
|
||||
type Error = St::Error;
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
self.0.poll()
|
||||
}
|
||||
}
|
||||
impl<St, Si> Sink for Wrapper<St, Si>
|
||||
where
|
||||
Si: Sink,
|
||||
{
|
||||
type SinkItem = Si::SinkItem;
|
||||
type SinkError = Si::SinkError;
|
||||
fn start_send(
|
||||
&mut self,
|
||||
item: Self::SinkItem,
|
||||
) -> StartSend<Self::SinkItem, Self::SinkError> {
|
||||
self.1.start_send(item)
|
||||
}
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.1.poll_complete()
|
||||
}
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.1.close()
|
||||
}
|
||||
}
|
||||
|
||||
fn build_test() -> (KadConnecController, impl Stream<Item = KadIncomingRequest, Error = IoError>, KadConnecController, impl Stream<Item = KadIncomingRequest, Error = IoError>) {
|
||||
let (a_to_b, b_from_a) = mpsc::unbounded();
|
||||
let (b_to_a, a_from_b) = mpsc::unbounded();
|
||||
|
||||
let sink_stream_a = Wrapper(a_from_b, a_to_b)
|
||||
.map_err(|_| panic!()).sink_map_err(|_| panic!());
|
||||
let sink_stream_b = Wrapper(b_from_a, b_to_a)
|
||||
.map_err(|_| panic!()).sink_map_err(|_| panic!());
|
||||
|
||||
let (controller_a, stream_events_a) = kad_server::build_from_sink_stream(sink_stream_a);
|
||||
let (controller_b, stream_events_b) = kad_server::build_from_sink_stream(sink_stream_b);
|
||||
(controller_a, stream_events_a, controller_b, stream_events_b)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ping_response() {
|
||||
let (controller_a, stream_events_a, _controller_b, stream_events_b) = build_test();
|
||||
|
||||
controller_a.ping().unwrap();
|
||||
|
||||
let streams = stream_events_a.map(|ev| (ev, "a"))
|
||||
.select(stream_events_b.map(|ev| (ev, "b")));
|
||||
match streams.into_future().map_err(|(err, _)| err).wait().unwrap() {
|
||||
(Some((KadIncomingRequest::PingPong, "b")), _) => {},
|
||||
_ => panic!()
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn find_node_response() {
|
||||
let (controller_a, stream_events_a, _controller_b, stream_events_b) = build_test();
|
||||
|
||||
let random_peer_id = PeerId::random();
|
||||
|
||||
let find_node_fut = controller_a.find_node(&random_peer_id);
|
||||
|
||||
let example_response = KadPeer {
|
||||
node_id: PeerId::random(),
|
||||
multiaddrs: Vec::new(),
|
||||
connection_ty: KadConnectionType::Connected,
|
||||
};
|
||||
|
||||
let streams = stream_events_a.map(|ev| (ev, "a"))
|
||||
.select(stream_events_b.map(|ev| (ev, "b")));
|
||||
|
||||
let streams = match streams.into_future().map_err(|(err, _)| err).wait().unwrap() {
|
||||
(Some((KadIncomingRequest::FindNode { searched, responder }, "b")), streams) => {
|
||||
assert_eq!(searched, random_peer_id);
|
||||
responder.respond(iter::once(example_response.clone()));
|
||||
streams
|
||||
},
|
||||
_ => panic!()
|
||||
};
|
||||
|
||||
let resp = streams.into_future().map_err(|(err, _)| err).map(|_| unreachable!())
|
||||
.select(find_node_fut)
|
||||
.map_err(|_| -> IoError { panic!() });
|
||||
assert_eq!(resp.wait().unwrap().0, vec![example_response]);
|
||||
}
|
||||
}
|
@ -29,7 +29,7 @@
|
||||
|
||||
use arrayvec::ArrayVec;
|
||||
use bigint::U512;
|
||||
use libp2p_core::PeerId;
|
||||
use multihash::Multihash;
|
||||
use parking_lot::{Mutex, MutexGuard};
|
||||
use std::mem;
|
||||
use std::slice::Iter as SliceIter;
|
||||
@ -57,7 +57,8 @@ where
|
||||
fn clone(&self) -> Self {
|
||||
KBucketsTable {
|
||||
my_id: self.my_id.clone(),
|
||||
tables: self.tables
|
||||
tables: self
|
||||
.tables
|
||||
.iter()
|
||||
.map(|t| t.lock().clone())
|
||||
.map(Mutex::new)
|
||||
@ -124,7 +125,7 @@ pub trait KBucketsPeerId: Eq + Clone {
|
||||
fn leading_zeros(Self::Distance) -> u32;
|
||||
}
|
||||
|
||||
impl KBucketsPeerId for PeerId {
|
||||
impl KBucketsPeerId for Multihash {
|
||||
type Distance = U512;
|
||||
|
||||
#[inline]
|
||||
@ -201,7 +202,7 @@ where
|
||||
let mut table = table.lock();
|
||||
table.flush(self.ping_timeout);
|
||||
if table.last_update.elapsed() > self.ping_timeout {
|
||||
continue // ignore bucket with expired nodes
|
||||
continue; // ignore bucket with expired nodes
|
||||
}
|
||||
for node in table.nodes.iter() {
|
||||
out.push(node.id.clone());
|
||||
@ -357,7 +358,7 @@ mod tests {
|
||||
extern crate rand;
|
||||
use self::rand::random;
|
||||
use kbucket::{KBucketsTable, UpdateOutcome, MAX_NODES_PER_BUCKET};
|
||||
use libp2p_core::PeerId;
|
||||
use multihash::Multihash;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
@ -367,14 +368,20 @@ mod tests {
|
||||
let mut bytes = vec![random(); 34];
|
||||
bytes[0] = 18;
|
||||
bytes[1] = 32;
|
||||
PeerId::from_bytes(bytes.clone()).expect(&format!("creating `my_id` PeerId from bytes {:#?} failed", bytes))
|
||||
Multihash::from_bytes(bytes.clone()).expect(&format!(
|
||||
"creating `my_id` Multihash from bytes {:#?} failed",
|
||||
bytes
|
||||
))
|
||||
};
|
||||
|
||||
let other_id = {
|
||||
let mut bytes = vec![random(); 34];
|
||||
bytes[0] = 18;
|
||||
bytes[1] = 32;
|
||||
PeerId::from_bytes(bytes.clone()).expect(&format!("creating `other_id` PeerId from bytes {:#?} failed", bytes))
|
||||
Multihash::from_bytes(bytes.clone()).expect(&format!(
|
||||
"creating `other_id` Multihash from bytes {:#?} failed",
|
||||
bytes
|
||||
))
|
||||
};
|
||||
|
||||
let table = KBucketsTable::new(my_id, Duration::from_secs(5));
|
||||
@ -391,13 +398,13 @@ mod tests {
|
||||
let mut bytes = vec![random(); 34];
|
||||
bytes[0] = 18;
|
||||
bytes[1] = 32;
|
||||
PeerId::from_bytes(bytes).unwrap()
|
||||
Multihash::from_bytes(bytes).unwrap()
|
||||
};
|
||||
|
||||
let table = KBucketsTable::new(my_id.clone(), Duration::from_secs(5));
|
||||
match table.update(my_id, ()) {
|
||||
UpdateOutcome::FailSelfUpdate => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -407,7 +414,7 @@ mod tests {
|
||||
let mut bytes = vec![random(); 34];
|
||||
bytes[0] = 18;
|
||||
bytes[1] = 32;
|
||||
PeerId::from_bytes(bytes).unwrap()
|
||||
Multihash::from_bytes(bytes).unwrap()
|
||||
};
|
||||
|
||||
// Generate some other IDs varying by just one bit.
|
||||
@ -416,7 +423,7 @@ mod tests {
|
||||
let bit_num = random::<usize>() % 256;
|
||||
let mut id = my_id.as_bytes().to_vec().clone();
|
||||
id[33 - (bit_num / 8)] ^= 1 << (bit_num % 8);
|
||||
(PeerId::from_bytes(id).unwrap(), bit_num)
|
||||
(Multihash::from_bytes(id).unwrap(), bit_num)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
@ -445,7 +452,7 @@ mod tests {
|
||||
let mut bytes = vec![random(); 34];
|
||||
bytes[0] = 18;
|
||||
bytes[1] = 32;
|
||||
PeerId::from_bytes(bytes).unwrap()
|
||||
Multihash::from_bytes(bytes).unwrap()
|
||||
};
|
||||
|
||||
assert!(MAX_NODES_PER_BUCKET <= 251); // Test doesn't work otherwise.
|
||||
@ -454,7 +461,7 @@ mod tests {
|
||||
let mut id = my_id.clone().into_bytes();
|
||||
id[2] ^= 0x80; // Flip the first bit so that we get in the most distant bucket.
|
||||
id[33] = id[33].wrapping_add(n as u8);
|
||||
PeerId::from_bytes(id).unwrap()
|
||||
Multihash::from_bytes(id).unwrap()
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
|
@ -62,10 +62,11 @@ extern crate bs58;
|
||||
extern crate bytes;
|
||||
extern crate datastore;
|
||||
extern crate fnv;
|
||||
#[cfg_attr(test, macro_use)]
|
||||
extern crate futures;
|
||||
extern crate libp2p_core;
|
||||
extern crate libp2p_identify;
|
||||
extern crate libp2p_ping;
|
||||
extern crate libp2p_core;
|
||||
extern crate log;
|
||||
extern crate multiaddr;
|
||||
extern crate multihash;
|
||||
@ -77,13 +78,21 @@ extern crate tokio_codec;
|
||||
extern crate tokio_io;
|
||||
extern crate tokio_timer;
|
||||
extern crate unsigned_varint;
|
||||
extern crate void;
|
||||
|
||||
pub use self::high_level::{KadSystemConfig, KadSystem, KadQueryEvent};
|
||||
pub use self::kad_server::{KadConnecController, KadConnecConfig, KadIncomingRequest, KadFindNodeRespond};
|
||||
pub use self::protocol::{KadConnectionType, KadPeer};
|
||||
#[cfg(test)]
|
||||
extern crate tokio;
|
||||
|
||||
mod high_level;
|
||||
mod kad_server;
|
||||
pub use self::behaviour::{Kademlia, KademliaOut};
|
||||
pub use self::kbucket::KBucketsPeerId;
|
||||
pub use self::protocol::KadConnectionType;
|
||||
pub use self::topology::KademliaTopology;
|
||||
|
||||
pub mod handler;
|
||||
pub mod protocol;
|
||||
|
||||
mod behaviour;
|
||||
mod kbucket;
|
||||
mod protobuf_structs;
|
||||
mod protocol;
|
||||
mod query;
|
||||
mod topology;
|
||||
|
@ -18,19 +18,17 @@
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! Provides the `KadMsg` enum of all the possible messages transmitted with the Kademlia protocol,
|
||||
//! and the `KademliaProtocolConfig` connection upgrade whose output is a
|
||||
//! `Stream<Item = KadMsg> + Sink<SinkItem = KadMsg>`.
|
||||
//! Provides the `KadRequestMsg` and `KadResponseMsg` enums of all the possible messages
|
||||
//! transmitted with the Kademlia protocol, and the `KademliaProtocolConfig` connection upgrade.
|
||||
//!
|
||||
//! The upgrade's output is a `Sink + Stream` of messages.
|
||||
//!
|
||||
//! The `Stream` component is used to poll the underlying transport, and the `Sink` component is
|
||||
//! used to send messages.
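As a rough illustration of that `Sink + Stream` output (not part of this diff), an outbound substream upgraded with `KademliaProtocolConfig` can be driven with the usual futures 0.1 combinators. The helper below is hypothetical and assumes `futures::Future` is in scope in addition to this module's imports.

fn example_find_node<C>(
    socket: KadOutStreamSink<C>,
    peer: PeerId,
) -> impl Future<Item = Option<KadResponseMsg>, Error = IoError>
where
    C: AsyncRead + AsyncWrite,
{
    // Queue a single FIND_NODE request, then wait for the first response on the stream.
    socket
        .send(KadRequestMsg::FindNode { key: peer })
        .and_then(|socket| {
            socket
                .into_future()
                .map(|(response, _socket)| response)
                .map_err(|(err, _socket)| err)
        })
}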
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use futures::{future, sink, Sink, stream, Stream};
|
||||
use libp2p_core::{
|
||||
Multiaddr, PeerId,
|
||||
upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}
|
||||
};
|
||||
use futures::{future, sink, stream, Sink, Stream};
|
||||
use libp2p_core::{InboundUpgrade, Multiaddr, OutboundUpgrade, PeerId, UpgradeInfo};
|
||||
use multihash::Multihash;
|
||||
use protobuf::{self, Message};
|
||||
use protobuf_structs;
|
||||
@ -40,6 +38,7 @@ use tokio_codec::Framed;
|
||||
use tokio_io::{AsyncRead, AsyncWrite};
|
||||
use unsigned_varint::codec;
|
||||
|
||||
/// Status of our connection to a node reported by the Kademlia protocol.
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
|
||||
pub enum KadConnectionType {
|
||||
/// Sender hasn't tried to connect to peer.
|
||||
@ -81,9 +80,11 @@ impl Into<protobuf_structs::dht::Message_ConnectionType> for KadConnectionType {
|
||||
/// Information about a peer, as known by the sender.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct KadPeer {
|
||||
/// Identifier of the peer.
|
||||
pub node_id: PeerId,
|
||||
/// The multiaddresses that are known for that peer.
|
||||
/// The multiaddresses that the sender thinks can be used in order to reach the peer.
|
||||
pub multiaddrs: Vec<Multiaddr>,
|
||||
/// How the sender is connected to that remote.
|
||||
pub connection_ty: KadConnectionType,
|
||||
}
|
||||
|
||||
@ -127,13 +128,16 @@ impl Into<protobuf_structs::dht::Message_Peer> for KadPeer {
|
||||
}
|
||||
|
||||
/// Configuration for a Kademlia connection upgrade. When applied to a connection, turns this
|
||||
/// connection into a `Stream + Sink` whose items are of type `KadMsg`.
|
||||
/// connection into a `Stream + Sink` whose items are of type `KadRequestMsg` and `KadResponseMsg`.
|
||||
// TODO: if, as suspected, we can confirm with Protocol Labs that each open Kademlia substream does
|
||||
// only one request, then we can change the output of the `InboundUpgrade` and
|
||||
// `OutboundUpgrade` to be just a single message
|
||||
#[derive(Debug, Default, Copy, Clone)]
|
||||
pub struct KademliaProtocolConfig;
|
||||
|
||||
impl UpgradeInfo for KademliaProtocolConfig {
|
||||
type NamesIter = iter::Once<(Bytes, ())>;
|
||||
type UpgradeId = ();
|
||||
type NamesIter = iter::Once<(Bytes, Self::UpgradeId)>;
|
||||
|
||||
#[inline]
|
||||
fn protocol_names(&self) -> Self::NamesIter {
|
||||
@ -143,106 +147,102 @@ impl UpgradeInfo for KademliaProtocolConfig {
|
||||
|
||||
impl<C> InboundUpgrade<C> for KademliaProtocolConfig
|
||||
where
|
||||
C: AsyncRead + AsyncWrite + 'static, // TODO: 'static :-/
|
||||
C: AsyncRead + AsyncWrite,
|
||||
{
|
||||
type Output = KadStreamSink<C>;
|
||||
type Output = KadInStreamSink<C>;
|
||||
type Future = future::FutureResult<Self::Output, IoError>;
|
||||
type Error = IoError;
|
||||
type Future = future::FutureResult<Self::Output, Self::Error>;
|
||||
|
||||
#[inline]
|
||||
fn upgrade_inbound(self, incoming: C, _: Self::UpgradeId) -> Self::Future {
|
||||
future::ok(kademlia_protocol(incoming))
|
||||
fn upgrade_inbound(self, incoming: C, _: ()) -> Self::Future {
|
||||
future::ok(
|
||||
Framed::new(incoming, codec::UviBytes::default())
|
||||
.from_err::<IoError>()
|
||||
.with::<_, fn(_) -> _, _>(|response| -> Result<_, IoError> {
|
||||
let proto_struct = resp_msg_to_proto(response);
|
||||
proto_struct.write_to_bytes()
|
||||
.map_err(|err| IoError::new(IoErrorKind::InvalidData, err.to_string()))
|
||||
})
|
||||
.and_then::<fn(_) -> _, _>(|bytes: BytesMut| {
|
||||
let request = protobuf::parse_from_bytes(&bytes)?;
|
||||
proto_to_req_msg(request)
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<C> OutboundUpgrade<C> for KademliaProtocolConfig
|
||||
where
|
||||
C: AsyncRead + AsyncWrite + 'static, // TODO: 'static :-/
|
||||
C: AsyncRead + AsyncWrite,
|
||||
{
|
||||
type Output = KadStreamSink<C>;
|
||||
type Output = KadOutStreamSink<C>;
|
||||
type Future = future::FutureResult<Self::Output, IoError>;
|
||||
type Error = IoError;
|
||||
type Future = future::FutureResult<Self::Output, Self::Error>;
|
||||
|
||||
#[inline]
|
||||
fn upgrade_outbound(self, incoming: C, _: Self::UpgradeId) -> Self::Future {
|
||||
future::ok(kademlia_protocol(incoming))
|
||||
}
|
||||
}
|
||||
|
||||
type KadStreamSink<S> = stream::AndThen<sink::With<stream::FromErr<Framed<S, codec::UviBytes<Vec<u8>>>, IoError>, KadMsg, fn(KadMsg) -> Result<Vec<u8>, IoError>, Result<Vec<u8>, IoError>>, fn(BytesMut) -> Result<KadMsg, IoError>, Result<KadMsg, IoError>>;
|
||||
|
||||
// Upgrades a socket to use the Kademlia protocol.
|
||||
fn kademlia_protocol<S>(socket: S) -> KadStreamSink<S>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite,
|
||||
{
|
||||
Framed::new(socket, codec::UviBytes::default())
|
||||
fn upgrade_outbound(self, incoming: C, _: ()) -> Self::Future {
|
||||
future::ok(
|
||||
Framed::new(incoming, codec::UviBytes::default())
|
||||
.from_err::<IoError>()
|
||||
.with::<_, fn(_) -> _, _>(|request| -> Result<_, IoError> {
|
||||
let proto_struct = msg_to_proto(request);
|
||||
Ok(proto_struct.write_to_bytes().unwrap()) // TODO: error?
|
||||
let proto_struct = req_msg_to_proto(request);
|
||||
match proto_struct.write_to_bytes() {
|
||||
Ok(msg) => Ok(msg),
|
||||
Err(err) => Err(IoError::new(IoErrorKind::Other, err.to_string())),
|
||||
}
|
||||
})
|
||||
.and_then::<fn(_) -> _, _>(|bytes| {
|
||||
.and_then::<fn(_) -> _, _>(|bytes: BytesMut| {
|
||||
let response = protobuf::parse_from_bytes(&bytes)?;
|
||||
proto_to_msg(response)
|
||||
})
|
||||
proto_to_resp_msg(response)
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Message that we can send to a peer or received from a peer.
|
||||
// TODO: document the rest
|
||||
/// Sink of responses and stream of requests.
|
||||
pub type KadInStreamSink<S> = stream::AndThen<
|
||||
sink::With<
|
||||
stream::FromErr<Framed<S, codec::UviBytes<Vec<u8>>>, IoError>,
|
||||
KadResponseMsg,
|
||||
fn(KadResponseMsg) -> Result<Vec<u8>, IoError>,
|
||||
Result<Vec<u8>, IoError>,
|
||||
>,
|
||||
fn(BytesMut) -> Result<KadRequestMsg, IoError>,
|
||||
Result<KadRequestMsg, IoError>,
|
||||
>;
|
||||
|
||||
/// Sink of requests and stream of responses.
|
||||
pub type KadOutStreamSink<S> = stream::AndThen<
|
||||
sink::With<
|
||||
stream::FromErr<Framed<S, codec::UviBytes<Vec<u8>>>, IoError>,
|
||||
KadRequestMsg,
|
||||
fn(KadRequestMsg) -> Result<Vec<u8>, IoError>,
|
||||
Result<Vec<u8>, IoError>,
|
||||
>,
|
||||
fn(BytesMut) -> Result<KadResponseMsg, IoError>,
|
||||
Result<KadResponseMsg, IoError>,
|
||||
>;
|
||||
|
||||
/// Request that we can send to a peer or that we received from a peer.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum KadMsg {
|
||||
/// Ping request or response.
|
||||
pub enum KadRequestMsg {
|
||||
/// Ping request.
|
||||
Ping,
|
||||
|
||||
/// Target must save the given record, can be queried later with `GetValueReq`.
|
||||
PutValue {
|
||||
/// Identifier of the record.
|
||||
key: Multihash,
|
||||
/// The record itself.
|
||||
record: (), //record: protobuf_structs::record::Record, // TODO: no
|
||||
},
|
||||
|
||||
GetValueReq {
|
||||
/// Identifier of the record.
|
||||
key: Multihash,
|
||||
},
|
||||
|
||||
GetValueRes {
|
||||
/// Identifier of the returned record.
|
||||
key: Multihash,
|
||||
record: (), //record: Option<protobuf_structs::record::Record>, // TODO: no
|
||||
closer_peers: Vec<KadPeer>,
|
||||
},
|
||||
|
||||
/// Request for the list of nodes whose IDs are the closest to `key`. The number of nodes
|
||||
/// returned is not specified, but should be around 20.
|
||||
FindNodeReq {
|
||||
FindNode {
|
||||
/// Identifier of the node.
|
||||
key: PeerId,
|
||||
},
|
||||
|
||||
/// Response to a `FindNodeReq`.
|
||||
FindNodeRes {
|
||||
/// Results of the request.
|
||||
closer_peers: Vec<KadPeer>,
|
||||
},
|
||||
|
||||
/// Same as `FindNodeReq`, but should also return the entries of the local providers list for
|
||||
/// Same as `FindNode`, but should also return the entries of the local providers list for
|
||||
/// this key.
|
||||
GetProvidersReq {
|
||||
GetProviders {
|
||||
/// Identifier being searched.
|
||||
key: Multihash,
|
||||
},
|
||||
|
||||
/// Response to a `FindNodeReq`.
|
||||
GetProvidersRes {
|
||||
/// Nodes closest to the key.
|
||||
closer_peers: Vec<KadPeer>,
|
||||
/// Known providers for this key.
|
||||
provider_peers: Vec<KadPeer>,
|
||||
},
|
||||
|
||||
/// Indicates that this list of providers is known for this key.
|
||||
AddProvider {
|
||||
/// Key for which we should add providers.
|
||||
@ -252,39 +252,69 @@ pub enum KadMsg {
|
||||
},
|
||||
}
|
||||
|
||||
// Turns a type-safe Kademlia message into the corresponding raw protobuf message.
|
||||
fn msg_to_proto(kad_msg: KadMsg) -> protobuf_structs::dht::Message {
|
||||
/// Response that we can send to a peer or that we received from a peer.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum KadResponseMsg {
|
||||
/// Ping response.
|
||||
Pong,
|
||||
|
||||
/// Response to a `FindNode`.
|
||||
FindNode {
|
||||
/// Results of the request.
|
||||
closer_peers: Vec<KadPeer>,
|
||||
},
|
||||
|
||||
/// Response to a `GetProviders`.
|
||||
GetProviders {
|
||||
/// Nodes closest to the key.
|
||||
closer_peers: Vec<KadPeer>,
|
||||
/// Known providers for this key.
|
||||
provider_peers: Vec<KadPeer>,
|
||||
},
|
||||
}
|
||||
|
||||
// Turns a type-safe Kademlia message into the corresponding raw protobuf message.
|
||||
fn req_msg_to_proto(kad_msg: KadRequestMsg) -> protobuf_structs::dht::Message {
|
||||
match kad_msg {
|
||||
KadMsg::Ping => {
|
||||
KadRequestMsg::Ping => {
|
||||
let mut msg = protobuf_structs::dht::Message::new();
|
||||
msg.set_field_type(protobuf_structs::dht::Message_MessageType::PING);
|
||||
msg
|
||||
}
|
||||
KadMsg::PutValue { key, .. } => {
|
||||
let mut msg = protobuf_structs::dht::Message::new();
|
||||
msg.set_field_type(protobuf_structs::dht::Message_MessageType::PUT_VALUE);
|
||||
msg.set_key(key.into_bytes());
|
||||
//msg.set_record(record); // TODO:
|
||||
msg
|
||||
}
|
||||
KadMsg::GetValueReq { key } => {
|
||||
let mut msg = protobuf_structs::dht::Message::new();
|
||||
msg.set_field_type(protobuf_structs::dht::Message_MessageType::GET_VALUE);
|
||||
msg.set_key(key.into_bytes());
|
||||
msg.set_clusterLevelRaw(10);
|
||||
msg
|
||||
}
|
||||
KadMsg::GetValueRes { .. } => unimplemented!(), // TODO:
|
||||
KadMsg::FindNodeReq { key } => {
|
||||
KadRequestMsg::FindNode { key } => {
|
||||
let mut msg = protobuf_structs::dht::Message::new();
|
||||
msg.set_field_type(protobuf_structs::dht::Message_MessageType::FIND_NODE);
|
||||
msg.set_key(key.into_bytes());
|
||||
msg.set_clusterLevelRaw(10);
|
||||
msg
|
||||
}
|
||||
KadMsg::FindNodeRes { closer_peers } => {
|
||||
// TODO: if empty, the remote will think it's a request
|
||||
// TODO: not good, possibly exposed in the API
|
||||
KadRequestMsg::GetProviders { key } => {
|
||||
let mut msg = protobuf_structs::dht::Message::new();
|
||||
msg.set_field_type(protobuf_structs::dht::Message_MessageType::GET_PROVIDERS);
|
||||
msg.set_key(key.into_bytes());
|
||||
msg.set_clusterLevelRaw(10);
|
||||
msg
|
||||
}
|
||||
KadRequestMsg::AddProvider { key, provider_peer } => {
|
||||
let mut msg = protobuf_structs::dht::Message::new();
|
||||
msg.set_field_type(protobuf_structs::dht::Message_MessageType::ADD_PROVIDER);
|
||||
msg.set_clusterLevelRaw(10);
|
||||
msg.set_key(key.into_bytes());
|
||||
msg.mut_providerPeers().push(provider_peer.into());
|
||||
msg
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Turns a type-safe Kademlia message into the corresponding raw protobuf message.
|
||||
fn resp_msg_to_proto(kad_msg: KadResponseMsg) -> protobuf_structs::dht::Message {
|
||||
match kad_msg {
|
||||
KadResponseMsg::Pong => {
|
||||
let mut msg = protobuf_structs::dht::Message::new();
|
||||
msg.set_field_type(protobuf_structs::dht::Message_MessageType::PING);
|
||||
msg
|
||||
}
|
||||
KadResponseMsg::FindNode { closer_peers } => {
|
||||
assert!(!closer_peers.is_empty());
|
||||
let mut msg = protobuf_structs::dht::Message::new();
|
||||
msg.set_field_type(protobuf_structs::dht::Message_MessageType::FIND_NODE);
|
||||
@ -294,16 +324,10 @@ fn msg_to_proto(kad_msg: KadMsg) -> protobuf_structs::dht::Message {
|
||||
}
|
||||
msg
|
||||
}
|
||||
KadMsg::GetProvidersReq { key } => {
|
||||
let mut msg = protobuf_structs::dht::Message::new();
|
||||
msg.set_field_type(protobuf_structs::dht::Message_MessageType::GET_PROVIDERS);
|
||||
msg.set_key(key.into_bytes());
|
||||
msg.set_clusterLevelRaw(10);
|
||||
msg
|
||||
}
|
||||
KadMsg::GetProvidersRes { closer_peers, provider_peers } => {
|
||||
// TODO: if empty, the remote will think it's a request
|
||||
// TODO: not good, possibly exposed in the API
|
||||
KadResponseMsg::GetProviders {
|
||||
closer_peers,
|
||||
provider_peers,
|
||||
} => {
|
||||
assert!(!closer_peers.is_empty());
|
||||
let mut msg = protobuf_structs::dht::Message::new();
|
||||
msg.set_field_type(protobuf_structs::dht::Message_MessageType::GET_PROVIDERS);
|
||||
@ -316,94 +340,47 @@ fn msg_to_proto(kad_msg: KadMsg) -> protobuf_structs::dht::Message {
|
||||
}
|
||||
msg
|
||||
}
|
||||
KadMsg::AddProvider { key, provider_peer } => {
|
||||
let mut msg = protobuf_structs::dht::Message::new();
|
||||
msg.set_field_type(protobuf_structs::dht::Message_MessageType::ADD_PROVIDER);
|
||||
msg.set_clusterLevelRaw(10);
|
||||
msg.set_key(key.into_bytes());
|
||||
msg.mut_providerPeers().push(provider_peer.into());
|
||||
msg
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Turns a raw Kademlia message into a type-safe message.
|
||||
fn proto_to_msg(mut message: protobuf_structs::dht::Message) -> Result<KadMsg, IoError> {
|
||||
fn proto_to_req_msg(mut message: protobuf_structs::dht::Message) -> Result<KadRequestMsg, IoError> {
|
||||
match message.get_field_type() {
|
||||
protobuf_structs::dht::Message_MessageType::PING => Ok(KadMsg::Ping),
|
||||
protobuf_structs::dht::Message_MessageType::PING => Ok(KadRequestMsg::Ping),
|
||||
|
||||
protobuf_structs::dht::Message_MessageType::PUT_VALUE => {
|
||||
let key = Multihash::from_bytes(message.take_key())
|
||||
.map_err(|err| IoError::new(IoErrorKind::InvalidData, err))?;
|
||||
let _record = message.take_record();
|
||||
Ok(KadMsg::PutValue {
|
||||
key: key,
|
||||
record: (),
|
||||
})
|
||||
Err(IoError::new(
|
||||
IoErrorKind::InvalidData,
|
||||
"received a PUT_VALUE message, but this is not supported by rust-libp2p yet",
|
||||
))
|
||||
}
|
||||
|
||||
protobuf_structs::dht::Message_MessageType::GET_VALUE => {
|
||||
let key = Multihash::from_bytes(message.take_key())
|
||||
.map_err(|err| IoError::new(IoErrorKind::InvalidData, err))?;
|
||||
Ok(KadMsg::GetValueReq { key: key })
|
||||
Err(IoError::new(
|
||||
IoErrorKind::InvalidData,
|
||||
"received a GET_VALUE message, but this is not supported by rust-libp2p yet",
|
||||
))
|
||||
}
|
||||
|
||||
protobuf_structs::dht::Message_MessageType::FIND_NODE => {
|
||||
if message.get_closerPeers().is_empty() {
|
||||
let key = PeerId::from_bytes(message.take_key())
|
||||
.map_err(|_| IoError::new(IoErrorKind::InvalidData, "invalid peer id in FIND_NODE"))?;
|
||||
Ok(KadMsg::FindNodeReq {
|
||||
key,
|
||||
})
|
||||
|
||||
} else {
|
||||
// TODO: for now we don't parse the peer properly, so it is possible that we get
|
||||
// parsing errors for peers even when they are valid; we ignore these
|
||||
// errors for now, but ultimately we should just error altogether
|
||||
let closer_peers = message.mut_closerPeers()
|
||||
.iter_mut()
|
||||
.filter_map(|peer| KadPeer::from_peer(peer).ok())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(KadMsg::FindNodeRes {
|
||||
closer_peers,
|
||||
})
|
||||
}
|
||||
let key = PeerId::from_bytes(message.take_key()).map_err(|_| {
|
||||
IoError::new(IoErrorKind::InvalidData, "invalid peer id in FIND_NODE")
|
||||
})?;
|
||||
Ok(KadRequestMsg::FindNode { key })
|
||||
}
|
||||
|
||||
protobuf_structs::dht::Message_MessageType::GET_PROVIDERS => {
|
||||
if message.get_closerPeers().is_empty() {
|
||||
let key = Multihash::from_bytes(message.take_key())
|
||||
.map_err(|err| IoError::new(IoErrorKind::InvalidData, err))?;
|
||||
Ok(KadMsg::GetProvidersReq {
|
||||
key,
|
||||
})
|
||||
|
||||
} else {
|
||||
// TODO: for now we don't parse the peer properly, so it is possible that we get
|
||||
// parsing errors for peers even when they are valid; we ignore these
|
||||
// errors for now, but ultimately we should just error altogether
|
||||
let closer_peers = message.mut_closerPeers()
|
||||
.iter_mut()
|
||||
.filter_map(|peer| KadPeer::from_peer(peer).ok())
|
||||
.collect::<Vec<_>>();
|
||||
let provider_peers = message.mut_providerPeers()
|
||||
.iter_mut()
|
||||
.filter_map(|peer| KadPeer::from_peer(peer).ok())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(KadMsg::GetProvidersRes {
|
||||
closer_peers,
|
||||
provider_peers,
|
||||
})
|
||||
}
|
||||
Ok(KadRequestMsg::GetProviders { key })
|
||||
}
|
||||
|
||||
protobuf_structs::dht::Message_MessageType::ADD_PROVIDER => {
|
||||
// TODO: for now we don't parse the peer properly, so it is possible that we get
|
||||
// parsing errors for peers even when they are valid; we ignore these
|
||||
// errors for now, but ultimately we should just error altogether
|
||||
let provider_peer = message.mut_providerPeers()
|
||||
let provider_peer = message
|
||||
.mut_providerPeers()
|
||||
.iter_mut()
|
||||
.filter_map(|peer| KadPeer::from_peer(peer).ok())
|
||||
.next();
|
||||
@ -411,10 +388,7 @@ fn proto_to_msg(mut message: protobuf_structs::dht::Message) -> Result<KadMsg, I
|
||||
if let Some(provider_peer) = provider_peer {
|
||||
let key = Multihash::from_bytes(message.take_key())
|
||||
.map_err(|err| IoError::new(IoErrorKind::InvalidData, err))?;
|
||||
Ok(KadMsg::AddProvider {
|
||||
key,
|
||||
provider_peer,
|
||||
})
|
||||
Ok(KadRequestMsg::AddProvider { key, provider_peer })
|
||||
} else {
|
||||
Err(IoError::new(
|
||||
IoErrorKind::InvalidData,
|
||||
@ -425,64 +399,105 @@ fn proto_to_msg(mut message: protobuf_structs::dht::Message) -> Result<KadMsg, I
|
||||
}
|
||||
}
|
||||
|
||||
/// Turns a raw Kademlia message into a type-safe message.
|
||||
fn proto_to_resp_msg(
|
||||
mut message: protobuf_structs::dht::Message,
|
||||
) -> Result<KadResponseMsg, IoError> {
|
||||
match message.get_field_type() {
|
||||
protobuf_structs::dht::Message_MessageType::PING => Ok(KadResponseMsg::Pong),
|
||||
|
||||
protobuf_structs::dht::Message_MessageType::GET_VALUE => {
|
||||
Err(IoError::new(
|
||||
IoErrorKind::InvalidData,
|
||||
"received a GET_VALUE message, but this is not supported by rust-libp2p yet",
|
||||
))
|
||||
}
|
||||
|
||||
protobuf_structs::dht::Message_MessageType::FIND_NODE => {
|
||||
let closer_peers = message
|
||||
.mut_closerPeers()
|
||||
.iter_mut()
|
||||
.filter_map(|peer| KadPeer::from_peer(peer).ok())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(KadResponseMsg::FindNode { closer_peers })
|
||||
}
|
||||
|
||||
protobuf_structs::dht::Message_MessageType::GET_PROVIDERS => {
|
||||
let closer_peers = message
|
||||
.mut_closerPeers()
|
||||
.iter_mut()
|
||||
.filter_map(|peer| KadPeer::from_peer(peer).ok())
|
||||
.collect::<Vec<_>>();
|
||||
let provider_peers = message
|
||||
.mut_providerPeers()
|
||||
.iter_mut()
|
||||
.filter_map(|peer| KadPeer::from_peer(peer).ok())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(KadResponseMsg::GetProviders {
|
||||
closer_peers,
|
||||
provider_peers,
|
||||
})
|
||||
}
|
||||
|
||||
protobuf_structs::dht::Message_MessageType::PUT_VALUE => Err(IoError::new(
|
||||
IoErrorKind::InvalidData,
|
||||
"received an unexpected PUT_VALUE message",
|
||||
)),
|
||||
|
||||
protobuf_structs::dht::Message_MessageType::ADD_PROVIDER => Err(IoError::new(
|
||||
IoErrorKind::InvalidData,
|
||||
"received an unexpected ADD_PROVIDER message",
|
||||
)),
|
||||
}
|
||||
}
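A hedged round-trip sketch of the conversion helpers above (illustrative only, not part of the commit; it could live in the test module below): a request serialized with `req_msg_to_proto` and parsed back with `proto_to_req_msg` compares equal.

#[test]
fn find_node_request_roundtrips_through_protobuf() {
    // Serialize one KadRequestMsg variant to its protobuf form and parse it back.
    let original = KadRequestMsg::FindNode { key: PeerId::random() };
    let as_proto = req_msg_to_proto(original.clone());
    let parsed = proto_to_req_msg(as_proto).expect("round-trip should succeed");
    assert_eq!(parsed, original);
}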
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
extern crate libp2p_tcp_transport;
|
||||
extern crate tokio;
|
||||
|
||||
use self::libp2p_tcp_transport::TcpConfig;
|
||||
use self::tokio::runtime::current_thread::Runtime;
|
||||
use futures::{Future, Sink, Stream};
|
||||
use libp2p_core::{Transport, PeerId};
|
||||
use libp2p_core::{PeerId, PublicKey, Transport};
|
||||
use multihash::{encode, Hash};
|
||||
use protocol::{KadConnectionType, KadMsg, KademliaProtocolConfig, KadPeer};
|
||||
use protocol::{KadConnectionType, KadPeer, KademliaProtocolConfig};
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
use self::tokio::runtime::current_thread::Runtime;
|
||||
|
||||
|
||||
/*// TODO: restore
|
||||
#[test]
|
||||
fn correct_transfer() {
|
||||
// We open a server and a client, send a message between the two, and check that they were
|
||||
// successfully received.
|
||||
|
||||
test_one(KadMsg::Ping);
|
||||
test_one(KadMsg::PutValue {
|
||||
key: encode(Hash::SHA2256, &[1, 2, 3, 4]).unwrap(),
|
||||
record: (),
|
||||
});
|
||||
test_one(KadMsg::GetValueReq {
|
||||
key: encode(Hash::SHA2256, &[10, 11, 12]).unwrap(),
|
||||
});
|
||||
test_one(KadMsg::FindNodeReq {
|
||||
key: PeerId::random()
|
||||
key: PeerId::random(),
|
||||
});
|
||||
test_one(KadMsg::FindNodeRes {
|
||||
closer_peers: vec![
|
||||
KadPeer {
|
||||
closer_peers: vec![KadPeer {
|
||||
node_id: PeerId::random(),
|
||||
multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()],
|
||||
connection_ty: KadConnectionType::Connected,
|
||||
},
|
||||
],
|
||||
}],
|
||||
});
|
||||
test_one(KadMsg::GetProvidersReq {
|
||||
key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(),
|
||||
});
|
||||
test_one(KadMsg::GetProvidersRes {
|
||||
closer_peers: vec![
|
||||
KadPeer {
|
||||
closer_peers: vec![KadPeer {
|
||||
node_id: PeerId::random(),
|
||||
multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()],
|
||||
connection_ty: KadConnectionType::Connected,
|
||||
},
|
||||
],
|
||||
provider_peers: vec![
|
||||
KadPeer {
|
||||
}],
|
||||
provider_peers: vec![KadPeer {
|
||||
node_id: PeerId::random(),
|
||||
multiaddrs: vec!["/ip4/200.201.202.203/tcp/1999".parse().unwrap()],
|
||||
connection_ty: KadConnectionType::NotConnected,
|
||||
},
|
||||
],
|
||||
}],
|
||||
});
|
||||
test_one(KadMsg::AddProvider {
|
||||
key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(),
|
||||
@ -504,7 +519,6 @@ mod tests {
|
||||
let (listener, addr) = transport
|
||||
.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap())
|
||||
.unwrap();
|
||||
|
||||
tx.send(addr).unwrap();
|
||||
|
||||
let future = listener
|
||||
@ -531,5 +545,5 @@ mod tests {
|
||||
let _ = rt.block_on(future).unwrap();
|
||||
bg_thread.join().unwrap();
|
||||
}
|
||||
}
|
||||
}*/
|
||||
}
|
||||
|
@ -18,348 +18,532 @@
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! This module handles performing iterative queries about the network.
|
||||
//! Contains the iterative querying process of Kademlia.
|
||||
//!
|
||||
//! This allows one to create queries that iterate on the DHT on nodes that become closer and
|
||||
//! closer to the target.
|
||||
|
||||
use fnv::FnvHashSet;
|
||||
use futures::{future, Future, stream, Stream};
|
||||
use futures::prelude::*;
|
||||
use handler::KademliaHandlerIn;
|
||||
use kbucket::KBucketsPeerId;
|
||||
use libp2p_core::PeerId;
|
||||
use multiaddr::{Protocol, Multiaddr};
|
||||
use protocol;
|
||||
use rand;
|
||||
use multihash::Multihash;
|
||||
use smallvec::SmallVec;
|
||||
use std::cmp::Ordering;
|
||||
use std::io::Error as IoError;
|
||||
use std::mem;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio_timer::Delay;
|
||||
|
||||
/// Parameters of a query. Allows plugging the query-related code with the rest of the
|
||||
/// infrastructure.
|
||||
pub struct QueryParams<FBuckets, FFindNode> {
|
||||
/// Identifier of the local peer.
|
||||
pub local_id: PeerId,
|
||||
/// Called whenever we need to obtain the peers closest to a certain peer.
|
||||
pub kbuckets_find_closest: FBuckets,
|
||||
/// Level of parallelism for networking. If this is `N`, then we can dial `N` nodes at a time.
|
||||
pub parallelism: usize,
|
||||
/// Called whenever we want to send a `FIND_NODE` RPC query.
|
||||
pub find_node: FFindNode,
|
||||
}
|
||||
|
||||
/// Event that happens during a query.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum QueryEvent<TOut> {
|
||||
/// Learned about new multiaddresses for the given peers.
|
||||
PeersReported(Vec<(PeerId, Vec<Multiaddr>)>),
|
||||
/// Finished the processing of the query. Contains the result.
|
||||
Finished(TOut),
|
||||
}
|
||||
|
||||
/// Starts a query for an iterative `FIND_NODE` request.
|
||||
#[inline]
|
||||
pub fn find_node<'a, FBuckets, FFindNode>(
|
||||
query_params: QueryParams<FBuckets, FFindNode>,
|
||||
searched_key: PeerId,
|
||||
) -> Box<Stream<Item = QueryEvent<Vec<PeerId>>, Error = IoError> + Send + 'a>
|
||||
where
|
||||
FBuckets: Fn(PeerId) -> Vec<PeerId> + 'a + Clone,
|
||||
FFindNode: Fn(Multiaddr, PeerId) -> Box<Future<Item = Vec<protocol::Peer>, Error = IoError> + Send> + 'a + Clone,
|
||||
{
|
||||
query(query_params, searched_key, 20) // TODO: constant
|
||||
}
|
||||
|
||||
/// Refreshes a specific bucket by performing an iterative `FIND_NODE` on a random ID of this
|
||||
/// bucket.
|
||||
/// State of an iterative query process.
|
||||
///
|
||||
/// Returns a dummy no-op future if `bucket_num` is out of range.
|
||||
pub fn refresh<'a, FBuckets, FFindNode>(
|
||||
query_params: QueryParams<FBuckets, FFindNode>,
|
||||
bucket_num: usize,
|
||||
) -> Box<Stream<Item = QueryEvent<()>, Error = IoError> + Send + 'a>
|
||||
where
|
||||
FBuckets: Fn(PeerId) -> Vec<PeerId> + 'a + Clone,
|
||||
FFindNode: Fn(Multiaddr, PeerId) -> Box<Future<Item = Vec<protocol::Peer>, Error = IoError> + Send> + 'a + Clone,
|
||||
{
|
||||
let peer_id = match gen_random_id(&query_params.local_id, bucket_num) {
|
||||
Ok(p) => p,
|
||||
Err(()) => return Box::new(stream::once(Ok(QueryEvent::Finished(())))),
|
||||
};
|
||||
/// The API of this state machine is similar to the one of `Future`, `Stream` or `Swarm`. You need
|
||||
/// to call `poll()` to query the state for actions to perform. If `NotReady` is returned, the
|
||||
/// current task will be woken up automatically when `poll()` needs to be called again.
|
||||
///
|
||||
/// Note that this struct only handles iterating over nodes that are close to the target. For
|
||||
/// `FIND_NODE` queries you don't need more than that. However for `FIND_VALUE` and
|
||||
/// `GET_PROVIDERS`, you need to extract the value or the list of providers yourself from the
/// RPC responses received from remotes, as this is not handled by the `QueryState`.
|
||||
#[derive(Debug)]
|
||||
pub struct QueryState {
|
||||
/// Target we're looking for.
|
||||
target: QueryTarget,
|
||||
|
||||
let stream = find_node(query_params, peer_id).map(|event| {
|
||||
match event {
|
||||
QueryEvent::PeersReported(peers) => QueryEvent::PeersReported(peers),
|
||||
QueryEvent::Finished(_) => QueryEvent::Finished(()),
|
||||
}
|
||||
});
|
||||
/// Stage of the query. See the documentation of `QueryStage`.
|
||||
stage: QueryStage,
|
||||
|
||||
Box::new(stream) as Box<_>
|
||||
}
|
||||
/// Ordered list of the peers closest to the result we're looking for.
|
||||
/// Entries that are `InProgress` shouldn't be removed from the list before they complete.
|
||||
/// Must never contain two entries with the same peer IDs.
|
||||
closest_peers: SmallVec<[(PeerId, QueryPeerState); 32]>,
|
||||
|
||||
// Generates a random `PeerId` that belongs to the given bucket.
|
||||
//
|
||||
// Returns an error if `bucket_num` is out of range.
|
||||
fn gen_random_id(my_id: &PeerId, bucket_num: usize) -> Result<PeerId, ()> {
|
||||
let my_id_len = my_id.as_bytes().len();
|
||||
/// Allowed level of parallelism.
|
||||
parallelism: usize,
|
||||
|
||||
// TODO: this 2 is magic here; it is the length of the hash of the multihash
|
||||
let bits_diff = bucket_num + 1;
|
||||
if bits_diff > 8 * (my_id_len - 2) {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
let mut random_id = [0; 64];
|
||||
for byte in 0..my_id_len {
|
||||
match byte.cmp(&(my_id_len - bits_diff / 8 - 1)) {
|
||||
Ordering::Less => {
|
||||
random_id[byte] = my_id.as_bytes()[byte];
|
||||
}
|
||||
Ordering::Equal => {
|
||||
let mask: u8 = (1 << (bits_diff % 8)) - 1;
|
||||
random_id[byte] = (my_id.as_bytes()[byte] & !mask) | (rand::random::<u8>() & mask);
|
||||
}
|
||||
Ordering::Greater => {
|
||||
random_id[byte] = rand::random();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let peer_id = PeerId::from_bytes(random_id[..my_id_len].to_owned())
|
||||
.expect("randomly-generated peer ID should always be valid");
|
||||
Ok(peer_id)
|
||||
}
|
||||
|
||||
// Generic query-performing function.
|
||||
fn query<'a, FBuckets, FFindNode>(
|
||||
query_params: QueryParams<FBuckets, FFindNode>,
|
||||
searched_key: PeerId,
|
||||
/// Number of results to produce.
|
||||
num_results: usize,
|
||||
) -> Box<Stream<Item = QueryEvent<Vec<PeerId>>, Error = IoError> + Send + 'a>
|
||||
where
|
||||
FBuckets: Fn(PeerId) -> Vec<PeerId> + 'a + Clone,
|
||||
FFindNode: Fn(Multiaddr, PeerId) -> Box<Future<Item = Vec<protocol::Peer>, Error = IoError> + Send> + 'a + Clone,
|
||||
{
|
||||
debug!("Start query for {:?}; num results = {}", searched_key, num_results);
|
||||
|
||||
// State of the current iterative process.
|
||||
struct State<'a> {
|
||||
// At which stage we are.
|
||||
stage: Stage,
|
||||
// Final output of the iteration.
|
||||
result: Vec<PeerId>,
|
||||
// For each open connection, a future with the response of the remote.
|
||||
// Note that don't use a `SmallVec` here because `select_all` produces a `Vec`.
|
||||
current_attempts_fut: Vec<Box<Future<Item = Vec<protocol::Peer>, Error = IoError> + Send + 'a>>,
|
||||
// For each open connection, the peer ID that we are connected to.
|
||||
// Must always have the same length as `current_attempts_fut`.
|
||||
current_attempts_addrs: SmallVec<[PeerId; 32]>,
|
||||
// Nodes that need to be attempted.
|
||||
pending_nodes: Vec<PeerId>,
|
||||
// Peers that we tried to contact but failed.
|
||||
failed_to_contact: FnvHashSet<PeerId>,
|
||||
/// Timeout for each individual RPC query.
|
||||
rpc_timeout: Duration,
|
||||
}
|
||||
|
||||
// General stage of the state.
|
||||
#[derive(Copy, Clone, PartialEq, Eq)]
|
||||
enum Stage {
|
||||
// We are still in the first step of the algorithm where we try to find the closest node.
|
||||
FirstStep,
|
||||
// We are contacting the k closest nodes in order to fill the list with enough results.
|
||||
SecondStep,
|
||||
// The results are complete, and the next stream iteration will produce the outcome.
|
||||
FinishingNextIter,
|
||||
// We are finished and the stream shouldn't return anything anymore.
|
||||
Finished,
|
||||
/// Configuration for a query.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct QueryConfig<TIter> {
|
||||
/// Target of the query.
|
||||
pub target: QueryTarget,
|
||||
|
||||
/// Iterator to a list of `num_results` nodes that we know of whose distance is close to the
|
||||
/// target.
|
||||
pub known_closest_peers: TIter,
|
||||
|
||||
/// Allowed level of parallelism.
|
||||
pub parallelism: usize,
|
||||
|
||||
/// Number of results to produce.
|
||||
pub num_results: usize,
|
||||
|
||||
/// Timeout for each individual RPC query.
|
||||
pub rpc_timeout: Duration,
|
||||
}
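For orientation, a minimal hedged example of filling in this configuration (not part of the diff): `bootstrap_peer` is a hypothetical peer taken from the k-buckets, parallelism and timeout mirror the values used in the tests at the bottom of this file, and 20 results matches the usual Kademlia replication factor mentioned earlier.

let bootstrap_peer: PeerId = PeerId::random(); // stand-in for a peer from the k-buckets
let mut query = QueryState::new(QueryConfig {
    target: QueryTarget::FindPeer(PeerId::random()),
    known_closest_peers: std::iter::once(bootstrap_peer.clone()),
    parallelism: 3,
    num_results: 20,
    rpc_timeout: Duration::from_secs(10),
});
// The first call to `query.poll()` will ask us to send a FIND_NODE RPC to `bootstrap_peer`.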
|
||||
|
||||
let initial_state = State {
|
||||
stage: Stage::FirstStep,
|
||||
result: Vec::with_capacity(num_results),
|
||||
current_attempts_fut: Vec::new(),
|
||||
current_attempts_addrs: SmallVec::new(),
|
||||
pending_nodes: {
|
||||
let kbuckets_find_closest = query_params.kbuckets_find_closest.clone();
|
||||
kbuckets_find_closest(searched_key.clone()) // TODO: suboptimal
|
||||
/// Stage of the query.
|
||||
#[derive(Debug)]
|
||||
enum QueryStage {
|
||||
/// We are trying to find a closest node.
|
||||
Iterating {
|
||||
/// Number of successful query results in a row that didn't find any closer node.
|
||||
// TODO: this is not great, because we don't necessarily receive responses in the order
|
||||
// we made the queries; it is possible that we query multiple far-away nodes in a
|
||||
// row, and obtain results before the result of the closest nodes
|
||||
no_closer_in_a_row: usize,
|
||||
},
|
||||
failed_to_contact: Default::default(),
|
||||
};
|
||||
|
||||
let parallelism = query_params.parallelism;
|
||||
// We have found the closest node, and we are now pinging the nodes we know about.
|
||||
Frozen,
|
||||
}
|
||||
|
||||
// Start of the iterative process.
|
||||
let stream = stream::unfold(initial_state, move |mut state| -> Option<_> {
|
||||
match state.stage {
|
||||
Stage::FinishingNextIter => {
|
||||
let result = mem::replace(&mut state.result, Vec::new());
|
||||
debug!("Query finished with {} results", result.len());
|
||||
state.stage = Stage::Finished;
|
||||
let future = future::ok((Some(QueryEvent::Finished(result)), state));
|
||||
return Some(future::Either::A(future));
|
||||
impl QueryState {
|
||||
/// Creates a new query.
|
||||
///
|
||||
/// You should call `poll()` this function returns in order to know what to do.
|
||||
pub fn new(config: QueryConfig<impl IntoIterator<Item = PeerId>>) -> QueryState {
|
||||
QueryState {
|
||||
target: config.target,
|
||||
stage: QueryStage::Iterating {
|
||||
no_closer_in_a_row: 0,
|
||||
},
|
||||
Stage::Finished => {
|
||||
return None;
|
||||
},
|
||||
_ => ()
|
||||
};
|
||||
|
||||
let searched_key = searched_key.clone();
|
||||
let find_node_rpc = query_params.find_node.clone();
|
||||
|
||||
// Find out which nodes to contact at this iteration.
|
||||
let to_contact = {
|
||||
let wanted_len = if state.stage == Stage::FirstStep {
|
||||
parallelism.saturating_sub(state.current_attempts_fut.len())
|
||||
} else {
|
||||
num_results.saturating_sub(state.current_attempts_fut.len())
|
||||
};
|
||||
let mut to_contact = SmallVec::<[_; 16]>::new();
|
||||
while to_contact.len() < wanted_len && !state.pending_nodes.is_empty() {
|
||||
// Move the first element of `pending_nodes` to `to_contact`, but ignore nodes that
|
||||
// are already part of the results or of a current attempt or if we failed to
|
||||
// contact it before.
|
||||
let peer = state.pending_nodes.remove(0);
|
||||
if state.result.iter().any(|p| p == &peer) {
|
||||
continue;
|
||||
closest_peers: config
|
||||
.known_closest_peers
|
||||
.into_iter()
|
||||
.map(|peer_id| (peer_id, QueryPeerState::NotContacted))
|
||||
.take(config.num_results)
|
||||
.collect(),
|
||||
parallelism: config.parallelism,
|
||||
num_results: config.num_results,
|
||||
rpc_timeout: config.rpc_timeout,
|
||||
}
|
||||
if state.current_attempts_addrs.iter().any(|p| p == &peer) {
|
||||
continue;
|
||||
}
|
||||
if state.failed_to_contact.iter().any(|p| p == &peer) {
|
||||
continue;
|
||||
}
|
||||
to_contact.push(peer);
|
||||
}
|
||||
to_contact
|
||||
};
|
||||
|
||||
debug!("New query round; {} queries in progress; contacting {} new peers",
|
||||
state.current_attempts_fut.len(),
|
||||
to_contact.len());
|
||||
|
||||
// For each node in `to_contact`, start an RPC query and a corresponding entry in the two
|
||||
// `state.current_attempts_*` fields.
|
||||
for peer in to_contact {
|
||||
let multiaddr: Multiaddr = Protocol::P2p(peer.clone().into_bytes()).into();
|
||||
|
||||
let searched_key2 = searched_key.clone();
|
||||
let current_attempt = find_node_rpc(multiaddr.clone(), searched_key2); // TODO: suboptimal
|
||||
state.current_attempts_addrs.push(peer.clone());
|
||||
state
|
||||
.current_attempts_fut
|
||||
.push(Box::new(current_attempt) as Box<_>);
|
||||
}
|
||||
debug_assert_eq!(
|
||||
state.current_attempts_addrs.len(),
|
||||
state.current_attempts_fut.len()
|
||||
);
|
||||
|
||||
// Extract `current_attempts_fut` so that we can pass it to `select_all`. We will push the
|
||||
// values back when inside the loop.
|
||||
let current_attempts_fut = mem::replace(&mut state.current_attempts_fut, Vec::new());
|
||||
if current_attempts_fut.is_empty() {
|
||||
// If `current_attempts_fut` is empty, then `select_all` would panic. It happens
|
||||
// when we have no additional node to query.
|
||||
debug!("Finishing query early because no additional node available");
|
||||
state.stage = Stage::FinishingNextIter;
|
||||
let future = future::ok((None, state));
|
||||
return Some(future::Either::A(future));
|
||||
}
|
||||
|
||||
// This is the future that continues or breaks the `loop_fn`.
|
||||
let future = future::select_all(current_attempts_fut.into_iter()).then(move |result| {
|
||||
let (message, trigger_idx, other_current_attempts) = match result {
|
||||
Err((err, trigger_idx, other_current_attempts)) => {
|
||||
(Err(err), trigger_idx, other_current_attempts)
|
||||
}
|
||||
Ok((message, trigger_idx, other_current_attempts)) => {
|
||||
(Ok(message), trigger_idx, other_current_attempts)
|
||||
}
|
||||
};
|
||||
|
||||
// Putting back the extracted elements in `state`.
|
||||
let remote_id = state.current_attempts_addrs.remove(trigger_idx);
|
||||
debug_assert!(state.current_attempts_fut.is_empty());
|
||||
state.current_attempts_fut = other_current_attempts;
|
||||
|
||||
// `message` contains the reason why the current future was woken up.
|
||||
let closer_peers = match message {
|
||||
Ok(msg) => msg,
|
||||
Err(err) => {
|
||||
trace!("RPC query failed for {:?}: {:?}", remote_id, err);
|
||||
state.failed_to_contact.insert(remote_id);
|
||||
return future::ok((None, state));
|
||||
}
|
||||
};
|
||||
|
||||
// Inserting the node we received a response from into `state.result`.
|
||||
// The code is non-trivial because `state.result` is ordered by distance and is limited
|
||||
// by `num_results` elements.
|
||||
if let Some(insert_pos) = state.result.iter().position(|e| {
|
||||
e.distance_with(&searched_key) >= remote_id.distance_with(&searched_key)
|
||||
}) {
|
||||
if state.result[insert_pos] != remote_id {
|
||||
if state.result.len() >= num_results {
|
||||
state.result.pop();
|
||||
}
|
||||
state.result.insert(insert_pos, remote_id);
|
||||
}
|
||||
} else if state.result.len() < num_results {
|
||||
state.result.push(remote_id);
|
||||
/// Returns the target of the query. Always the same as what was passed to `new()`.
|
||||
#[inline]
|
||||
pub fn target(&self) -> &QueryTarget {
|
||||
&self.target
|
||||
}
|
||||
|
||||
// The loop below will set this variable to `true` if we find a new element to put at
|
||||
// the top of the result. This would mean that we have to continue looping.
|
||||
let mut local_nearest_node_updated = false;
|
||||
/// After `poll()` returned `SendRpc`, this method should be called when the node sends back
|
||||
/// the result of the query.
|
||||
///
|
||||
/// Note that if this query is a `FindValue` query and a node returns a record, feel free to
|
||||
/// immediately drop the query altogether and use the record.
|
||||
///
|
||||
/// After this function returns, you should call `poll()` again.
|
||||
pub fn inject_rpc_result(
|
||||
&mut self,
|
||||
result_source: &PeerId,
|
||||
closer_peers: impl IntoIterator<Item = PeerId>,
|
||||
) {
|
||||
// Mark the peer as succeeded.
|
||||
for (peer_id, state) in self.closest_peers.iter_mut() {
|
||||
if peer_id == result_source {
|
||||
if let state @ QueryPeerState::InProgress(_) = state {
|
||||
*state = QueryPeerState::Succeeded;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update `state` with the actual content of the message.
|
||||
let mut new_known_multiaddrs = Vec::with_capacity(closer_peers.len());
|
||||
for mut peer in closer_peers {
|
||||
// Update the peerstore with the information sent by
|
||||
// the remote.
|
||||
// Add the entries in `closest_peers`.
|
||||
if let QueryStage::Iterating {
|
||||
ref mut no_closer_in_a_row,
|
||||
} = self.stage
|
||||
{
|
||||
let multiaddrs = mem::replace(&mut peer.multiaddrs, Vec::new());
|
||||
trace!("Reporting multiaddresses for {:?}: {:?}", peer.node_id, multiaddrs);
|
||||
new_known_multiaddrs.push((peer.node_id.clone(), multiaddrs));
|
||||
}
|
||||
// We increment now, and reset to 0 if we find a closer node.
|
||||
*no_closer_in_a_row += 1;
|
||||
|
||||
if peer.node_id.distance_with(&searched_key)
|
||||
<= state.result[0].distance_with(&searched_key)
|
||||
{
|
||||
local_nearest_node_updated = true;
|
||||
}
|
||||
|
||||
if state.result.iter().any(|ma| ma == &peer.node_id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Insert the node into `pending_nodes` at the right position, or do not
|
||||
// insert it if it is already in there.
|
||||
if let Some(insert_pos) = state.pending_nodes.iter().position(|e| {
|
||||
e.distance_with(&searched_key) >= peer.node_id.distance_with(&searched_key)
|
||||
}) {
|
||||
if state.pending_nodes[insert_pos] != peer.node_id {
|
||||
state.pending_nodes.insert(insert_pos, peer.node_id.clone());
|
||||
}
|
||||
} else {
|
||||
state.pending_nodes.push(peer.node_id.clone());
|
||||
}
|
||||
}
|
||||
|
||||
if state.result.len() >= num_results
|
||||
|| (state.stage != Stage::FirstStep && state.current_attempts_fut.is_empty())
|
||||
{
|
||||
state.stage = Stage::FinishingNextIter;
|
||||
|
||||
} else {
|
||||
if !local_nearest_node_updated {
|
||||
trace!("Loop didn't update closer node; jumping to step 2");
|
||||
state.stage = Stage::SecondStep;
|
||||
}
|
||||
}
|
||||
|
||||
future::ok((Some(QueryEvent::PeersReported(new_known_multiaddrs)), state))
|
||||
for elem_to_add in closer_peers {
|
||||
let target = &self.target;
|
||||
let insert_pos = self.closest_peers.iter().position(|(id, _)| {
|
||||
let a = target.as_hash().distance_with(id.as_ref());
|
||||
let b = target.as_hash().distance_with(elem_to_add.as_ref());
|
||||
a >= b
|
||||
});
|
||||
|
||||
Some(future::Either::B(future))
|
||||
}).filter_map(|val| val);
|
||||
|
||||
Box::new(stream) as Box<_>
|
||||
if let Some(insert_pos) = insert_pos {
|
||||
// Make sure we don't insert duplicates.
|
||||
if self.closest_peers[insert_pos].0 != elem_to_add {
|
||||
if insert_pos == 0 {
|
||||
*no_closer_in_a_row = 0;
|
||||
}
|
||||
self.closest_peers
|
||||
.insert(insert_pos, (elem_to_add, QueryPeerState::NotContacted));
|
||||
}
|
||||
} else if self.closest_peers.len() < self.num_results {
|
||||
self.closest_peers
|
||||
.push((elem_to_add, QueryPeerState::NotContacted));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle if `no_closer_in_a_row` is too high.
|
||||
let freeze = if let QueryStage::Iterating { no_closer_in_a_row } = self.stage {
|
||||
no_closer_in_a_row >= self.parallelism
|
||||
} else {
|
||||
false
|
||||
};
|
||||
if freeze {
|
||||
self.stage = QueryStage::Frozen;
|
||||
}
|
||||
}
|
||||
|
||||
/// After `poll()` returned `SendRpc`, this function should be called if we were unable to
|
||||
/// reach the peer, or if an error of some sort happened.
|
||||
///
|
||||
/// Has no effect if the peer ID is not relevant to the query, so feel free to call this
|
||||
/// function whenever an error happens on the network.
|
||||
///
|
||||
/// After this function returns, you should call `poll()` again.
|
||||
pub fn inject_rpc_error(&mut self, id: &PeerId) {
|
||||
let state = self
|
||||
.closest_peers
|
||||
.iter_mut()
|
||||
.filter_map(
|
||||
|(peer_id, state)| {
|
||||
if peer_id == id {
|
||||
Some(state)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
},
|
||||
)
|
||||
.next();
|
||||
|
||||
match state {
|
||||
Some(state @ &mut QueryPeerState::InProgress(_)) => *state = QueryPeerState::Failed,
|
||||
Some(&mut QueryPeerState::NotContacted) => (),
|
||||
Some(&mut QueryPeerState::Succeeded) => (),
|
||||
Some(&mut QueryPeerState::Failed) => (),
|
||||
None => (),
|
||||
}
|
||||
}
|
||||
|
||||
/// Polls this individual query.
|
||||
pub fn poll(&mut self) -> Async<QueryStatePollOut> {
|
||||
// While iterating over peers, count the number of queries currently being processed.
|
||||
// This is used to not go over the limit of parallel requests.
|
||||
// If this is still 0 at the end of the function, that means the query is finished.
|
||||
let mut active_counter = 0;
|
||||
|
||||
// While iterating over peers, count the number of queries in a row (from closer to further
|
||||
// away from the target) that are in the succeeded state.
|
||||
// Contains `None` if the chain is broken.
|
||||
let mut succeeded_counter = Some(0);
|
||||
|
||||
// Extract `self.num_results` to avoid borrowing errors with closures.
|
||||
let num_results = self.num_results;
|
||||
|
||||
for &mut (ref peer_id, ref mut state) in self.closest_peers.iter_mut() {
|
||||
// Start by "killing" the query if it timed out.
|
||||
{
|
||||
let timed_out = match state {
|
||||
QueryPeerState::InProgress(timeout) => match timeout.poll() {
|
||||
Ok(Async::Ready(_)) | Err(_) => true,
|
||||
Ok(Async::NotReady) => false,
|
||||
},
|
||||
_ => false,
|
||||
};
|
||||
if timed_out {
|
||||
*state = QueryPeerState::Failed;
|
||||
return Async::Ready(QueryStatePollOut::CancelRpc { peer_id });
|
||||
}
|
||||
}
|
||||
|
||||
// Increment the local counters.
|
||||
match state {
|
||||
QueryPeerState::InProgress(_) => {
|
||||
active_counter += 1;
|
||||
}
|
||||
QueryPeerState::Succeeded => {
|
||||
if let Some(ref mut c) = succeeded_counter {
|
||||
*c += 1;
|
||||
}
|
||||
}
|
||||
_ => (),
|
||||
};
|
||||
|
||||
// We have enough results; the query is done.
|
||||
if succeeded_counter
|
||||
.as_ref()
|
||||
.map(|&c| c >= num_results)
|
||||
.unwrap_or(false)
|
||||
{
|
||||
return Async::Ready(QueryStatePollOut::Finished);
|
||||
}
|
||||
|
||||
// Dial the node if it needs dialing.
|
||||
let need_connect = match state {
|
||||
QueryPeerState::NotContacted => match self.stage {
|
||||
QueryStage::Iterating { .. } => active_counter < self.parallelism,
|
||||
QueryStage::Frozen => match self.target {
|
||||
QueryTarget::FindPeer(_) => true,
|
||||
QueryTarget::GetProviders(_) => false,
|
||||
},
|
||||
},
|
||||
_ => false,
|
||||
};
|
||||
|
||||
if need_connect {
|
||||
let delay = Delay::new(Instant::now() + self.rpc_timeout);
|
||||
*state = QueryPeerState::InProgress(delay);
|
||||
return Async::Ready(QueryStatePollOut::SendRpc {
|
||||
peer_id,
|
||||
query_target: &self.target,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// If we don't have any query in progress, return `Finished` as we don't have anything more
|
||||
// we can do.
|
||||
if active_counter > 0 {
|
||||
Async::NotReady
|
||||
} else {
|
||||
Async::Ready(QueryStatePollOut::Finished)
|
||||
}
|
||||
}
|
||||
|
||||
/// Consumes the query and returns the known closest peers.
|
||||
///
|
||||
/// > **Note**: This can be called at any time, but you normally only do that once the query
|
||||
/// > is finished.
|
||||
pub fn into_closest_peers(self) -> impl Iterator<Item = PeerId> {
|
||||
self.closest_peers
|
||||
.into_iter()
|
||||
.filter_map(|(peer_id, state)| {
|
||||
if let QueryPeerState::Succeeded = state {
|
||||
Some(peer_id)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.take(self.num_results)
|
||||
}
|
||||
}
|
||||
|
||||
/// Outcome of polling a query.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum QueryStatePollOut<'a> {
|
||||
/// The query is finished.
|
||||
///
|
||||
/// If this is a `FindValue` query, the user is supposed to extract the record themselves from
|
||||
/// any RPC result sent by a remote. If the query finished without that happening, this means
|
||||
/// that we didn't find any record.
|
||||
/// Similarly, if this is a `GetProviders` query, the user is supposed to extract the providers
|
||||
/// from any RPC result sent by a remote.
|
||||
///
|
||||
/// If this is a `FindNode` query, you can call `into_closest_peers` in order to obtain the
|
||||
/// result.
|
||||
Finished,
|
||||
|
||||
/// We need to send an RPC query to the given peer.
|
||||
///
|
||||
/// The RPC query to send can be derived from the target of the query.
|
||||
///
|
||||
/// After this has been returned, you should call either `inject_rpc_result` or
|
||||
/// `inject_rpc_error` at a later point in time.
|
||||
SendRpc {
|
||||
/// The peer to send the RPC query to.
|
||||
peer_id: &'a PeerId,
|
||||
/// A reminder of the query target. Same as what you obtain by calling `target()`.
|
||||
query_target: &'a QueryTarget,
|
||||
},
|
||||
|
||||
/// We no longer need to send a query to this specific node.
|
||||
///
|
||||
/// It is guaranteed that an earlier polling returned `SendRpc` with this peer id.
|
||||
CancelRpc {
|
||||
/// The target.
|
||||
peer_id: &'a PeerId,
|
||||
},
|
||||
}
|
||||
|
||||
/// What we're aiming for with our query.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum QueryTarget {
|
||||
/// Finding a peer.
|
||||
FindPeer(PeerId),
|
||||
/// Find the peers that provide a certain value.
|
||||
GetProviders(Multihash),
|
||||
}
|
||||
|
||||
impl QueryTarget {
|
||||
/// Creates the corresponding RPC request to send to remote.
|
||||
#[inline]
|
||||
pub fn to_rpc_request<TUserData>(&self, user_data: TUserData) -> KademliaHandlerIn<TUserData> {
|
||||
self.clone().into_rpc_request(user_data)
|
||||
}
|
||||
|
||||
/// Creates the corresponding RPC request to send to remote.
|
||||
pub fn into_rpc_request<TUserData>(self, user_data: TUserData) -> KademliaHandlerIn<TUserData> {
|
||||
match self {
|
||||
QueryTarget::FindPeer(key) => KademliaHandlerIn::FindNodeReq {
|
||||
key,
|
||||
user_data,
|
||||
},
|
||||
QueryTarget::GetProviders(key) => KademliaHandlerIn::GetProvidersReq {
|
||||
key,
|
||||
user_data,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the hash of the thing we're looking for.
|
||||
pub fn as_hash(&self) -> &Multihash {
|
||||
match self {
|
||||
QueryTarget::FindPeer(peer) => peer.as_ref(),
|
||||
QueryTarget::GetProviders(key) => key,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// State of peer in the context of a query.
|
||||
#[derive(Debug)]
|
||||
enum QueryPeerState {
|
||||
/// We haven't tried contacting the node.
|
||||
NotContacted,
|
||||
/// Waiting for an answer from the node to our RPC query. Includes a timeout.
|
||||
InProgress(Delay),
|
||||
/// We successfully reached the node.
|
||||
Succeeded,
|
||||
/// We tried to reach the node but failed.
|
||||
Failed,
|
||||
}
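Putting the poll-based API documented above together, here is a hedged sketch of a synchronous driver (not part of the commit). `send_rpc_and_wait` is a hypothetical blocking helper standing in for the real connection/handler plumbing, and a real caller would suspend on `NotReady` instead of spinning.

enum Step {
    Send(PeerId, QueryTarget),
    Wait,
    Done,
}

fn drive_query(
    mut query: QueryState,
    mut send_rpc_and_wait: impl FnMut(&PeerId, &QueryTarget) -> Result<Vec<PeerId>, IoError>,
) -> Vec<PeerId> {
    loop {
        // Copy the requested action out of the poll result first, so that the borrow of
        // `query` ends before we feed the outcome back into it.
        let step = match query.poll() {
            Async::Ready(QueryStatePollOut::SendRpc { peer_id, query_target }) => {
                Step::Send(peer_id.clone(), query_target.clone())
            }
            Async::Ready(QueryStatePollOut::CancelRpc { .. }) => Step::Wait,
            Async::Ready(QueryStatePollOut::Finished) => Step::Done,
            Async::NotReady => Step::Wait,
        };

        match step {
            Step::Send(peer_id, target) => match send_rpc_and_wait(&peer_id, &target) {
                Ok(closer_peers) => query.inject_rpc_result(&peer_id, closer_peers),
                Err(_) => query.inject_rpc_error(&peer_id),
            },
            Step::Wait => { /* a real task would be woken by the timeout timer */ }
            Step::Done => return query.into_closest_peers().collect(),
        }
    }
}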
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{QueryConfig, QueryState, QueryStatePollOut, QueryTarget};
|
||||
use futures::{self, prelude::*};
|
||||
use libp2p_core::PeerId;
|
||||
use std::{iter, time::Duration, sync::Arc, sync::Mutex, thread};
|
||||
use tokio;
|
||||
|
||||
#[test]
|
||||
fn start_by_sending_rpc_to_known_peers() {
|
||||
let random_id = PeerId::random();
|
||||
let target = QueryTarget::FindPeer(PeerId::random());
|
||||
|
||||
let mut query = QueryState::new(QueryConfig {
|
||||
target,
|
||||
known_closest_peers: iter::once(random_id.clone()),
|
||||
parallelism: 3,
|
||||
num_results: 100,
|
||||
rpc_timeout: Duration::from_secs(10),
|
||||
});
|
||||
|
||||
tokio::run(futures::future::poll_fn(move || {
|
||||
match try_ready!(Ok(query.poll())) {
|
||||
QueryStatePollOut::SendRpc { peer_id, .. } if peer_id == &random_id => {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
_ => panic!(),
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn continue_second_result() {
|
||||
let random_id = PeerId::random();
|
||||
let random_id2 = PeerId::random();
|
||||
let target = QueryTarget::FindPeer(PeerId::random());
|
||||
|
||||
let query = Arc::new(Mutex::new(QueryState::new(QueryConfig {
|
||||
target,
|
||||
known_closest_peers: iter::once(random_id.clone()),
|
||||
parallelism: 3,
|
||||
num_results: 100,
|
||||
rpc_timeout: Duration::from_secs(10),
|
||||
})));
|
||||
|
||||
// Let's do a first polling round to obtain the `SendRpc` request.
|
||||
tokio::run(futures::future::poll_fn({
|
||||
let random_id = random_id.clone();
|
||||
let query = query.clone();
|
||||
move || {
|
||||
match try_ready!(Ok(query.lock().unwrap().poll())) {
|
||||
QueryStatePollOut::SendRpc { peer_id, .. } if peer_id == &random_id => {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
}));
|
||||
|
||||
// Send the reply.
|
||||
query.lock().unwrap().inject_rpc_result(&random_id, iter::once(random_id2.clone()));
|
||||
|
||||
// Second polling round to check the second `SendRpc` request.
|
||||
tokio::run(futures::future::poll_fn({
|
||||
let query = query.clone();
|
||||
move || {
|
||||
match try_ready!(Ok(query.lock().unwrap().poll())) {
|
||||
QueryStatePollOut::SendRpc { peer_id, .. } if peer_id == &random_id2 => {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn timeout_works() {
|
||||
let random_id = PeerId::random();
|
||||
|
||||
let query = Arc::new(Mutex::new(QueryState::new(QueryConfig {
|
||||
target: QueryTarget::FindPeer(PeerId::random()),
|
||||
known_closest_peers: iter::once(random_id.clone()),
|
||||
parallelism: 3,
|
||||
num_results: 100,
|
||||
rpc_timeout: Duration::from_millis(100),
|
||||
})));
|
||||
|
||||
// Let's do a first polling round to obtain the `SendRpc` request.
|
||||
tokio::run(futures::future::poll_fn({
|
||||
let random_id = random_id.clone();
|
||||
let query = query.clone();
|
||||
move || {
|
||||
match try_ready!(Ok(query.lock().unwrap().poll())) {
|
||||
QueryStatePollOut::SendRpc { peer_id, .. } if peer_id == &random_id => {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
}));
|
||||
|
||||
// Wait for a bit.
|
||||
thread::sleep(Duration::from_millis(200));
|
||||
|
||||
// Second polling round to check the timeout.
|
||||
tokio::run(futures::future::poll_fn({
|
||||
let query = query.clone();
|
||||
move || {
|
||||
match try_ready!(Ok(query.lock().unwrap().poll())) {
|
||||
QueryStatePollOut::CancelRpc { peer_id, .. } if peer_id == &random_id => {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
}));
|
||||
|
||||
// Third polling round for finished.
|
||||
tokio::run(futures::future::poll_fn({
|
||||
let query = query.clone();
|
||||
move || {
|
||||
match try_ready!(Ok(query.lock().unwrap().poll())) {
|
||||
QueryStatePollOut::Finished => {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
}));
|
||||
}
|
||||
}
|
||||
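The tests above also show the intended driving pattern for `QueryState`: poll it, send out the `SendRpc` requests it emits, and feed replies back through `inject_rpc_result` until it reports `Finished`. The sketch below spells that loop out in one place. It is illustrative only and not part of this commit: the `send_to_handler` callback is hypothetical, the type parameters of `QueryState` are elided, and `QueryTarget` is assumed to be `Clone`.

// Illustrative sketch, not part of this commit: one way a caller could drive
// a query. `send_to_handler` stands in for delivering the request to the
// `KademliaHandler` of the corresponding peer.
fn drive_query<TUserData, F>(
    query: &mut QueryState,
    target: &QueryTarget,
    user_data: &TUserData,
    send_to_handler: &mut F,
) where
    TUserData: Clone,
    F: FnMut(PeerId, KademliaHandlerIn<TUserData>),
{
    loop {
        match query.poll() {
            Async::Ready(QueryStatePollOut::SendRpc { peer_id, .. }) => {
                // Turn the query target into the RPC understood by the handler.
                let peer_id = peer_id.clone();
                let rpc = target.clone().into_rpc_request(user_data.clone());
                send_to_handler(peer_id, rpc);
            }
            Async::Ready(QueryStatePollOut::CancelRpc { .. }) => {
                // The RPC timed out; the in-flight request can be dropped.
            }
            Async::Ready(QueryStatePollOut::Finished) => break,
            // No further progress until `inject_rpc_result` is called with a reply.
            Async::NotReady => break,
        }
    }
}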
protocols/kad/src/topology.rs (new file, 79 lines)
@ -0,0 +1,79 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use kbucket::KBucketsPeerId;
use libp2p_core::{Multiaddr, PeerId, topology::MemoryTopology, topology::Topology};
use multihash::Multihash;
use protocol::KadConnectionType;
use std::vec;

/// Trait allowing retrieval of the information necessary for the Kademlia system to work.
pub trait KademliaTopology: Topology {
    /// Iterator returned by `closest_peers`.
    type ClosestPeersIter: Iterator<Item = PeerId>;

    /// Iterator returned by `get_providers`.
    type GetProvidersIter: Iterator<Item = PeerId>;

    /// Adds an address discovered through Kademlia to the topology.
    fn add_kad_discovered_address(&mut self, peer: PeerId, addr: Multiaddr,
                                  connection_ty: KadConnectionType);

    /// Returns the known peers closest by XOR distance to the `target`.
    ///
    /// The `max` parameter is the maximum number of results that we are going to use. If more
    /// than `max` elements are returned, they will be ignored.
    fn closest_peers(&mut self, target: &Multihash, max: usize) -> Self::ClosestPeersIter;

    /// Registers the given peer as a provider of the resource with the given ID.
    ///
    /// > **Note**: There is no `remove_provider` method. Implementations must include a
    /// > time-to-live system so that entries disappear after a while.
    // TODO: specify the TTL? it has to match the timeout in the behaviour somehow, but this could
    // also be handled by the user
    fn add_provider(&mut self, key: Multihash, peer_id: PeerId);

    /// Returns the list of providers that have been registered with `add_provider`.
    fn get_providers(&mut self, key: &Multihash) -> Self::GetProvidersIter;
}
// TODO: stupid idea to implement on `MemoryTopology`
impl KademliaTopology for MemoryTopology {
    type ClosestPeersIter = vec::IntoIter<PeerId>;
    type GetProvidersIter = vec::IntoIter<PeerId>;

    fn add_kad_discovered_address(&mut self, peer: PeerId, addr: Multiaddr, _: KadConnectionType) {
        self.add_address(peer, addr)
    }

    fn closest_peers(&mut self, target: &Multihash, _: usize) -> Self::ClosestPeersIter {
        let mut list = self.peers().cloned().collect::<Vec<_>>();
        list.sort_by(|a, b| target.distance_with(b.as_ref()).cmp(&target.distance_with(a.as_ref())));
        list.into_iter()
    }

    fn add_provider(&mut self, _: Multihash, _: PeerId) {
        unimplemented!()
    }

    fn get_providers(&mut self, _: &Multihash) -> Self::GetProvidersIter {
        unimplemented!()
    }
}
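Because the trait deliberately omits `remove_provider`, a real `KademliaTopology` implementation has to expire provider records on its own, which `MemoryTopology` above sidesteps by leaving the provider methods unimplemented. Below is a minimal sketch of what such a TTL-based provider store could look like; the `ProviderStore` type and its TTL handling are illustrative assumptions, not part of this commit.

// Illustrative provider store with per-record expiry; not part of this commit.
// `Multihash` and `PeerId` are the same types imported at the top of the file.
use std::collections::HashMap;
use std::time::{Duration, Instant};

struct ProviderStore {
    ttl: Duration,
    // For each key, the providers and the instant at which each record expires.
    records: HashMap<Multihash, Vec<(PeerId, Instant)>>,
}

impl ProviderStore {
    fn new(ttl: Duration) -> Self {
        ProviderStore { ttl, records: HashMap::new() }
    }

    /// What `add_provider` would do: insert a record, or refresh its expiry.
    fn add_provider(&mut self, key: Multihash, peer_id: PeerId) {
        let expires = Instant::now() + self.ttl;
        let entries = self.records.entry(key).or_insert_with(Vec::new);
        match entries.iter_mut().find(|entry| entry.0 == peer_id) {
            Some(entry) => entry.1 = expires,
            None => entries.push((peer_id, expires)),
        }
    }

    /// What `get_providers` would do: drop expired records, return the rest.
    fn get_providers(&mut self, key: &Multihash) -> Vec<PeerId> {
        let now = Instant::now();
        match self.records.get_mut(key) {
            Some(entries) => {
                entries.retain(|&(_, expires)| expires > now);
                entries.iter().map(|(p, _)| p.clone()).collect()
            }
            None => Vec::new(),
        }
    }
}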
@ -48,7 +48,7 @@ impl<TSubstream> Default for PeriodicPingBehaviour<TSubstream> {
    }
}

impl<TSubstream> NetworkBehaviour for PeriodicPingBehaviour<TSubstream>
impl<TSubstream, TTopology> NetworkBehaviour<TTopology> for PeriodicPingBehaviour<TSubstream>
where
    TSubstream: AsyncRead + AsyncWrite,
{
@ -72,6 +72,7 @@ where

    fn poll(
        &mut self,
        _: &mut TTopology,
    ) -> Async<
        NetworkBehaviourAction<
            <Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
@ -48,7 +48,7 @@ impl<TSubstream> Default for PingListenBehaviour<TSubstream> {
    }
}

impl<TSubstream> NetworkBehaviour for PingListenBehaviour<TSubstream>
impl<TSubstream, TTopology> NetworkBehaviour<TTopology> for PingListenBehaviour<TSubstream>
where
    TSubstream: AsyncRead + AsyncWrite,
{
@ -72,6 +72,7 @@ where

    fn poll(
        &mut self,
        _: &mut TTopology,
    ) -> Async<
        NetworkBehaviourAction<
            <Self::ProtocolsHandler as ProtocolsHandler>::InEvent,