2018-11-29 12:11:35 +01:00
|
|
|
|
// Copyright 2018 Parity Technologies (UK) Ltd.
|
|
|
|
|
//
|
|
|
|
|
// Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
|
// copy of this software and associated documentation files (the "Software"),
|
|
|
|
|
// to deal in the Software without restriction, including without limitation
|
|
|
|
|
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
|
// and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
|
// Software is furnished to do so, subject to the following conditions:
|
|
|
|
|
//
|
|
|
|
|
// The above copyright notice and this permission notice shall be included in
|
|
|
|
|
// all copies or substantial portions of the Software.
|
|
|
|
|
//
|
|
|
|
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
|
|
|
|
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
|
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
|
|
|
// DEALINGS IN THE SOFTWARE.
|
|
|
|
|
|
2019-02-12 12:56:39 +01:00
|
|
|
|
use crate::addresses::Addresses;
|
2019-01-21 10:33:51 +00:00
|
|
|
|
use crate::handler::{KademliaHandler, KademliaHandlerEvent, KademliaHandlerIn, KademliaRequestId};
|
2019-03-18 18:20:57 +01:00
|
|
|
|
use crate::kbucket::{KBucketsTable, KBucketsPeerId, Update};
|
2019-01-21 10:33:51 +00:00
|
|
|
|
use crate::protocol::{KadConnectionType, KadPeer};
|
2019-03-18 18:20:57 +01:00
|
|
|
|
use crate::query::{QueryConfig, QueryState, QueryStatePollOut};
|
2018-11-29 12:11:35 +01:00
|
|
|
|
use fnv::{FnvHashMap, FnvHashSet};
|
|
|
|
|
use futures::{prelude::*, stream};
|
2018-12-01 13:34:57 +01:00
|
|
|
|
use libp2p_core::swarm::{ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters};
|
2019-01-26 23:57:53 +01:00
|
|
|
|
use libp2p_core::{protocols_handler::ProtocolsHandler, Multiaddr, PeerId};
|
2018-11-29 12:11:35 +01:00
|
|
|
|
use multihash::Multihash;
|
|
|
|
|
use rand;
|
|
|
|
|
use smallvec::SmallVec;
|
2019-02-12 12:56:39 +01:00
|
|
|
|
use std::{cmp::Ordering, error, marker::PhantomData, time::Duration, time::Instant};
|
2018-11-29 12:11:35 +01:00
|
|
|
|
use tokio_io::{AsyncRead, AsyncWrite};
|
|
|
|
|
use tokio_timer::Interval;
|
|
|
|
|
|
2019-03-18 18:20:57 +01:00
|
|
|
|
mod test;
|
|
|
|
|
|
2018-11-29 12:11:35 +01:00
|
|
|
|
/// Network behaviour that handles Kademlia.
pub struct Kademlia<TSubstream> {
    /// Storage for the nodes. Contains the known multiaddresses for this node.
    kbuckets: KBucketsTable<PeerId, Addresses>,

    /// All the iterative queries we are currently performing, with their ID.
    /// For `GET_PROVIDERS` queries, the accumulated providers are stored inside the
    /// query's `QueryInfoInner::GetProviders::pending_results`.
    active_queries: FnvHashMap<QueryId, QueryState<QueryInfo, PeerId>>,

    /// List of peers the swarm is connected to.
    connected_peers: FnvHashSet<PeerId>,

    /// Contains a list of peer IDs which we are not connected to, and an RPC query to send to them
    /// once they connect.
    pending_rpcs: SmallVec<[(PeerId, KademliaHandlerIn<QueryId>); 8]>,

    /// Identifier for the next query that we start. Incremented by `start_query`.
    next_query_id: QueryId,

    /// List of values and peers that are providing them.
    ///
    /// Our local peer ID can be in this container.
    // TODO: Note that in reality the value is a SHA-256 of the actual value (https://github.com/libp2p/rust-libp2p/issues/694)
    values_providers: FnvHashMap<Multihash, SmallVec<[PeerId; 20]>>,

    /// List of values that we are providing ourselves. Must be kept in sync with
    /// `values_providers`.
    providing_keys: FnvHashSet<Multihash>,

    /// Interval to send `ADD_PROVIDER` messages to everyone.
    refresh_add_providers: stream::Fuse<Interval>,

    /// `α` in the Kademlia reference papers. Designates the maximum number of queries that we
    /// perform in parallel.
    parallelism: usize,

    /// `k` in the Kademlia reference papers. Number of results in a find node query.
    num_results: usize,

    /// Timeout for each individual RPC query.
    rpc_timeout: Duration,

    /// Events to return when polling. Drained from the front in `poll()`.
    queued_events: SmallVec<[NetworkBehaviourAction<KademliaHandlerIn<QueryId>, KademliaOut>; 32]>,

    /// List of providers to add to the topology as soon as we are in `poll()`.
    add_provider: SmallVec<[(Multihash, PeerId); 32]>,

    /// Marker to pin the generics.
    marker: PhantomData<TSubstream>,
}
|
|
|
|
|
/// Opaque type. Each query that we start gets a unique number.
///
/// Used as the `user_data` passed to the handler so that responses can be routed back to the
/// query that triggered the request.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct QueryId(usize);
|
|
|
|
|
2019-03-18 18:20:57 +01:00
|
|
|
|
/// Information about a query.
#[derive(Debug, Clone, PartialEq, Eq)]
struct QueryInfo {
    /// What we are querying and why.
    inner: QueryInfoInner,

    /// Temporary addresses used when trying to reach nodes.
    ///
    /// Filled from the `KadPeer` records returned by remote peers during the query; these
    /// addresses are unverified, hence "untrusted".
    untrusted_addresses: FnvHashMap<PeerId, SmallVec<[Multiaddr; 8]>>,
}
|
|
|
|
|
/// Additional information about the query.
#[derive(Debug, Clone, PartialEq, Eq)]
enum QueryInfoInner {
    /// The query was created for the Kademlia initialization process.
    Initialization {
        /// Hash we're targetting to insert ourselves in the k-buckets.
        target: PeerId,
    },

    /// The user requested a `FIND_PEER` query to be performed. It should be reported when finished.
    FindPeer(PeerId),

    /// The user requested a `GET_PROVIDERS` query to be performed. It should be reported when
    /// finished.
    GetProviders {
        /// Target we are searching the providers of.
        target: Multihash,

        /// Results to return. Filled over time as `GET_PROVIDERS` responses come in.
        pending_results: Vec<PeerId>,
    },

    /// We are traversing towards `target` and should add an `ADD_PROVIDER` message to the peers
    /// of the outcome with our own identity.
    AddProvider {
        /// Which hash we're targetting.
        target: Multihash,
    },
}
|
|
|
|
|
impl KBucketsPeerId<PeerId> for QueryInfo {
|
|
|
|
|
fn distance_with(&self, other: &PeerId) -> u32 {
|
|
|
|
|
let other: &Multihash = other.as_ref();
|
|
|
|
|
self.as_ref().distance_with(other)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fn max_distance() -> usize {
|
|
|
|
|
<PeerId as KBucketsPeerId>::max_distance()
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl AsRef<Multihash> for QueryInfo {
|
|
|
|
|
fn as_ref(&self) -> &Multihash {
|
|
|
|
|
match &self.inner {
|
|
|
|
|
QueryInfoInner::Initialization { target } => target.as_ref(),
|
|
|
|
|
QueryInfoInner::FindPeer(peer) => peer.as_ref(),
|
|
|
|
|
QueryInfoInner::GetProviders { target, .. } => target,
|
|
|
|
|
QueryInfoInner::AddProvider { target } => target,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl PartialEq<PeerId> for QueryInfo {
|
|
|
|
|
fn eq(&self, other: &PeerId) -> bool {
|
|
|
|
|
self.as_ref().eq(other)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl QueryInfo {
    /// Creates the corresponding RPC request to send to remote.
    ///
    /// `user_data` is handed to the handler and echoed back with the response (or error),
    /// so callers typically pass the `QueryId` this request belongs to.
    fn to_rpc_request<TUserData>(&self, user_data: TUserData) -> KademliaHandlerIn<TUserData> {
        match &self.inner {
            // Initialization lookups are ordinary FIND_NODE requests towards the random target.
            QueryInfoInner::Initialization { target } => KademliaHandlerIn::FindNodeReq {
                key: target.clone(),
                user_data,
            },
            QueryInfoInner::FindPeer(key) => KademliaHandlerIn::FindNodeReq {
                key: key.clone(),
                user_data,
            },
            QueryInfoInner::GetProviders { target, .. } => KademliaHandlerIn::GetProvidersReq {
                key: target.clone().into(),
                user_data,
            },
            QueryInfoInner::AddProvider { target, .. } => KademliaHandlerIn::FindNodeReq {
                // NOTE(review): this arm currently panics at runtime. `target` here is a
                // `Multihash` while the other `FindNodeReq` arms pass a `PeerId`, which
                // presumably is why the conversion is still unimplemented — confirm against
                // `KademliaHandlerIn::FindNodeReq`'s `key` type before relying on this path.
                key: unimplemented!(), // TODO: target.clone(),
                user_data,
            },
        }
    }
}
|
|
|
|
|
impl<TSubstream> Kademlia<TSubstream> {
    /// Creates a `Kademlia`.
    ///
    /// Performs the initialization queries (see `new_inner`) that insert our local ID into
    /// the DHT.
    #[inline]
    pub fn new(local_peer_id: PeerId) -> Self {
        Self::new_inner(local_peer_id, true)
    }

    /// Creates a `Kademlia`.
    ///
    /// Contrary to `new`, doesn't perform the initialization queries that store our local ID into
    /// the DHT.
    #[inline]
    pub fn without_init(local_peer_id: PeerId) -> Self {
        Self::new_inner(local_peer_id, false)
    }

    /// Adds a known address for the given `PeerId`.
    #[deprecated(note = "Use add_connected_address or add_not_connected_address instead")]
    pub fn add_address(&mut self, peer_id: &PeerId, address: Multiaddr) {
        // Historical behaviour: the deprecated method assumed a connected address.
        self.add_connected_address(peer_id, address)
    }

    /// Adds a known address for the given `PeerId`. We are connected to this address.
    pub fn add_connected_address(&mut self, peer_id: &PeerId, address: Multiaddr) {
        // NOTE(review): `entry_mut` returns an `Option`; when it is `None` the address is
        // silently dropped — presumably because the relevant k-bucket has no room. Confirm
        // against `KBucketsTable::entry_mut`.
        if let Some(list) = self.kbuckets.entry_mut(peer_id) {
            list.insert_connected(address);
        }
    }

    /// Adds a known address for the given `PeerId`. We are not connected or don't know whether we
    /// are connected to this address.
    pub fn add_not_connected_address(&mut self, peer_id: &PeerId, address: Multiaddr) {
        if let Some(list) = self.kbuckets.entry_mut(peer_id) {
            list.insert_not_connected(address);
        }
    }

    /// Inner implementation of the constructors.
    ///
    /// When `initialize` is true, starts one `Initialization` query per possible bucket so
    /// that our local ID gets inserted into the network's routing tables.
    fn new_inner(local_peer_id: PeerId, initialize: bool) -> Self {
        // `α` from the Kademlia paper; also used to pre-size `pending_rpcs`.
        let parallelism = 3;

        let mut behaviour = Kademlia {
            kbuckets: KBucketsTable::new(local_peer_id, Duration::from_secs(60)), // TODO: constant
            queued_events: SmallVec::new(),
            active_queries: Default::default(),
            connected_peers: Default::default(),
            pending_rpcs: SmallVec::with_capacity(parallelism),
            next_query_id: QueryId(0),
            values_providers: FnvHashMap::default(),
            providing_keys: FnvHashSet::default(),
            refresh_add_providers: Interval::new_interval(Duration::from_secs(60)).fuse(), // TODO: constant
            parallelism,
            num_results: 20,
            rpc_timeout: Duration::from_secs(8),
            add_provider: SmallVec::new(),
            marker: PhantomData,
        };

        if initialize {
            // As part of the initialization process, we start one `FIND_NODE` for each bit of the
            // possible range of peer IDs.
            for n in 0..256 {
                // `gen_random_id` fails for buckets outside the ID's bit length; those are
                // simply skipped.
                let target = match gen_random_id(behaviour.kbuckets.my_id(), n) {
                    Ok(p) => p,
                    Err(()) => continue,
                };

                behaviour.start_query(QueryInfoInner::Initialization { target });
            }
        }

        behaviour
    }
}
|
|
|
|
|
impl<TSubstream> Kademlia<TSubstream> {
|
|
|
|
|
/// Starts an iterative `FIND_NODE` request.
|
|
|
|
|
///
|
|
|
|
|
/// This will eventually produce an event containing the nodes of the DHT closest to the
|
|
|
|
|
/// requested `PeerId`.
|
|
|
|
|
#[inline]
|
|
|
|
|
pub fn find_node(&mut self, peer_id: PeerId) {
|
2019-03-18 18:20:57 +01:00
|
|
|
|
self.start_query(QueryInfoInner::FindPeer(peer_id));
|
2018-11-29 12:11:35 +01:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Starts an iterative `GET_PROVIDERS` request.
|
|
|
|
|
#[inline]
|
2019-03-18 18:20:57 +01:00
|
|
|
|
pub fn get_providers(&mut self, target: Multihash) {
|
|
|
|
|
self.start_query(QueryInfoInner::GetProviders { target, pending_results: Vec::new() });
|
2018-11-29 12:11:35 +01:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Register the local node as the provider for the given key.
|
|
|
|
|
///
|
|
|
|
|
/// This will periodically send `ADD_PROVIDER` messages to the nodes closest to the key. When
|
|
|
|
|
/// someone performs a `GET_PROVIDERS` iterative request on the DHT, our local node will be
|
|
|
|
|
/// returned as part of the results.
|
|
|
|
|
///
|
|
|
|
|
/// The actual meaning of *providing* the value of a key is not defined, and is specific to
|
|
|
|
|
/// the value whose key is the hash.
|
|
|
|
|
pub fn add_providing(&mut self, key: PeerId) {
|
2019-01-26 23:57:53 +01:00
|
|
|
|
self.providing_keys.insert(key.clone().into());
|
|
|
|
|
let providers = self.values_providers.entry(key.into()).or_insert_with(Default::default);
|
|
|
|
|
let my_id = self.kbuckets.my_id();
|
|
|
|
|
if !providers.iter().any(|k| k == my_id) {
|
|
|
|
|
providers.push(my_id.clone());
|
2018-11-29 12:11:35 +01:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Trigger the next refresh now.
|
|
|
|
|
self.refresh_add_providers = Interval::new(Instant::now(), Duration::from_secs(60)).fuse();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Cancels a registration done with `add_providing`.
|
|
|
|
|
///
|
|
|
|
|
/// There doesn't exist any "remove provider" message to broadcast on the network, therefore we
|
|
|
|
|
/// will still be registered as a provider in the DHT for as long as the timeout doesn't expire.
|
|
|
|
|
pub fn remove_providing(&mut self, key: &Multihash) {
|
2019-01-26 23:57:53 +01:00
|
|
|
|
self.providing_keys.remove(key);
|
|
|
|
|
|
|
|
|
|
let providers = match self.values_providers.get_mut(key) {
|
|
|
|
|
Some(p) => p,
|
|
|
|
|
None => return,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
if let Some(position) = providers.iter().position(|k| k == key) {
|
|
|
|
|
providers.remove(position);
|
|
|
|
|
providers.shrink_to_fit();
|
2018-11-29 12:11:35 +01:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Internal function that starts a query.
|
2019-03-18 18:20:57 +01:00
|
|
|
|
fn start_query(&mut self, target: QueryInfoInner) {
|
2019-01-26 23:57:53 +01:00
|
|
|
|
let query_id = self.next_query_id;
|
2018-11-29 12:11:35 +01:00
|
|
|
|
self.next_query_id.0 += 1;
|
2019-03-18 18:20:57 +01:00
|
|
|
|
|
|
|
|
|
let target = QueryInfo {
|
|
|
|
|
inner: target,
|
|
|
|
|
untrusted_addresses: Default::default(),
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let known_closest_peers = self.kbuckets
|
|
|
|
|
.find_closest::<Multihash>(target.as_ref())
|
|
|
|
|
.take(self.num_results);
|
|
|
|
|
|
|
|
|
|
self.active_queries.insert(
|
|
|
|
|
query_id,
|
|
|
|
|
QueryState::new(QueryConfig {
|
|
|
|
|
target,
|
|
|
|
|
parallelism: self.parallelism,
|
|
|
|
|
num_results: self.num_results,
|
|
|
|
|
rpc_timeout: self.rpc_timeout,
|
|
|
|
|
known_closest_peers,
|
|
|
|
|
})
|
|
|
|
|
);
|
2018-11-29 12:11:35 +01:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-26 23:57:53 +01:00
|
|
|
|
impl<TSubstream> NetworkBehaviour for Kademlia<TSubstream>
where
    TSubstream: AsyncRead + AsyncWrite,
{
    type ProtocolsHandler = KademliaHandler<TSubstream, QueryId>;
    type OutEvent = KademliaOut;

    fn new_handler(&mut self) -> Self::ProtocolsHandler {
        KademliaHandler::dial_and_listen()
    }

    /// Returns the addresses we know for `peer_id`: first the k-bucket entries, then any
    /// unverified addresses learned during ongoing queries.
    fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr> {
        // We should order addresses from decreasing likelyhood of connectivity, so start with
        // the addresses of that peer in the k-buckets.
        let mut out_list = self.kbuckets
            .get(peer_id)
            .map(|l| l.iter().cloned().collect::<Vec<_>>())
            .unwrap_or_else(Vec::new);

        // We add to that a temporary list of addresses from the ongoing queries.
        for query in self.active_queries.values() {
            if let Some(addrs) = query.target().untrusted_addresses.get(peer_id) {
                for addr in addrs {
                    out_list.push(addr.clone());
                }
            }
        }

        out_list
    }

    fn inject_connected(&mut self, id: PeerId, endpoint: ConnectedPoint) {
        // Flush any RPC that was waiting for this peer to connect.
        if let Some(pos) = self.pending_rpcs.iter().position(|(p, _)| p == &id) {
            let (_, rpc) = self.pending_rpcs.remove(pos);
            self.queued_events.push(NetworkBehaviourAction::SendEvent {
                peer_id: id.clone(),
                event: rpc,
            });
        }

        // Marking the peer connected may evict a pending entry; dial the peer the k-buckets
        // asked us to ping.
        if let Update::Pending(to_ping) = self.kbuckets.set_connected(&id) {
            self.queued_events.push(NetworkBehaviourAction::DialPeer {
                peer_id: to_ping.clone(),
            })
        }

        // Only outgoing connections give us an address we actually dialed successfully.
        if let ConnectedPoint::Dialer { address } = endpoint {
            if let Some(list) = self.kbuckets.entry_mut(&id) {
                list.insert_connected(address);
            }
        }

        self.connected_peers.insert(id);
    }

    fn inject_dial_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, _: &dyn error::Error) {
        if let Some(peer_id) = peer_id {
            if let Some(list) = self.kbuckets.get_mut(peer_id) {
                // TODO: don't remove the address if the error is that we are already connected
                // to this peer
                list.remove_addr(addr);
            }
        }
    }

    fn inject_disconnected(&mut self, id: &PeerId, old_endpoint: ConnectedPoint) {
        let was_in = self.connected_peers.remove(id);
        debug_assert!(was_in);

        // A disconnected peer can no longer answer; count it as an RPC error in every query.
        for query in self.active_queries.values_mut() {
            query.inject_rpc_error(id);
        }

        if let ConnectedPoint::Dialer { address } = old_endpoint {
            if let Some(list) = self.kbuckets.get_mut(id) {
                debug_assert!(list.is_connected());
                list.set_disconnected(&address);
            }
        }

        self.kbuckets.set_disconnected(&id);
    }

    fn inject_replaced(&mut self, peer_id: PeerId, old_endpoint: ConnectedPoint, new_endpoint: ConnectedPoint) {
        // We need to re-send the active queries.
        for (query_id, query) in self.active_queries.iter() {
            if query.is_waiting(&peer_id) {
                self.queued_events.push(NetworkBehaviourAction::SendEvent {
                    peer_id: peer_id.clone(),
                    event: query.target().to_rpc_request(*query_id),
                });
            }
        }

        // Move the address bookkeeping from the old connection to the new one.
        if let ConnectedPoint::Dialer { address } = old_endpoint {
            if let Some(list) = self.kbuckets.get_mut(&peer_id) {
                list.set_disconnected(&address);
            }
        }

        if let ConnectedPoint::Dialer { address } = new_endpoint {
            if let Some(list) = self.kbuckets.entry_mut(&peer_id) {
                list.insert_connected(address);
            }
        }
    }

    fn inject_node_event(&mut self, source: PeerId, event: KademliaHandlerEvent<QueryId>) {
        match event {
            // A remote asks us for the nodes closest to `key`.
            KademliaHandlerEvent::FindNodeReq { key, request_id } => {
                let closer_peers = self.kbuckets
                    .find_closest(&key)
                    .take(self.num_results)
                    .map(|peer_id| build_kad_peer(peer_id, &self.kbuckets))
                    .collect();

                self.queued_events.push(NetworkBehaviourAction::SendEvent {
                    peer_id: source,
                    event: KademliaHandlerIn::FindNodeRes {
                        closer_peers,
                        request_id,
                    },
                });
            }
            KademliaHandlerEvent::FindNodeRes {
                closer_peers,
                user_data,
            } => {
                // It is possible that we obtain a response for a query that has finished, which is
                // why we may not find an entry in `self.active_queries`.
                for peer in closer_peers.iter() {
                    self.queued_events.push(NetworkBehaviourAction::GenerateEvent(KademliaOut::Discovered {
                        peer_id: peer.node_id.clone(),
                        addresses: peer.multiaddrs.clone(),
                        ty: peer.connection_ty,
                    }));
                }
                if let Some(query) = self.active_queries.get_mut(&user_data) {
                    // Remember the reported addresses so `addresses_of_peer` can use them
                    // when we later dial these peers.
                    for peer in closer_peers.iter() {
                        query.target_mut().untrusted_addresses
                            .insert(peer.node_id.clone(), peer.multiaddrs.iter().cloned().collect());
                    }
                    query.inject_rpc_result(&source, closer_peers.into_iter().map(|kp| kp.node_id))
                }
            }
            // A remote asks us who provides `key`.
            KademliaHandlerEvent::GetProvidersReq { key, request_id } => {
                let closer_peers = self.kbuckets
                    .find_closest(&key)
                    .take(self.num_results)
                    .map(|peer_id| build_kad_peer(peer_id, &self.kbuckets))
                    .collect();

                let provider_peers = self.values_providers
                    .get(&key)
                    .into_iter()
                    .flat_map(|peers| peers)
                    .map(|peer_id| build_kad_peer(peer_id.clone(), &self.kbuckets))
                    .collect();

                self.queued_events.push(NetworkBehaviourAction::SendEvent {
                    peer_id: source,
                    event: KademliaHandlerIn::GetProvidersRes {
                        closer_peers,
                        provider_peers,
                        request_id,
                    },
                });
            }
            KademliaHandlerEvent::GetProvidersRes {
                closer_peers,
                provider_peers,
                user_data,
            } => {
                for peer in closer_peers.iter().chain(provider_peers.iter()) {
                    self.queued_events.push(NetworkBehaviourAction::GenerateEvent(KademliaOut::Discovered {
                        peer_id: peer.node_id.clone(),
                        addresses: peer.multiaddrs.clone(),
                        ty: peer.connection_ty,
                    }));
                }

                // It is possible that we obtain a response for a query that has finished, which is
                // why we may not find an entry in `self.active_queries`.
                if let Some(query) = self.active_queries.get_mut(&user_data) {
                    // Accumulate the providers; they are reported when the query finishes.
                    if let QueryInfoInner::GetProviders { pending_results, .. } = &mut query.target_mut().inner {
                        for peer in provider_peers {
                            pending_results.push(peer.node_id);
                        }
                    }
                    for peer in closer_peers.iter() {
                        query.target_mut().untrusted_addresses
                            .insert(peer.node_id.clone(), peer.multiaddrs.iter().cloned().collect());
                    }
                    query.inject_rpc_result(&source, closer_peers.into_iter().map(|kp| kp.node_id))
                }
            }
            KademliaHandlerEvent::QueryError { user_data, .. } => {
                // It is possible that we obtain a response for a query that has finished, which is
                // why we may not find an entry in `self.active_queries`.
                if let Some(query) = self.active_queries.get_mut(&user_data) {
                    query.inject_rpc_error(&source)
                }
            }
            KademliaHandlerEvent::AddProvider { key, provider_peer } => {
                self.queued_events.push(NetworkBehaviourAction::GenerateEvent(KademliaOut::Discovered {
                    peer_id: provider_peer.node_id.clone(),
                    addresses: provider_peer.multiaddrs.clone(),
                    ty: provider_peer.connection_ty,
                }));
                // Actually recorded in `values_providers` on the next `poll()`.
                self.add_provider.push((key, provider_peer.node_id));
                return;
            }
        };
    }

    fn poll(
        &mut self,
        parameters: &mut PollParameters<'_>,
    ) -> Async<
        NetworkBehaviourAction<
            <Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
            Self::OutEvent,
        >,
    > {
        // Flush the changes to the topology that we want to make.
        for (key, provider) in self.add_provider.drain() {
            // Don't add ourselves to the providers.
            if provider == *self.kbuckets.my_id() {
                continue;
            }
            let providers = self.values_providers.entry(key).or_insert_with(Default::default);
            if !providers.iter().any(|k| k == &provider) {
                providers.push(provider);
            }
        }
        self.add_provider.shrink_to_fit();

        // Handle `refresh_add_providers`: re-announce every key we provide.
        match self.refresh_add_providers.poll() {
            Ok(Async::NotReady) => {},
            Ok(Async::Ready(Some(_))) => {
                for target in self.providing_keys.clone().into_iter() {
                    self.start_query(QueryInfoInner::AddProvider { target });
                }
            },
            // Ignore errors.
            Ok(Async::Ready(None)) | Err(_) => {},
        }

        loop {
            // Handle events queued by other parts of this struct
            if !self.queued_events.is_empty() {
                return Async::Ready(self.queued_events.remove(0));
            }
            self.queued_events.shrink_to_fit();

            // If iterating finds a query that is finished, stores it here and stops looping.
            let mut finished_query = None;

            'queries_iter: for (&query_id, query) in self.active_queries.iter_mut() {
                loop {
                    match query.poll() {
                        Async::Ready(QueryStatePollOut::Finished) => {
                            finished_query = Some(query_id);
                            break 'queries_iter;
                        }
                        Async::Ready(QueryStatePollOut::SendRpc {
                            peer_id,
                            query_target,
                        }) => {
                            let rpc = query_target.to_rpc_request(query_id);
                            if self.connected_peers.contains(&peer_id) {
                                return Async::Ready(NetworkBehaviourAction::SendEvent {
                                    peer_id: peer_id.clone(),
                                    event: rpc,
                                });
                            } else {
                                // Not connected yet: dial, and stash the RPC until
                                // `inject_connected` fires.
                                self.pending_rpcs.push((peer_id.clone(), rpc));
                                return Async::Ready(NetworkBehaviourAction::DialPeer {
                                    peer_id: peer_id.clone(),
                                });
                            }
                        }
                        Async::Ready(QueryStatePollOut::CancelRpc { peer_id }) => {
                            // We don't cancel if the RPC has already been sent out.
                            self.pending_rpcs.retain(|(id, _)| id != peer_id);
                        }
                        Async::NotReady => break,
                    }
                }
            }

            if let Some(finished_query) = finished_query {
                let (query_info, closer_peers) = self
                    .active_queries
                    .remove(&finished_query)
                    .expect("finished_query was gathered when iterating active_queries; QED.")
                    .into_target_and_closest_peers();

                match query_info.inner {
                    // Initialization queries only warm up the routing table; nothing to report.
                    QueryInfoInner::Initialization { .. } => {},
                    QueryInfoInner::FindPeer(target) => {
                        let event = KademliaOut::FindNodeResult {
                            key: target,
                            closer_peers: closer_peers.collect(),
                        };
                        break Async::Ready(NetworkBehaviourAction::GenerateEvent(event));
                    },
                    QueryInfoInner::GetProviders { target, pending_results } => {
                        let event = KademliaOut::GetProvidersResult {
                            key: target,
                            closer_peers: closer_peers.collect(),
                            provider_peers: pending_results,
                        };

                        break Async::Ready(NetworkBehaviourAction::GenerateEvent(event));
                    },
                    QueryInfoInner::AddProvider { target } => {
                        // Tell every peer of the outcome that we provide `target`.
                        for closest in closer_peers {
                            let event = NetworkBehaviourAction::SendEvent {
                                peer_id: closest,
                                event: KademliaHandlerIn::AddProvider {
                                    key: target.clone(),
                                    provider_peer: build_kad_peer(parameters.local_peer_id().clone(), &self.kbuckets),
                                },
                            };

                            self.queued_events.push(event);
                        }
                    },
                }
            } else {
                break Async::NotReady;
            }
        }
    }
}
|
|
|
|
|
/// Output event of the `Kademlia` behaviour.
#[derive(Debug, Clone)]
pub enum KademliaOut {
    /// We have discovered a node.
    Discovered {
        /// Id of the node that was discovered.
        peer_id: PeerId,
        /// Addresses of the node.
        addresses: Vec<Multiaddr>,
        /// How the reporter is connected to the reported.
        ty: KadConnectionType,
    },

    /// Result of a `FIND_NODE` iterative query.
    FindNodeResult {
        /// The key that we looked for in the query.
        key: PeerId,
        /// List of peers ordered from closest to furthest away.
        closer_peers: Vec<PeerId>,
    },

    /// Result of a `GET_PROVIDERS` iterative query.
    GetProvidersResult {
        /// The key that we looked for in the query.
        key: Multihash,
        /// The peers that are providing the requested key.
        provider_peers: Vec<PeerId>,
        /// List of peers ordered from closest to furthest away.
        closer_peers: Vec<PeerId>,
    },
}
|
|
|
|
|
// Generates a random `PeerId` that belongs to the given bucket.
//
// The generated ID shares its high-order bits with `my_id`, then differs starting at bit
// `bucket_num + 1` (counted from the least-significant end), with everything below that
// randomized.
//
// Returns an error if `bucket_num` is out of range.
fn gen_random_id(my_id: &PeerId, bucket_num: usize) -> Result<PeerId, ()> {
    let my_id_len = my_id.as_bytes().len();

    // TODO: this 2 is magic here; it is the length of the hash of the multihash
    let bits_diff = bucket_num + 1;
    if bits_diff > 8 * (my_id_len - 2) {
        return Err(());
    }

    // Scratch buffer; only the first `my_id_len` bytes are used below.
    let mut random_id = [0; 64];
    for byte in 0..my_id_len {
        // Position of this byte relative to the one containing the first differing bit.
        match byte.cmp(&(my_id_len - bits_diff / 8 - 1)) {
            Ordering::Less => {
                // Bytes above the boundary: copy verbatim so the prefix matches `my_id`.
                random_id[byte] = my_id.as_bytes()[byte];
            }
            Ordering::Equal => {
                // Boundary byte: keep the top bits of `my_id`, randomize the low
                // `bits_diff % 8` bits.
                let mask: u8 = (1 << (bits_diff % 8)) - 1;
                random_id[byte] = (my_id.as_bytes()[byte] & !mask) | (rand::random::<u8>() & mask);
            }
            Ordering::Greater => {
                // Bytes below the boundary: fully random.
                random_id[byte] = rand::random();
            }
        }
    }

    let peer_id = PeerId::from_bytes(random_id[..my_id_len].to_owned())
        .expect("randomly-generated peer ID should always be valid");
    Ok(peer_id)
}
|
|
|
|
|
/// Builds a `KadPeer` struct corresponding to the given `PeerId`.
|
2019-03-18 18:20:57 +01:00
|
|
|
|
/// The `PeerId` cannot be the same as the local one.
|
2018-11-29 12:11:35 +01:00
|
|
|
|
///
|
|
|
|
|
/// > **Note**: This is just a convenience function that doesn't do anything note-worthy.
|
2019-01-26 23:57:53 +01:00
|
|
|
|
fn build_kad_peer(
|
|
|
|
|
peer_id: PeerId,
|
2019-02-12 12:56:39 +01:00
|
|
|
|
kbuckets: &KBucketsTable<PeerId, Addresses>
|
2019-01-26 23:57:53 +01:00
|
|
|
|
) -> KadPeer {
|
2019-03-18 18:20:57 +01:00
|
|
|
|
debug_assert_ne!(*kbuckets.my_id(), peer_id);
|
2019-02-12 12:56:39 +01:00
|
|
|
|
|
2019-03-18 18:20:57 +01:00
|
|
|
|
let (multiaddrs, connection_ty) = if let Some(addresses) = kbuckets.get(&peer_id) {
|
2019-02-12 12:56:39 +01:00
|
|
|
|
let connected = if addresses.is_connected() {
|
|
|
|
|
KadConnectionType::Connected
|
|
|
|
|
} else {
|
|
|
|
|
// TODO: there's also pending connection
|
|
|
|
|
KadConnectionType::NotConnected
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
(addresses.iter().cloned().collect(), connected)
|
2018-11-29 12:11:35 +01:00
|
|
|
|
|
|
|
|
|
} else {
|
2019-02-12 12:56:39 +01:00
|
|
|
|
// TODO: there's also pending connection
|
|
|
|
|
(Vec::new(), KadConnectionType::NotConnected)
|
2018-11-29 12:11:35 +01:00
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
KadPeer {
|
|
|
|
|
node_id: peer_id,
|
|
|
|
|
multiaddrs,
|
|
|
|
|
connection_ty,
|
|
|
|
|
}
|
|
|
|
|
}
|