// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use crate::addresses::Addresses;
use crate::handler::{KademliaHandler, KademliaHandlerEvent, KademliaHandlerIn};
use crate::kbucket::{self, KBucketsTable, NodeStatus};
use crate::protocol::{KadConnectionType, KadPeer};
use crate::query::{QueryConfig, QueryState, QueryStatePollOut};
use crate::write::WriteState;
use crate::record::{MemoryRecordStorage, RecordStore, Record, RecordStorageError};
use fnv::{FnvHashMap, FnvHashSet};
use futures::{prelude::*, stream};
use libp2p_core::swarm::{ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters};
use libp2p_core::{protocols_handler::ProtocolsHandler, Multiaddr, PeerId};
use multihash::Multihash;
use smallvec::SmallVec;
use std::{borrow::Cow, error, iter::FromIterator, marker::PhantomData, num::NonZeroU8, time::Duration};
use tokio_io::{AsyncRead, AsyncWrite};
use wasm_timer::{Instant, Interval};

mod test;

/// Network behaviour that handles Kademlia.
pub struct Kademlia<TSubstream, TRecordStorage: RecordStore = MemoryRecordStorage> {
    /// Storage for the nodes. Contains the known multiaddresses for this node.
    kbuckets: KBucketsTable<PeerId, Addresses>,

    /// If `Some`, we overwrite the Kademlia protocol name with this one.
    protocol_name_override: Option<Cow<'static, [u8]>>,

    /// All the iterative queries we are currently performing, with their ID. The last parameter
    /// is the list of accumulated providers for `GET_PROVIDERS` queries.
    active_queries: FnvHashMap<QueryId, QueryState<QueryInfo, PeerId>>,

    /// All the `PUT_VALUE` actions we are currently performing.
    active_writes: FnvHashMap<QueryId, WriteState<PeerId, Multihash>>,

    /// List of peers the swarm is connected to.
    connected_peers: FnvHashSet<PeerId>,

    /// Contains a list of peer IDs which we are not connected to, and an RPC query to send to them
    /// once they connect.
    pending_rpcs: SmallVec<[(PeerId, KademliaHandlerIn<QueryId>); 8]>,

    /// Identifier for the next query that we start.
    next_query_id: QueryId,

    /// List of values and peers that are providing them.
    ///
    /// Our local peer ID can be in this container.
    // TODO: Note that in reality the value is a SHA-256 of the actual value (https://github.com/libp2p/rust-libp2p/issues/694)
    values_providers: FnvHashMap<Multihash, SmallVec<[PeerId; 20]>>,

    /// List of values that we are providing ourselves. Must be kept in sync with
    /// `values_providers`.
    providing_keys: FnvHashSet<Multihash>,

    /// Interval to send `ADD_PROVIDER` messages to everyone.
    refresh_add_providers: stream::Fuse<Interval>,

    /// `α` in the Kademlia reference papers. Designates the maximum number of queries that we
    /// perform in parallel.
    parallelism: usize,

    /// The number of results to return from a query. Defaults to the maximum number
    /// of entries in a single k-bucket, i.e. the `k` parameter.
    num_results: usize,

    /// Timeout for a single RPC.
    rpc_timeout: Duration,

    /// Queued events to return when the behaviour is being polled.
    queued_events: SmallVec<[NetworkBehaviourAction<KademliaHandlerIn<QueryId>, KademliaOut>; 32]>,

    /// List of providers to add to the topology as soon as we are in `poll()`.
    add_provider: SmallVec<[(Multihash, PeerId); 32]>,

    /// Marker to pin the generics.
    marker: PhantomData<TSubstream>,

    /// The records that we keep.
    records: TRecordStorage,
}

/// Opaque type. Each query that we start gets a unique number.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct QueryId(usize);

/// Information about a query.
#[derive(Debug, Clone, PartialEq, Eq)]
struct QueryInfo {
    /// What we are querying and why.
    inner: QueryInfoInner,
    /// Temporary addresses used when trying to reach nodes.
    untrusted_addresses: FnvHashMap<PeerId, SmallVec<[Multiaddr; 8]>>,
}

/// Additional information about the query.
#[derive(Debug, Clone, PartialEq, Eq)]
enum QueryInfoInner {
    /// The query was created for the Kademlia initialization process.
    Initialization {
        /// Hash we're targeting to insert ourselves in the k-buckets.
        target: PeerId,
    },

    /// The user requested a `FIND_PEER` query to be performed. It should be reported when finished.
    FindPeer(PeerId),

    /// The user requested a `GET_PROVIDERS` query to be performed. It should be reported when
    /// finished.
    GetProviders {
        /// Target we are searching the providers of.
        target: Multihash,
        /// Results to return. Filled over time.
        pending_results: Vec<PeerId>,
    },

    /// We are traversing towards `target` and should add an `ADD_PROVIDER` message to the peers
    /// of the outcome with our own identity.
    AddProvider {
        /// Which hash we're targeting.
        target: Multihash,
    },

    /// Put the value into the DHT records.
    PutValue {
        /// The key of the record being inserted.
        key: Multihash,
        /// The value of the record being inserted.
        value: Vec<u8>,
    },

    /// Get a value from the DHT records.
    GetValue {
        /// The key we're looking for.
        key: Multihash,
        /// The results from peers are stored here.
        results: Vec<Record>,
        /// The number of results to look for.
        num_results: usize,
    },
}

impl Into<kbucket::Key<QueryInfo>> for QueryInfo {
    fn into(self) -> kbucket::Key<QueryInfo> {
        kbucket::Key::new(self)
    }
}

impl AsRef<[u8]> for QueryInfo {
    fn as_ref(&self) -> &[u8] {
        match &self.inner {
            QueryInfoInner::Initialization { target } => target.as_ref(),
            QueryInfoInner::FindPeer(peer) => peer.as_ref(),
            QueryInfoInner::GetProviders { target, .. } => target.as_bytes(),
            QueryInfoInner::AddProvider { target } => target.as_bytes(),
            QueryInfoInner::GetValue { key, .. } => key.as_bytes(),
            QueryInfoInner::PutValue { key, .. } => key.as_bytes(),
        }
    }
}

impl QueryInfo {
    /// Creates the corresponding RPC request to send to remote.
    fn to_rpc_request<TUserData>(&self, user_data: TUserData) -> KademliaHandlerIn<TUserData> {
        match &self.inner {
            QueryInfoInner::Initialization { target } => KademliaHandlerIn::FindNodeReq {
                key: target.clone().into(),
                user_data,
            },
            QueryInfoInner::FindPeer(key) => KademliaHandlerIn::FindNodeReq {
                // TODO: Change the `key` of `QueryInfoInner::FindPeer` to be a `Multihash`.
                key: key.clone().into(),
                user_data,
            },
            QueryInfoInner::GetProviders { target, .. } => KademliaHandlerIn::GetProvidersReq {
                key: target.clone(),
                user_data,
            },
            QueryInfoInner::AddProvider { .. } => KademliaHandlerIn::FindNodeReq {
                key: unimplemented!(), // TODO: target.clone(),
                user_data,
            },
            QueryInfoInner::GetValue { key, .. } => KademliaHandlerIn::GetValue {
                key: key.clone(),
                user_data,
            },
            QueryInfoInner::PutValue { key, .. } => KademliaHandlerIn::FindNodeReq {
                key: key.clone(),
                user_data,
            }
        }
    }
}

impl<TSubstream, TRecordStorage> Kademlia<TSubstream, TRecordStorage>
where
    TRecordStorage: RecordStore
{
    /// Creates a `Kademlia`.
    #[inline]
    pub fn new(local_peer_id: PeerId) -> Self
    where
        TRecordStorage: Default
    {
        Self::new_inner(local_peer_id, Default::default())
    }

    /// The same as `new`, but using a custom protocol name.
    ///
    /// Kademlia nodes only communicate with other nodes using the same protocol name. Using a
    /// custom name therefore allows segregating the DHT from others, if that is desired.
    pub fn with_protocol_name(local_peer_id: PeerId, name: impl Into<Cow<'static, [u8]>>) -> Self
    where
        TRecordStorage: Default
    {
        let mut me = Kademlia::new_inner(local_peer_id, Default::default());
        me.protocol_name_override = Some(name.into());
        me
    }
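
    // A minimal usage sketch (illustrative only; `MySubstream` and `local_peer_id`
    // are assumptions of this example, not items defined in this module):
    //
    //     let mut kad = Kademlia::<MySubstream>::with_protocol_name(
    //         local_peer_id.clone(),
    //         b"/my-app/kad/1.0.0".to_vec(),
    //     );
    //
    // Every node of the segregated DHT must be configured with the same name,
    // otherwise the Kademlia substreams fail to negotiate.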

    /// The same as `new`, but with a custom storage.
    ///
    /// The default record storage keeps records in memory; this constructor lets the
    /// user override that storage with user-defined behaviour.
    pub fn with_storage(local_peer_id: PeerId, records: TRecordStorage) -> Self {
        Self::new_inner(local_peer_id, records)
    }

    /// Creates a `Kademlia`.
    ///
    /// Contrary to `new`, doesn't perform the initialization queries that store our local ID into
    /// the DHT and fill our buckets.
    #[inline]
    #[deprecated(note="this function is now equivalent to new() and will be removed in the future")]
    pub fn without_init(local_peer_id: PeerId) -> Self
        where TRecordStorage: Default
    {
        Self::new_inner(local_peer_id, Default::default())
    }

    /// Adds a known address of a peer participating in the Kademlia DHT to the
    /// routing table.
    ///
    /// This allows prepopulating the Kademlia routing table with known
    /// addresses, so that they can be used immediately in following DHT
    /// operations involving one of these peers, without having to dial
    /// them upfront.
    pub fn add_address(&mut self, peer_id: &PeerId, address: Multiaddr) {
        let key = kbucket::Key::new(peer_id.clone());
        match self.kbuckets.entry(&key) {
            kbucket::Entry::Present(mut entry, _) => {
                entry.value().insert(address);
            }
            kbucket::Entry::Pending(mut entry, _) => {
                entry.value().insert(address);
            }
            kbucket::Entry::Absent(entry) => {
                let mut addresses = Addresses::new();
                addresses.insert(address);
                match entry.insert(addresses, NodeStatus::Disconnected) {
                    kbucket::InsertResult::Inserted => {
                        let event = KademliaOut::KBucketAdded {
                            peer_id: peer_id.clone(),
                            replaced: None,
                        };
                        self.queued_events.push(NetworkBehaviourAction::GenerateEvent(event));
                    },
                    kbucket::InsertResult::Full => (),
                    kbucket::InsertResult::Pending { disconnected } => {
                        self.queued_events.push(NetworkBehaviourAction::DialPeer {
                            peer_id: disconnected.into_preimage(),
                        })
                    },
                }
                return;
            },
            kbucket::Entry::SelfEntry => return,
        };
    }
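
    // A typical bootstrap sequence (a sketch; `kad`, `bootstrap_peer`, `bootstrap_addr`
    // and `local_peer_id` are assumed to be known to the caller out of band):
    //
    //     kad.add_address(&bootstrap_peer, bootstrap_addr);
    //     // Searching for our own ID populates the buckets closest to us once
    //     // the swarm has driven the query to completion.
    //     kad.find_node(local_peer_id.clone());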

    /// Inner implementation of the constructors.
    fn new_inner(local_peer_id: PeerId, records: TRecordStorage) -> Self {
        let parallelism = 3;

        Kademlia {
            kbuckets: KBucketsTable::new(kbucket::Key::new(local_peer_id), Duration::from_secs(60)), // TODO: constant
            protocol_name_override: None,
            queued_events: SmallVec::new(),
            active_queries: Default::default(),
            active_writes: Default::default(),
            connected_peers: Default::default(),
            pending_rpcs: SmallVec::with_capacity(parallelism),
            next_query_id: QueryId(0),
            values_providers: FnvHashMap::default(),
            providing_keys: FnvHashSet::default(),
            refresh_add_providers: Interval::new_interval(Duration::from_secs(60)).fuse(), // TODO: constant
            parallelism,
            num_results: kbucket::MAX_NODES_PER_BUCKET,
            rpc_timeout: Duration::from_secs(8),
            add_provider: SmallVec::new(),
            marker: PhantomData,
            records,
        }
    }

    /// Returns an iterator over all peer IDs of nodes currently contained in a bucket
    /// of the Kademlia routing table.
    pub fn kbuckets_entries(&mut self) -> impl Iterator<Item = &PeerId> {
        self.kbuckets.iter().map(|entry| entry.node.key.preimage())
    }

    /// Starts an iterative `FIND_NODE` request.
    ///
    /// This will eventually produce an event containing the nodes of the DHT closest to the
    /// requested `PeerId`.
    pub fn find_node(&mut self, peer_id: PeerId) {
        self.start_query(QueryInfoInner::FindPeer(peer_id));
    }

    /// Starts an iterative `GET_PROVIDERS` request.
    pub fn get_providers(&mut self, target: Multihash) {
        self.start_query(QueryInfoInner::GetProviders { target, pending_results: Vec::new() });
    }

    /// Starts an iterative `GET_VALUE` request.
    ///
    /// The number of results returned is in the interval [1, 20]; if the user
    /// requested a larger amount of results, it is capped at 20.
    pub fn get_value(&mut self, key: &Multihash, num_results: NonZeroU8) {
        let num_results = usize::min(num_results.get() as usize, kbucket::MAX_NODES_PER_BUCKET);
        let mut results = Vec::with_capacity(num_results);

        if let Some(record) = self.records.get(key) {
            results.push(record.into_owned());
            if num_results == 1 {
                self.queued_events.push(NetworkBehaviourAction::GenerateEvent(
                    KademliaOut::GetValueResult(
                        GetValueResult::Found { results }
                    )));
                return;
            }
        }

        self.start_query(QueryInfoInner::GetValue {
            key: key.clone(),
            results,
            num_results
        });
    }

    /// Starts an iterative `PUT_VALUE` request.
    pub fn put_value(&mut self, key: Multihash, value: Vec<u8>) {
        if let Err(error) = self.records.put(Record { key: key.clone(), value: value.clone() }) {
            self.queued_events.push(NetworkBehaviourAction::GenerateEvent(
                KademliaOut::PutValueResult(
                    PutValueResult::Err { key, cause: error }
                )
            ));
        } else {
            self.start_query(QueryInfoInner::PutValue { key, value });
        }
    }
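
    // Storing a record is asynchronous: the call below only records the value
    // locally and starts the query; the outcome is reported later as a
    // `KademliaOut::PutValueResult` event. `record_key` and `payload` are
    // assumed to be built by the caller:
    //
    //     kad.put_value(record_key.clone(), payload.to_vec());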

    /// Register the local node as the provider for the given key.
    ///
    /// This will periodically send `ADD_PROVIDER` messages to the nodes closest to the key. When
    /// someone performs a `GET_PROVIDERS` iterative request on the DHT, our local node will be
    /// returned as part of the results.
    ///
    /// The actual meaning of *providing* the value of a key is not defined, and is specific to
    /// the value whose key is the hash.
    pub fn add_providing(&mut self, key: Multihash) {
        self.providing_keys.insert(key.clone());
        let providers = self.values_providers.entry(key).or_insert_with(Default::default);
        let local_id = self.kbuckets.local_key().preimage();
        if !providers.iter().any(|peer_id| peer_id == local_id) {
            providers.push(local_id.clone());
        }

        // Trigger the next refresh now.
        self.refresh_add_providers = Interval::new(Instant::now(), Duration::from_secs(60)).fuse();
    }
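
    // Advertising and discovering content (a sketch; `provider` and `consumer` are
    // two independent Kademlia instances, and `content_hash` is the multihash under
    // which peers agree to look the content up):
    //
    //     provider.add_providing(content_hash.clone());
    //     // Any other node can then ask the DHT who provides it:
    //     consumer.get_providers(content_hash);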

    /// Cancels a registration done with `add_providing`.
    ///
    /// There doesn't exist any "remove provider" message to broadcast on the network, therefore we
    /// will still be registered as a provider in the DHT for as long as the timeout doesn't expire.
    pub fn remove_providing(&mut self, key: &Multihash) {
        self.providing_keys.remove(key);

        let providers = match self.values_providers.get_mut(key) {
            Some(p) => p,
            None => return,
        };

        if let Some(position) = providers.iter().position(|k| k == key) {
            providers.remove(position);
            providers.shrink_to_fit();
        }
    }

    /// Internal function that starts a query.
    fn start_query(&mut self, target: QueryInfoInner) {
        let query_id = self.next_query_id;
        self.next_query_id.0 += 1;

        let target = QueryInfo {
            inner: target,
            untrusted_addresses: Default::default(),
        };

        let target_key = kbucket::Key::new(target.clone());

        let known_closest_peers = self.kbuckets
            .closest_keys(&target_key)
            .take(self.num_results);

        self.active_queries.insert(
            query_id,
            QueryState::new(QueryConfig {
                target,
                parallelism: self.parallelism,
                num_results: self.num_results,
                rpc_timeout: self.rpc_timeout,
                known_closest_peers,
            })
        );
    }

    /// Processes discovered peers from a query.
    fn discovered<'a, I>(&'a mut self, query_id: &QueryId, source: &PeerId, peers: I)
    where
        I: Iterator<Item=&'a KadPeer> + Clone
    {
        let local_id = self.kbuckets.local_key().preimage().clone();
        let others_iter = peers.filter(|p| p.node_id != local_id);

        for peer in others_iter.clone() {
            self.queued_events.push(NetworkBehaviourAction::GenerateEvent(
                KademliaOut::Discovered {
                    peer_id: peer.node_id.clone(),
                    addresses: peer.multiaddrs.clone(),
                    ty: peer.connection_ty,
                }
            ));
        }

        if let Some(query) = self.active_queries.get_mut(query_id) {
            for peer in others_iter.clone() {
                query.target_mut().untrusted_addresses
                    .insert(peer.node_id.clone(), peer.multiaddrs.iter().cloned().collect());
            }
            query.inject_rpc_result(source, others_iter.cloned().map(|kp| kp.node_id))
        }
    }

    /// Finds the closest peers to a `target` in the context of a request by
    /// the `source` peer, such that the `source` peer is never included in the
    /// result.
    fn find_closest<T: Clone>(&mut self, target: &kbucket::Key<T>, source: &PeerId) -> Vec<KadPeer> {
        self.kbuckets
            .closest(target)
            .filter(|e| e.node.key.preimage() != source)
            .take(self.num_results)
            .map(KadPeer::from)
            .collect()
    }

    /// Collects all peers who are known to be providers of the value for a given `Multihash`.
    fn provider_peers(&mut self, key: &Multihash, source: &PeerId) -> Vec<KadPeer> {
        let kbuckets = &mut self.kbuckets;
        self.values_providers
            .get(key)
            .into_iter()
            .flat_map(|peers| peers)
            .filter_map(move |p|
                if p != source {
                    let key = kbucket::Key::new(p.clone());
                    kbuckets.entry(&key).view().map(|e| KadPeer::from(e.to_owned()))
                } else {
                    None
                })
            .collect()
    }

    /// Update the connection status of a peer in the Kademlia routing table.
    fn connection_updated(&mut self, peer: PeerId, address: Option<Multiaddr>, new_status: NodeStatus) {
        let key = kbucket::Key::new(peer.clone());
        match self.kbuckets.entry(&key) {
            kbucket::Entry::Present(mut entry, old_status) => {
                if let Some(address) = address {
                    entry.value().insert(address);
                }
                if old_status != new_status {
                    entry.update(new_status);
                }
            },

            kbucket::Entry::Pending(mut entry, old_status) => {
                if let Some(address) = address {
                    entry.value().insert(address);
                }
                if old_status != new_status {
                    entry.update(new_status);
                }
            },

            kbucket::Entry::Absent(entry) => if new_status == NodeStatus::Connected {
                let mut addresses = Addresses::new();
                if let Some(address) = address {
                    addresses.insert(address);
                }
                match entry.insert(addresses, new_status) {
                    kbucket::InsertResult::Inserted => {
                        let event = KademliaOut::KBucketAdded {
                            peer_id: peer.clone(),
                            replaced: None,
                        };
                        self.queued_events.push(NetworkBehaviourAction::GenerateEvent(event));
                    },
                    kbucket::InsertResult::Full => (),
                    kbucket::InsertResult::Pending { disconnected } => {
                        debug_assert!(!self.connected_peers.contains(disconnected.preimage()));
                        self.queued_events.push(NetworkBehaviourAction::DialPeer {
                            peer_id: disconnected.into_preimage(),
                        })
                    },
                }
            },
            _ => {}
        }
    }
}

impl<TSubstream, TRecordStorage> NetworkBehaviour for Kademlia<TSubstream, TRecordStorage>
where
    TSubstream: AsyncRead + AsyncWrite,
    TRecordStorage: RecordStore,
{
    type ProtocolsHandler = KademliaHandler<TSubstream, QueryId>;
    type OutEvent = KademliaOut;

    fn new_handler(&mut self) -> Self::ProtocolsHandler {
        let mut handler = KademliaHandler::dial_and_listen();
        if let Some(name) = self.protocol_name_override.as_ref() {
            handler = handler.with_protocol_name(name.clone());
        }
        handler
    }

    fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr> {
        // We should order addresses from decreasing likelihood of connectivity, so start with
        // the addresses of that peer in the k-buckets.
        let key = kbucket::Key::new(peer_id.clone());
        let mut out_list =
            if let kbucket::Entry::Present(mut entry, _) = self.kbuckets.entry(&key) {
                entry.value().iter().cloned().collect::<Vec<_>>()
            } else {
                Vec::new()
            };

        // We add to that a temporary list of addresses from the ongoing queries.
        for query in self.active_queries.values() {
            if let Some(addrs) = query.target().untrusted_addresses.get(peer_id) {
                for addr in addrs {
                    out_list.push(addr.clone());
                }
            }
        }

        out_list
    }

    fn inject_connected(&mut self, id: PeerId, endpoint: ConnectedPoint) {
        while let Some(pos) = self.pending_rpcs.iter().position(|(p, _)| p == &id) {
            let (_, rpc) = self.pending_rpcs.remove(pos);
            self.queued_events.push(NetworkBehaviourAction::SendEvent {
                peer_id: id.clone(),
                event: rpc,
            });
        }

        let address = match endpoint {
            ConnectedPoint::Dialer { address } => Some(address),
            ConnectedPoint::Listener { .. } => None,
        };

        self.connection_updated(id.clone(), address, NodeStatus::Connected);
        self.connected_peers.insert(id);
    }

    fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, _: &dyn error::Error) {
        if let Some(peer_id) = peer_id {
            let key = kbucket::Key::new(peer_id.clone());

            if let Some(addrs) = self.kbuckets.entry(&key).value() {
                // TODO: don't remove the address if the error is that we are already connected
                // to this peer
                addrs.remove(addr);
            }

            for query in self.active_queries.values_mut() {
                if let Some(addrs) = query.target_mut().untrusted_addresses.get_mut(&peer_id) {
                    addrs.retain(|a| a != addr);
                }
            }
        }
    }

    fn inject_dial_failure(&mut self, peer_id: &PeerId) {
        for query in self.active_queries.values_mut() {
            query.inject_rpc_error(peer_id);
        }
        for write in self.active_writes.values_mut() {
            write.inject_write_error(peer_id);
        }
    }

    fn inject_disconnected(&mut self, id: &PeerId, _old_endpoint: ConnectedPoint) {
        for query in self.active_queries.values_mut() {
            query.inject_rpc_error(id);
        }
        for write in self.active_writes.values_mut() {
            write.inject_write_error(id);
        }
        self.connection_updated(id.clone(), None, NodeStatus::Disconnected);
        self.connected_peers.remove(id);
    }

    fn inject_replaced(&mut self, peer_id: PeerId, _old: ConnectedPoint, new_endpoint: ConnectedPoint) {
        // We need to re-send the active queries.
        for (query_id, query) in self.active_queries.iter() {
            if query.is_waiting(&peer_id) {
                self.queued_events.push(NetworkBehaviourAction::SendEvent {
                    peer_id: peer_id.clone(),
                    event: query.target().to_rpc_request(*query_id),
                });
            }
        }

        if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::new(peer_id)).value() {
            if let ConnectedPoint::Dialer { address } = new_endpoint {
                addrs.insert(address);
            }
        }
    }

    fn inject_node_event(&mut self, source: PeerId, event: KademliaHandlerEvent<QueryId>) {
        match event {
            KademliaHandlerEvent::FindNodeReq { key, request_id } => {
                let closer_peers = self.find_closest(&kbucket::Key::new(key), &source);
                self.queued_events.push(NetworkBehaviourAction::SendEvent {
                    peer_id: source,
                    event: KademliaHandlerIn::FindNodeRes {
                        closer_peers,
                        request_id,
                    },
                });
            }
            KademliaHandlerEvent::FindNodeRes {
                closer_peers,
                user_data,
            } => {
                self.discovered(&user_data, &source, closer_peers.iter());
            }
            KademliaHandlerEvent::GetProvidersReq { key, request_id } => {
                let provider_peers = self.provider_peers(&key, &source);
                let closer_peers = self.find_closest(&kbucket::Key::from(key), &source);
                self.queued_events.push(NetworkBehaviourAction::SendEvent {
                    peer_id: source,
                    event: KademliaHandlerIn::GetProvidersRes {
                        closer_peers,
                        provider_peers,
                        request_id,
                    },
                });
            }
            KademliaHandlerEvent::GetProvidersRes {
                closer_peers,
                provider_peers,
                user_data,
            } => {
                let peers = closer_peers.iter().chain(provider_peers.iter());
                self.discovered(&user_data, &source, peers);
                if let Some(query) = self.active_queries.get_mut(&user_data) {
                    if let QueryInfoInner::GetProviders {
                        pending_results, ..
                    } = &mut query.target_mut().inner {
                        for peer in provider_peers {
                            pending_results.push(peer.node_id);
                        }
                    }
                }
            }
            KademliaHandlerEvent::QueryError { user_data, .. } => {
                // It is possible that we obtain a response for a query that has finished, which is
                // why we may not find an entry in `self.active_queries`.
                if let Some(query) = self.active_queries.get_mut(&user_data) {
                    query.inject_rpc_error(&source)
                }

                if let Some(write) = self.active_writes.get_mut(&user_data) {
                    write.inject_write_error(&source);
                }
            }
            KademliaHandlerEvent::AddProvider { key, provider_peer } => {
                self.queued_events.push(NetworkBehaviourAction::GenerateEvent(KademliaOut::Discovered {
                    peer_id: provider_peer.node_id.clone(),
                    addresses: provider_peer.multiaddrs.clone(),
                    ty: provider_peer.connection_ty,
                }));
                self.add_provider.push((key, provider_peer.node_id));
                return;
            }
            KademliaHandlerEvent::GetValue { key, request_id } => {
                let (result, closer_peers) = match self.records.get(&key) {
                    Some(record) => {
                        (Some(record.into_owned()), Vec::new())
                    },
                    None => {
                        let closer_peers = self.find_closest(&kbucket::Key::from(key), &source);
                        (None, closer_peers)
                    }
                };

                self.queued_events.push(NetworkBehaviourAction::SendEvent {
                    peer_id: source,
                    event: KademliaHandlerIn::GetValueRes {
                        result,
                        closer_peers,
                        request_id,
                    },
                });
            }
            KademliaHandlerEvent::GetValueRes {
                result,
                closer_peers,
                user_data,
            } => {
                let mut finished_query = None;

                if let Some(query) = self.active_queries.get_mut(&user_data) {
                    if let QueryInfoInner::GetValue {
                        key: _,
                        results,
                        num_results,
                    } = &mut query.target_mut().inner {
                        if let Some(result) = result {
                            results.push(result);
                            if results.len() == *num_results {
                                finished_query = Some(user_data);
                            }
                        }
                    }
                }

                if let Some(finished_query) = finished_query {
                    let (query_info, _) = self
                        .active_queries
                        .remove(&finished_query)
                        .expect("finished_query was gathered when peeking into active_queries; QED.")
                        .into_target_and_closest_peers();

                    match query_info.inner {
                        QueryInfoInner::GetValue { key: _, results, .. } => {
                            let result = GetValueResult::Found { results };
                            let event = KademliaOut::GetValueResult(result);

                            self.queued_events.push(NetworkBehaviourAction::GenerateEvent(event));
                        }
                        // TODO: write a better proof
                        _ => panic!("unexpected query_info.inner variant for a get_value result; QED.")
                    }
                }

                self.discovered(&user_data, &source, closer_peers.iter());
            }
            KademliaHandlerEvent::PutValue {
                key,
                value,
                request_id
            } => {
                // TODO: Log errors and immediately reset the stream on error instead of letting the request time out.
                if let Ok(()) = self.records.put(Record { key: key.clone(), value: value.clone() }) {
                    self.queued_events.push(NetworkBehaviourAction::SendEvent {
                        peer_id: source,
                        event: KademliaHandlerIn::PutValueRes {
                            key,
                            value,
                            request_id,
                        },
                    });
                }
            }
            KademliaHandlerEvent::PutValueRes {
                key: _,
                user_data,
            } => {
                if let Some(write) = self.active_writes.get_mut(&user_data) {
                    write.inject_write_success(&source);
                }
            }
        };
    }

    fn poll(
        &mut self,
        parameters: &mut impl PollParameters,
    ) -> Async<
        NetworkBehaviourAction<
            <Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
            Self::OutEvent,
        >,
    > {
        // Flush the changes to the topology that we want to make.
        for (key, provider) in self.add_provider.drain() {
            // Don't add ourselves to the providers.
            if &provider == self.kbuckets.local_key().preimage() {
                continue;
            }
            let providers = self.values_providers.entry(key).or_insert_with(Default::default);
            if !providers.iter().any(|peer_id| peer_id == &provider) {
                providers.push(provider);
            }
        }
        self.add_provider.shrink_to_fit();

        // Handle `refresh_add_providers`.
        match self.refresh_add_providers.poll() {
            Ok(Async::NotReady) => {},
            Ok(Async::Ready(Some(_))) => {
                for target in self.providing_keys.clone().into_iter() {
                    self.start_query(QueryInfoInner::AddProvider { target });
                }
            },
            // Ignore errors.
            Ok(Async::Ready(None)) | Err(_) => {},
        }

        loop {
            // Drain queued events first.
            if !self.queued_events.is_empty() {
                return Async::Ready(self.queued_events.remove(0));
            }
            self.queued_events.shrink_to_fit();

            // Drain applied pending entries from the routing table.
            if let Some(entry) = self.kbuckets.take_applied_pending() {
                let event = KademliaOut::KBucketAdded {
                    peer_id: entry.inserted.into_preimage(),
                    replaced: entry.evicted.map(|n| n.key.into_preimage())
                };
                return Async::Ready(NetworkBehaviourAction::GenerateEvent(event))
            }

            // If iterating finds a query that is finished, stores it here and stops looping.
            let mut finished_query = None;

            'queries_iter: for (&query_id, query) in self.active_queries.iter_mut() {
                loop {
                    match query.poll() {
                        Async::Ready(QueryStatePollOut::Finished) => {
                            finished_query = Some(query_id);
                            break 'queries_iter;
                        }
                        Async::Ready(QueryStatePollOut::SendRpc {
                            peer_id,
                            query_target,
                        }) => {
                            let rpc = query_target.to_rpc_request(query_id);
                            if self.connected_peers.contains(peer_id) {
                                return Async::Ready(NetworkBehaviourAction::SendEvent {
                                    peer_id: peer_id.clone(),
                                    event: rpc,
                                });
                            } else if peer_id != self.kbuckets.local_key().preimage() {
                                self.pending_rpcs.push((peer_id.clone(), rpc));
                                return Async::Ready(NetworkBehaviourAction::DialPeer {
                                    peer_id: peer_id.clone(),
                                });
                            }
                        }
                        Async::Ready(QueryStatePollOut::CancelRpc { peer_id }) => {
                            // We don't cancel if the RPC has already been sent out.
                            self.pending_rpcs.retain(|(id, _)| id != peer_id);
                        }
                        Async::NotReady => break,
                    }
                }
            }

            let finished_write = self.active_writes.iter()
                .find_map(|(&query_id, write)|
                    if write.done() {
                        Some(query_id)
                    } else {
                        None
                    });

            if let Some(finished_write) = finished_write {
                let (t, successes, failures) = self
                    .active_writes
                    .remove(&finished_write)
                    .expect("finished_write was gathered when iterating active_writes; QED.")
                    .into_inner();
                let event = KademliaOut::PutValueResult(PutValueResult::Ok {
                    key: t,
                    successes,
                    failures,
                });

                break Async::Ready(NetworkBehaviourAction::GenerateEvent(event));
            }

            if let Some(finished_query) = finished_query {
                let (query_info, closer_peers) = self
                    .active_queries
                    .remove(&finished_query)
                    .expect("finished_query was gathered when iterating active_queries; QED.")
                    .into_target_and_closest_peers();

                match query_info.inner {
                    QueryInfoInner::Initialization { .. } => {},
                    QueryInfoInner::FindPeer(target) => {
                        let event = KademliaOut::FindNodeResult {
                            key: target,
                            closer_peers: closer_peers.collect(),
                        };
                        break Async::Ready(NetworkBehaviourAction::GenerateEvent(event));
                    },
                    QueryInfoInner::GetProviders { target, pending_results } => {
                        let event = KademliaOut::GetProvidersResult {
                            key: target,
                            closer_peers: closer_peers.collect(),
                            provider_peers: pending_results,
                        };

                        break Async::Ready(NetworkBehaviourAction::GenerateEvent(event));
                    },
                    QueryInfoInner::AddProvider { target } => {
                        for closest in closer_peers {
                            let event = NetworkBehaviourAction::SendEvent {
                                peer_id: closest,
                                event: KademliaHandlerIn::AddProvider {
                                    key: target.clone(),
                                    provider_peer: KadPeer {
                                        node_id: parameters.local_peer_id().clone(),
                                        multiaddrs: parameters.external_addresses().collect(),
                                        connection_ty: KadConnectionType::Connected,
                                    }
                                },
                            };
                            self.queued_events.push(event);
                        }
                    },
                    QueryInfoInner::GetValue { key, results, .. } => {
                        let result = match results.len() {
                            0 => GetValueResult::NotFound {
                                key,
                                closest_peers: closer_peers.collect()
                            },
                            _ => GetValueResult::Found { results },
                        };

                        let event = KademliaOut::GetValueResult(result);

                        break Async::Ready(NetworkBehaviourAction::GenerateEvent(event));
                    },
                    QueryInfoInner::PutValue { key, value } => {
                        let closer_peers = Vec::from_iter(closer_peers);
                        for peer in &closer_peers {
                            let event = KademliaHandlerIn::PutValue {
                                key: key.clone(),
                                value: value.clone(),
                                user_data: finished_query,
                            };

                            if self.connected_peers.contains(peer) {
                                let event = NetworkBehaviourAction::SendEvent {
                                    peer_id: peer.clone(),
                                    event
                                };
                                self.queued_events.push(event);
                            } else {
                                self.pending_rpcs.push((peer.clone(), event));
                                self.queued_events.push(NetworkBehaviourAction::DialPeer {
                                    peer_id: peer.clone(),
                                });
                            }
                        }

                        self.active_writes.insert(finished_query, WriteState::new(key, closer_peers));
                    },
                }
            } else {
                break Async::NotReady;
            }
        }
    }
}

/// The result of a `GET_VALUE` query.
#[derive(Debug, Clone, PartialEq)]
pub enum GetValueResult {
    /// The results received from peers. Always contains a non-zero number of results.
    Found { results: Vec<Record> },
    /// The record wasn't found.
    NotFound {
        key: Multihash,
        closest_peers: Vec<PeerId>
    }
}

/// The result of a `PUT_VALUE` query.
#[derive(Debug, Clone, PartialEq)]
pub enum PutValueResult {
    /// The value has been put successfully.
    Ok { key: Multihash, successes: usize, failures: usize },
    /// The put value failed.
    Err { key: Multihash, cause: RecordStorageError }
}

/// Output event of the `Kademlia` behaviour.
#[derive(Debug, Clone)]
pub enum KademliaOut {
    /// We have discovered a node.
    ///
    /// > **Note**: The Kademlia behaviour doesn't store the addresses of this node, and therefore
    /// > attempting to connect to this node may or may not work.
    Discovered {
        /// Id of the node that was discovered.
        peer_id: PeerId,
        /// Addresses of the node.
        addresses: Vec<Multiaddr>,
        /// How the reporter is connected to the reported.
        ty: KadConnectionType,
    },

    /// A node has been added to a k-bucket.
    KBucketAdded {
        /// Id of the node that was added.
        peer_id: PeerId,
        /// If `Some`, this addition replaced the value that is inside the option.
        replaced: Option<PeerId>,
    },

    /// Result of a `FIND_NODE` iterative query.
    FindNodeResult {
        /// The key that we looked for in the query.
        key: PeerId,
        /// List of peers ordered from closest to furthest away.
        closer_peers: Vec<PeerId>,
    },

    /// Result of a `GET_PROVIDERS` iterative query.
    GetProvidersResult {
        /// The key that we looked for in the query.
        key: Multihash,
        /// The peers that are providing the requested key.
        provider_peers: Vec<PeerId>,
        /// List of peers ordered from closest to furthest away.
        closer_peers: Vec<PeerId>,
    },

    /// Result of a `GET_VALUE` query.
    GetValueResult(GetValueResult),

    /// Result of a `PUT_VALUE` query.
    PutValueResult(PutValueResult),
}
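
// How a caller might consume these events when driving the swarm (a sketch;
// `event` is assumed to be a `KademliaOut` yielded by the swarm's poll loop):
//
//     match event {
//         KademliaOut::GetValueResult(GetValueResult::Found { results }) => {
//             println!("fetched {} record(s)", results.len());
//         }
//         KademliaOut::GetValueResult(GetValueResult::NotFound { key, closest_peers }) => {
//             println!("{:?} not found; {} closest peers reported", key, closest_peers.len());
//         }
//         KademliaOut::PutValueResult(PutValueResult::Ok { successes, failures, .. }) => {
//             println!("record stored on {} peer(s), {} failure(s)", successes, failures);
//         }
//         KademliaOut::PutValueResult(PutValueResult::Err { cause, .. }) => {
//             println!("local storage rejected the record: {:?}", cause);
//         }
//         _ => {}
//     }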

impl From<kbucket::EntryView<PeerId, Addresses>> for KadPeer {
    fn from(e: kbucket::EntryView<PeerId, Addresses>) -> KadPeer {
        KadPeer {
            node_id: e.node.key.into_preimage(),
            multiaddrs: e.node.value.into_vec(),
            connection_ty: match e.status {
                NodeStatus::Connected => KadConnectionType::Connected,
                NodeStatus::Disconnected => KadConnectionType::NotConnected
            }
        }
    }
}