2019-03-18 18:20:57 +01:00
|
|
|
// Copyright 2019 Parity Technologies (UK) Ltd.
|
|
|
|
//
|
|
|
|
// Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
// copy of this software and associated documentation files (the "Software"),
|
|
|
|
// to deal in the Software without restriction, including without limitation
|
|
|
|
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
// and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
// Software is furnished to do so, subject to the following conditions:
|
|
|
|
//
|
|
|
|
// The above copyright notice and this permission notice shall be included in
|
|
|
|
// all copies or substantial portions of the Software.
|
|
|
|
//
|
|
|
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
|
|
|
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
|
|
// DEALINGS IN THE SOFTWARE.
|
|
|
|
|
|
|
|
#![cfg(test)]
|
|
|
|
|
2019-07-03 16:16:25 +02:00
|
|
|
use super::*;
|
|
|
|
|
|
|
|
use crate::kbucket::Distance;
|
|
|
|
use futures::future;
|
2019-04-10 10:29:21 +02:00
|
|
|
use libp2p_core::{
|
2019-07-04 14:47:59 +02:00
|
|
|
PeerId,
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
Transport,
|
|
|
|
identity,
|
|
|
|
transport::{MemoryTransport, boxed::Boxed},
|
|
|
|
nodes::Substream,
|
|
|
|
multiaddr::{Protocol, multiaddr},
|
|
|
|
muxing::StreamMuxerBox,
|
|
|
|
upgrade,
|
2019-04-10 10:29:21 +02:00
|
|
|
};
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
use libp2p_secio::SecioConfig;
|
2019-07-04 14:47:59 +02:00
|
|
|
use libp2p_swarm::Swarm;
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
use libp2p_yamux as yamux;
|
2019-07-03 16:16:25 +02:00
|
|
|
use rand::{Rng, random, thread_rng};
|
2019-06-04 14:44:24 +03:00
|
|
|
use std::{collections::HashSet, iter::FromIterator, io, num::NonZeroU8, u64};
|
2019-07-03 16:16:25 +02:00
|
|
|
use tokio::runtime::current_thread;
|
|
|
|
use multihash::Hash::SHA2256;
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
|
|
|
|
/// The swarm type used throughout these tests: an in-memory transport
/// (secured and multiplexed in `build_nodes`) driving the `Kademlia`
/// network behaviour.
type TestSwarm = Swarm<
    Boxed<(PeerId, StreamMuxerBox), io::Error>,
    Kademlia<Substream<StreamMuxerBox>>
>;
|
2019-03-18 18:20:57 +01:00
|
|
|
|
|
|
|
/// Builds swarms, each listening on a port. Does *not* connect the nodes together.
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
fn build_nodes(num: usize) -> (u64, Vec<TestSwarm>) {
|
|
|
|
let port_base = 1 + random::<u64>() % (u64::MAX - num as u64);
|
2019-03-18 18:20:57 +01:00
|
|
|
let mut result: Vec<Swarm<_, _>> = Vec::with_capacity(num);
|
|
|
|
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
for _ in 0 .. num {
|
2019-03-18 18:20:57 +01:00
|
|
|
// TODO: make creating the transport more elegant ; literaly half of the code of the test
|
|
|
|
// is about creating the transport
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
let local_key = identity::Keypair::generate_ed25519();
|
2019-03-18 18:20:57 +01:00
|
|
|
let local_public_key = local_key.public();
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
let transport = MemoryTransport::default()
|
|
|
|
.with_upgrade(SecioConfig::new(local_key))
|
2019-03-18 18:20:57 +01:00
|
|
|
.and_then(move |out, endpoint| {
|
|
|
|
let peer_id = out.remote_key.into_peer_id();
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
let yamux = yamux::Config::default();
|
|
|
|
upgrade::apply(out.stream, yamux, endpoint)
|
|
|
|
.map(|muxer| (peer_id, StreamMuxerBox::new(muxer)))
|
2019-03-18 18:20:57 +01:00
|
|
|
})
|
2019-04-10 10:29:21 +02:00
|
|
|
.map_err(|e| panic!("Failed to create transport: {:?}", e))
|
2019-03-18 18:20:57 +01:00
|
|
|
.boxed();
|
|
|
|
|
2019-07-03 16:16:25 +02:00
|
|
|
let cfg = KademliaConfig::new(local_public_key.clone().into_peer_id());
|
|
|
|
let kad = Kademlia::new(cfg);
|
2019-03-18 18:20:57 +01:00
|
|
|
result.push(Swarm::new(transport, kad, local_public_key.into_peer_id()));
|
|
|
|
}
|
|
|
|
|
2019-04-10 10:29:21 +02:00
|
|
|
let mut i = 0;
|
2019-03-18 18:20:57 +01:00
|
|
|
for s in result.iter_mut() {
|
2019-04-10 10:29:21 +02:00
|
|
|
Swarm::listen_on(s, Protocol::Memory(port_base + i).into()).unwrap();
|
|
|
|
i += 1
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
|
|
|
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
(port_base, result)
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
|
|
|
|
2019-07-03 16:16:25 +02:00
|
|
|
/// Builds `total` swarms and pre-populates their routing tables so that
/// swarm `i` knows the addresses of the `step` peers that follow it,
/// forming a chain of groups through which queries can propagate.
/// Returns the peer IDs (in swarm order) and the swarms themselves.
fn build_connected_nodes(total: usize, step: usize) -> (Vec<PeerId>, Vec<TestSwarm>) {
    let (port_base, mut swarms) = build_nodes(total);
    // Peer IDs in the same order as the swarms.
    let swarm_ids: Vec<_> = swarms.iter().map(Swarm::local_peer_id).cloned().collect();

    // `i` indexes the swarm currently learning addresses; it advances by
    // `step` after every `step` peers, so swarm `i` is told the addresses
    // of peers `i+1 ..= i+step`.
    let mut i = 0;
    for (j, peer) in swarm_ids.iter().enumerate().skip(1) {
        if i < swarm_ids.len() {
            swarms[i].add_address(&peer, Protocol::Memory(port_base + j as u64).into());
        }
        if j % step == 0 {
            i += step;
        }
    }

    (swarm_ids, swarms)
}
|
|
|
|
|
|
|
|
#[test]
fn bootstrap() {
    fn run<G: rand::Rng>(rng: &mut G) {
        // Random cluster size and group connectivity step.
        let num_total = rng.gen_range(2, 20);
        let num_group = rng.gen_range(1, num_total);
        let (swarm_ids, mut swarms) = build_connected_nodes(num_total, num_group);

        // Kick off bootstrapping from the first node only.
        swarms[0].bootstrap();

        // Expected known peers: after bootstrapping, the first node should
        // know every other peer in the cluster.
        let expected_known = swarm_ids.iter().skip(1).cloned().collect::<HashSet<_>>();

        // Run test
        current_thread::run(
            future::poll_fn(move || {
                // Drive every swarm until the bootstrapping node reports success.
                for (i, swarm) in swarms.iter_mut().enumerate() {
                    loop {
                        match swarm.poll().unwrap() {
                            Async::Ready(Some(KademliaEvent::BootstrapResult(Ok(ok)))) => {
                                // Only swarm 0 initiated a bootstrap, so only it
                                // may emit this event.
                                assert_eq!(i, 0);
                                assert_eq!(ok.peer, swarm_ids[0]);
                                // Collect the peer IDs currently in the routing
                                // table and compare against the full cluster.
                                let known = swarm.kbuckets.iter()
                                    .map(|e| e.node.key.preimage().clone())
                                    .collect::<HashSet<_>>();
                                assert_eq!(expected_known, known);
                                return Ok(Async::Ready(()));
                            }
                            // Ignore all other events.
                            Async::Ready(_) => (),
                            Async::NotReady => break,
                        }
                    }
                }
                Ok(Async::NotReady)
            }))
    }

    // Repeat with several random cluster configurations.
    let mut rng = thread_rng();
    for _ in 0 .. 10 {
        run(&mut rng)
    }
}
|
|
|
|
|
2019-03-18 18:20:57 +01:00
|
|
|
#[test]
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
fn query_iter() {
|
2019-07-03 16:16:25 +02:00
|
|
|
/// Computes the distances (per `kbucket::Key::distance`) from `key` to
/// each of the given peers, in the order the peers were given.
fn distances<K>(key: &kbucket::Key<K>, peers: Vec<PeerId>) -> Vec<Distance> {
    peers.into_iter()
        .map(kbucket::Key::from)
        .map(|k| k.distance(key))
        .collect()
}
|
|
|
|
|
2019-07-03 16:16:25 +02:00
|
|
|
fn run<G: Rng>(rng: &mut G) {
|
|
|
|
let num_total = rng.gen_range(2, 20);
|
|
|
|
let (swarm_ids, mut swarms) = build_connected_nodes(num_total, 1);
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
|
2019-07-03 16:16:25 +02:00
|
|
|
// Ask the first peer in the list to search a random peer. The search should
|
|
|
|
// propagate forwards through the list of peers.
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
let search_target = PeerId::random();
|
2019-05-17 17:27:57 +02:00
|
|
|
let search_target_key = kbucket::Key::from(search_target.clone());
|
2019-07-03 16:16:25 +02:00
|
|
|
swarms[0].get_closest_peers(search_target.clone());
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
|
|
|
|
// Set up expectations.
|
2019-07-03 16:16:25 +02:00
|
|
|
let expected_swarm_id = swarm_ids[0].clone();
|
|
|
|
let expected_peer_ids: Vec<_> = swarm_ids.iter().skip(1).cloned().collect();
|
2019-05-17 17:27:57 +02:00
|
|
|
let mut expected_distances = distances(&search_target_key, expected_peer_ids.clone());
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
expected_distances.sort();
|
|
|
|
|
|
|
|
// Run test
|
2019-07-03 16:16:25 +02:00
|
|
|
current_thread::run(
|
|
|
|
future::poll_fn(move || {
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
for (i, swarm) in swarms.iter_mut().enumerate() {
|
|
|
|
loop {
|
|
|
|
match swarm.poll().unwrap() {
|
2019-07-03 16:16:25 +02:00
|
|
|
Async::Ready(Some(KademliaEvent::GetClosestPeersResult(Ok(ok)))) => {
|
|
|
|
assert_eq!(ok.key, search_target);
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
assert_eq!(swarm_ids[i], expected_swarm_id);
|
2019-07-03 16:16:25 +02:00
|
|
|
assert!(expected_peer_ids.iter().all(|p| ok.peers.contains(p)));
|
|
|
|
let key = kbucket::Key::new(ok.key);
|
|
|
|
assert_eq!(expected_distances, distances(&key, ok.peers));
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
return Ok(Async::Ready(()));
|
|
|
|
}
|
|
|
|
Async::Ready(_) => (),
|
|
|
|
Async::NotReady => break,
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
Ok(Async::NotReady)
|
|
|
|
}))
|
|
|
|
}
|
2019-03-18 18:20:57 +01:00
|
|
|
|
2019-07-03 16:16:25 +02:00
|
|
|
let mut rng = thread_rng();
|
|
|
|
for _ in 0 .. 10 {
|
|
|
|
run(&mut rng)
|
|
|
|
}
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
fn unresponsive_not_returned_direct() {
    // Build one node. It contains fake addresses to non-existing nodes. We ask it to find a
    // random peer. We make sure that no fake address is returned.

    let (_, mut swarms) = build_nodes(1);

    // Add fake addresses: random peer IDs mapped to an address nobody listens on.
    for _ in 0 .. 10 {
        swarms[0].add_address(&PeerId::random(), Protocol::Udp(10u16).into());
    }

    // Ask the node to search for a random peer.
    let search_target = PeerId::random();
    swarms[0].get_closest_peers(search_target.clone());

    // Drive the swarm until the query completes; the result must contain
    // none of the fake (unreachable) peers.
    current_thread::run(
        future::poll_fn(move || {
            for swarm in &mut swarms {
                loop {
                    match swarm.poll().unwrap() {
                        Async::Ready(Some(KademliaEvent::GetClosestPeersResult(Ok(ok)))) => {
                            assert_eq!(ok.key, search_target);
                            // No fake address may be reported as a closest peer.
                            assert_eq!(ok.peers.len(), 0);
                            return Ok(Async::Ready(()));
                        }
                        Async::Ready(_) => (),
                        Async::NotReady => break,
                    }
                }
            }

            Ok(Async::NotReady)
        }))
}
|
|
|
|
|
|
|
|
#[test]
fn unresponsive_not_returned_indirect() {
    // Build two nodes. Node #2 knows about node #1. Node #1 contains fake addresses to
    // non-existing nodes. We ask node #2 to find a random peer. We make sure that no fake address
    // is returned.

    let (port_base, mut swarms) = build_nodes(2);

    // Add fake addresses to first.
    let first_peer_id = Swarm::local_peer_id(&swarms[0]).clone();
    for _ in 0 .. 10 {
        swarms[0].add_address(
            &PeerId::random(),
            multiaddr![Udp(10u16)]
        );
    }

    // Connect second to first.
    swarms[1].add_address(&first_peer_id, Protocol::Memory(port_base).into());

    // Ask second to search a random value.
    let search_target = PeerId::random();
    swarms[1].get_closest_peers(search_target.clone());

    // Drive both swarms; the query on node #2 must return only the one real
    // peer (node #1) and none of node #1's fake addresses.
    current_thread::run(
        future::poll_fn(move || {
            for swarm in &mut swarms {
                loop {
                    match swarm.poll().unwrap() {
                        Async::Ready(Some(KademliaEvent::GetClosestPeersResult(Ok(ok)))) => {
                            assert_eq!(ok.key, search_target);
                            assert_eq!(ok.peers.len(), 1);
                            assert_eq!(ok.peers[0], first_peer_id);
                            return Ok(Async::Ready(()));
                        }
                        Async::Ready(_) => (),
                        Async::NotReady => break,
                    }
                }
            }

            Ok(Async::NotReady)
        }))
}
|
2019-06-04 14:44:24 +03:00
|
|
|
|
|
|
|
#[test]
fn get_record_not_found() {
    // Three nodes in a line topology: 0 -> 1 -> 2. No node stores the record,
    // so a GET must fail with `NotFound`, reporting the two remote peers as
    // the closest peers to the key.
    let (port_base, mut swarms) = build_nodes(3);

    let swarm_ids: Vec<_> = swarms.iter().map(Swarm::local_peer_id).cloned().collect();

    swarms[0].add_address(&swarm_ids[1], Protocol::Memory(port_base + 1).into());
    swarms[1].add_address(&swarm_ids[2], Protocol::Memory(port_base + 2).into());

    let target_key = multihash::encode(SHA2256, &vec![1,2,3]).unwrap();
    swarms[0].get_record(&target_key, Quorum::One);

    current_thread::run(
        future::poll_fn(move || {
            for swarm in &mut swarms {
                loop {
                    match swarm.poll().unwrap() {
                        Async::Ready(Some(KademliaEvent::GetRecordResult(Err(e)))) => {
                            // Only a `NotFound` error is acceptable here.
                            if let GetRecordError::NotFound { key, closest_peers, } = e {
                                assert_eq!(key, target_key);
                                assert_eq!(closest_peers.len(), 2);
                                assert!(closest_peers.contains(&swarm_ids[1]));
                                assert!(closest_peers.contains(&swarm_ids[2]));
                                return Ok(Async::Ready(()));
                            } else {
                                panic!("Unexpected error result: {:?}", e);
                            }
                        }
                        Async::Ready(_) => (),
                        Async::NotReady => break,
                    }
                }
            }

            Ok(Async::NotReady)
        }))
}
|
|
|
|
|
|
|
|
#[test]
fn put_value() {
    // Publish a record with `Quorum::All` into a randomly sized, partially
    // connected network and verify that, once the PUT succeeds, exactly the
    // `K_VALUE` peers closest to the key hold a copy of the record.
    fn run<G: rand::Rng>(rng: &mut G) {
        let num_total = rng.gen_range(21, 40);
        let num_group = rng.gen_range(1, usize::min(num_total, kbucket::K_VALUE));
        let (swarm_ids, mut swarms) = build_connected_nodes(num_total, num_group);

        let key = multihash::encode(SHA2256, &vec![1,2,3]).unwrap();
        let bucket_key = kbucket::Key::from(key.clone());

        // Rank all peers by XOR distance to the record key.
        let mut sorted_peer_ids: Vec<_> = swarm_ids
            .iter()
            .map(|id| (id.clone(), kbucket::Key::from(id.clone()).distance(&bucket_key)))
            .collect();

        sorted_peer_ids.sort_by(|(_, d1), (_, d2)| d1.cmp(d2));

        let closest = HashSet::from_iter(sorted_peer_ids.into_iter().map(|(id, _)| id));

        let record = Record { key: key.clone(), value: vec![4,5,6] };
        swarms[0].put_record(record, Quorum::All);

        current_thread::run(
            future::poll_fn(move || {
                // Flipped once the publisher reports a successful PUT; only then
                // do we inspect the record stores of the other nodes.
                let mut check_results = false;
                for swarm in &mut swarms {
                    loop {
                        match swarm.poll().unwrap() {
                            Async::Ready(Some(KademliaEvent::PutRecordResult(Ok(_)))) => {
                                check_results = true;
                            }
                            Async::Ready(_) => (),
                            Async::NotReady => break,
                        }
                    }
                }

                if check_results {
                    let mut have: HashSet<_> = Default::default();

                    // NOTE(review): `skip(1).enumerate()` yields i == 0 for
                    // swarms[1], so `swarm_ids[i]` pairs swarms[1] with
                    // swarm_ids[0] — looks off by one; verify intended.
                    for (i, swarm) in swarms.iter().skip(1).enumerate() {
                        if swarm.records.get(&key).is_some() {
                            have.insert(swarm_ids[i].clone());
                        }
                    }

                    let intersection: HashSet<_> = have.intersection(&closest).collect();

                    assert_eq!(have.len(), kbucket::K_VALUE);
                    assert_eq!(intersection.len(), kbucket::K_VALUE);

                    return Ok(Async::Ready(()));
                }

                Ok(Async::NotReady)
            }))
    }

    // Repeat with fresh random topologies to exercise different network shapes.
    let mut rng = thread_rng();
    for _ in 0 .. 10 {
        run(&mut rng);
    }
}
|
|
|
|
|
|
|
|
#[test]
fn get_value() {
    // Three nodes in a line topology: 0 -> 1 -> 2. Node #1 stores the record;
    // a GET with `Quorum::One` issued by node #0 must return exactly that record.
    let (port_base, mut swarms) = build_nodes(3);

    let swarm_ids: Vec<_> = swarms.iter().map(Swarm::local_peer_id).cloned().collect();

    swarms[0].add_address(&swarm_ids[1], Protocol::Memory(port_base + 1).into());
    swarms[1].add_address(&swarm_ids[2], Protocol::Memory(port_base + 2).into());

    let record = Record {
        key: multihash::encode(SHA2256, &vec![1,2,3]).unwrap(),
        value: vec![4,5,6]
    };

    // Store the record directly on node #1, then query from node #0.
    swarms[1].records.put(record.clone()).unwrap();
    swarms[0].get_record(&record.key, Quorum::One);

    current_thread::run(
        future::poll_fn(move || {
            for swarm in &mut swarms {
                loop {
                    match swarm.poll().unwrap() {
                        Async::Ready(Some(KademliaEvent::GetRecordResult(Ok(ok)))) => {
                            assert_eq!(ok.records.len(), 1);
                            assert_eq!(ok.records.first(), Some(&record));
                            return Ok(Async::Ready(()));
                        }
                        Async::Ready(_) => (),
                        Async::NotReady => break,
                    }
                }
            }

            Ok(Async::NotReady)
        }))
}
|
|
|
|
|
|
|
|
#[test]
fn get_value_multiple() {
    // Check that if we have responses from multiple peers, a correct number of
    // results is returned.
    let num_nodes = 12;
    let (_swarm_ids, mut swarms) = build_connected_nodes(num_nodes, num_nodes);
    let num_results = 10;

    let record = Record {
        key: multihash::encode(SHA2256, &vec![1,2,3]).unwrap(),
        value: vec![4,5,6],
    };

    // Every node holds a copy of the record.
    for i in 0 .. num_nodes {
        swarms[i].records.put(record.clone()).unwrap();
    }

    // Ask for `num_results` copies; the query must return exactly that many.
    let quorum = Quorum::N(NonZeroU8::new(num_results as u8).unwrap());
    swarms[0].get_record(&record.key, quorum);

    current_thread::run(
        future::poll_fn(move || {
            for swarm in &mut swarms {
                loop {
                    match swarm.poll().unwrap() {
                        Async::Ready(Some(KademliaEvent::GetRecordResult(Ok(ok)))) => {
                            assert_eq!(ok.records.len(), num_results);
                            assert_eq!(ok.records.first(), Some(&record));
                            return Ok(Async::Ready(()));
                        }
                        Async::Ready(_) => (),
                        Async::NotReady => break,
                    }
                }
            }
            Ok(Async::NotReady)
        }))
}
|