2019-03-18 18:20:57 +01:00
|
|
|
// Copyright 2019 Parity Technologies (UK) Ltd.
|
|
|
|
//
|
|
|
|
// Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
// copy of this software and associated documentation files (the "Software"),
|
|
|
|
// to deal in the Software without restriction, including without limitation
|
|
|
|
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
// and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
// Software is furnished to do so, subject to the following conditions:
|
|
|
|
//
|
|
|
|
// The above copyright notice and this permission notice shall be included in
|
|
|
|
// all copies or substantial portions of the Software.
|
|
|
|
//
|
|
|
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
|
|
|
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
|
|
// DEALINGS IN THE SOFTWARE.
|
|
|
|
|
|
|
|
#![cfg(test)]
|
|
|
|
|
2019-07-03 16:16:25 +02:00
|
|
|
use super::*;
|
|
|
|
|
2020-06-19 12:22:26 +02:00
|
|
|
use crate::K_VALUE;
|
2019-07-03 16:16:25 +02:00
|
|
|
use crate::kbucket::Distance;
|
2020-06-19 12:22:26 +02:00
|
|
|
use crate::record::{Key, store::MemoryStore};
|
2019-11-28 16:12:02 +01:00
|
|
|
use futures::{
|
|
|
|
prelude::*,
|
|
|
|
executor::block_on,
|
|
|
|
future::poll_fn,
|
|
|
|
};
|
2020-06-19 12:22:26 +02:00
|
|
|
use futures_timer::Delay;
|
2019-04-10 10:29:21 +02:00
|
|
|
use libp2p_core::{
|
2019-07-04 14:47:59 +02:00
|
|
|
PeerId,
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
Transport,
|
|
|
|
identity,
|
2020-02-07 16:29:30 +01:00
|
|
|
transport::MemoryTransport,
|
2020-04-17 19:57:35 +02:00
|
|
|
multiaddr::{Protocol, Multiaddr, multiaddr},
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
muxing::StreamMuxerBox,
|
2019-10-10 11:31:44 +02:00
|
|
|
upgrade
|
2019-04-10 10:29:21 +02:00
|
|
|
};
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
use libp2p_secio::SecioConfig;
|
2019-07-04 14:47:59 +02:00
|
|
|
use libp2p_swarm::Swarm;
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
use libp2p_yamux as yamux;
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
use quickcheck::*;
|
2020-06-19 12:22:26 +02:00
|
|
|
use rand::{Rng, random, thread_rng, rngs::StdRng, SeedableRng};
|
|
|
|
use std::{collections::{HashSet, HashMap}, time::Duration, io, num::NonZeroUsize, u64};
|
2020-03-05 16:49:36 +01:00
|
|
|
use multihash::{wrap, Code, Multihash};
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
|
2020-02-07 16:29:30 +01:00
|
|
|
type TestSwarm = Swarm<Kademlia<MemoryStore>>;
|
2019-03-18 18:20:57 +01:00
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
fn build_node() -> (Multiaddr, TestSwarm) {
|
|
|
|
build_node_with_config(Default::default())
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
}
|
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
fn build_node_with_config(cfg: KademliaConfig) -> (Multiaddr, TestSwarm) {
|
|
|
|
let local_key = identity::Keypair::generate_ed25519();
|
|
|
|
let local_public_key = local_key.public();
|
|
|
|
let transport = MemoryTransport::default()
|
|
|
|
.upgrade(upgrade::Version::V1)
|
|
|
|
.authenticate(SecioConfig::new(local_key))
|
|
|
|
.multiplex(yamux::Config::default())
|
|
|
|
.map(|(p, m), _| (p, StreamMuxerBox::new(m)))
|
|
|
|
.map_err(|e| -> io::Error { panic!("Failed to create transport: {:?}", e); })
|
|
|
|
.boxed();
|
2019-03-18 18:20:57 +01:00
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
let local_id = local_public_key.clone().into_peer_id();
|
|
|
|
let store = MemoryStore::new(local_id.clone());
|
|
|
|
let behaviour = Kademlia::with_config(local_id.clone(), store, cfg.clone());
|
2019-03-18 18:20:57 +01:00
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
let mut swarm = Swarm::new(transport, behaviour, local_id);
|
|
|
|
|
|
|
|
let address: Multiaddr = Protocol::Memory(random::<u64>()).into();
|
|
|
|
Swarm::listen_on(&mut swarm, address.clone()).unwrap();
|
|
|
|
|
|
|
|
(address, swarm)
|
|
|
|
}
|
2019-03-18 18:20:57 +01:00
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
/// Builds swarms, each listening on a port. Does *not* connect the nodes together.
|
|
|
|
fn build_nodes(num: usize) -> Vec<(Multiaddr, TestSwarm)> {
|
|
|
|
build_nodes_with_config(num, Default::default())
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Builds swarms, each listening on a port. Does *not* connect the nodes together.
|
|
|
|
fn build_nodes_with_config(num: usize, cfg: KademliaConfig) -> Vec<(Multiaddr, TestSwarm)> {
|
|
|
|
(0..num).map(|_| build_node_with_config(cfg.clone())).collect()
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
fn build_connected_nodes(total: usize, step: usize) -> Vec<(Multiaddr, TestSwarm)> {
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
build_connected_nodes_with_config(total, step, Default::default())
|
|
|
|
}
|
|
|
|
|
|
|
|
fn build_connected_nodes_with_config(total: usize, step: usize, cfg: KademliaConfig)
|
2020-04-17 19:57:35 +02:00
|
|
|
-> Vec<(Multiaddr, TestSwarm)>
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
{
|
2020-04-17 19:57:35 +02:00
|
|
|
let mut swarms = build_nodes_with_config(total, cfg);
|
|
|
|
let swarm_ids: Vec<_> = swarms.iter()
|
|
|
|
.map(|(addr, swarm)| (addr.clone(), Swarm::local_peer_id(swarm).clone()))
|
|
|
|
.collect();
|
2019-07-03 16:16:25 +02:00
|
|
|
|
|
|
|
let mut i = 0;
|
2020-04-17 19:57:35 +02:00
|
|
|
for (j, (addr, peer_id)) in swarm_ids.iter().enumerate().skip(1) {
|
2019-07-03 16:16:25 +02:00
|
|
|
if i < swarm_ids.len() {
|
2020-04-17 19:57:35 +02:00
|
|
|
swarms[i].1.add_address(peer_id, addr.clone());
|
2019-07-03 16:16:25 +02:00
|
|
|
}
|
|
|
|
if j % step == 0 {
|
|
|
|
i += step;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
swarms
|
|
|
|
}
|
|
|
|
|
|
|
|
fn build_fully_connected_nodes_with_config(total: usize, cfg: KademliaConfig)
|
|
|
|
-> Vec<(Multiaddr, TestSwarm)>
|
|
|
|
{
|
|
|
|
let mut swarms = build_nodes_with_config(total, cfg);
|
|
|
|
let swarm_addr_and_peer_id: Vec<_> = swarms.iter()
|
|
|
|
.map(|(addr, swarm)| (addr.clone(), Swarm::local_peer_id(swarm).clone()))
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
for (_addr, swarm) in swarms.iter_mut() {
|
|
|
|
for (addr, peer) in &swarm_addr_and_peer_id {
|
|
|
|
swarm.add_address(&peer, addr.clone());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
swarms
|
2019-07-03 16:16:25 +02:00
|
|
|
}
|
|
|
|
|
2020-03-05 16:49:36 +01:00
|
|
|
fn random_multihash() -> Multihash {
|
|
|
|
wrap(Code::Sha2_256, &thread_rng().gen::<[u8; 32]>())
|
|
|
|
}
|
|
|
|
|
2020-06-19 12:22:26 +02:00
|
|
|
#[derive(Clone, Debug)]
|
|
|
|
struct Seed([u8; 32]);
|
|
|
|
|
|
|
|
impl Arbitrary for Seed {
|
|
|
|
fn arbitrary<G: Gen>(g: &mut G) -> Seed {
|
|
|
|
Seed(g.gen())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-03 16:16:25 +02:00
|
|
|
#[test]
|
|
|
|
fn bootstrap() {
|
2020-06-19 12:22:26 +02:00
|
|
|
fn prop(seed: Seed) {
|
|
|
|
let mut rng = StdRng::from_seed(seed.0);
|
|
|
|
|
2019-07-03 16:16:25 +02:00
|
|
|
let num_total = rng.gen_range(2, 20);
|
2020-06-19 12:22:26 +02:00
|
|
|
// When looking for the closest node to a key, Kademlia considers
|
|
|
|
// K_VALUE nodes to query at initialization. If `num_group` is larger
|
|
|
|
// than K_VALUE the remaining locally known nodes will not be
|
|
|
|
// considered. Given that no other node is aware of them, they would be
|
|
|
|
// lost entirely. To prevent the above restrict `num_group` to be equal
|
|
|
|
// or smaller than K_VALUE.
|
|
|
|
let num_group = rng.gen_range(1, (num_total % K_VALUE.get()) + 2);
|
|
|
|
|
|
|
|
let mut cfg = KademliaConfig::default();
|
|
|
|
if rng.gen() {
|
|
|
|
cfg.disjoint_query_paths(true);
|
|
|
|
}
|
|
|
|
|
|
|
|
let mut swarms = build_connected_nodes_with_config(
|
|
|
|
num_total,
|
|
|
|
num_group,
|
|
|
|
cfg,
|
|
|
|
).into_iter()
|
2020-04-17 19:57:35 +02:00
|
|
|
.map(|(_a, s)| s)
|
|
|
|
.collect::<Vec<_>>();
|
2020-06-19 12:22:26 +02:00
|
|
|
let swarm_ids: Vec<_> = swarms.iter()
|
|
|
|
.map(Swarm::local_peer_id)
|
|
|
|
.cloned()
|
|
|
|
.collect();
|
2019-07-03 16:16:25 +02:00
|
|
|
|
2020-05-16 10:43:09 +02:00
|
|
|
let qid = swarms[0].bootstrap().unwrap();
|
2019-07-03 16:16:25 +02:00
|
|
|
|
|
|
|
// Expected known peers
|
|
|
|
let expected_known = swarm_ids.iter().skip(1).cloned().collect::<HashSet<_>>();
|
2020-05-16 10:43:09 +02:00
|
|
|
let mut first = true;
|
2019-07-03 16:16:25 +02:00
|
|
|
|
|
|
|
// Run test
|
2019-11-28 16:12:02 +01:00
|
|
|
block_on(
|
|
|
|
poll_fn(move |ctx| {
|
2019-07-03 16:16:25 +02:00
|
|
|
for (i, swarm) in swarms.iter_mut().enumerate() {
|
|
|
|
loop {
|
2019-11-28 16:12:02 +01:00
|
|
|
match swarm.poll_next_unpin(ctx) {
|
2020-05-16 10:43:09 +02:00
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult {
|
|
|
|
id, result: QueryResult::Bootstrap(Ok(ok)), ..
|
|
|
|
})) => {
|
|
|
|
assert_eq!(id, qid);
|
2019-07-03 16:16:25 +02:00
|
|
|
assert_eq!(i, 0);
|
2020-05-16 10:43:09 +02:00
|
|
|
if first {
|
|
|
|
// Bootstrapping must start with a self-lookup.
|
|
|
|
assert_eq!(ok.peer, swarm_ids[0]);
|
|
|
|
}
|
|
|
|
first = false;
|
|
|
|
if ok.num_remaining == 0 {
|
|
|
|
let known = swarm.kbuckets.iter()
|
|
|
|
.map(|e| e.node.key.preimage().clone())
|
|
|
|
.collect::<HashSet<_>>();
|
|
|
|
assert_eq!(expected_known, known);
|
|
|
|
return Poll::Ready(())
|
|
|
|
}
|
2019-07-03 16:16:25 +02:00
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
// Ignore any other event.
|
2020-01-07 11:57:00 +01:00
|
|
|
Poll::Ready(Some(_)) => (),
|
2019-11-28 16:12:02 +01:00
|
|
|
e @ Poll::Ready(_) => panic!("Unexpected return value: {:?}", e),
|
|
|
|
Poll::Pending => break,
|
2019-07-03 16:16:25 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
Poll::Pending
|
|
|
|
})
|
|
|
|
)
|
2019-07-03 16:16:25 +02:00
|
|
|
}
|
|
|
|
|
2020-06-19 12:22:26 +02:00
|
|
|
QuickCheck::new().tests(10).quickcheck(prop as fn(_) -> _)
|
2019-07-03 16:16:25 +02:00
|
|
|
}
|
|
|
|
|
2019-03-18 18:20:57 +01:00
|
|
|
#[test]
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
fn query_iter() {
|
2019-07-03 16:16:25 +02:00
|
|
|
fn distances<K>(key: &kbucket::Key<K>, peers: Vec<PeerId>) -> Vec<Distance> {
|
2019-05-17 17:27:57 +02:00
|
|
|
peers.into_iter()
|
|
|
|
.map(kbucket::Key::from)
|
|
|
|
.map(|k| k.distance(key))
|
|
|
|
.collect()
|
|
|
|
}
|
|
|
|
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
fn run(rng: &mut impl Rng) {
|
2019-07-03 16:16:25 +02:00
|
|
|
let num_total = rng.gen_range(2, 20);
|
2020-04-17 19:57:35 +02:00
|
|
|
let mut swarms = build_connected_nodes(num_total, 1).into_iter()
|
|
|
|
.map(|(_a, s)| s)
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
let swarm_ids: Vec<_> = swarms.iter().map(Swarm::local_peer_id).cloned().collect();
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
|
2019-07-03 16:16:25 +02:00
|
|
|
// Ask the first peer in the list to search a random peer. The search should
|
|
|
|
// propagate forwards through the list of peers.
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
let search_target = PeerId::random();
|
2019-08-15 11:36:47 +02:00
|
|
|
let search_target_key = kbucket::Key::new(search_target.clone());
|
2020-05-16 10:43:09 +02:00
|
|
|
let qid = swarms[0].get_closest_peers(search_target.clone());
|
|
|
|
|
|
|
|
match swarms[0].query(&qid) {
|
|
|
|
Some(q) => match q.info() {
|
|
|
|
QueryInfo::GetClosestPeers { key } => {
|
|
|
|
assert_eq!(&key[..], search_target.borrow() as &[u8])
|
|
|
|
},
|
|
|
|
i => panic!("Unexpected query info: {:?}", i)
|
|
|
|
}
|
|
|
|
None => panic!("Query not found: {:?}", qid)
|
|
|
|
}
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
|
|
|
|
// Set up expectations.
|
2019-07-03 16:16:25 +02:00
|
|
|
let expected_swarm_id = swarm_ids[0].clone();
|
|
|
|
let expected_peer_ids: Vec<_> = swarm_ids.iter().skip(1).cloned().collect();
|
2019-05-17 17:27:57 +02:00
|
|
|
let mut expected_distances = distances(&search_target_key, expected_peer_ids.clone());
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
expected_distances.sort();
|
|
|
|
|
|
|
|
// Run test
|
2019-11-28 16:12:02 +01:00
|
|
|
block_on(
|
|
|
|
poll_fn(move |ctx| {
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
for (i, swarm) in swarms.iter_mut().enumerate() {
|
|
|
|
loop {
|
2019-11-28 16:12:02 +01:00
|
|
|
match swarm.poll_next_unpin(ctx) {
|
2020-05-16 10:43:09 +02:00
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult {
|
|
|
|
id, result: QueryResult::GetClosestPeers(Ok(ok)), ..
|
|
|
|
})) => {
|
|
|
|
assert_eq!(id, qid);
|
2019-08-15 11:36:47 +02:00
|
|
|
assert_eq!(&ok.key[..], search_target.as_bytes());
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
assert_eq!(swarm_ids[i], expected_swarm_id);
|
2019-08-07 09:27:50 +02:00
|
|
|
assert_eq!(swarm.queries.size(), 0);
|
2019-07-03 16:16:25 +02:00
|
|
|
assert!(expected_peer_ids.iter().all(|p| ok.peers.contains(p)));
|
|
|
|
let key = kbucket::Key::new(ok.key);
|
|
|
|
assert_eq!(expected_distances, distances(&key, ok.peers));
|
2019-11-28 16:12:02 +01:00
|
|
|
return Poll::Ready(());
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
// Ignore any other event.
|
2020-01-07 11:57:00 +01:00
|
|
|
Poll::Ready(Some(_)) => (),
|
2019-11-28 16:12:02 +01:00
|
|
|
e @ Poll::Ready(_) => panic!("Unexpected return value: {:?}", e),
|
|
|
|
Poll::Pending => break,
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
Poll::Pending
|
|
|
|
})
|
|
|
|
)
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
}
|
2019-03-18 18:20:57 +01:00
|
|
|
|
2019-07-03 16:16:25 +02:00
|
|
|
let mut rng = thread_rng();
|
|
|
|
for _ in 0 .. 10 {
|
|
|
|
run(&mut rng)
|
|
|
|
}
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn unresponsive_not_returned_direct() {
|
|
|
|
// Build one node. It contains fake addresses to non-existing nodes. We ask it to find a
|
|
|
|
// random peer. We make sure that no fake address is returned.
|
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
let mut swarms = build_nodes(1).into_iter()
|
|
|
|
.map(|(_a, s)| s)
|
|
|
|
.collect::<Vec<_>>();
|
2019-03-18 18:20:57 +01:00
|
|
|
|
|
|
|
// Add fake addresses.
|
|
|
|
for _ in 0 .. 10 {
|
2019-05-22 14:49:38 +02:00
|
|
|
swarms[0].add_address(&PeerId::random(), Protocol::Udp(10u16).into());
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Ask first to search a random value.
|
|
|
|
let search_target = PeerId::random();
|
2019-07-03 16:16:25 +02:00
|
|
|
swarms[0].get_closest_peers(search_target.clone());
|
2019-03-18 18:20:57 +01:00
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
block_on(
|
|
|
|
poll_fn(move |ctx| {
|
2019-03-18 18:20:57 +01:00
|
|
|
for swarm in &mut swarms {
|
|
|
|
loop {
|
2019-11-28 16:12:02 +01:00
|
|
|
match swarm.poll_next_unpin(ctx) {
|
2020-05-16 10:43:09 +02:00
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult {
|
|
|
|
result: QueryResult::GetClosestPeers(Ok(ok)), ..
|
|
|
|
})) => {
|
2019-08-15 11:36:47 +02:00
|
|
|
assert_eq!(&ok.key[..], search_target.as_bytes());
|
2019-07-03 16:16:25 +02:00
|
|
|
assert_eq!(ok.peers.len(), 0);
|
2019-11-28 16:12:02 +01:00
|
|
|
return Poll::Ready(());
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
// Ignore any other event.
|
2020-01-07 11:57:00 +01:00
|
|
|
Poll::Ready(Some(_)) => (),
|
2019-11-28 16:12:02 +01:00
|
|
|
e @ Poll::Ready(_) => panic!("Unexpected return value: {:?}", e),
|
|
|
|
Poll::Pending => break,
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
Poll::Pending
|
|
|
|
})
|
|
|
|
)
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn unresponsive_not_returned_indirect() {
|
|
|
|
// Build two nodes. Node #2 knows about node #1. Node #1 contains fake addresses to
|
Fix self-dialing in Kademlia. (#1097)
* Fix self-dialing in Kademlia.
Addresses https://github.com/libp2p/rust-libp2p/issues/341 which is the cause
for one of the observations made in https://github.com/libp2p/rust-libp2p/issues/1053.
However, the latter is not assumed to be fully addressed by these changes and
needs further investigation.
Currently, whenever a search for a key yields a response containing the initiating
peer as one of the closest peers known to the remote, the local node
would attempt to dial itself. That attempt is ignored by the Swarm, but
the Kademlia behaviour now believes it still has a query ongoing which is
always doomed to time out. That timeout delays successful completion of the query.
Hence, any query where a remote responds with the ID of the local node takes at
least as long as the `rpc_timeout` to complete, which possibly affects almost
all queries in smaller clusters where every node knows about every other.
This problem is fixed here by ensuring that Kademlia never tries to dial the local node.
Furthermore, `Discovered` events are no longer emitted for the local node
and it is not inserted into the `untrusted_addresses` from discovery, as described
in #341.
This commit also includes a change to the condition for freezing / terminating
a Kademlia query upon receiving a response. Specifically, the condition is
tightened such that it only applies if in addition to `parallelism`
consecutive responses that failed to yield a peer closer to the target, the
last response must also either not have reported any new peer or the
number of collected peers has already reached the number of desired results.
In effect, a Kademlia query now tries harder to actually return `k`
closest peers.
Tests have been refactored and expanded.
* Add another comment.
2019-05-02 21:43:29 +02:00
|
|
|
// non-existing nodes. We ask node #2 to find a random peer. We make sure that no fake address
|
2019-03-18 18:20:57 +01:00
|
|
|
// is returned.
|
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
let mut swarms = build_nodes(2);
|
2019-03-18 18:20:57 +01:00
|
|
|
|
|
|
|
// Add fake addresses to first.
|
|
|
|
for _ in 0 .. 10 {
|
2020-04-17 19:57:35 +02:00
|
|
|
swarms[0].1.add_address(&PeerId::random(), multiaddr![Udp(10u16)]);
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Connect second to first.
|
2020-04-17 19:57:35 +02:00
|
|
|
let first_peer_id = Swarm::local_peer_id(&swarms[0].1).clone();
|
|
|
|
let first_address = swarms[0].0.clone();
|
|
|
|
swarms[1].1.add_address(&first_peer_id, first_address);
|
|
|
|
|
|
|
|
// Drop the swarm addresses.
|
|
|
|
let mut swarms = swarms.into_iter().map(|(_addr, swarm)| swarm).collect::<Vec<_>>();
|
2019-03-18 18:20:57 +01:00
|
|
|
|
|
|
|
// Ask second to search a random value.
|
|
|
|
let search_target = PeerId::random();
|
2019-07-03 16:16:25 +02:00
|
|
|
swarms[1].get_closest_peers(search_target.clone());
|
2019-03-18 18:20:57 +01:00
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
block_on(
|
|
|
|
poll_fn(move |ctx| {
|
2019-03-18 18:20:57 +01:00
|
|
|
for swarm in &mut swarms {
|
|
|
|
loop {
|
2019-11-28 16:12:02 +01:00
|
|
|
match swarm.poll_next_unpin(ctx) {
|
2020-05-16 10:43:09 +02:00
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult {
|
|
|
|
result: QueryResult::GetClosestPeers(Ok(ok)), ..
|
|
|
|
})) => {
|
2019-08-15 11:36:47 +02:00
|
|
|
assert_eq!(&ok.key[..], search_target.as_bytes());
|
2019-07-03 16:16:25 +02:00
|
|
|
assert_eq!(ok.peers.len(), 1);
|
|
|
|
assert_eq!(ok.peers[0], first_peer_id);
|
2019-11-28 16:12:02 +01:00
|
|
|
return Poll::Ready(());
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
// Ignore any other event.
|
2020-01-07 11:57:00 +01:00
|
|
|
Poll::Ready(Some(_)) => (),
|
2019-11-28 16:12:02 +01:00
|
|
|
e @ Poll::Ready(_) => panic!("Unexpected return value: {:?}", e),
|
|
|
|
Poll::Pending => break,
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
Poll::Pending
|
|
|
|
})
|
|
|
|
)
|
2019-03-18 18:20:57 +01:00
|
|
|
}
|
2019-06-04 14:44:24 +03:00
|
|
|
|
|
|
|
#[test]
|
2019-07-03 16:16:25 +02:00
|
|
|
fn get_record_not_found() {
|
2020-04-17 19:57:35 +02:00
|
|
|
let mut swarms = build_nodes(3);
|
2019-06-04 14:44:24 +03:00
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
let swarm_ids: Vec<_> = swarms.iter()
|
|
|
|
.map(|(_addr, swarm)| Swarm::local_peer_id(swarm))
|
|
|
|
.cloned()
|
|
|
|
.collect();
|
2019-06-04 14:44:24 +03:00
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
let (second, third) = (swarms[1].0.clone(), swarms[2].0.clone());
|
|
|
|
swarms[0].1.add_address(&swarm_ids[1], second);
|
|
|
|
swarms[1].1.add_address(&swarm_ids[2], third);
|
|
|
|
|
|
|
|
// Drop the swarm addresses.
|
|
|
|
let mut swarms = swarms.into_iter().map(|(_addr, swarm)| swarm).collect::<Vec<_>>();
|
2019-06-04 14:44:24 +03:00
|
|
|
|
2020-03-05 16:49:36 +01:00
|
|
|
let target_key = record::Key::from(random_multihash());
|
2020-05-16 10:43:09 +02:00
|
|
|
let qid = swarms[0].get_record(&target_key, Quorum::One);
|
2019-06-04 14:44:24 +03:00
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
block_on(
|
|
|
|
poll_fn(move |ctx| {
|
2019-06-04 14:44:24 +03:00
|
|
|
for swarm in &mut swarms {
|
|
|
|
loop {
|
2019-11-28 16:12:02 +01:00
|
|
|
match swarm.poll_next_unpin(ctx) {
|
2020-05-16 10:43:09 +02:00
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult {
|
|
|
|
id, result: QueryResult::GetRecord(Err(e)), ..
|
|
|
|
})) => {
|
|
|
|
assert_eq!(id, qid);
|
2019-07-03 16:16:25 +02:00
|
|
|
if let GetRecordError::NotFound { key, closest_peers, } = e {
|
2019-06-07 17:50:06 +03:00
|
|
|
assert_eq!(key, target_key);
|
2019-06-04 14:44:24 +03:00
|
|
|
assert_eq!(closest_peers.len(), 2);
|
|
|
|
assert!(closest_peers.contains(&swarm_ids[1]));
|
|
|
|
assert!(closest_peers.contains(&swarm_ids[2]));
|
2019-11-28 16:12:02 +01:00
|
|
|
return Poll::Ready(());
|
2019-06-04 14:44:24 +03:00
|
|
|
} else {
|
2019-07-03 16:16:25 +02:00
|
|
|
panic!("Unexpected error result: {:?}", e);
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
// Ignore any other event.
|
2020-01-07 11:57:00 +01:00
|
|
|
Poll::Ready(Some(_)) => (),
|
2019-11-28 16:12:02 +01:00
|
|
|
e @ Poll::Ready(_) => panic!("Unexpected return value: {:?}", e),
|
|
|
|
Poll::Pending => break,
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
Poll::Pending
|
|
|
|
})
|
|
|
|
)
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
|
|
|
|
2020-06-19 12:22:26 +02:00
|
|
|
/// A node joining a fully connected network via three (ALPHA_VALUE) bootnodes
|
|
|
|
/// should be able to put a record to the X closest nodes of the network where X
|
|
|
|
/// is equal to the configured replication factor.
|
2019-06-04 14:44:24 +03:00
|
|
|
#[test]
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
fn put_record() {
|
2020-06-19 12:22:26 +02:00
|
|
|
fn prop(records: Vec<Record>, seed: Seed) {
|
|
|
|
let mut rng = StdRng::from_seed(seed.0);
|
|
|
|
let replication_factor = NonZeroUsize::new(rng.gen_range(1, (K_VALUE.get() / 2) + 1)).unwrap();
|
|
|
|
// At least 4 nodes, 1 under test + 3 bootnodes.
|
|
|
|
let num_total = usize::max(4, replication_factor.get() * 2);
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
|
|
|
|
let mut config = KademliaConfig::default();
|
|
|
|
config.set_replication_factor(replication_factor);
|
2020-06-19 12:22:26 +02:00
|
|
|
if rng.gen() {
|
|
|
|
config.disjoint_query_paths(true);
|
|
|
|
}
|
2020-04-17 19:57:35 +02:00
|
|
|
|
|
|
|
let mut swarms = {
|
|
|
|
let mut fully_connected_swarms = build_fully_connected_nodes_with_config(
|
|
|
|
num_total - 1,
|
|
|
|
config.clone(),
|
|
|
|
);
|
|
|
|
|
|
|
|
let mut single_swarm = build_node_with_config(config);
|
2020-06-19 12:22:26 +02:00
|
|
|
// Connect `single_swarm` to three bootnodes.
|
|
|
|
for i in 0..3 {
|
|
|
|
single_swarm.1.add_address(
|
|
|
|
Swarm::local_peer_id(&fully_connected_swarms[i].1),
|
|
|
|
fully_connected_swarms[i].0.clone(),
|
|
|
|
);
|
|
|
|
}
|
2020-04-17 19:57:35 +02:00
|
|
|
|
|
|
|
let mut swarms = vec![single_swarm];
|
|
|
|
swarms.append(&mut fully_connected_swarms);
|
|
|
|
|
|
|
|
// Drop the swarm addresses.
|
|
|
|
swarms.into_iter().map(|(_addr, swarm)| swarm).collect::<Vec<_>>()
|
|
|
|
};
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
|
|
|
|
let records = records.into_iter()
|
|
|
|
.take(num_total)
|
|
|
|
.map(|mut r| {
|
|
|
|
// We don't want records to expire prematurely, as they would
|
|
|
|
// be removed from storage and no longer replicated, but we still
|
|
|
|
// want to check that an explicitly set expiration is preserved.
|
|
|
|
r.expires = r.expires.map(|t| t + Duration::from_secs(60));
|
|
|
|
(r.key.clone(), r)
|
|
|
|
})
|
|
|
|
.collect::<HashMap<_,_>>();
|
2019-06-04 14:44:24 +03:00
|
|
|
|
2020-05-16 10:43:09 +02:00
|
|
|
// Initiate put_record queries.
|
|
|
|
let mut qids = HashSet::new();
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
for r in records.values() {
|
2020-05-16 10:43:09 +02:00
|
|
|
let qid = swarms[0].put_record(r.clone(), Quorum::All).unwrap();
|
|
|
|
match swarms[0].query(&qid) {
|
|
|
|
Some(q) => match q.info() {
|
|
|
|
QueryInfo::PutRecord { phase, record, .. } => {
|
|
|
|
assert_eq!(phase, &PutRecordPhase::GetClosestPeers);
|
|
|
|
assert_eq!(record.key, r.key);
|
|
|
|
assert_eq!(record.value, r.value);
|
|
|
|
assert!(record.expires.is_some());
|
|
|
|
qids.insert(qid);
|
|
|
|
},
|
|
|
|
i => panic!("Unexpected query info: {:?}", i)
|
|
|
|
}
|
|
|
|
None => panic!("Query not found: {:?}", qid)
|
|
|
|
}
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
}
|
2019-06-04 14:44:24 +03:00
|
|
|
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
// Each test run republishes all records once.
|
|
|
|
let mut republished = false;
|
|
|
|
// The accumulated results for one round of publishing.
|
|
|
|
let mut results = Vec::new();
|
2019-06-04 14:44:24 +03:00
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
block_on(
|
|
|
|
poll_fn(move |ctx| loop {
|
|
|
|
// Poll all swarms until they are "Pending".
|
2019-06-04 14:44:24 +03:00
|
|
|
for swarm in &mut swarms {
|
|
|
|
loop {
|
2019-11-28 16:12:02 +01:00
|
|
|
match swarm.poll_next_unpin(ctx) {
|
2020-05-16 10:43:09 +02:00
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult {
|
|
|
|
id, result: QueryResult::PutRecord(res), stats
|
|
|
|
})) |
|
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult {
|
|
|
|
id, result: QueryResult::RepublishRecord(res), stats
|
|
|
|
})) => {
|
|
|
|
assert!(qids.is_empty() || qids.remove(&id));
|
|
|
|
assert!(stats.duration().is_some());
|
|
|
|
assert!(stats.num_successes() >= replication_factor.get() as u32);
|
|
|
|
assert!(stats.num_requests() >= stats.num_successes());
|
|
|
|
assert_eq!(stats.num_failures(), 0);
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
match res {
|
2020-04-17 19:57:35 +02:00
|
|
|
Err(e) => panic!("{:?}", e),
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
Ok(ok) => {
|
|
|
|
assert!(records.contains_key(&ok.key));
|
|
|
|
let record = swarm.store.get(&ok.key).unwrap();
|
|
|
|
results.push(record.into_owned());
|
|
|
|
}
|
|
|
|
}
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
// Ignore any other event.
|
2020-01-07 11:57:00 +01:00
|
|
|
Poll::Ready(Some(_)) => (),
|
2019-11-28 16:12:02 +01:00
|
|
|
e @ Poll::Ready(_) => panic!("Unexpected return value: {:?}", e),
|
|
|
|
Poll::Pending => break,
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
// All swarms are Pending and not enough results have been collected
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
// so far, thus wait to be polled again for further progress.
|
|
|
|
if results.len() != records.len() {
|
2019-11-28 16:12:02 +01:00
|
|
|
return Poll::Pending
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
}
|
2019-06-04 14:44:24 +03:00
|
|
|
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
// Consume the results, checking that each record was replicated
|
|
|
|
// correctly to the closest peers to the key.
|
|
|
|
while let Some(r) = results.pop() {
|
|
|
|
let expected = records.get(&r.key).unwrap();
|
|
|
|
|
|
|
|
assert_eq!(r.key, expected.key);
|
|
|
|
assert_eq!(r.value, expected.value);
|
|
|
|
assert_eq!(r.expires, expected.expires);
|
2020-04-17 19:57:35 +02:00
|
|
|
assert_eq!(r.publisher.as_ref(), Some(Swarm::local_peer_id(&swarms[0])));
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
|
|
|
|
let key = kbucket::Key::new(r.key.clone());
|
2020-04-17 19:57:35 +02:00
|
|
|
let mut expected = swarms.iter()
|
|
|
|
.skip(1)
|
|
|
|
.map(Swarm::local_peer_id)
|
|
|
|
.cloned()
|
|
|
|
.collect::<Vec<_>>();
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
expected.sort_by(|id1, id2|
|
2020-02-13 10:36:14 +01:00
|
|
|
kbucket::Key::new(id1.clone()).distance(&key).cmp(
|
|
|
|
&kbucket::Key::new(id2.clone()).distance(&key)));
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
|
|
|
|
let expected = expected
|
|
|
|
.into_iter()
|
|
|
|
.take(replication_factor.get())
|
|
|
|
.collect::<HashSet<_>>();
|
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
let actual = swarms.iter()
|
|
|
|
.skip(1)
|
|
|
|
.filter_map(|swarm|
|
|
|
|
if swarm.store.get(key.preimage()).is_some() {
|
|
|
|
Some(Swarm::local_peer_id(swarm).clone())
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
} else {
|
|
|
|
None
|
|
|
|
})
|
|
|
|
.collect::<HashSet<_>>();
|
2019-06-04 14:44:24 +03:00
|
|
|
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
assert_eq!(actual.len(), replication_factor.get());
|
2020-04-17 19:57:35 +02:00
|
|
|
|
|
|
|
let actual_not_expected = actual.difference(&expected)
|
|
|
|
.collect::<Vec<&PeerId>>();
|
|
|
|
assert!(
|
|
|
|
actual_not_expected.is_empty(),
|
|
|
|
"Did not expect records to be stored on nodes {:?}.",
|
|
|
|
actual_not_expected,
|
|
|
|
);
|
|
|
|
|
|
|
|
let expected_not_actual = expected.difference(&actual)
|
|
|
|
.collect::<Vec<&PeerId>>();
|
|
|
|
assert!(expected_not_actual.is_empty(),
|
|
|
|
"Expected record to be stored on nodes {:?}.",
|
|
|
|
expected_not_actual,
|
|
|
|
);
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
}
|
2019-07-03 16:16:25 +02:00
|
|
|
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
if republished {
|
|
|
|
assert_eq!(swarms[0].store.records().count(), records.len());
|
2019-08-07 09:27:50 +02:00
|
|
|
assert_eq!(swarms[0].queries.size(), 0);
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
for k in records.keys() {
|
|
|
|
swarms[0].store.remove(&k);
|
|
|
|
}
|
|
|
|
assert_eq!(swarms[0].store.records().count(), 0);
|
|
|
|
// All records have been republished, thus the test is complete.
|
2019-11-28 16:12:02 +01:00
|
|
|
return Poll::Ready(());
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
|
|
|
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
// Tell the replication job to republish asap.
|
|
|
|
swarms[0].put_record_job.as_mut().unwrap().asap(true);
|
|
|
|
republished = true;
|
|
|
|
})
|
|
|
|
)
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
2019-07-03 16:16:25 +02:00
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
QuickCheck::new().tests(3).quickcheck(prop as fn(_,_) -> _)
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2020-05-16 10:43:09 +02:00
|
|
|
fn get_record() {
|
2020-04-17 19:57:35 +02:00
|
|
|
let mut swarms = build_nodes(3);
|
2019-06-04 14:44:24 +03:00
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
// Let first peer know of second peer and second peer know of third peer.
|
|
|
|
for i in 0..2 {
|
|
|
|
let (peer_id, address) = (Swarm::local_peer_id(&swarms[i+1].1).clone(), swarms[i+1].0.clone());
|
|
|
|
swarms[i].1.add_address(&peer_id, address);
|
|
|
|
}
|
2019-06-04 14:44:24 +03:00
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
// Drop the swarm addresses.
|
|
|
|
let mut swarms = swarms.into_iter().map(|(_addr, swarm)| swarm).collect::<Vec<_>>();
|
2019-07-03 16:16:25 +02:00
|
|
|
|
2020-03-05 16:49:36 +01:00
|
|
|
let record = Record::new(random_multihash(), vec![4,5,6]);
|
2019-07-03 16:16:25 +02:00
|
|
|
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
swarms[1].store.put(record.clone()).unwrap();
|
2020-05-16 10:43:09 +02:00
|
|
|
let qid = swarms[0].get_record(&record.key, Quorum::One);
|
2019-07-03 16:16:25 +02:00
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
block_on(
|
|
|
|
poll_fn(move |ctx| {
|
2019-06-04 14:44:24 +03:00
|
|
|
for swarm in &mut swarms {
|
|
|
|
loop {
|
2019-11-28 16:12:02 +01:00
|
|
|
match swarm.poll_next_unpin(ctx) {
|
2020-05-16 10:43:09 +02:00
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult {
|
2020-06-19 12:22:26 +02:00
|
|
|
id,
|
|
|
|
result: QueryResult::GetRecord(Ok(GetRecordOk { records })),
|
|
|
|
..
|
2020-05-16 10:43:09 +02:00
|
|
|
})) => {
|
|
|
|
assert_eq!(id, qid);
|
2020-06-19 12:22:26 +02:00
|
|
|
assert_eq!(records.len(), 1);
|
|
|
|
assert_eq!(records.first().unwrap().record, record);
|
2019-11-28 16:12:02 +01:00
|
|
|
return Poll::Ready(());
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
// Ignore any other event.
|
2020-01-07 11:57:00 +01:00
|
|
|
Poll::Ready(Some(_)) => (),
|
2019-11-28 16:12:02 +01:00
|
|
|
e @ Poll::Ready(_) => panic!("Unexpected return value: {:?}", e),
|
|
|
|
Poll::Pending => break,
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
Poll::Pending
|
|
|
|
})
|
|
|
|
)
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
2020-05-16 10:43:09 +02:00
|
|
|
fn get_record_many() {
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
// TODO: Randomise
|
2019-07-03 16:16:25 +02:00
|
|
|
let num_nodes = 12;
|
2020-04-17 19:57:35 +02:00
|
|
|
let mut swarms = build_connected_nodes(num_nodes, 3).into_iter()
|
|
|
|
.map(|(_addr, swarm)| swarm)
|
|
|
|
.collect::<Vec<_>>();
|
2019-07-03 16:16:25 +02:00
|
|
|
let num_results = 10;
|
2019-06-04 14:44:24 +03:00
|
|
|
|
2020-03-05 16:49:36 +01:00
|
|
|
let record = Record::new(random_multihash(), vec![4,5,6]);
|
2019-06-04 14:44:24 +03:00
|
|
|
|
2019-07-03 16:16:25 +02:00
|
|
|
for i in 0 .. num_nodes {
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
swarms[i].store.put(record.clone()).unwrap();
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
|
|
|
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
let quorum = Quorum::N(NonZeroUsize::new(num_results).unwrap());
|
2020-05-16 10:43:09 +02:00
|
|
|
let qid = swarms[0].get_record(&record.key, quorum);
|
2019-06-04 14:44:24 +03:00
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
block_on(
|
|
|
|
poll_fn(move |ctx| {
|
2019-06-04 14:44:24 +03:00
|
|
|
for swarm in &mut swarms {
|
|
|
|
loop {
|
2019-11-28 16:12:02 +01:00
|
|
|
match swarm.poll_next_unpin(ctx) {
|
2020-05-16 10:43:09 +02:00
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult {
|
2020-06-19 12:22:26 +02:00
|
|
|
id,
|
|
|
|
result: QueryResult::GetRecord(Ok(GetRecordOk { records })),
|
|
|
|
..
|
2020-05-16 10:43:09 +02:00
|
|
|
})) => {
|
|
|
|
assert_eq!(id, qid);
|
2020-06-19 12:22:26 +02:00
|
|
|
assert_eq!(records.len(), num_results);
|
|
|
|
assert_eq!(records.first().unwrap().record, record);
|
2019-11-28 16:12:02 +01:00
|
|
|
return Poll::Ready(());
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
// Ignore any other event.
|
2020-01-07 11:57:00 +01:00
|
|
|
Poll::Ready(Some(_)) => (),
|
2019-11-28 16:12:02 +01:00
|
|
|
e @ Poll::Ready(_) => panic!("Unexpected return value: {:?}", e),
|
|
|
|
Poll::Pending => break,
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
Poll::Pending
|
|
|
|
})
|
|
|
|
)
|
2019-06-04 14:44:24 +03:00
|
|
|
}
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
|
2020-06-19 12:22:26 +02:00
|
|
|
/// A node joining a fully connected network via three (ALPHA_VALUE) bootnodes
|
|
|
|
/// should be able to add itself as a provider to the X closest nodes of the
|
|
|
|
/// network where X is equal to the configured replication factor.
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
#[test]
|
|
|
|
fn add_provider() {
|
2020-06-19 12:22:26 +02:00
|
|
|
fn prop(keys: Vec<record::Key>, seed: Seed) {
|
|
|
|
let mut rng = StdRng::from_seed(seed.0);
|
|
|
|
let replication_factor = NonZeroUsize::new(rng.gen_range(1, (K_VALUE.get() / 2) + 1)).unwrap();
|
|
|
|
// At least 4 nodes, 1 under test + 3 bootnodes.
|
|
|
|
let num_total = usize::max(4, replication_factor.get() * 2);
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
|
|
|
|
let mut config = KademliaConfig::default();
|
|
|
|
config.set_replication_factor(replication_factor);
|
2020-06-19 12:22:26 +02:00
|
|
|
if rng.gen() {
|
|
|
|
config.disjoint_query_paths(true);
|
|
|
|
}
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
let mut swarms = {
|
|
|
|
let mut fully_connected_swarms = build_fully_connected_nodes_with_config(
|
|
|
|
num_total - 1,
|
|
|
|
config.clone(),
|
|
|
|
);
|
|
|
|
|
|
|
|
let mut single_swarm = build_node_with_config(config);
|
2020-06-19 12:22:26 +02:00
|
|
|
// Connect `single_swarm` to three bootnodes.
|
|
|
|
for i in 0..3 {
|
|
|
|
single_swarm.1.add_address(
|
|
|
|
Swarm::local_peer_id(&fully_connected_swarms[i].1),
|
|
|
|
fully_connected_swarms[i].0.clone(),
|
|
|
|
);
|
|
|
|
}
|
2020-04-17 19:57:35 +02:00
|
|
|
|
|
|
|
let mut swarms = vec![single_swarm];
|
|
|
|
swarms.append(&mut fully_connected_swarms);
|
|
|
|
|
|
|
|
// Drop addresses before returning.
|
|
|
|
swarms.into_iter().map(|(_addr, swarm)| swarm).collect::<Vec<_>>()
|
|
|
|
};
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
|
|
|
|
let keys: HashSet<_> = keys.into_iter().take(num_total).collect();
|
|
|
|
|
|
|
|
// Each test run publishes all records twice.
|
|
|
|
let mut published = false;
|
|
|
|
let mut republished = false;
|
|
|
|
// The accumulated results for one round of publishing.
|
|
|
|
let mut results = Vec::new();
|
|
|
|
|
|
|
|
// Initiate the first round of publishing.
|
2020-05-16 10:43:09 +02:00
|
|
|
let mut qids = HashSet::new();
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
for k in &keys {
|
2020-05-16 10:43:09 +02:00
|
|
|
let qid = swarms[0].start_providing(k.clone()).unwrap();
|
|
|
|
qids.insert(qid);
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
}
|
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
block_on(
|
|
|
|
poll_fn(move |ctx| loop {
|
|
|
|
// Poll all swarms until they are "Pending".
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
for swarm in &mut swarms {
|
|
|
|
loop {
|
2019-11-28 16:12:02 +01:00
|
|
|
match swarm.poll_next_unpin(ctx) {
|
2020-05-16 10:43:09 +02:00
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult {
|
|
|
|
id, result: QueryResult::StartProviding(res), ..
|
|
|
|
})) |
|
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult {
|
|
|
|
id, result: QueryResult::RepublishProvider(res), ..
|
|
|
|
})) => {
|
|
|
|
assert!(qids.is_empty() || qids.remove(&id));
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
match res {
|
|
|
|
Err(e) => panic!(e),
|
|
|
|
Ok(ok) => {
|
2019-08-15 11:36:47 +02:00
|
|
|
assert!(keys.contains(&ok.key));
|
|
|
|
results.push(ok.key);
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
// Ignore any other event.
|
2020-01-07 11:57:00 +01:00
|
|
|
Poll::Ready(Some(_)) => (),
|
2019-11-28 16:12:02 +01:00
|
|
|
e @ Poll::Ready(_) => panic!("Unexpected return value: {:?}", e),
|
|
|
|
Poll::Pending => break,
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if results.len() == keys.len() {
|
|
|
|
// All requests have been sent for one round of publishing.
|
|
|
|
published = true
|
|
|
|
}
|
|
|
|
|
|
|
|
if !published {
|
|
|
|
// Still waiting for all requests to be sent for one round
|
|
|
|
// of publishing.
|
2019-11-28 16:12:02 +01:00
|
|
|
return Poll::Pending
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// A round of publishing is complete. Consume the results, checking that
|
|
|
|
// each key was published to the `replication_factor` closest peers.
|
|
|
|
while let Some(key) = results.pop() {
|
|
|
|
// Collect the nodes that have a provider record for `key`.
|
2020-04-17 19:57:35 +02:00
|
|
|
let actual = swarms.iter().skip(1)
|
|
|
|
.filter_map(|swarm|
|
|
|
|
if swarm.store.providers(&key).len() == 1 {
|
|
|
|
Some(Swarm::local_peer_id(&swarm).clone())
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
} else {
|
|
|
|
None
|
|
|
|
})
|
|
|
|
.collect::<HashSet<_>>();
|
|
|
|
|
|
|
|
if actual.len() != replication_factor.get() {
|
|
|
|
// Still waiting for some nodes to process the request.
|
|
|
|
results.push(key);
|
2019-11-28 16:12:02 +01:00
|
|
|
return Poll::Pending
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
}
|
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
let mut expected = swarms.iter()
|
|
|
|
.skip(1)
|
|
|
|
.map(Swarm::local_peer_id)
|
|
|
|
.cloned()
|
|
|
|
.collect::<Vec<_>>();
|
2019-08-15 11:36:47 +02:00
|
|
|
let kbucket_key = kbucket::Key::new(key);
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
expected.sort_by(|id1, id2|
|
2020-02-13 10:36:14 +01:00
|
|
|
kbucket::Key::new(id1.clone()).distance(&kbucket_key).cmp(
|
|
|
|
&kbucket::Key::new(id2.clone()).distance(&kbucket_key)));
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
|
|
|
|
let expected = expected
|
|
|
|
.into_iter()
|
|
|
|
.take(replication_factor.get())
|
|
|
|
.collect::<HashSet<_>>();
|
|
|
|
|
|
|
|
assert_eq!(actual, expected);
|
|
|
|
}
|
|
|
|
|
|
|
|
// One round of publishing is complete.
|
|
|
|
assert!(results.is_empty());
|
2020-04-17 19:57:35 +02:00
|
|
|
for swarm in &swarms {
|
|
|
|
assert_eq!(swarm.queries.size(), 0);
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if republished {
|
|
|
|
assert_eq!(swarms[0].store.provided().count(), keys.len());
|
|
|
|
for k in &keys {
|
2019-08-15 11:36:47 +02:00
|
|
|
swarms[0].stop_providing(&k);
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
}
|
|
|
|
assert_eq!(swarms[0].store.provided().count(), 0);
|
|
|
|
// All records have been republished, thus the test is complete.
|
2019-11-28 16:12:02 +01:00
|
|
|
return Poll::Ready(());
|
Kademlia: Somewhat complete the records implementation. (#1189)
* Somewhat complete the implementation of Kademlia records.
This commit relates to [libp2p-146] and [libp2p-1089].
* All records expire (by default, configurable).
* Provider records are also stored in the RecordStore, and the RecordStore
API extended.
* Background jobs for periodic (re-)replication and (re-)publication
of records. Regular (value-)records are subject to re-replication and
re-publication as per standard Kademlia. Provider records are only
subject to re-publication.
* For standard Kademlia value lookups (quorum = 1), the record is cached
at the closest peer to the key that did not return the value, as per
standard Kademlia.
* Expiration times of regular (value-)records is computed exponentially
inversely proportional to the number of nodes between the local node
and the closest node known to the key (beyond the k closest), as per
standard Kademlia.
The protobuf messages are extended with two fields: `ttl` and `publisher`
in order to implement the different semantics of re-replication (by any
of the k closest peers to the key, not affecting expiry) and re-publication
(by the original publisher, resetting the expiry). This is not done yet in
other libp2p Kademlia implementations, see e.g. [libp2p-go-323]. The new protobuf fields
have been given somewhat unique identifiers to prevent future collision.
Similarly, periodic re-publication of provider records does not seem to
be done yet in other implementations, see e.g. [libp2p-js-98].
[libp2p-146]: https://github.com/libp2p/rust-libp2p/issues/146
[libp2p-1089]: https://github.com/libp2p/rust-libp2p/issues/1089
[libp2p-go-323]: https://github.com/libp2p/go-libp2p-kad-dht/issues/323
[libp2p-js-98]: https://github.com/libp2p/js-libp2p-kad-dht/issues/98
* Tweak kad-ipfs example.
* Add missing files.
* Ensure new delays are polled immediately.
To ensure task notification, since `NotReady` is returned right after.
* Fix ipfs-kad example and use wasm_timer.
* Small cleanup.
* Incorporate some feedback.
* Adjustments after rebase.
* Distinguish events further.
In order for a user to easily distinguish the result of e.g.
a `put_record` operation from the result of a later republication,
different event constructors are used. Furthermore, for now,
re-replication and "caching" of records (at the closest peer to
the key that did not return a value during a successful lookup)
do not yield events for now as they are less interesting.
* Speed up tests for CI.
* Small refinements and more documentation.
* Guard a node against overriding records for which it considers
itself to be the publisher.
* Document the jobs module more extensively.
* More inline docs around removal of "unreachable" addresses.
* Remove wildcard re-exports.
* Use NonZeroUsize for the constants.
* Re-add method lost on merge.
* Add missing 'pub'.
* Further increase the timeout in the ipfs-kad example.
* Readd log dependency to libp2p-kad.
* Simplify RecordStore API slightly.
* Some more commentary.
* Change Addresses::remove to return Result<(),()>.
Change the semantics of `Addresses::remove` so that the error case
is unambiguous, instead of the success case. Use the `Result` for
clearer semantics to that effect.
* Add some documentation to .
2019-07-17 14:40:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Initiate the second round of publishing by telling the
|
|
|
|
// periodic provider job to run asap.
|
|
|
|
swarms[0].add_provider_job.as_mut().unwrap().asap();
|
|
|
|
published = false;
|
|
|
|
republished = true;
|
|
|
|
})
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
QuickCheck::new().tests(3).quickcheck(prop as fn(_,_))
|
|
|
|
}
|
|
|
|
|
2019-11-06 16:09:15 +01:00
|
|
|
/// User code should be able to start queries beyond the internal
|
|
|
|
/// query limit for background jobs. Originally this even produced an
|
|
|
|
/// arithmetic overflow, see https://github.com/libp2p/rust-libp2p/issues/1290.
|
|
|
|
#[test]
|
|
|
|
fn exceed_jobs_max_queries() {
|
2020-04-17 19:57:35 +02:00
|
|
|
let (_addr, mut swarm) = build_node();
|
2019-11-06 16:09:15 +01:00
|
|
|
let num = JOBS_MAX_QUERIES + 1;
|
|
|
|
for _ in 0 .. num {
|
2020-05-16 10:43:09 +02:00
|
|
|
swarm.get_closest_peers(PeerId::random());
|
2019-11-06 16:09:15 +01:00
|
|
|
}
|
|
|
|
|
2020-04-17 19:57:35 +02:00
|
|
|
assert_eq!(swarm.queries.size(), num);
|
2019-11-06 16:09:15 +01:00
|
|
|
|
2019-11-28 16:12:02 +01:00
|
|
|
block_on(
|
|
|
|
poll_fn(move |ctx| {
|
2019-11-06 16:09:15 +01:00
|
|
|
for _ in 0 .. num {
|
|
|
|
// There are no other nodes, so the queries finish instantly.
|
2020-04-17 19:57:35 +02:00
|
|
|
if let Poll::Ready(Some(e)) = swarm.poll_next_unpin(ctx) {
|
2020-05-16 10:43:09 +02:00
|
|
|
if let KademliaEvent::QueryResult {
|
|
|
|
result: QueryResult::GetClosestPeers(Ok(r)), ..
|
|
|
|
} = e {
|
|
|
|
assert!(r.peers.is_empty())
|
2019-11-06 16:09:15 +01:00
|
|
|
} else {
|
|
|
|
panic!("Unexpected event: {:?}", e)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
panic!("Expected event")
|
|
|
|
}
|
|
|
|
}
|
2019-11-28 16:12:02 +01:00
|
|
|
Poll::Ready(())
|
|
|
|
})
|
|
|
|
)
|
2019-11-06 16:09:15 +01:00
|
|
|
}
|
2020-03-18 10:15:33 +01:00
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn exp_decr_expiration_overflow() {
|
|
|
|
fn prop_no_panic(ttl: Duration, factor: u32) {
|
|
|
|
exp_decrease(ttl, factor);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Right shifting a u64 by >63 results in a panic.
|
|
|
|
prop_no_panic(KademliaConfig::default().record_ttl.unwrap(), 64);
|
|
|
|
|
|
|
|
quickcheck(prop_no_panic as fn(_, _))
|
|
|
|
}
|
2020-06-19 12:22:26 +02:00
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn disjoint_query_does_not_finish_before_all_paths_did() {
|
|
|
|
let mut config = KademliaConfig::default();
|
|
|
|
config.disjoint_query_paths(true);
|
|
|
|
// I.e. setting the amount disjoint paths to be explored to 2.
|
|
|
|
config.set_parallelism(NonZeroUsize::new(2).unwrap());
|
|
|
|
|
|
|
|
let mut alice = build_node_with_config(config);
|
|
|
|
let mut trudy = build_node(); // Trudy the intrudor, an adversary.
|
|
|
|
let mut bob = build_node();
|
|
|
|
|
|
|
|
let key = Key::new(&multihash::Sha2_256::digest(&thread_rng().gen::<[u8; 32]>()));
|
|
|
|
let record_bob = Record::new(key.clone(), b"bob".to_vec());
|
|
|
|
let record_trudy = Record::new(key.clone(), b"trudy".to_vec());
|
|
|
|
|
|
|
|
// Make `bob` and `trudy` aware of their version of the record searched by
|
|
|
|
// `alice`.
|
|
|
|
bob.1.store.put(record_bob.clone()).unwrap();
|
|
|
|
trudy.1.store.put(record_trudy.clone()).unwrap();
|
|
|
|
|
|
|
|
// Make `trudy` and `bob` known to `alice`.
|
|
|
|
alice.1.add_address(Swarm::local_peer_id(&trudy.1), trudy.0.clone());
|
|
|
|
alice.1.add_address(Swarm::local_peer_id(&bob.1), bob.0.clone());
|
|
|
|
|
|
|
|
// Drop the swarm addresses.
|
|
|
|
let (mut alice, mut bob, mut trudy) = (alice.1, bob.1, trudy.1);
|
|
|
|
|
|
|
|
// Have `alice` query the Dht for `key` with a quorum of 1.
|
|
|
|
alice.get_record(&key, Quorum::One);
|
|
|
|
|
|
|
|
// The default peer timeout is 10 seconds. Choosing 1 seconds here should
|
|
|
|
// give enough head room to prevent connections to `bob` to time out.
|
|
|
|
let mut before_timeout = Delay::new(Duration::from_secs(1));
|
|
|
|
|
|
|
|
// Poll only `alice` and `trudy` expecting `alice` not yet to return a query
|
|
|
|
// result as it is not able to connect to `bob` just yet.
|
|
|
|
block_on(
|
|
|
|
poll_fn(|ctx| {
|
|
|
|
for (i, swarm) in [&mut alice, &mut trudy].iter_mut().enumerate() {
|
|
|
|
loop {
|
|
|
|
match swarm.poll_next_unpin(ctx) {
|
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult{
|
|
|
|
result: QueryResult::GetRecord(result),
|
|
|
|
..
|
|
|
|
})) => {
|
|
|
|
if i != 0 {
|
|
|
|
panic!("Expected `QueryResult` from Alice.")
|
|
|
|
}
|
|
|
|
|
|
|
|
match result {
|
|
|
|
Ok(_) => panic!(
|
|
|
|
"Expected query not to finish until all \
|
|
|
|
disjoint paths have been explored.",
|
|
|
|
),
|
|
|
|
Err(e) => panic!("{:?}", e),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Ignore any other event.
|
|
|
|
Poll::Ready(Some(_)) => (),
|
|
|
|
Poll::Ready(None) => panic!("Expected Kademlia behaviour not to finish."),
|
|
|
|
Poll::Pending => break,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure not to wait until connections to `bob` time out.
|
|
|
|
before_timeout.poll_unpin(ctx)
|
|
|
|
})
|
|
|
|
);
|
|
|
|
|
|
|
|
// Make sure `alice` has exactly one query with `trudy`'s record only.
|
|
|
|
assert_eq!(1, alice.queries.iter().count());
|
|
|
|
alice.queries.iter().for_each(|q| {
|
|
|
|
match &q.inner.info {
|
|
|
|
QueryInfo::GetRecord{ records, .. } => {
|
|
|
|
assert_eq!(
|
|
|
|
*records,
|
|
|
|
vec![PeerRecord {
|
|
|
|
peer: Some(Swarm::local_peer_id(&trudy).clone()),
|
|
|
|
record: record_trudy.clone(),
|
|
|
|
}],
|
|
|
|
);
|
|
|
|
},
|
|
|
|
i @ _ => panic!("Unexpected query info: {:?}", i),
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
|
|
|
// Poll `alice` and `bob` expecting `alice` to return a successful query
|
|
|
|
// result as it is now able to explore the second disjoint path.
|
|
|
|
let records = block_on(
|
|
|
|
poll_fn(|ctx| {
|
|
|
|
for (i, swarm) in [&mut alice, &mut bob].iter_mut().enumerate() {
|
|
|
|
loop {
|
|
|
|
match swarm.poll_next_unpin(ctx) {
|
|
|
|
Poll::Ready(Some(KademliaEvent::QueryResult{
|
|
|
|
result: QueryResult::GetRecord(result),
|
|
|
|
..
|
|
|
|
})) => {
|
|
|
|
if i != 0 {
|
|
|
|
panic!("Expected `QueryResult` from Alice.")
|
|
|
|
}
|
|
|
|
|
|
|
|
match result {
|
|
|
|
Ok(ok) => return Poll::Ready(ok.records),
|
|
|
|
Err(e) => unreachable!("{:?}", e),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Ignore any other event.
|
|
|
|
Poll::Ready(Some(_)) => (),
|
|
|
|
Poll::Ready(None) => panic!(
|
|
|
|
"Expected Kademlia behaviour not to finish.",
|
|
|
|
),
|
|
|
|
Poll::Pending => break,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Poll::Pending
|
|
|
|
})
|
|
|
|
);
|
|
|
|
|
|
|
|
assert_eq!(2, records.len());
|
|
|
|
assert!(records.contains(&PeerRecord {
|
|
|
|
peer: Some(Swarm::local_peer_id(&bob).clone()),
|
|
|
|
record: record_bob,
|
|
|
|
}));
|
|
|
|
assert!(records.contains(&PeerRecord {
|
|
|
|
peer: Some(Swarm::local_peer_id(&trudy).clone()),
|
|
|
|
record: record_trudy,
|
|
|
|
}));
|
|
|
|
}
|