// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! Demonstrates how to perform Kademlia queries on the IPFS network.
//!
//! You can pass a base58-encoded peer ID to search for as a command-line argument.
//! If you don't pass one, a random peer ID is generated.
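//!
//! A typical invocation, assuming this file is built as the crate's `ipfs-kad` example:
//!
//!     cargo run --example ipfs-kad <base58 peer ID>
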
use async_std::task;
use futures::prelude::*;
use libp2p::{
    Swarm,
    PeerId,
    identity,
    build_development_transport
};
use libp2p::kad::{Kademlia, KademliaConfig, KademliaEvent, GetClosestPeersError};
use libp2p::kad::record::store::MemoryStore;
use std::{env, error::Error, time::Duration};

fn main() -> Result<(), Box<dyn Error>> {
    env_logger::init();

    // Create a random key for ourselves.
    let local_key = identity::Keypair::generate_ed25519();
    let local_peer_id = PeerId::from(local_key.public());
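    // The peer ID is derived from the keypair's public key; it is how other
    // nodes on the DHT will identify this node.
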
    // Set up an encrypted, DNS-enabled TCP transport over the Mplex protocol.
    let transport = build_development_transport(local_key)?;

    // Create a swarm to manage peers and events.
    let mut swarm = {
        // Create a Kademlia behaviour.
        let mut cfg = KademliaConfig::default();
        cfg.set_query_timeout(Duration::from_secs(5 * 60));
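        // A generous five-minute timeout, since a walk of the public IPFS DHT can
        // take many hops and a short timeout tends to abort queries prematurely.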
        let store = MemoryStore::new(local_peer_id.clone());
        let mut behaviour = Kademlia::with_config(local_peer_id.clone(), store, cfg);
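        // Note: `MemoryStore` keeps records only in memory, so nothing persists
        // across restarts; adequate for an example, but a real node would want a
        // persistent `RecordStore` implementation.
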
        // TODO: the /dnsaddr/ scheme is not supported (https://github.com/libp2p/rust-libp2p/issues/967)
        /*behaviour.add_address(&"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());
        behaviour.add_address(&"QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());
        behaviour.add_address(&"QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());
        behaviour.add_address(&"QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());*/

        // The only address that currently works.
        behaviour.add_address(&"QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ".parse()?, "/ip4/104.131.131.82/tcp/4001".parse()?);
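        // Without at least one known peer in its routing table, Kademlia would
        // have nowhere to send the query below, so this bootstrap entry is essential.
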
        // The following addresses always fail signature verification, possibly due to
        // RSA keys with < 2048 bits.
        // behaviour.add_address(&"QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM".parse().unwrap(), "/ip4/104.236.179.241/tcp/4001".parse().unwrap());
        // behaviour.add_address(&"QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu".parse().unwrap(), "/ip4/128.199.219.111/tcp/4001".parse().unwrap());
        // behaviour.add_address(&"QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64".parse().unwrap(), "/ip4/104.236.76.40/tcp/4001".parse().unwrap());
        // behaviour.add_address(&"QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd".parse().unwrap(), "/ip4/178.62.158.247/tcp/4001".parse().unwrap());

        // The following addresses are permanently unreachable:
        // Other(Other(A(Transport(A(Underlying(Os { code: 101, kind: Other, message: "Network is unreachable" }))))))
        // behaviour.add_address(&"QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM".parse().unwrap(), "/ip6/2604:a880:1:20::203:d001/tcp/4001".parse().unwrap());
        // behaviour.add_address(&"QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu".parse().unwrap(), "/ip6/2400:6180:0:d0::151:6001/tcp/4001".parse().unwrap());
        // behaviour.add_address(&"QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64".parse().unwrap(), "/ip6/2604:a880:800:10::4a:5001/tcp/4001".parse().unwrap());
        // behaviour.add_address(&"QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd".parse().unwrap(), "/ip6/2a03:b0c0:0:1010::23:1001/tcp/4001".parse().unwrap());

        Swarm::new(transport, behaviour, local_peer_id)
    };

    // Order Kademlia to search for a peer.
    let to_search: PeerId = if let Some(peer_id) = env::args().nth(1) {
        peer_id.parse()?
    } else {
        identity::Keypair::generate_ed25519().public().into()
    };
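    // A freshly generated key will almost surely not belong to a live node, so in
    // that case the query simply reports whichever peers are closest to the random ID.
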
    println!("Searching for the closest peers to {:?}", to_search);
    swarm.get_closest_peers(to_search);
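    // `get_closest_peers` merely starts the query; the result arrives later as a
    // `KademliaEvent::GetClosestPeersResult` when the swarm is polled below.
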
    // Kick it off!
    task::block_on(async move {
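        // Drive the swarm until the closest-peers query concludes, then stop;
        // all other events emitted by the swarm are ignored.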
        while let Some(event) = swarm.try_next().await? {
            if let KademliaEvent::GetClosestPeersResult(result) = event {
                match result {
                    Ok(ok) =>
                        if !ok.peers.is_empty() {
                            println!("Query finished with closest peers: {:#?}", ok.peers)
                        } else {
                            // The example is considered failed as there
                            // should always be at least 1 reachable peer.
                            println!("Query finished with no closest peers.")
                        }
                    Err(GetClosestPeersError::Timeout { peers, .. }) =>
                        if !peers.is_empty() {
                            println!("Query timed out with closest peers: {:#?}", peers)
                        } else {
                            // The example is considered failed as there
                            // should always be at least 1 reachable peer.
                            println!("Query timed out with no closest peers.");
                        }
                };

                break;
            }
        }

        Ok(())
    })
}