Mirror of https://github.com/fluencelabs/rust-libp2p
Synced 2025-04-25 19:02:13 +00:00
Commit c6a6f0a6ad
.github/workflows/ci.yml | 6
@@ -38,7 +38,7 @@ jobs:
     container:
       image: rust
     env:
-      CC: clang-9
+      CC: clang-10
     steps:
       - uses: actions/checkout@v1
       - name: Install Rust
@@ -50,9 +50,9 @@ jobs:
       - name: Install a recent version of clang
         run: |
           wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -
-          echo "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" >> /etc/apt/sources.list
+          echo "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-10 main" >> /etc/apt/sources.list
           apt-get update
-          apt-get install -y clang-9
+          apt-get install -y clang-10
       - name: Install CMake
         run: apt-get install -y cmake
       - name: Cache cargo registry
CHANGELOG.md | 54
@@ -1,10 +1,64 @@
 # Version ???
 
+
+# Version 0.19.0 (2020-05-18)
+
+- `libp2p-core`, `libp2p-swarm`: Added support for multiple dialing
+  attempts per peer, with a configurable limit.
+  [PR 1506](https://github.com/libp2p/rust-libp2p/pull/1506)
+
+- `libp2p-core`: `PeerId`s that use the identity hashing will now be properly
+  displayed using the string representation of an identity multihash, rather
+  than the canonical SHA 256 representation.
+  [PR 1576](https://github.com/libp2p/rust-libp2p/pull/1576)
+
+- `libp2p-core`: Updated to multihash 0.11.0.
+  [PR 1566](https://github.com/libp2p/rust-libp2p/pull/1566)
+
+- `libp2p-core`: Make the number of events buffered to/from tasks configurable.
+  [PR 1574](https://github.com/libp2p/rust-libp2p/pull/1574)
+
+- `libp2p-dns`, `parity-multiaddr`: Added support for the `/dns` multiaddr
+  protocol. Additionally, the `multiaddr::from_url` function will now use
+  `/dns` instead of `/dns4`.
+  [PR 1575](https://github.com/libp2p/rust-libp2p/pull/1575)
+
+- `libp2p-noise`: Added the `X25519Spec` protocol suite which uses
+  libp2p-noise-spec compliant signatures on static keys as well as the
+  `/noise` protocol upgrade, hence providing a libp2p-noise-spec compliant
+  `XX` handshake. `IK` and `IX` are still supported with `X25519Spec`
+  though not guaranteed to be interoperable with other libp2p
+  implementations as these handshake patterns are not currently
+  included in the libp2p-noise-spec. The `X25519Spec` implementation
+  will eventually replace the current `X25519` implementation, with
+  the former being removed. To upgrade without interruptions, you may
+  temporarily include `NoiseConfig`s for both implementations as
+  alternatives in your transport upgrade pipeline.
+
 - `libp2p-kad`: Consider fixed (K_VALUE) amount of peers at closest query
   initialization. Unless `KademliaConfig::set_replication_factor` is used change
   has no effect.
   [PR 1536](https://github.com/libp2p/rust-libp2p/pull/1536)
 
+- `libp2p-kad`: Provide more insight into, and control of, the execution of
+  queries. All query results are now wrapped in `KademliaEvent::QueryResult`.
+  As a side-effect of these changes and for as long as the record storage
+  API is not asynchronous, local storage errors on `put_record` are reported
+  synchronously in a `Result`, instead of being reported asynchronously by
+  an event.
+  [PR 1567](https://github.com/libp2p/rust-libp2p/pull/1567)
+
+- `libp2p-tcp`, `libp2p`: Made the `libp2p-tcp/async-std` feature flag
+  disabled by default, and split the `libp2p/tcp` feature in two:
+  `tcp-async-std` and `tcp-tokio`. `tcp-async-std` is still enabled by default.
+  [PR 1471](https://github.com/libp2p/rust-libp2p/pull/1471)
+
+- `libp2p-tcp`: On listeners started with an IPv6 multi-address the socket
+  option `IPV6_V6ONLY` is set to true. Instead of relying on IPv4-mapped IPv6
+  address support, two listeners can be started if IPv4 and IPv6 should both
+  be supported. IPv4 listener addresses are not affected by this change.
+  [PR 1555](https://github.com/libp2p/rust-libp2p/pull/1555)
+
 # Version 0.18.1 (2020-04-17)
 
 - `libp2p-swarm`: Make sure inject_dial_failure is called in all situations.
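The `libp2p-noise` entry above recommends temporarily accepting both the legacy `X25519` and the new spec-compliant `X25519Spec` handshakes during a rollout. A minimal sketch of what that could look like in a transport upgrade pipeline, assuming the libp2p 0.19 `noise::Keypair`, `NoiseConfig::xx` and `SelectUpgrade` APIs; this is an illustration, not code from the commit:

```rust
use libp2p::core::upgrade::SelectUpgrade;
use libp2p::{identity, noise};

fn main() -> Result<(), noise::NoiseError> {
    let id_keys = identity::Keypair::generate_ed25519();

    // New libp2p-noise-spec compliant suite (preferred).
    let spec_keys = noise::Keypair::<noise::X25519Spec>::new().into_authentic(&id_keys)?;
    // Legacy suite, kept temporarily for interoperability with older peers.
    let legacy_keys = noise::Keypair::<noise::X25519>::new().into_authentic(&id_keys)?;

    // Offer both XX handshakes as alternatives; the first one the remote
    // supports wins. This would be passed to `transport.upgrade(..).authenticate(..)`.
    let _noise_upgrade = SelectUpgrade::new(
        noise::NoiseConfig::xx(spec_keys).into_authenticated(),
        noise::NoiseConfig::xx(legacy_keys).into_authenticated(),
    );
    Ok(())
}
```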
Cargo.toml | 60
@@ -2,7 +2,7 @@
 name = "libp2p"
 edition = "2018"
 description = "Peer-to-peer networking library"
-version = "0.18.1"
+version = "0.19.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -25,7 +25,7 @@ default = [
     "pnet",
     "secio",
     "secp256k1",
-    "tcp",
+    "tcp-async-std",
     "uds",
     "wasm-ext",
     "websocket",
@@ -44,7 +44,8 @@ ping = ["libp2p-ping"]
 plaintext = ["libp2p-plaintext"]
 pnet = ["libp2p-pnet"]
 secio = ["libp2p-secio"]
-tcp = ["libp2p-tcp"]
+tcp-async-std = ["libp2p-tcp", "libp2p-tcp/async-std"]
+tcp-tokio = ["libp2p-tcp", "libp2p-tcp/tokio"]
 uds = ["libp2p-uds"]
 wasm-ext = ["libp2p-wasm-ext"]
 websocket = ["libp2p-websocket"]
@@ -54,36 +55,36 @@ secp256k1 = ["libp2p-core/secp256k1", "libp2p-secio/secp256k1"]
 [dependencies]
 bytes = "0.5"
 futures = "0.3.1"
-multiaddr = { package = "parity-multiaddr", version = "0.8.0", path = "misc/multiaddr" }
+multiaddr = { package = "parity-multiaddr", version = "0.9.0", path = "misc/multiaddr" }
-multihash = "0.10"
+multihash = "0.11.0"
 lazy_static = "1.2"
-libp2p-mplex = { version = "0.18.0", path = "muxers/mplex", optional = true }
+libp2p-mplex = { version = "0.19.0", path = "muxers/mplex", optional = true }
-libp2p-identify = { version = "0.18.0", path = "protocols/identify", optional = true }
+libp2p-identify = { version = "0.19.0", path = "protocols/identify", optional = true }
-libp2p-kad = { version = "0.18.0", path = "protocols/kad", optional = true }
+libp2p-kad = { version = "0.19.0", path = "protocols/kad", optional = true }
-libp2p-floodsub = { version = "0.18.0", path = "protocols/floodsub", optional = true }
+libp2p-floodsub = { version = "0.19.0", path = "protocols/floodsub", optional = true }
-libp2p-gossipsub = { version = "0.18.0", path = "./protocols/gossipsub", optional = true }
+libp2p-gossipsub = { version = "0.19.0", path = "./protocols/gossipsub", optional = true }
-libp2p-ping = { version = "0.18.0", path = "protocols/ping", optional = true }
+libp2p-ping = { version = "0.19.0", path = "protocols/ping", optional = true }
-libp2p-plaintext = { version = "0.18.0", path = "protocols/plaintext", optional = true }
+libp2p-plaintext = { version = "0.19.0", path = "protocols/plaintext", optional = true }
-libp2p-pnet = { version = "0.18.0", path = "protocols/pnet", optional = true }
+libp2p-pnet = { version = "0.19.0", path = "protocols/pnet", optional = true }
-libp2p-core = { version = "0.18.0", path = "core" }
+libp2p-core = { version = "0.19.0", path = "core" }
-libp2p-core-derive = { version = "0.18.0", path = "misc/core-derive" }
+libp2p-core-derive = { version = "0.19.0", path = "misc/core-derive" }
-libp2p-secio = { version = "0.18.0", path = "protocols/secio", default-features = false, optional = true }
+libp2p-secio = { version = "0.19.0", path = "protocols/secio", default-features = false, optional = true }
-libp2p-swarm = { version = "0.18.1", path = "swarm" }
+libp2p-swarm = { version = "0.19.0", path = "swarm" }
-libp2p-uds = { version = "0.18.0", path = "transports/uds", optional = true }
+libp2p-uds = { version = "0.19.0", path = "transports/uds", optional = true }
-libp2p-wasm-ext = { version = "0.18.0", path = "transports/wasm-ext", optional = true }
+libp2p-wasm-ext = { version = "0.19.0", path = "transports/wasm-ext", optional = true }
-libp2p-yamux = { version = "0.18.0", path = "muxers/yamux", optional = true }
+libp2p-yamux = { version = "0.19.0", path = "muxers/yamux", optional = true }
-libp2p-noise = { version = "0.18.0", path = "protocols/noise", optional = true }
+libp2p-noise = { version = "0.19.0", path = "protocols/noise", optional = true }
 parking_lot = "0.10.0"
 pin-project = "0.4.6"
 smallvec = "1.0"
 wasm-timer = "0.2.4"
 
 [target.'cfg(not(any(target_os = "emscripten", target_os = "unknown")))'.dependencies]
-libp2p-deflate = { version = "0.18.0", path = "protocols/deflate", optional = true }
+libp2p-deflate = { version = "0.19.0", path = "protocols/deflate", optional = true }
-libp2p-dns = { version = "0.18.0", path = "transports/dns", optional = true }
+libp2p-dns = { version = "0.19.0", path = "transports/dns", optional = true }
-libp2p-mdns = { version = "0.18.0", path = "protocols/mdns", optional = true }
+libp2p-mdns = { version = "0.19.0", path = "protocols/mdns", optional = true }
-libp2p-tcp = { version = "0.18.0", path = "transports/tcp", optional = true }
+libp2p-tcp = { version = "0.19.0", path = "transports/tcp", optional = true }
-libp2p-websocket = { version = "0.18.0", path = "transports/websocket", optional = true }
+libp2p-websocket = { version = "0.19.0", path = "transports/websocket", optional = true }
 
 [dev-dependencies]
 async-std = "1.0"
@@ -115,13 +116,12 @@ members = [
     "transports/wasm-ext"
 ]
 
-#[patch.'https://github.com/fluencelabs/rust-libp2p']
-[patch.'ssh://git@github.com/fluencelabs/rust-libp2p.git']
+[patch.'https://github.com/fluencelabs/rust-libp2p']
 libp2p-core = { path = "core" }
 
-# NOTE: this is required because trust-graph depends on libp2p-core = 0.17.0,
+# NOTE: this is required because trust-graph depends on libp2p-core,
 # and patches it to git only in patch section, which apparently isn't
 # visible via dependency mechanics (i.e., not visible HERE)
 [patch.crates-io]
 libp2p-core = { path = "core" }
-#trust-graph = { path = "../arqada/janus/trust-graph" }
+wasm-timer = { git = "https://github.com/fluencelabs/wasm-timer", branch = "saturating_duration_since" }
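The `tcp` feature split above means downstream users now pick an async runtime explicitly. A hedged sketch of what transport construction could look like under each feature, assuming the `TcpConfig` and `TokioTcpConfig` types exported by libp2p-tcp 0.19 (names outside the diff are illustrative):

```rust
// Async-std flavour, enabled by the default `tcp-async-std` feature.
fn async_std_tcp() -> libp2p::tcp::TcpConfig {
    libp2p::tcp::TcpConfig::new().nodelay(true)
}

// Tokio flavour, requires `features = ["tcp-tokio"]` on the `libp2p` dependency.
fn tokio_tcp() -> libp2p::tcp::TokioTcpConfig {
    libp2p::tcp::TokioTcpConfig::new().nodelay(true)
}
```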
core/Cargo.toml

@@ -2,7 +2,7 @@
 name = "libp2p-core"
 edition = "2018"
 description = "Core traits and structs of libp2p"
-version = "0.18.0"
+version = "0.19.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -20,8 +20,8 @@ futures-timer = "3"
 lazy_static = "1.2"
 libsecp256k1 = { version = "0.3.1", optional = true }
 log = "0.4"
-multiaddr = { package = "parity-multiaddr", version = "0.8.0", path = "../misc/multiaddr" }
+multiaddr = { package = "parity-multiaddr", version = "0.9.0", path = "../misc/multiaddr" }
-multihash = "0.10"
+multihash = "0.11.0"
 multistream-select = { version = "0.8.0", path = "../misc/multistream-select" }
 parking_lot = "0.10.0"
 pin-project = "0.4.6"
@@ -40,9 +40,9 @@ ring = { version = "0.16.9", features = ["alloc", "std"], default-features = fal
 
 [dev-dependencies]
 async-std = "1.0"
-libp2p-mplex = { version = "0.18.0", path = "../muxers/mplex" }
+libp2p-mplex = { version = "0.19.0", path = "../muxers/mplex" }
-libp2p-secio = { version = "0.18.0", path = "../protocols/secio" }
+libp2p-secio = { version = "0.19.0", path = "../protocols/secio" }
-libp2p-tcp = { version = "0.18.0", path = "../transports/tcp" }
+libp2p-tcp = { version = "0.19.0", path = "../transports/tcp" }
 quickcheck = "0.9.0"
 wasm-timer = "0.2"
 
core/src/connection/manager.rs

@@ -99,6 +99,9 @@ pub struct Manager<I, O, H, E, HE, C> {
     /// Next available identifier for a new connection / task.
     next_task_id: TaskId,
 
+    /// Size of the task command buffer (per task).
+    task_command_buffer_size: usize,
+
     /// The executor to use for running the background tasks. If `None`,
     /// the tasks are kept in `local_spawns` instead and polled on the
     /// current thread when the manager is polled for new events.
@@ -127,6 +130,32 @@ where
     }
 }
 
+/// Configuration options when creating a [`Manager`].
+///
+/// The default configuration specifies no dedicated task executor, a
+/// task event buffer size of 32, and a task command buffer size of 7.
+#[non_exhaustive]
+pub struct ManagerConfig {
+    /// Executor to use to spawn tasks.
+    pub executor: Option<Box<dyn Executor + Send>>,
+
+    /// Size of the task command buffer (per task).
+    pub task_command_buffer_size: usize,
+
+    /// Size of the task event buffer (for all tasks).
+    pub task_event_buffer_size: usize,
+}
+
+impl Default for ManagerConfig {
+    fn default() -> Self {
+        ManagerConfig {
+            executor: None,
+            task_event_buffer_size: 32,
+            task_command_buffer_size: 7,
+        }
+    }
+}
+
 /// Internal information about a running task.
 ///
 /// Contains the sender to deliver event messages to the task, and
@@ -196,12 +225,13 @@ pub enum Event<'a, I, O, H, TE, HE, C> {
 
 impl<I, O, H, TE, HE, C> Manager<I, O, H, TE, HE, C> {
     /// Creates a new connection manager.
-    pub fn new(executor: Option<Box<dyn Executor + Send>>) -> Self {
-        let (tx, rx) = mpsc::channel(1);
+    pub fn new(config: ManagerConfig) -> Self {
+        let (tx, rx) = mpsc::channel(config.task_event_buffer_size);
         Self {
             tasks: FnvHashMap::default(),
             next_task_id: TaskId(0),
-            executor,
+            task_command_buffer_size: config.task_command_buffer_size,
+            executor: config.executor,
             local_spawns: FuturesUnordered::new(),
             events_tx: tx,
             events_rx: rx
@@ -234,7 +264,7 @@ impl<I, O, H, TE, HE, C> Manager<I, O, H, TE, HE, C> {
         let task_id = self.next_task_id;
         self.next_task_id.0 += 1;
 
-        let (tx, rx) = mpsc::channel(4);
+        let (tx, rx) = mpsc::channel(self.task_command_buffer_size);
         self.tasks.insert(task_id, TaskInfo { sender: tx, state: TaskState::Pending });
 
         let task = Box::pin(Task::pending(task_id, self.events_tx.clone(), rx, future, handler));
@@ -269,7 +299,7 @@ impl<I, O, H, TE, HE, C> Manager<I, O, H, TE, HE, C> {
         let task_id = self.next_task_id;
         self.next_task_id.0 += 1;
 
-        let (tx, rx) = mpsc::channel(4);
+        let (tx, rx) = mpsc::channel(self.task_command_buffer_size);
         self.tasks.insert(task_id, TaskInfo {
             sender: tx, state: TaskState::Established(info)
        });
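The hunks above replace the hard-coded `mpsc::channel(1)` / `mpsc::channel(4)` buffers with the sizes carried in `ManagerConfig`. The back-pressure behaviour this relies on is that of `futures::channel::mpsc`: once a bounded channel's buffer is full, further sends must wait (or fail with `try_send`). A small self-contained sketch of that behaviour, independent of libp2p:

```rust
use futures::channel::mpsc;
use futures::executor::block_on;
use futures::stream::StreamExt;

fn main() {
    // A bounded channel comparable to a small task command buffer.
    let (mut tx, mut rx) = mpsc::channel::<u32>(2);

    // `futures` bounded channels guarantee buffer size + 1 slot per sender,
    // which is why the default command buffer of 7 corresponds to the
    // `notify_handler` buffer size of 8 mentioned in the NetworkConfig docs.
    tx.try_send(1).unwrap();
    tx.try_send(2).unwrap();
    tx.try_send(3).unwrap();

    // The buffer is now full: the sender is pushed back until the receiver drains it.
    assert!(tx.try_send(4).is_err());

    block_on(async {
        assert_eq!(rx.next().await, Some(1));
    });
    // One slot freed, sending succeeds again.
    tx.try_send(4).unwrap();
}
```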
core/src/connection/pool.rs

@@ -19,7 +19,6 @@
 // DEALINGS IN THE SOFTWARE.
 
 use crate::{
-    Executor,
     ConnectedPoint,
     PeerId,
     connection::{
@@ -36,7 +35,7 @@ use crate::{
         OutgoingInfo,
         Substream,
         PendingConnectionError,
-        manager::{self, Manager},
+        manager::{self, Manager, ManagerConfig},
     },
     muxing::StreamMuxer,
 };
@@ -175,13 +174,13 @@ where
     /// Creates a new empty `Pool`.
     pub fn new(
         local_id: TPeerId,
-        executor: Option<Box<dyn Executor + Send>>,
+        manager_config: ManagerConfig,
         limits: PoolLimits
     ) -> Self {
         Pool {
             local_id,
             limits,
-            manager: Manager::new(executor),
+            manager: Manager::new(manager_config),
             established: Default::default(),
             pending: Default::default(),
         }
@@ -225,12 +224,7 @@ where
     TPeerId: Clone + Send + 'static,
 {
     let endpoint = info.to_connected_point();
-    if let Some(limit) = self.limits.max_incoming {
-        let current = self.iter_pending_incoming().count();
-        if current >= limit {
-            return Err(ConnectionLimit { limit, current })
-        }
-    }
+    self.limits.check_incoming(|| self.iter_pending_incoming().count())?;
     Ok(self.add_pending(future, handler, endpoint, None))
 }
 
@@ -267,6 +261,11 @@ where
     TPeerId: Clone + Send + 'static,
 {
     self.limits.check_outgoing(|| self.iter_pending_outgoing().count())?;
 
+    if let Some(peer) = &info.peer_id {
+        self.limits.check_outgoing_per_peer(|| self.num_peer_outgoing(peer))?;
+    }
+
     let endpoint = info.to_connected_point();
     Ok(self.add_pending(future, handler, endpoint, info.peer_id.cloned()))
 }
@@ -465,6 +464,13 @@ where
         self.established.get(peer).map_or(0, |conns| conns.len())
     }
 
+    /// Counts the number of pending outgoing connections to the given peer.
+    pub fn num_peer_outgoing(&self, peer: &TPeerId) -> usize {
+        self.iter_pending_outgoing()
+            .filter(|info| info.peer_id == Some(peer))
+            .count()
+    }
+
     /// Returns an iterator over all established connections of `peer`.
     pub fn iter_peer_established<'a>(&'a mut self, peer: &TPeerId)
         -> EstablishedConnectionIter<'a,
@@ -837,6 +843,7 @@ pub struct PoolLimits {
     pub max_outgoing: Option<usize>,
     pub max_incoming: Option<usize>,
     pub max_established_per_peer: Option<usize>,
+    pub max_outgoing_per_peer: Option<usize>,
 }
 
 impl PoolLimits {
@@ -854,6 +861,20 @@ impl PoolLimits {
         Self::check(current, self.max_outgoing)
     }
 
+    fn check_incoming<F>(&self, current: F) -> Result<(), ConnectionLimit>
+    where
+        F: FnOnce() -> usize
+    {
+        Self::check(current, self.max_incoming)
+    }
+
+    fn check_outgoing_per_peer<F>(&self, current: F) -> Result<(), ConnectionLimit>
+    where
+        F: FnOnce() -> usize
+    {
+        Self::check(current, self.max_outgoing_per_peer)
+    }
+
     fn check<F>(current: F, limit: Option<usize>) -> Result<(), ConnectionLimit>
     where
         F: FnOnce() -> usize
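The new `check_incoming` / `check_outgoing_per_peer` helpers follow the existing `check_outgoing` pattern: the current count is passed as a closure so it is only computed when a limit is actually configured. A stripped-down, standalone sketch of that pattern; the names mirror the diff, but the body of `check` is not shown in the commit, so the version below is a plausible reconstruction rather than the actual implementation:

```rust
#[derive(Debug)]
struct ConnectionLimit {
    limit: usize,
    current: usize,
}

struct PoolLimits {
    max_outgoing_per_peer: Option<usize>,
}

impl PoolLimits {
    fn check_outgoing_per_peer<F>(&self, current: F) -> Result<(), ConnectionLimit>
    where
        F: FnOnce() -> usize,
    {
        Self::check(current, self.max_outgoing_per_peer)
    }

    // The closure is only invoked when a limit is set, so an unlimited pool
    // never pays for iterating and counting its pending connections.
    fn check<F>(current: F, limit: Option<usize>) -> Result<(), ConnectionLimit>
    where
        F: FnOnce() -> usize,
    {
        if let Some(limit) = limit {
            let current = current();
            if current >= limit {
                return Err(ConnectionLimit { limit, current });
            }
        }
        Ok(())
    }
}

fn main() {
    let limits = PoolLimits { max_outgoing_per_peer: Some(2) };
    assert!(limits.check_outgoing_per_peer(|| 1).is_ok());
    assert!(limits.check_outgoing_per_peer(|| 2).is_err());
}
```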
core/src/network.rs

@@ -43,6 +43,7 @@ use crate::{
         ListenersStream,
         PendingConnectionError,
         Substream,
+        manager::ManagerConfig,
         pool::{Pool, PoolEvent, PoolLimits},
     },
     muxing::StreamMuxer,
@@ -50,12 +51,14 @@ use crate::{
 };
 use fnv::{FnvHashMap};
 use futures::{prelude::*, future};
+use smallvec::SmallVec;
 use std::{
     collections::hash_map,
     convert::TryFrom as _,
     error,
     fmt,
     hash::Hash,
+    num::NonZeroUsize,
     pin::Pin,
     task::{Context, Poll},
 };
@@ -78,21 +81,17 @@ where
 
     /// The ongoing dialing attempts.
     ///
-    /// The `Network` enforces a single ongoing dialing attempt per peer,
-    /// even if multiple (established) connections per peer are allowed.
-    /// However, a single dialing attempt operates on a list of addresses
-    /// to connect to, which can be extended with new addresses while
-    /// the connection attempt is still in progress. Thereby each
-    /// dialing attempt is associated with a new connection and hence a new
-    /// connection ID.
+    /// There may be multiple ongoing dialing attempts to the same peer.
+    /// Each dialing attempt is associated with a new connection and hence
+    /// a new connection ID.
     ///
     /// > **Note**: `dialing` must be consistent with the pending outgoing
     /// > connections in `pool`. That is, for every entry in `dialing`
     /// > there must exist a pending outgoing connection in `pool` with
     /// > the same connection ID. This is ensured by the implementation of
     /// > `Network` (see `dial_peer_impl` and `on_connection_failed`)
-    /// > together with the implementation of `DialingConnection::abort`.
-    dialing: FnvHashMap<TPeerId, peer::DialingAttempt>,
+    /// > together with the implementation of `DialingAttempt::abort`.
+    dialing: FnvHashMap<TPeerId, SmallVec<[peer::DialingState; 10]>>,
 }
 
 impl<TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> fmt::Debug for
@@ -157,7 +156,7 @@ where
         Network {
             local_peer_id,
             listeners: ListenersStream::new(transport),
-            pool: Pool::new(pool_local_id, config.executor, config.pool_limits),
+            pool: Pool::new(pool_local_id, config.manager_config, config.pool_limits),
             dialing: Default::default(),
         }
     }
@@ -381,8 +380,11 @@ where
             Poll::Pending => return Poll::Pending,
             Poll::Ready(PoolEvent::ConnectionEstablished { connection, num_established }) => {
                 match self.dialing.entry(connection.peer_id().clone()) {
-                    hash_map::Entry::Occupied(e) if e.get().id == connection.id() => {
-                        e.remove();
+                    hash_map::Entry::Occupied(mut e) => {
+                        e.get_mut().retain(|s| s.current.0 != connection.id());
+                        if e.get().is_empty() {
+                            e.remove();
+                        }
                     },
                     _ => {}
                 }
@@ -453,7 +455,7 @@ fn dial_peer_impl<TMuxer, TInEvent, TOutEvent, THandler, TTrans, TConnInfo, TPee
     transport: TTrans,
     pool: &mut Pool<TInEvent, TOutEvent, THandler, TTrans::Error,
         <THandler::Handler as ConnectionHandler>::Error, TConnInfo, TPeerId>,
-    dialing: &mut FnvHashMap<TPeerId, peer::DialingAttempt>,
+    dialing: &mut FnvHashMap<TPeerId, SmallVec<[peer::DialingState; 10]>>,
     opts: DialingOpts<TPeerId, THandler>
 ) -> Result<ConnectionId, ConnectionLimit>
 where
@@ -489,14 +491,12 @@ where
     };
 
     if let Ok(id) = &result {
-        let former = dialing.insert(opts.peer,
-            peer::DialingAttempt {
-                id: *id,
-                current: opts.address,
-                next: opts.remaining,
+        dialing.entry(opts.peer).or_default().push(
+            peer::DialingState {
+                current: (*id, opts.address),
+                remaining: opts.remaining,
             },
         );
-        debug_assert!(former.is_none());
     }
 
     result
@@ -508,7 +508,7 @@ where
 /// If the failed connection attempt was a dialing attempt and there
 /// are more addresses to try, new `DialingOpts` are returned.
 fn on_connection_failed<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>(
-    dialing: &mut FnvHashMap<TPeerId, peer::DialingAttempt>,
+    dialing: &mut FnvHashMap<TPeerId, SmallVec<[peer::DialingState; 10]>>,
     id: ConnectionId,
     endpoint: ConnectedPoint,
     error: PendingConnectionError<TTrans::Error>,
@@ -521,27 +521,34 @@ where
     TPeerId: Eq + Hash + Clone,
 {
     // Check if the failed connection is associated with a dialing attempt.
-    // TODO: could be more optimal than iterating over everything
-    let dialing_peer = dialing.iter() // (1)
-        .find(|(_, a)| a.id == id)
-        .map(|(p, _)| p.clone());
+    let dialing_failed = dialing.iter_mut()
+        .find_map(|(peer, attempts)| {
+            if let Some(pos) = attempts.iter().position(|s| s.current.0 == id) {
+                let attempt = attempts.remove(pos);
+                let last = attempts.is_empty();
+                Some((peer.clone(), attempt, last))
+            } else {
+                None
+            }
+        });
 
-    if let Some(peer_id) = dialing_peer {
-        // A pending outgoing connection to a known peer failed.
-        let mut attempt = dialing.remove(&peer_id).expect("by (1)");
+    if let Some((peer_id, mut attempt, last)) = dialing_failed {
+        if last {
+            dialing.remove(&peer_id);
+        }
 
-        let num_remain = u32::try_from(attempt.next.len()).unwrap();
-        let failed_addr = attempt.current.clone();
+        let num_remain = u32::try_from(attempt.remaining.len()).unwrap();
+        let failed_addr = attempt.current.1.clone();
 
         let (opts, attempts_remaining) =
             if num_remain > 0 {
                 if let Some(handler) = handler {
-                    let next_attempt = attempt.next.remove(0);
+                    let next_attempt = attempt.remaining.remove(0);
                     let opts = DialingOpts {
                         peer: peer_id.clone(),
                         handler,
                         address: next_attempt,
-                        remaining: attempt.next
+                        remaining: attempt.remaining
                     };
                     (Some(opts), num_remain)
                 } else {
@@ -581,25 +588,33 @@ where
 /// Information about the network obtained by [`Network::info()`].
 #[derive(Clone, Debug)]
 pub struct NetworkInfo {
+    /// The total number of connected peers.
     pub num_peers: usize,
+    /// The total number of connections, both established and pending.
     pub num_connections: usize,
+    /// The total number of pending connections, both incoming and outgoing.
     pub num_connections_pending: usize,
+    /// The total number of established connections.
     pub num_connections_established: usize,
 }
 
 /// The (optional) configuration for a [`Network`].
 ///
-/// The default configuration specifies no dedicated task executor
-/// and no connection limits.
+/// The default configuration specifies no dedicated task executor, no
+/// connection limits, a connection event buffer size of 32, and a
+/// `notify_handler` buffer size of 8.
 #[derive(Default)]
 pub struct NetworkConfig {
-    executor: Option<Box<dyn Executor + Send>>,
+    /// Note that the `ManagerConfig`s task command buffer always provides
+    /// one "free" slot per task. Thus the given total `notify_handler_buffer_size`
+    /// exposed for configuration on the `Network` is reduced by one.
+    manager_config: ManagerConfig,
     pool_limits: PoolLimits,
 }
 
 impl NetworkConfig {
     pub fn set_executor(&mut self, e: Box<dyn Executor + Send>) -> &mut Self {
-        self.executor = Some(e);
+        self.manager_config.executor = Some(e);
         self
     }
 
@@ -616,7 +631,30 @@ impl NetworkConfig {
     }
 
     pub fn executor(&self) -> Option<&Box<dyn Executor + Send>> {
-        self.executor.as_ref()
+        self.manager_config.executor.as_ref()
+    }
+
+    /// Sets the maximum number of events sent to a connection's background task
+    /// that may be buffered, if the task cannot keep up with their consumption and
+    /// delivery to the connection handler.
+    ///
+    /// When the buffer for a particular connection is full, `notify_handler` will no
+    /// longer be able to deliver events to the associated `ConnectionHandler`,
+    /// thus exerting back-pressure on the connection and peer API.
+    pub fn set_notify_handler_buffer_size(&mut self, n: NonZeroUsize) -> &mut Self {
+        self.manager_config.task_command_buffer_size = n.get() - 1;
+        self
+    }
+
+    /// Sets the maximum number of buffered connection events (beyond a guaranteed
+    /// buffer of 1 event per connection).
+    ///
+    /// When the buffer is full, the background tasks of all connections will stall.
+    /// In this way, the consumers of network events exert back-pressure on
+    /// the network connection I/O.
+    pub fn set_connection_event_buffer_size(&mut self, n: usize) -> &mut Self {
+        self.manager_config.task_event_buffer_size = n;
+        self
     }
 
     pub fn set_incoming_limit(&mut self, n: usize) -> &mut Self {
@@ -633,4 +671,9 @@ impl NetworkConfig {
         self.pool_limits.max_established_per_peer = Some(n);
         self
     }
+
+    pub fn set_outgoing_per_peer_limit(&mut self, n: usize) -> &mut Self {
+        self.pool_limits.max_outgoing_per_peer = Some(n);
+        self
+    }
 }
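Taken together, the new `NetworkConfig` setters above expose the `ManagerConfig` buffers and the per-peer dialing limit. A brief usage sketch; the setter names come from the diff, while wiring the config into an actual `Network` is elided:

```rust
use std::num::NonZeroUsize;

use libp2p_core::network::NetworkConfig;

fn configured() -> NetworkConfig {
    let mut config = NetworkConfig::default();
    config
        // Up to 16 events may be queued for a single connection handler;
        // internally this becomes a task command buffer of 15 plus one free slot.
        .set_notify_handler_buffer_size(NonZeroUsize::new(16).expect("16 > 0"))
        // Up to 64 connection events buffered towards the `Network` consumer.
        .set_connection_event_buffer_size(64)
        // At most 4 concurrent dialing attempts per peer.
        .set_outgoing_per_peer_limit(4);
    config
}
```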
@ -35,8 +35,11 @@ use crate::{
|
|||||||
IntoConnectionHandler,
|
IntoConnectionHandler,
|
||||||
PendingConnection,
|
PendingConnection,
|
||||||
Substream,
|
Substream,
|
||||||
|
pool::Pool,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
use fnv::FnvHashMap;
|
||||||
|
use smallvec::SmallVec;
|
||||||
use std::{
|
use std::{
|
||||||
collections::hash_map,
|
collections::hash_map,
|
||||||
error,
|
error,
|
||||||
@ -47,6 +50,10 @@ use super::{Network, DialingOpts};
|
|||||||
|
|
||||||
/// The possible representations of a peer in a [`Network`], as
|
/// The possible representations of a peer in a [`Network`], as
|
||||||
/// seen by the local node.
|
/// seen by the local node.
|
||||||
|
///
|
||||||
|
/// > **Note**: In any state there may always be a pending incoming
|
||||||
|
/// > connection attempt from the peer, however, the remote identity
|
||||||
|
/// > of a peer is only known once a connection is fully established.
|
||||||
pub enum Peer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
|
pub enum Peer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
|
||||||
where
|
where
|
||||||
TTrans: Transport,
|
TTrans: Transport,
|
||||||
@ -63,10 +70,6 @@ where
|
|||||||
/// There exists no established connection to the peer and there is
|
/// There exists no established connection to the peer and there is
|
||||||
/// currently no ongoing dialing (i.e. outgoing connection) attempt
|
/// currently no ongoing dialing (i.e. outgoing connection) attempt
|
||||||
/// in progress.
|
/// in progress.
|
||||||
///
|
|
||||||
/// > **Note**: In this state there may always be a pending incoming
|
|
||||||
/// > connection attempt from the peer, however, the remote identity
|
|
||||||
/// > of a peer is only known once a connection is fully established.
|
|
||||||
Disconnected(DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>),
|
Disconnected(DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>),
|
||||||
|
|
||||||
/// The peer represents the local node.
|
/// The peer represents the local node.
|
||||||
@ -82,20 +85,20 @@ where
|
|||||||
TPeerId: fmt::Debug + Eq + Hash,
|
TPeerId: fmt::Debug + Eq + Hash,
|
||||||
{
|
{
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||||
match *self {
|
match self {
|
||||||
Peer::Connected(ConnectedPeer { ref peer_id, .. }) => {
|
Peer::Connected(p) => {
|
||||||
f.debug_struct("Connected")
|
f.debug_struct("Connected")
|
||||||
.field("peer_id", peer_id)
|
.field("peer", &p)
|
||||||
.finish()
|
.finish()
|
||||||
}
|
}
|
||||||
Peer::Dialing(DialingPeer { ref peer_id, .. } ) => {
|
Peer::Dialing(p) => {
|
||||||
f.debug_struct("DialingPeer")
|
f.debug_struct("Dialing")
|
||||||
.field("peer_id", peer_id)
|
.field("peer", &p)
|
||||||
.finish()
|
.finish()
|
||||||
}
|
}
|
||||||
Peer::Disconnected(DisconnectedPeer { ref peer_id, .. }) => {
|
Peer::Disconnected(p) => {
|
||||||
f.debug_struct("Disconnected")
|
f.debug_struct("Disconnected")
|
||||||
.field("peer_id", peer_id)
|
.field("peer", &p)
|
||||||
.finish()
|
.finish()
|
||||||
}
|
}
|
||||||
Peer::Local => {
|
Peer::Local => {
|
||||||
@ -164,12 +167,11 @@ where
|
|||||||
TTrans::Dial: Send + 'static,
|
TTrans::Dial: Send + 'static,
|
||||||
TMuxer: StreamMuxer + Send + Sync + 'static,
|
TMuxer: StreamMuxer + Send + Sync + 'static,
|
||||||
TMuxer::OutboundSubstream: Send,
|
TMuxer::OutboundSubstream: Send,
|
||||||
TMuxer::Substream: Send,
|
|
||||||
TInEvent: Send + 'static,
|
TInEvent: Send + 'static,
|
||||||
TOutEvent: Send + 'static,
|
TOutEvent: Send + 'static,
|
||||||
THandler: IntoConnectionHandler<TConnInfo> + Send + 'static,
|
THandler: IntoConnectionHandler<TConnInfo> + Send + 'static,
|
||||||
THandler::Handler: ConnectionHandler<Substream = Substream<TMuxer>, InEvent = TInEvent, OutEvent = TOutEvent> + Send + 'static,
|
THandler::Handler: ConnectionHandler<Substream = Substream<TMuxer>, InEvent = TInEvent, OutEvent = TOutEvent> + Send,
|
||||||
<THandler::Handler as ConnectionHandler>::OutboundOpenInfo: Send + 'static, // TODO: shouldn't be necessary
|
<THandler::Handler as ConnectionHandler>::OutboundOpenInfo: Send,
|
||||||
<THandler::Handler as ConnectionHandler>::Error: error::Error + Send + 'static,
|
<THandler::Handler as ConnectionHandler>::Error: error::Error + Send + 'static,
|
||||||
TConnInfo: fmt::Debug + ConnectionInfo<PeerId = TPeerId> + Send + 'static,
|
TConnInfo: fmt::Debug + ConnectionInfo<PeerId = TPeerId> + Send + 'static,
|
||||||
TPeerId: Eq + Hash + Clone + Send + 'static,
|
TPeerId: Eq + Hash + Clone + Send + 'static,
|
||||||
@ -208,7 +210,41 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Converts the peer into a `ConnectedPeer`, if there an established connection exists.
|
/// Initiates a new dialing attempt to this peer using the given addresses.
|
||||||
|
///
|
||||||
|
/// The connection ID of the first connection attempt, i.e. to `address`,
|
||||||
|
/// is returned, together with a [`DialingPeer`] for further use. The
|
||||||
|
/// `remaining` addresses are tried in order in subsequent connection
|
||||||
|
/// attempts in the context of the same dialing attempt, if the connection
|
||||||
|
/// attempt to the first address fails.
|
||||||
|
pub fn dial<I>(self, address: Multiaddr, remaining: I, handler: THandler)
|
||||||
|
-> Result<
|
||||||
|
(ConnectionId, DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>),
|
||||||
|
ConnectionLimit
|
||||||
|
>
|
||||||
|
where
|
||||||
|
I: IntoIterator<Item = Multiaddr>,
|
||||||
|
{
|
||||||
|
let (peer_id, network) = match self {
|
||||||
|
Peer::Connected(p) => (p.peer_id, p.network),
|
||||||
|
Peer::Dialing(p) => (p.peer_id, p.network),
|
||||||
|
Peer::Disconnected(p) => (p.peer_id, p.network),
|
||||||
|
Peer::Local => return Err(ConnectionLimit { current: 0, limit: 0 })
|
||||||
|
};
|
||||||
|
|
||||||
|
let id = network.dial_peer(DialingOpts {
|
||||||
|
peer: peer_id.clone(),
|
||||||
|
handler,
|
||||||
|
address,
|
||||||
|
remaining: remaining.into_iter().collect(),
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok((id, DialingPeer { network, peer_id }))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Converts the peer into a `ConnectedPeer`, if an established connection exists.
|
||||||
|
///
|
||||||
|
/// Succeeds if the there is at least one established connection to the peer.
|
||||||
pub fn into_connected(self) -> Option<
|
pub fn into_connected(self) -> Option<
|
||||||
ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
|
ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
|
||||||
> {
|
> {
|
||||||
@ -221,6 +257,8 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Converts the peer into a `DialingPeer`, if a dialing attempt exists.
|
/// Converts the peer into a `DialingPeer`, if a dialing attempt exists.
|
||||||
|
///
|
||||||
|
/// Succeeds if the there is at least one pending outgoing connection to the peer.
|
||||||
pub fn into_dialing(self) -> Option<
|
pub fn into_dialing(self) -> Option<
|
||||||
DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
|
DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
|
||||||
> {
|
> {
|
||||||
@ -245,7 +283,8 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// The representation of a peer in a [`Network`] to whom at least
|
/// The representation of a peer in a [`Network`] to whom at least
|
||||||
/// one established connection exists.
|
/// one established connection exists. There may also be additional ongoing
|
||||||
|
/// dialing attempts to the peer.
|
||||||
pub struct ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
|
pub struct ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
|
||||||
where
|
where
|
||||||
TTrans: Transport,
|
TTrans: Transport,
|
||||||
@ -267,57 +306,12 @@ where
|
|||||||
&self.peer_id
|
&self.peer_id
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Attempts to establish a new connection to this peer using the given addresses,
|
/// Returns the `ConnectedPeer` into a `Peer`.
|
||||||
/// if there is currently no ongoing dialing attempt.
|
pub fn into_peer(self) -> Peer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> {
|
||||||
///
|
Peer::Connected(self)
|
||||||
/// Existing established connections are not affected.
|
|
||||||
///
|
|
||||||
/// > **Note**: If there is an ongoing dialing attempt, a `DialingPeer`
|
|
||||||
/// > is returned with the given addresses and handler being ignored.
|
|
||||||
/// > You may want to check [`ConnectedPeer::is_dialing`] first.
|
|
||||||
pub fn connect<I, TMuxer>(self, address: Multiaddr, remaining: I, handler: THandler)
|
|
||||||
-> Result<DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
|
|
||||||
ConnectionLimit>
|
|
||||||
where
|
|
||||||
I: IntoIterator<Item = Multiaddr>,
|
|
||||||
THandler: Send + 'static,
|
|
||||||
THandler::Handler: Send,
|
|
||||||
<THandler::Handler as ConnectionHandler>::Error: error::Error + Send,
|
|
||||||
<THandler::Handler as ConnectionHandler>::OutboundOpenInfo: Send,
|
|
||||||
THandler::Handler: ConnectionHandler<Substream = Substream<TMuxer>, InEvent = TInEvent, OutEvent = TOutEvent> + Send,
|
|
||||||
TTrans: Transport<Output = (TConnInfo, TMuxer)> + Clone,
|
|
||||||
TTrans::Error: Send + 'static,
|
|
||||||
TTrans::Dial: Send + 'static,
|
|
||||||
TMuxer: StreamMuxer + Send + Sync + 'static,
|
|
||||||
TMuxer::OutboundSubstream: Send,
|
|
||||||
TMuxer::Substream: Send,
|
|
||||||
TConnInfo: fmt::Debug + Send + 'static,
|
|
||||||
TPeerId: Eq + Hash + Clone + Send + 'static,
|
|
||||||
TInEvent: Send + 'static,
|
|
||||||
TOutEvent: Send + 'static,
|
|
||||||
|
|
||||||
{
|
|
||||||
if self.network.dialing.contains_key(&self.peer_id) {
|
|
||||||
let peer = DialingPeer {
|
|
||||||
network: self.network,
|
|
||||||
peer_id: self.peer_id
|
|
||||||
};
|
|
||||||
Ok(peer)
|
|
||||||
} else {
|
|
||||||
self.network.dial_peer(DialingOpts {
|
|
||||||
peer: self.peer_id.clone(),
|
|
||||||
handler,
|
|
||||||
address,
|
|
||||||
remaining: remaining.into_iter().collect(),
|
|
||||||
})?;
|
|
||||||
Ok(DialingPeer {
|
|
||||||
network: self.network,
|
|
||||||
peer_id: self.peer_id,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Obtains an existing connection to the peer.
|
/// Obtains an established connection to the peer by ID.
|
||||||
pub fn connection<'b>(&'b mut self, id: ConnectionId)
|
pub fn connection<'b>(&'b mut self, id: ConnectionId)
|
||||||
-> Option<EstablishedConnection<'b, TInEvent, TConnInfo, TPeerId>>
|
-> Option<EstablishedConnection<'b, TInEvent, TConnInfo, TPeerId>>
|
||||||
{
|
{
|
||||||
@ -348,7 +342,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Gets an iterator over all established connections of the peer.
|
/// Gets an iterator over all established connections to the peer.
|
||||||
pub fn connections<'b>(&'b mut self) ->
|
pub fn connections<'b>(&'b mut self) ->
|
||||||
EstablishedConnectionIter<'b,
|
EstablishedConnectionIter<'b,
|
||||||
impl Iterator<Item = ConnectionId>,
|
impl Iterator<Item = ConnectionId>,
|
||||||
@ -386,11 +380,13 @@ impl<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> fmt::Debug f
|
|||||||
where
|
where
|
||||||
TTrans: Transport,
|
TTrans: Transport,
|
||||||
THandler: IntoConnectionHandler<TConnInfo>,
|
THandler: IntoConnectionHandler<TConnInfo>,
|
||||||
TPeerId: fmt::Debug,
|
TPeerId: Eq + Hash + fmt::Debug,
|
||||||
{
|
{
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||||
f.debug_struct("ConnectedPeer")
|
f.debug_struct("ConnectedPeer")
|
||||||
.field("peer_id", &self.peer_id)
|
.field("peer_id", &self.peer_id)
|
||||||
|
.field("established", &self.network.pool.iter_peer_established_info(&self.peer_id))
|
||||||
|
.field("attempts", &self.network.dialing.get(&self.peer_id))
|
||||||
.finish()
|
.finish()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -419,8 +415,16 @@ where
|
|||||||
&self.peer_id
|
&self.peer_id
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Disconnects from this peer, closing all pending connections.
|
/// Returns the `DialingPeer` into a `Peer`.
|
||||||
pub fn disconnect(self) -> DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> {
|
pub fn into_peer(self) -> Peer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> {
|
||||||
|
Peer::Dialing(self)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Disconnects from this peer, closing all established connections and
|
||||||
|
/// aborting all dialing attempts.
|
||||||
|
pub fn disconnect(self)
|
||||||
|
-> DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
|
||||||
|
{
|
||||||
self.network.disconnect(&self.peer_id);
|
self.network.disconnect(&self.peer_id);
|
||||||
DisconnectedPeer { network: self.network, peer_id: self.peer_id }
|
DisconnectedPeer { network: self.network, peer_id: self.peer_id }
|
||||||
}
|
}
|
||||||
@ -443,20 +447,50 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Obtains the connection that is currently being established.
|
/// Obtains a dialing attempt to the peer by connection ID of
|
||||||
pub fn connection<'b>(&'b mut self) -> DialingConnection<'b, TInEvent, TConnInfo, TPeerId> {
|
/// the current connection attempt.
|
||||||
let attempt = match self.network.dialing.entry(self.peer_id.clone()) {
|
pub fn attempt<'b>(&'b mut self, id: ConnectionId)
|
||||||
hash_map::Entry::Occupied(e) => e,
|
-> Option<DialingAttempt<'b, TInEvent, TConnInfo, TPeerId>>
|
||||||
_ => unreachable!("By `Peer::new` and the definition of `DialingPeer`.")
|
{
|
||||||
};
|
if let hash_map::Entry::Occupied(attempts) = self.network.dialing.entry(self.peer_id.clone()) {
|
||||||
|
if let Some(pos) = attempts.get().iter().position(|s| s.current.0 == id) {
|
||||||
let inner = self.network.pool
|
if let Some(inner) = self.network.pool.get_outgoing(id) {
|
||||||
.get_outgoing(attempt.get().id)
|
return Some(DialingAttempt { pos, inner, attempts })
|
||||||
.expect("By consistency of `network.pool` with `network.dialing`.");
|
}
|
||||||
|
}
|
||||||
DialingConnection {
|
|
||||||
inner, dialing: attempt, peer_id: &self.peer_id
|
|
||||||
}
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The number of ongoing dialing attempts, i.e. pending outgoing connections
|
||||||
|
/// to this peer.
|
||||||
|
pub fn num_attempts(&self) -> usize {
|
||||||
|
self.network.pool.num_peer_outgoing(&self.peer_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gets an iterator over all dialing (i.e. pending outgoing) connections to the peer.
|
||||||
|
pub fn attempts<'b>(&'b mut self)
|
||||||
|
-> DialingAttemptIter<'b,
|
||||||
|
TInEvent,
|
||||||
|
TOutEvent,
|
||||||
|
THandler,
|
||||||
|
TTrans::Error,
|
||||||
|
<THandler::Handler as ConnectionHandler>::Error,
|
||||||
|
TConnInfo,
|
||||||
|
TPeerId>
|
||||||
|
{
|
||||||
|
DialingAttemptIter::new(&self.peer_id, &mut self.network.pool, &mut self.network.dialing)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Obtains some dialing connection to the peer.
|
||||||
|
///
|
||||||
|
/// At least one dialing connection is guaranteed to exist on a `DialingPeer`.
|
||||||
|
pub fn some_attempt<'b>(&'b mut self)
|
||||||
|
-> DialingAttempt<'b, TInEvent, TConnInfo, TPeerId>
|
||||||
|
{
|
||||||
|
self.attempts()
|
||||||
|
.into_first()
|
||||||
|
.expect("By `Peer::new` and the definition of `DialingPeer`.")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -465,11 +499,13 @@ impl<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> fmt::Debug f
|
|||||||
where
|
where
|
||||||
TTrans: Transport,
|
TTrans: Transport,
|
||||||
THandler: IntoConnectionHandler<TConnInfo>,
|
THandler: IntoConnectionHandler<TConnInfo>,
|
||||||
TPeerId: fmt::Debug,
|
TPeerId: Eq + Hash + fmt::Debug,
|
||||||
{
|
{
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||||
f.debug_struct("DialingPeer")
|
f.debug_struct("DialingPeer")
|
||||||
.field("peer_id", &self.peer_id)
|
.field("peer_id", &self.peer_id)
|
||||||
|
.field("established", &self.network.pool.iter_peer_established_info(&self.peer_id))
|
||||||
|
.field("attempts", &self.network.dialing.get(&self.peer_id))
|
||||||
.finish()
|
.finish()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -500,46 +536,19 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, TTrans, TInEvent, TOutEvent, TMuxer, THandler, TConnInfo, TPeerId>
|
impl<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
|
||||||
DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
|
DisconnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>
|
||||||
where
|
where
|
||||||
TTrans: Transport<Output = (TConnInfo, TMuxer)> + Clone,
|
TTrans: Transport,
|
||||||
TTrans::Error: Send + 'static,
|
THandler: IntoConnectionHandler<TConnInfo>,
|
||||||
TTrans::Dial: Send + 'static,
|
|
||||||
TMuxer: StreamMuxer + Send + Sync + 'static,
|
|
||||||
TMuxer::OutboundSubstream: Send,
|
|
||||||
TMuxer::Substream: Send,
|
|
||||||
THandler: IntoConnectionHandler<TConnInfo> + Send + 'static,
|
|
||||||
THandler::Handler: ConnectionHandler<Substream = Substream<TMuxer>, InEvent = TInEvent, OutEvent = TOutEvent> + Send,
|
|
||||||
<THandler::Handler as ConnectionHandler>::OutboundOpenInfo: Send,
|
|
||||||
<THandler::Handler as ConnectionHandler>::Error: error::Error + Send,
|
|
||||||
TInEvent: Send + 'static,
|
|
||||||
TOutEvent: Send + 'static,
|
|
||||||
{
|
{
|
||||||
pub fn id(&self) -> &TPeerId {
|
pub fn id(&self) -> &TPeerId {
|
||||||
&self.peer_id
|
&self.peer_id
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Attempts to connect to this peer using the given addresses.
|
/// Returns the `DisconnectedPeer` into a `Peer`.
|
||||||
pub fn connect<TIter>(self, first: Multiaddr, rest: TIter, handler: THandler)
|
pub fn into_peer(self) -> Peer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId> {
|
||||||
-> Result<DialingPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
|
Peer::Disconnected(self)
|
||||||
ConnectionLimit>
|
|
||||||
where
|
|
||||||
TIter: IntoIterator<Item = Multiaddr>,
|
|
||||||
TConnInfo: fmt::Debug + ConnectionInfo<PeerId = TPeerId> + Send + 'static,
|
|
||||||
TPeerId: Eq + Hash + Clone + Send + 'static,
|
|
||||||
{
|
|
||||||
self.network.dial_peer(DialingOpts {
|
|
||||||
peer: self.peer_id.clone(),
|
|
||||||
handler,
|
|
||||||
address: first,
|
|
||||||
remaining: rest.into_iter().collect(),
|
|
||||||
})?;
|
|
||||||
Ok(DialingPeer {
|
|
||||||
network: self.network,
|
|
||||||
peer_id: self.peer_id,
|
|
||||||
})
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Moves the peer into a connected state by supplying an existing
|
/// Moves the peer into a connected state by supplying an existing
|
||||||
@@ -550,8 +559,7 @@ where
     /// # Panics
     ///
     /// Panics if `connected.peer_id()` does not identify the current peer.
-    ///
-    pub fn set_connected(
+    pub fn set_connected<TMuxer>(
         self,
         connected: Connected<TConnInfo>,
         connection: Connection<TMuxer, THandler::Handler>,
@@ -559,8 +567,17 @@ where
         ConnectedPeer<'a, TTrans, TInEvent, TOutEvent, THandler, TConnInfo, TPeerId>,
         ConnectionLimit
     > where
+        TInEvent: Send + 'static,
+        TOutEvent: Send + 'static,
+        THandler: Send + 'static,
+        TTrans::Error: Send + 'static,
+        THandler::Handler: ConnectionHandler<Substream = Substream<TMuxer>, InEvent = TInEvent, OutEvent = TOutEvent> + Send,
+        <THandler::Handler as ConnectionHandler>::OutboundOpenInfo: Send,
+        <THandler::Handler as ConnectionHandler>::Error: error::Error + Send + 'static,
         TConnInfo: fmt::Debug + ConnectionInfo<PeerId = TPeerId> + Clone + Send + 'static,
-        TPeerId: Eq + Hash + Clone + fmt::Debug,
+        TPeerId: Eq + Hash + Clone + Send + fmt::Debug + 'static,
+        TMuxer: StreamMuxer + Send + Sync + 'static,
+        TMuxer::OutboundSubstream: Send,
     {
         if connected.peer_id() != &self.peer_id {
             panic!("Invalid peer ID given: {:?}. Expected: {:?}", connected.peer_id(), self.peer_id)
@@ -574,71 +591,142 @@ where
     }
 }
 
-/// Attempt to reach a peer.
+/// The (internal) state of a `DialingAttempt`, tracking the
+/// current connection attempt as well as remaining addresses.
 #[derive(Debug, Clone)]
-pub(super) struct DialingAttempt {
-    /// Identifier for the reach attempt.
-    pub(super) id: ConnectionId,
-    /// Multiaddr currently being attempted.
-    pub(super) current: Multiaddr,
+pub(super) struct DialingState {
+    /// The ID and (remote) address of the current connection attempt.
+    pub(super) current: (ConnectionId, Multiaddr),
     /// Multiaddresses to attempt if the current one fails.
-    pub(super) next: Vec<Multiaddr>,
+    pub(super) remaining: Vec<Multiaddr>,
 }
 
-/// A `DialingConnection` is a [`PendingConnection`] where the local peer
-/// has the role of the dialer (i.e. initiator) and the (expected) remote
-/// peer ID is known.
-pub struct DialingConnection<'a, TInEvent, TConnInfo, TPeerId> {
-    peer_id: &'a TPeerId,
+/// A `DialingAttempt` is an ongoing outgoing connection attempt to
+/// a known / expected remote peer ID and a list of alternative addresses
+/// to connect to, if the current connection attempt fails.
+pub struct DialingAttempt<'a, TInEvent, TConnInfo, TPeerId> {
+    /// The underlying pending connection in the `Pool`.
     inner: PendingConnection<'a, TInEvent, TConnInfo, TPeerId>,
-    dialing: hash_map::OccupiedEntry<'a, TPeerId, DialingAttempt>,
+    /// All current dialing attempts of the peer.
+    attempts: hash_map::OccupiedEntry<'a, TPeerId, SmallVec<[DialingState; 10]>>,
+    /// The position of the current `DialingState` of this connection in the `attempts`.
+    pos: usize,
 }
 
 impl<'a, TInEvent, TConnInfo, TPeerId>
-    DialingConnection<'a, TInEvent, TConnInfo, TPeerId>
+    DialingAttempt<'a, TInEvent, TConnInfo, TPeerId>
 {
-    /// Returns the local connection ID.
+    /// Returns the ID of the current connection attempt.
     pub fn id(&self) -> ConnectionId {
         self.inner.id()
     }
 
-    /// Returns the (expected) peer ID of the ongoing connection attempt.
+    /// Returns the (expected) peer ID of the dialing attempt.
     pub fn peer_id(&self) -> &TPeerId {
-        self.peer_id
+        self.attempts.key()
     }
 
-    /// Returns information about this endpoint of the connection attempt.
-    pub fn endpoint(&self) -> &ConnectedPoint {
-        self.inner.endpoint()
-    }
-
-    /// Aborts the connection attempt.
-    pub fn abort(self)
-    where
-        TPeerId: Eq + Hash + Clone,
-    {
-        self.dialing.remove();
-        self.inner.abort();
-    }
-
-    /// Adds new candidate addresses to the end of the addresses used
-    /// in the ongoing dialing process.
-    ///
-    /// Duplicates are ignored.
-    pub fn add_addresses(&mut self, addrs: impl IntoIterator<Item = Multiaddr>) {
-        for addr in addrs {
-            self.add_address(addr);
-        }
-    }
-
-    /// Adds an address to the end of the addresses used in the ongoing
-    /// dialing process.
-    ///
-    /// Duplicates are ignored.
-    pub fn add_address(&mut self, addr: Multiaddr) {
-        if self.dialing.get().next.iter().all(|a| a != &addr) {
-            self.dialing.get_mut().next.push(addr);
-        }
-    }
-}
+    /// Returns the remote address of the current connection attempt.
+    pub fn address(&self) -> &Multiaddr {
+        match self.inner.endpoint() {
+            ConnectedPoint::Dialer { address } => address,
+            ConnectedPoint::Listener { .. } => unreachable!("by definition of a `DialingAttempt`.")
+        }
+    }
+
+    /// Aborts the dialing attempt.
+    ///
+    /// Aborting a dialing attempt involves aborting the current connection
+    /// attempt and dropping any remaining addresses given to [`Peer::dial()`]
+    /// that have not yet been tried.
+    pub fn abort(mut self) {
+        self.attempts.get_mut().remove(self.pos);
+        if self.attempts.get().is_empty() {
+            self.attempts.remove();
+        }
+        self.inner.abort();
+    }
+
+    /// Adds an address to the end of the remaining addresses
+    /// for this dialing attempt. Duplicates are ignored.
+    pub fn add_address(&mut self, addr: Multiaddr) {
+        let remaining = &mut self.attempts.get_mut()[self.pos].remaining;
+        if remaining.iter().all(|a| a != &addr) {
+            remaining.push(addr);
+        }
+    }
+}
+
+/// An iterator over the ongoing dialing attempts to a peer.
+pub struct DialingAttemptIter<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId> {
+    /// The peer whose dialing attempts are being iterated.
+    peer_id: &'a TPeerId,
+    /// The underlying connection `Pool` of the `Network`.
+    pool: &'a mut Pool<TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId>,
+    /// The state of all current dialing attempts known to the `Network`.
+    ///
+    /// Ownership of the `OccupiedEntry` for `peer_id` containing all attempts must be
+    /// borrowed to each `DialingAttempt` in order for it to remove the entry if the
+    /// last dialing attempt is aborted.
+    dialing: &'a mut FnvHashMap<TPeerId, SmallVec<[DialingState; 10]>>,
+    /// The current position of the iterator in `dialing[peer_id]`.
+    pos: usize,
+    /// The total number of elements in `dialing[peer_id]` to iterate over.
+    end: usize,
+}
+
+// Note: Ideally this would be an implementation of `Iterator`, but that
+// requires GATs (cf. https://github.com/rust-lang/rust/issues/44265) and
+// a different definition of `Iterator`.
+impl<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId>
+    DialingAttemptIter<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId>
+where
+    TConnInfo: ConnectionInfo<PeerId = TPeerId>,
+    TPeerId: Eq + Hash + Clone,
+{
+    fn new(
+        peer_id: &'a TPeerId,
+        pool: &'a mut Pool<TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId>,
+        dialing: &'a mut FnvHashMap<TPeerId, SmallVec<[DialingState; 10]>>,
+    ) -> Self {
+        let end = dialing.get(peer_id).map_or(0, |conns| conns.len());
+        Self { pos: 0, end, pool, dialing, peer_id }
+    }
+
+    /// Obtains the next dialing connection, if any.
+    pub fn next<'b>(&'b mut self) -> Option<DialingAttempt<'b, TInEvent, TConnInfo, TPeerId>> {
+        if self.pos == self.end {
+            return None
+        }
+
+        if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(self.peer_id.clone()) {
+            let id = attempts.get()[self.pos].current.0;
+            if let Some(inner) = self.pool.get_outgoing(id) {
+                let conn = DialingAttempt { pos: self.pos, inner, attempts };
+                self.pos += 1;
+                return Some(conn)
+            }
+        }
+
+        None
+    }
+
+    /// Returns the first connection, if any, consuming the iterator.
+    pub fn into_first<'b>(self)
+        -> Option<DialingAttempt<'b, TInEvent, TConnInfo, TPeerId>>
+    where 'a: 'b
+    {
+        if self.pos == self.end {
+            return None
+        }
+
+        if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(self.peer_id.clone()) {
+            let id = attempts.get()[self.pos].current.0;
+            if let Some(inner) = self.pool.get_outgoing(id) {
+                return Some(DialingAttempt { pos: self.pos, inner, attempts })
+            }
+        }
+
+        None
+    }
+}
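A short usage sketch (not part of the commit) of the multi-address dialing API introduced above: `Network::peer` and `Peer::dial` are taken from this diff, while the target peer, the addresses, and the `TestNetwork`/`TestHandler` helpers (borrowed from the tests further down in this commit) are assumptions made for illustration.

    use libp2p_core::{Multiaddr, PeerId};

    // Dial `target` on a primary address plus two fallbacks. A single call
    // creates one dialing attempt that walks through the remaining addresses
    // if the current one fails; calling `dial` again for the same peer adds
    // another concurrent attempt, up to the configured per-peer limit.
    fn dial_with_fallbacks(network: &mut TestNetwork, target: PeerId) {
        let first: Multiaddr = "/ip4/203.0.113.7/tcp/4001".parse().unwrap();
        let fallbacks: Vec<Multiaddr> = vec![
            "/ip4/203.0.113.7/tcp/4002".parse().unwrap(),
            "/dns/node.example.com/tcp/4001".parse().unwrap(),
        ];
        network.peer(target)
            .dial(first, fallbacks, TestHandler())
            .expect("below the configured connection limits");
    }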
@@ -77,10 +77,9 @@ impl PeerId {
         };
 
         let canonical = canonical_algorithm.map(|alg|
-            alg.hasher().expect("SHA2-256 hasher is always supported").digest(&key_enc));
+            alg.digest(&key_enc));
 
-        let multihash = hash_algorithm.hasher()
-            .expect("Identity and SHA-256 hasher are always supported").digest(&key_enc);
+        let multihash = hash_algorithm.digest(&key_enc);
 
         PeerId { multihash, canonical }
     }
@@ -148,7 +147,7 @@ impl PeerId {
 
     /// Returns a base-58 encoded string of this `PeerId`.
     pub fn to_base58(&self) -> String {
-        bs58::encode(self.borrow() as &[u8]).into_string()
+        bs58::encode(self.as_bytes()).into_string()
     }
 
     /// Checks whether the public key passed as parameter matches the public key of this `PeerId`.
@@ -158,7 +157,7 @@ impl PeerId {
     pub fn is_public_key(&self, public_key: &PublicKey) -> Option<bool> {
         let alg = self.multihash.algorithm();
         let enc = public_key.clone().into_protobuf_encoding();
-        Some(alg.hasher()?.digest(&enc) == self.multihash)
+        Some(alg.digest(&enc) == self.multihash)
     }
 
     /// Returns public key if it was inlined in this `PeerId`.
@@ -336,8 +335,8 @@ mod tests {
         }
 
         fn property(data: Vec<u8>, algo1: HashAlgo, algo2: HashAlgo) -> bool {
-            let a = PeerId::try_from(algo1.0.hasher().unwrap().digest(&data)).unwrap();
-            let b = PeerId::try_from(algo2.0.hasher().unwrap().digest(&data)).unwrap();
+            let a = PeerId::try_from(algo1.0.digest(&data)).unwrap();
+            let b = PeerId::try_from(algo2.0.digest(&data)).unwrap();
 
             if algo1 == algo2 || algo1.0 == Code::Identity || algo2.0 == Code::Identity {
                 a == b
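For orientation, a small sketch of the two hashing paths the code above now takes with multihash 0.11, where a digest is obtained directly from a `Code` instead of going through `hasher()`. The 42-byte inlining threshold is an assumption for illustration and is not part of this hunk.

    use multihash::{Code, Multihash};

    // Mirror of the idea behind the `PeerId` construction above: small encoded
    // public keys are kept verbatim behind the identity multihash, larger ones
    // are hashed with SHA-256 (the canonical form used for comparisons).
    fn peer_id_hash(key_enc: &[u8]) -> Multihash {
        if key_enc.len() <= 42 {
            Code::Identity.digest(key_enc)
        } else {
            Code::Sha2_256.digest(key_enc)
        }
    }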
@@ -37,9 +37,14 @@ use multiaddr::{Multiaddr, Protocol};
 /// If the first [`Protocol`]s are not IP addresses, `None` is returned instead.
 pub fn address_translation(original: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr> {
     original.replace(0, move |proto| match proto {
-        Protocol::Ip4(_) | Protocol::Ip6(_) | Protocol::Dns4(_) | Protocol::Dns6(_) => match observed.iter().next() {
+        Protocol::Ip4(_)
+        | Protocol::Ip6(_)
+        | Protocol::Dns(_)
+        | Protocol::Dns4(_)
+        | Protocol::Dns6(_) => match observed.iter().next() {
             x @ Some(Protocol::Ip4(_)) => x,
             x @ Some(Protocol::Ip6(_)) => x,
+            x @ Some(Protocol::Dns(_)) => x,
             x @ Some(Protocol::Dns4(_)) => x,
             x @ Some(Protocol::Dns6(_)) => x,
             _ => None,
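A small illustration of the updated behaviour (assuming the function is re-exported as `libp2p_core::address_translation` and the crates are on the versions bumped in this commit): only the first protocol of `original` is swapped for the observed one, so `/dns` names now translate just like IP addresses.

    use libp2p_core::{address_translation, Multiaddr};

    fn main() {
        let original: Multiaddr = "/dns/example.com/tcp/80".parse().unwrap();
        let observed: Multiaddr = "/ip4/203.0.113.7/tcp/4001".parse().unwrap();
        // The observed IP replaces the DNS name; the original port is kept.
        assert_eq!(
            address_translation(&original, &observed),
            Some("/ip4/203.0.113.7/tcp/80".parse().unwrap())
        );
    }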
@@ -22,47 +22,60 @@ mod util;
 
 use futures::prelude::*;
 use libp2p_core::identity;
-use libp2p_core::multiaddr::multiaddr;
+use libp2p_core::multiaddr::{multiaddr, Multiaddr};
 use libp2p_core::{
     Network,
     PeerId,
     Transport,
     connection::PendingConnectionError,
     muxing::StreamMuxerBox,
-    network::NetworkEvent,
+    network::{NetworkEvent, NetworkConfig},
+    transport,
     upgrade,
 };
+use rand::Rng;
 use rand::seq::SliceRandom;
-use std::{io, task::Poll};
+use std::{io, error::Error, fmt, task::Poll};
 use util::TestHandler;
 
-type TestNetwork<TTrans> = Network<TTrans, (), (), TestHandler>;
+type TestNetwork = Network<TestTransport, (), (), TestHandler>;
+type TestTransport = transport::boxed::Boxed<(PeerId, StreamMuxerBox), BoxError>;
+
+#[derive(Debug)]
+struct BoxError(Box<dyn Error + Send + 'static>);
+
+impl Error for BoxError {}
+
+impl fmt::Display for BoxError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Transport error: {}", self.0)
+    }
+}
+
+fn new_network(cfg: NetworkConfig) -> TestNetwork {
+    let local_key = identity::Keypair::generate_ed25519();
+    let local_public_key = local_key.public();
+    let transport: TestTransport = libp2p_tcp::TcpConfig::new()
+        .upgrade(upgrade::Version::V1)
+        .authenticate(libp2p_secio::SecioConfig::new(local_key))
+        .multiplex(libp2p_mplex::MplexConfig::new())
+        .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer)))
+        .and_then(|(peer, mplex), _| {
+            // Gracefully close the connection to allow protocol
+            // negotiation to complete.
+            util::CloseMuxer::new(mplex).map_ok(move |mplex| (peer, mplex))
+        })
+        .map_err(|e| BoxError(Box::new(e)))
+        .boxed();
+    TestNetwork::new(transport, local_public_key.into(), cfg)
+}
 
 #[test]
 fn deny_incoming_connec() {
     // Checks whether refusing an incoming connection on a swarm triggers the correct events.
 
-    let mut swarm1 = {
-        let local_key = identity::Keypair::generate_ed25519();
-        let local_public_key = local_key.public();
-        let transport = libp2p_tcp::TcpConfig::new()
-            .upgrade(upgrade::Version::V1)
-            .authenticate(libp2p_secio::SecioConfig::new(local_key))
-            .multiplex(libp2p_mplex::MplexConfig::new())
-            .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer)));
-        TestNetwork::new(transport, local_public_key.into(), Default::default())
-    };
-
-    let mut swarm2 = {
-        let local_key = identity::Keypair::generate_ed25519();
-        let local_public_key = local_key.public();
-        let transport = libp2p_tcp::TcpConfig::new()
-            .upgrade(upgrade::Version::V1)
-            .authenticate(libp2p_secio::SecioConfig::new(local_key))
-            .multiplex(libp2p_mplex::MplexConfig::new())
-            .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer)));
-        TestNetwork::new(transport, local_public_key.into(), Default::default())
-    };
+    let mut swarm1 = new_network(NetworkConfig::default());
+    let mut swarm2 = new_network(NetworkConfig::default());
 
     swarm1.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap();
 
@@ -76,8 +89,7 @@ fn deny_incoming_connec() {
 
     swarm2
         .peer(swarm1.local_peer_id().clone())
-        .into_disconnected().unwrap()
-        .connect(address.clone(), Vec::new(), TestHandler())
+        .dial(address.clone(), Vec::new(), TestHandler())
         .unwrap();
 
     async_std::task::block_on(future::poll_fn(|cx| -> Poll<Result<(), io::Error>> {
@@ -119,22 +131,7 @@ fn dial_self() {
     //
     // The last two can happen in any order.
 
-    let mut swarm = {
-        let local_key = identity::Keypair::generate_ed25519();
-        let local_public_key = local_key.public();
-        let transport = libp2p_tcp::TcpConfig::new()
-            .upgrade(upgrade::Version::V1)
-            .authenticate(libp2p_secio::SecioConfig::new(local_key))
-            .multiplex(libp2p_mplex::MplexConfig::new())
-            .and_then(|(peer, mplex), _| {
-                // Gracefully close the connection to allow protocol
-                // negotiation to complete.
-                util::CloseMuxer::new(mplex).map_ok(move |mplex| (peer, mplex))
-            })
-            .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer)));
-        TestNetwork::new(transport, local_public_key.into(), Default::default())
-    };
+    let mut swarm = new_network(NetworkConfig::default());
 
     swarm.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap();
 
     let (local_address, mut swarm) = async_std::task::block_on(
@@ -193,36 +190,16 @@ fn dial_self() {
 fn dial_self_by_id() {
     // Trying to dial self by passing the same `PeerId` shouldn't even be possible in the first
     // place.
-    let mut swarm = {
-        let local_key = identity::Keypair::generate_ed25519();
-        let local_public_key = local_key.public();
-        let transport = libp2p_tcp::TcpConfig::new()
-            .upgrade(upgrade::Version::V1)
-            .authenticate(libp2p_secio::SecioConfig::new(local_key))
-            .multiplex(libp2p_mplex::MplexConfig::new())
-            .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer)));
-        TestNetwork::new(transport, local_public_key.into(), Default::default())
-    };
+    let mut swarm = new_network(NetworkConfig::default());
 
     let peer_id = swarm.local_peer_id().clone();
     assert!(swarm.peer(peer_id).into_disconnected().is_none());
 }
 
 #[test]
 fn multiple_addresses_err() {
-    // Tries dialing multiple addresses, and makes sure there's one dialing error per addresses.
+    // Tries dialing multiple addresses, and makes sure there's one dialing error per address.
 
-    let mut swarm = {
-        let local_key = identity::Keypair::generate_ed25519();
-        let local_public_key = local_key.public();
-        let transport = libp2p_tcp::TcpConfig::new()
-            .upgrade(upgrade::Version::V1)
-            .authenticate(libp2p_secio::SecioConfig::new(local_key))
-            .multiplex(libp2p_mplex::MplexConfig::new())
-            .map(|(conn_info, muxer), _| (conn_info, StreamMuxerBox::new(muxer)));
-        TestNetwork::new(transport, local_public_key.into(), Default::default())
-    };
+    let mut swarm = new_network(NetworkConfig::default());
 
     let mut addresses = Vec::new();
     for _ in 0 .. 3 {
@@ -238,8 +215,7 @@ fn multiple_addresses_err() {
 
     let target = PeerId::random();
     swarm.peer(target.clone())
-        .into_disconnected().unwrap()
-        .connect(first, rest, TestHandler())
+        .dial(first, rest, TestHandler())
         .unwrap();
 
     async_std::task::block_on(future::poll_fn(|cx| -> Poll<Result<(), io::Error>> {
@@ -267,3 +243,44 @@ fn multiple_addresses_err() {
         }
     })).unwrap();
 }
+
+#[test]
+fn connection_limit() {
+    let outgoing_per_peer_limit = rand::thread_rng().gen_range(1, 10);
+    let outgoing_limit = 2 * outgoing_per_peer_limit;
+
+    let mut cfg = NetworkConfig::default();
+    cfg.set_outgoing_per_peer_limit(outgoing_per_peer_limit);
+    cfg.set_outgoing_limit(outgoing_limit);
+    let mut network = new_network(cfg);
+
+    let target = PeerId::random();
+    for _ in 0 .. outgoing_per_peer_limit {
+        network.peer(target.clone())
+            .dial(Multiaddr::empty(), Vec::new(), TestHandler())
+            .ok()
+            .expect("Unexpected connection limit.");
+    }
+
+    let err = network.peer(target)
+        .dial(Multiaddr::empty(), Vec::new(), TestHandler())
+        .expect_err("Unexpected dialing success.");
+
+    assert_eq!(err.current, outgoing_per_peer_limit);
+    assert_eq!(err.limit, outgoing_per_peer_limit);
+
+    let target2 = PeerId::random();
+    for _ in outgoing_per_peer_limit .. outgoing_limit {
+        network.peer(target2.clone())
+            .dial(Multiaddr::empty(), Vec::new(), TestHandler())
+            .ok()
+            .expect("Unexpected connection limit.");
+    }
+
+    let err = network.peer(target2)
+        .dial(Multiaddr::empty(), Vec::new(), TestHandler())
+        .expect_err("Unexpected dialing success.");
+
+    assert_eq!(err.current, outgoing_limit);
+    assert_eq!(err.limit, outgoing_limit);
+}
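Outside of the test, the same two knobs can be set up front when building a `Network`. A minimal sketch (the setter names are those used in the test above; the concrete numbers are arbitrary):

    use libp2p_core::network::NetworkConfig;

    fn limited_config() -> NetworkConfig {
        let mut cfg = NetworkConfig::default();
        // At most 3 concurrent dialing attempts per peer ...
        cfg.set_outgoing_per_peer_limit(3);
        // ... and at most 16 outgoing connection attempts overall.
        cfg.set_outgoing_limit(16);
        cfg
    }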
@ -31,19 +31,26 @@
|
|||||||
//
|
//
|
||||||
// use async_std::{io, task};
|
// use async_std::{io, task};
|
||||||
// use futures::prelude::*;
|
// use futures::prelude::*;
|
||||||
// use libp2p::identity::Keypair;
|
|
||||||
// use libp2p::kad::record::store::MemoryStore;
|
// use libp2p::kad::record::store::MemoryStore;
|
||||||
// use libp2p::kad::{record::Key, Kademlia, KademliaEvent, PutRecordOk, Quorum, Record};
|
// use libp2p::kad::{
|
||||||
|
// record::Key,
|
||||||
|
// Kademlia,
|
||||||
|
// KademliaEvent,
|
||||||
|
// PutRecordOk,
|
||||||
|
// QueryResult,
|
||||||
|
// Quorum,
|
||||||
|
// Record
|
||||||
|
// };
|
||||||
// use libp2p::{
|
// use libp2p::{
|
||||||
// build_development_transport, identity,
|
// NetworkBehaviour,
|
||||||
|
// PeerId,
|
||||||
|
// Swarm,
|
||||||
|
// build_development_transport,
|
||||||
|
// identity,
|
||||||
// mdns::{Mdns, MdnsEvent},
|
// mdns::{Mdns, MdnsEvent},
|
||||||
// swarm::NetworkBehaviourEventProcess,
|
// swarm::NetworkBehaviourEventProcess
|
||||||
// NetworkBehaviour, PeerId, Swarm,
|
|
||||||
// };
|
|
||||||
// use std::{
|
|
||||||
// error::Error,
|
|
||||||
// task::{Context, Poll},
|
|
||||||
// };
|
// };
|
||||||
|
// use std::{error::Error, task::{Context, Poll}};
|
||||||
//
|
//
|
||||||
// fn main() -> Result<(), Box<dyn Error>> {
|
// fn main() -> Result<(), Box<dyn Error>> {
|
||||||
// env_logger::init();
|
// env_logger::init();
|
||||||
@ -53,13 +60,13 @@
|
|||||||
// let local_peer_id = PeerId::from(local_key.public());
|
// let local_peer_id = PeerId::from(local_key.public());
|
||||||
//
|
//
|
||||||
// // Set up a an encrypted DNS-enabled TCP Transport over the Mplex protocol.
|
// // Set up a an encrypted DNS-enabled TCP Transport over the Mplex protocol.
|
||||||
// let transport = build_development_transport(local_key.clone())?;
|
// let transport = build_development_transport(local_key)?;
|
||||||
//
|
//
|
||||||
// // We create a custom network behaviour that combines Kademlia and mDNS.
|
// // We create a custom network behaviour that combines Kademlia and mDNS.
|
||||||
// #[derive(NetworkBehaviour)]
|
// #[derive(NetworkBehaviour)]
|
||||||
// struct MyBehaviour {
|
// struct MyBehaviour {
|
||||||
// kademlia: Kademlia<MemoryStore>,
|
// kademlia: Kademlia<MemoryStore>,
|
||||||
// mdns: Mdns,
|
// mdns: Mdns
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
// impl NetworkBehaviourEventProcess<MdnsEvent> for MyBehaviour {
|
// impl NetworkBehaviourEventProcess<MdnsEvent> for MyBehaviour {
|
||||||
@ -77,26 +84,29 @@
|
|||||||
// // Called when `kademlia` produces an event.
|
// // Called when `kademlia` produces an event.
|
||||||
// fn inject_event(&mut self, message: KademliaEvent) {
|
// fn inject_event(&mut self, message: KademliaEvent) {
|
||||||
// match message {
|
// match message {
|
||||||
// KademliaEvent::GetRecordResult(Ok(result)) => {
|
// KademliaEvent::QueryResult { result, .. } => match result {
|
||||||
// for Record { key, value, .. } in result.records {
|
// QueryResult::GetRecord(Ok(ok)) => {
|
||||||
|
// for Record { key, value, .. } in ok.records {
|
||||||
|
// println!(
|
||||||
|
// "Got record {:?} {:?}",
|
||||||
|
// std::str::from_utf8(key.as_ref()).unwrap(),
|
||||||
|
// std::str::from_utf8(&value).unwrap(),
|
||||||
|
// );
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// QueryResult::GetRecord(Err(err)) => {
|
||||||
|
// eprintln!("Failed to get record: {:?}", err);
|
||||||
|
// }
|
||||||
|
// QueryResult::PutRecord(Ok(PutRecordOk { key })) => {
|
||||||
// println!(
|
// println!(
|
||||||
// "Got record {:?} {:?}",
|
// "Successfully put record {:?}",
|
||||||
// std::str::from_utf8(key.as_ref()).unwrap(),
|
// std::str::from_utf8(key.as_ref()).unwrap()
|
||||||
// std::str::from_utf8(&value).unwrap(),
|
|
||||||
// );
|
// );
|
||||||
// }
|
// }
|
||||||
// }
|
// QueryResult::PutRecord(Err(err)) => {
|
||||||
// KademliaEvent::GetRecordResult(Err(err)) => {
|
// eprintln!("Failed to put record: {:?}", err);
|
||||||
// eprintln!("Failed to get record: {:?}", err);
|
// }
|
||||||
// }
|
// _ => {}
|
||||||
// KademliaEvent::PutRecordResult(Ok(PutRecordOk { key })) => {
|
|
||||||
// println!(
|
|
||||||
// "Successfully put record {:?}",
|
|
||||||
// std::str::from_utf8(key.as_ref()).unwrap()
|
|
||||||
// );
|
|
||||||
// }
|
|
||||||
// KademliaEvent::PutRecordResult(Err(err)) => {
|
|
||||||
// eprintln!("Failed to put record: {:?}", err);
|
|
||||||
// }
|
// }
|
||||||
// _ => {}
|
// _ => {}
|
||||||
// }
|
// }
|
||||||
@ -107,8 +117,7 @@
|
|||||||
// let mut swarm = {
|
// let mut swarm = {
|
||||||
// // Create a Kademlia behaviour.
|
// // Create a Kademlia behaviour.
|
||||||
// let store = MemoryStore::new(local_peer_id.clone());
|
// let store = MemoryStore::new(local_peer_id.clone());
|
||||||
// let Keypair::Ed25519(local_key) = local_key;
|
// let kademlia = Kademlia::new(local_peer_id.clone(), store);
|
||||||
// let kademlia = Kademlia::new(local_key, local_peer_id.clone(), store);
|
|
||||||
// let mdns = Mdns::new()?;
|
// let mdns = Mdns::new()?;
|
||||||
// let behaviour = MyBehaviour { kademlia, mdns };
|
// let behaviour = MyBehaviour { kademlia, mdns };
|
||||||
// Swarm::new(transport, behaviour, local_peer_id)
|
// Swarm::new(transport, behaviour, local_peer_id)
|
||||||
@ -127,7 +136,7 @@
|
|||||||
// match stdin.try_poll_next_unpin(cx)? {
|
// match stdin.try_poll_next_unpin(cx)? {
|
||||||
// Poll::Ready(Some(line)) => handle_input_line(&mut swarm.kademlia, line),
|
// Poll::Ready(Some(line)) => handle_input_line(&mut swarm.kademlia, line),
|
||||||
// Poll::Ready(None) => panic!("Stdin closed"),
|
// Poll::Ready(None) => panic!("Stdin closed"),
|
||||||
// Poll::Pending => break,
|
// Poll::Pending => break
|
||||||
// }
|
// }
|
||||||
// }
|
// }
|
||||||
// loop {
|
// loop {
|
||||||
@ -141,7 +150,7 @@
|
|||||||
// listening = true;
|
// listening = true;
|
||||||
// }
|
// }
|
||||||
// }
|
// }
|
||||||
// break;
|
// break
|
||||||
// }
|
// }
|
||||||
// }
|
// }
|
||||||
// }
|
// }
|
||||||
@ -190,7 +199,7 @@
|
|||||||
// publisher: None,
|
// publisher: None,
|
||||||
// expires: None,
|
// expires: None,
|
||||||
// };
|
// };
|
||||||
// kademlia.put_record(record, Quorum::One);
|
// kademlia.put_record(record, Quorum::One).expect("Failed to store record locally.");
|
||||||
// }
|
// }
|
||||||
// _ => {
|
// _ => {
|
||||||
// eprintln!("expected GET or PUT");
|
// eprintln!("expected GET or PUT");
|
||||||
|
@ -24,10 +24,20 @@
|
|||||||
// //! peer ID will be generated randomly.
|
// //! peer ID will be generated randomly.
|
||||||
//
|
//
|
||||||
// use async_std::task;
|
// use async_std::task;
|
||||||
// use libp2p::identity::Keypair;
|
// use libp2p::{
|
||||||
|
// Swarm,
|
||||||
|
// PeerId,
|
||||||
|
// identity,
|
||||||
|
// build_development_transport
|
||||||
|
// };
|
||||||
|
// use libp2p::kad::{
|
||||||
|
// Kademlia,
|
||||||
|
// KademliaConfig,
|
||||||
|
// KademliaEvent,
|
||||||
|
// GetClosestPeersError,
|
||||||
|
// QueryResult,
|
||||||
|
// };
|
||||||
// use libp2p::kad::record::store::MemoryStore;
|
// use libp2p::kad::record::store::MemoryStore;
|
||||||
// use libp2p::kad::{GetClosestPeersError, Kademlia, KademliaConfig, KademliaEvent};
|
|
||||||
// use libp2p::{build_development_transport, identity, PeerId, Swarm};
|
|
||||||
// use std::{env, error::Error, time::Duration};
|
// use std::{env, error::Error, time::Duration};
|
||||||
//
|
//
|
||||||
// fn main() -> Result<(), Box<dyn Error>> {
|
// fn main() -> Result<(), Box<dyn Error>> {
|
||||||
@ -38,7 +48,7 @@
|
|||||||
// let local_peer_id = PeerId::from(local_key.public());
|
// let local_peer_id = PeerId::from(local_key.public());
|
||||||
//
|
//
|
||||||
// // Set up a an encrypted DNS-enabled TCP Transport over the Mplex protocol
|
// // Set up a an encrypted DNS-enabled TCP Transport over the Mplex protocol
|
||||||
// let transport = build_development_transport(local_key.clone())?;
|
// let transport = build_development_transport(local_key)?;
|
||||||
//
|
//
|
||||||
// // Create a swarm to manage peers and events.
|
// // Create a swarm to manage peers and events.
|
||||||
// let mut swarm = {
|
// let mut swarm = {
|
||||||
@ -46,7 +56,7 @@
|
|||||||
// let mut cfg = KademliaConfig::default();
|
// let mut cfg = KademliaConfig::default();
|
||||||
// cfg.set_query_timeout(Duration::from_secs(5 * 60));
|
// cfg.set_query_timeout(Duration::from_secs(5 * 60));
|
||||||
// let store = MemoryStore::new(local_peer_id.clone());
|
// let store = MemoryStore::new(local_peer_id.clone());
|
||||||
// let mut behaviour = Kademlia::with_config(local_key, local_peer_id.clone(), store, cfg);
|
// let mut behaviour = Kademlia::with_config(local_peer_id.clone(), store, cfg);
|
||||||
//
|
//
|
||||||
// // TODO: the /dnsaddr/ scheme is not supported (https://github.com/libp2p/rust-libp2p/issues/967)
|
// // TODO: the /dnsaddr/ scheme is not supported (https://github.com/libp2p/rust-libp2p/issues/967)
|
||||||
// /*behaviour.add_address(&"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());
|
// /*behaviour.add_address(&"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());
|
||||||
@ -55,10 +65,7 @@
|
|||||||
// behaviour.add_address(&"QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());*/
|
// behaviour.add_address(&"QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt".parse().unwrap(), "/dnsaddr/bootstrap.libp2p.io".parse().unwrap());*/
|
||||||
//
|
//
|
||||||
// // The only address that currently works.
|
// // The only address that currently works.
|
||||||
// behaviour.add_address(
|
// behaviour.add_address(&"QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ".parse()?, "/ip4/104.131.131.82/tcp/4001".parse()?);
|
||||||
// &"QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ".parse()?,
|
|
||||||
// "/ip4/104.131.131.82/tcp/4001".parse()?,
|
|
||||||
// );
|
|
||||||
//
|
//
|
||||||
// // The following addresses always fail signature verification, possibly due to
|
// // The following addresses always fail signature verification, possibly due to
|
||||||
// // RSA keys with < 2048 bits.
|
// // RSA keys with < 2048 bits.
|
||||||
@ -90,9 +97,12 @@
|
|||||||
// task::block_on(async move {
|
// task::block_on(async move {
|
||||||
// loop {
|
// loop {
|
||||||
// let event = swarm.next().await;
|
// let event = swarm.next().await;
|
||||||
// if let KademliaEvent::GetClosestPeersResult(result) = event {
|
// if let KademliaEvent::QueryResult {
|
||||||
|
// result: QueryResult::GetClosestPeers(result),
|
||||||
|
// ..
|
||||||
|
// } = event {
|
||||||
// match result {
|
// match result {
|
||||||
// Ok(ok) => {
|
// Ok(ok) =>
|
||||||
// if !ok.peers.is_empty() {
|
// if !ok.peers.is_empty() {
|
||||||
// println!("Query finished with closest peers: {:#?}", ok.peers)
|
// println!("Query finished with closest peers: {:#?}", ok.peers)
|
||||||
// } else {
|
// } else {
|
||||||
@ -100,8 +110,7 @@
|
|||||||
// // should always be at least 1 reachable peer.
|
// // should always be at least 1 reachable peer.
|
||||||
// println!("Query finished with no closest peers.")
|
// println!("Query finished with no closest peers.")
|
||||||
// }
|
// }
|
||||||
// }
|
// Err(GetClosestPeersError::Timeout { peers, .. }) =>
|
||||||
// Err(GetClosestPeersError::Timeout { peers, .. }) => {
|
|
||||||
// if !peers.is_empty() {
|
// if !peers.is_empty() {
|
||||||
// println!("Query timed out with closest peers: {:#?}", peers)
|
// println!("Query timed out with closest peers: {:#?}", peers)
|
||||||
// } else {
|
// } else {
|
||||||
@ -109,7 +118,6 @@
|
|||||||
// // should always be at least 1 reachable peer.
|
// // should always be at least 1 reachable peer.
|
||||||
// println!("Query timed out with no closest peers.");
|
// println!("Query timed out with no closest peers.");
|
||||||
// }
|
// }
|
||||||
// }
|
|
||||||
// };
|
// };
|
||||||
//
|
//
|
||||||
// break;
|
// break;
|
||||||
@ -120,4 +128,4 @@
|
|||||||
// })
|
// })
|
||||||
// }
|
// }
|
||||||
|
|
||||||
fn main() {}
|
fn main() {}
|
@ -2,7 +2,7 @@
|
|||||||
name = "libp2p-core-derive"
|
name = "libp2p-core-derive"
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
description = "Procedural macros of libp2p-core"
|
description = "Procedural macros of libp2p-core"
|
||||||
version = "0.18.0"
|
version = "0.19.0"
|
||||||
authors = ["Parity Technologies <admin@parity.io>"]
|
authors = ["Parity Technologies <admin@parity.io>"]
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
repository = "https://github.com/libp2p/rust-libp2p"
|
repository = "https://github.com/libp2p/rust-libp2p"
|
||||||
@ -17,4 +17,4 @@ syn = { version = "1.0.8", default-features = false, features = ["clone-impls",
|
|||||||
quote = "1.0"
|
quote = "1.0"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
libp2p = { version = "0.18.0", path = "../.." }
|
libp2p = { version = "0.19.0", path = "../.." }
|
||||||
|
@ -288,7 +288,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
|
|||||||
quote!{ ev }
|
quote!{ ev }
|
||||||
};
|
};
|
||||||
|
|
||||||
for _ in 0 .. data_struct.fields.iter().filter(|f| !is_ignored(f)).count() - 1 - field_n {
|
for _ in 0 .. data_struct.fields.iter().filter(|f| !is_ignored(f)).count() - 1 - enum_n {
|
||||||
elem = quote!{ #either_ident::First(#elem) };
|
elem = quote!{ #either_ident::First(#elem) };
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -378,7 +378,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
|
|||||||
} else {
|
} else {
|
||||||
quote!{ event }
|
quote!{ event }
|
||||||
};
|
};
|
||||||
for _ in 0 .. data_struct.fields.iter().filter(|f| !is_ignored(f)).count() - 1 - field_n {
|
for _ in 0 .. data_struct.fields.iter().filter(|f| !is_ignored(f)).count() - 1 - enum_n {
|
||||||
wrapped_event = quote!{ #either_ident::First(#wrapped_event) };
|
wrapped_event = quote!{ #either_ident::First(#wrapped_event) };
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -109,6 +109,33 @@ fn three_fields() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn three_fields_non_last_ignored() {
|
||||||
|
#[allow(dead_code)]
|
||||||
|
#[derive(NetworkBehaviour)]
|
||||||
|
struct Foo {
|
||||||
|
ping: libp2p::ping::Ping,
|
||||||
|
#[behaviour(ignore)]
|
||||||
|
identify: String,
|
||||||
|
kad: libp2p::kad::Kademlia<libp2p::kad::record::store::MemoryStore>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl libp2p::swarm::NetworkBehaviourEventProcess<libp2p::ping::PingEvent> for Foo {
|
||||||
|
fn inject_event(&mut self, _: libp2p::ping::PingEvent) {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl libp2p::swarm::NetworkBehaviourEventProcess<libp2p::kad::KademliaEvent> for Foo {
|
||||||
|
fn inject_event(&mut self, _: libp2p::kad::KademliaEvent) {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(dead_code)]
|
||||||
|
fn foo() {
|
||||||
|
require_net_behaviour::<Foo>();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn custom_polling() {
|
fn custom_polling() {
|
||||||
#[allow(dead_code)]
|
#[allow(dead_code)]
|
||||||
|
@ -6,14 +6,14 @@ description = "Implementation of the multiaddr format"
|
|||||||
homepage = "https://github.com/libp2p/rust-libp2p"
|
homepage = "https://github.com/libp2p/rust-libp2p"
|
||||||
keywords = ["multiaddr", "ipfs"]
|
keywords = ["multiaddr", "ipfs"]
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
version = "0.8.0"
|
version = "0.9.0"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
arrayref = "0.3"
|
arrayref = "0.3"
|
||||||
bs58 = "0.3.0"
|
bs58 = "0.3.0"
|
||||||
byteorder = "1.3.1"
|
byteorder = "1.3.1"
|
||||||
data-encoding = "2.1"
|
data-encoding = "2.1"
|
||||||
multihash = "0.10"
|
multihash = "0.11.0"
|
||||||
percent-encoding = "2.1.0"
|
percent-encoding = "2.1.0"
|
||||||
serde = "1.0.70"
|
serde = "1.0.70"
|
||||||
static_assertions = "1.1"
|
static_assertions = "1.1"
|
||||||
|
@ -70,7 +70,7 @@ fn from_url_inner_http_ws(url: url::Url, lossy: bool) -> std::result::Result<Mul
|
|||||||
if let Ok(ip) = hostname.parse::<IpAddr>() {
|
if let Ok(ip) = hostname.parse::<IpAddr>() {
|
||||||
Protocol::from(ip)
|
Protocol::from(ip)
|
||||||
} else {
|
} else {
|
||||||
Protocol::Dns4(hostname.into())
|
Protocol::Dns(hostname.into())
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
return Err(FromUrlErr::BadUrl);
|
return Err(FromUrlErr::BadUrl);
|
||||||
@ -185,31 +185,31 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn dns_addr_ws() {
|
fn dns_addr_ws() {
|
||||||
let addr = from_url("ws://example.com").unwrap();
|
let addr = from_url("ws://example.com").unwrap();
|
||||||
assert_eq!(addr, "/dns4/example.com/tcp/80/ws".parse().unwrap());
|
assert_eq!(addr, "/dns/example.com/tcp/80/ws".parse().unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn dns_addr_http() {
|
fn dns_addr_http() {
|
||||||
let addr = from_url("http://example.com").unwrap();
|
let addr = from_url("http://example.com").unwrap();
|
||||||
assert_eq!(addr, "/dns4/example.com/tcp/80/http".parse().unwrap());
|
assert_eq!(addr, "/dns/example.com/tcp/80/http".parse().unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn dns_addr_wss() {
|
fn dns_addr_wss() {
|
||||||
let addr = from_url("wss://example.com").unwrap();
|
let addr = from_url("wss://example.com").unwrap();
|
||||||
assert_eq!(addr, "/dns4/example.com/tcp/443/wss".parse().unwrap());
|
assert_eq!(addr, "/dns/example.com/tcp/443/wss".parse().unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn dns_addr_https() {
|
fn dns_addr_https() {
|
||||||
let addr = from_url("https://example.com").unwrap();
|
let addr = from_url("https://example.com").unwrap();
|
||||||
assert_eq!(addr, "/dns4/example.com/tcp/443/https".parse().unwrap());
|
assert_eq!(addr, "/dns/example.com/tcp/443/https".parse().unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn bad_hostname() {
|
fn bad_hostname() {
|
||||||
let addr = from_url("wss://127.0.0.1x").unwrap();
|
let addr = from_url("wss://127.0.0.1x").unwrap();
|
||||||
assert_eq!(addr, "/dns4/127.0.0.1x/tcp/443/wss".parse().unwrap());
|
assert_eq!(addr, "/dns/127.0.0.1x/tcp/443/wss".parse().unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -223,7 +223,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn dns_and_port() {
|
fn dns_and_port() {
|
||||||
let addr = from_url("http://example.com:1000").unwrap();
|
let addr = from_url("http://example.com:1000").unwrap();
|
||||||
assert_eq!(addr, "/dns4/example.com/tcp/1000/http".parse().unwrap());
|
assert_eq!(addr, "/dns/example.com/tcp/1000/http".parse().unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
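From a caller's perspective the change above means URL host names now map to the address-family-agnostic `/dns` protocol rather than `/dns4`. A short sketch (assuming `from_url` stays exported at the crate root, as the tests suggest):

    use parity_multiaddr::{from_url, Multiaddr};

    fn main() {
        let addr: Multiaddr = from_url("https://example.com").unwrap();
        // Before this change the result was "/dns4/example.com/tcp/443/https".
        assert_eq!(addr, "/dns/example.com/tcp/443/https".parse().unwrap());
    }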
@ -17,6 +17,7 @@ use unsigned_varint::{encode, decode};
|
|||||||
use crate::onion_addr::Onion3Addr;
|
use crate::onion_addr::Onion3Addr;
|
||||||
|
|
||||||
const DCCP: u32 = 33;
|
const DCCP: u32 = 33;
|
||||||
|
const DNS: u32 = 53;
|
||||||
const DNS4: u32 = 54;
|
const DNS4: u32 = 54;
|
||||||
const DNS6: u32 = 55;
|
const DNS6: u32 = 55;
|
||||||
const DNSADDR: u32 = 56;
|
const DNSADDR: u32 = 56;
|
||||||
@ -66,6 +67,7 @@ const PATH_SEGMENT_ENCODE_SET: &percent_encoding::AsciiSet = &percent_encoding::
|
|||||||
#[derive(PartialEq, Eq, Clone, Debug)]
|
#[derive(PartialEq, Eq, Clone, Debug)]
|
||||||
pub enum Protocol<'a> {
|
pub enum Protocol<'a> {
|
||||||
Dccp(u16),
|
Dccp(u16),
|
||||||
|
Dns(Cow<'a, str>),
|
||||||
Dns4(Cow<'a, str>),
|
Dns4(Cow<'a, str>),
|
||||||
Dns6(Cow<'a, str>),
|
Dns6(Cow<'a, str>),
|
||||||
Dnsaddr(Cow<'a, str>),
|
Dnsaddr(Cow<'a, str>),
|
||||||
@ -125,6 +127,10 @@ impl<'a> Protocol<'a> {
|
|||||||
let s = iter.next().ok_or(Error::InvalidProtocolString)?;
|
let s = iter.next().ok_or(Error::InvalidProtocolString)?;
|
||||||
Ok(Protocol::Ip6(Ipv6Addr::from_str(s)?))
|
Ok(Protocol::Ip6(Ipv6Addr::from_str(s)?))
|
||||||
}
|
}
|
||||||
|
"dns" => {
|
||||||
|
let s = iter.next().ok_or(Error::InvalidProtocolString)?;
|
||||||
|
Ok(Protocol::Dns(Cow::Borrowed(s)))
|
||||||
|
}
|
||||||
"dns4" => {
|
"dns4" => {
|
||||||
let s = iter.next().ok_or(Error::InvalidProtocolString)?;
|
let s = iter.next().ok_or(Error::InvalidProtocolString)?;
|
||||||
Ok(Protocol::Dns4(Cow::Borrowed(s)))
|
Ok(Protocol::Dns4(Cow::Borrowed(s)))
|
||||||
@ -206,6 +212,11 @@ impl<'a> Protocol<'a> {
|
|||||||
let num = rdr.read_u16::<BigEndian>()?;
|
let num = rdr.read_u16::<BigEndian>()?;
|
||||||
Ok((Protocol::Dccp(num), rest))
|
Ok((Protocol::Dccp(num), rest))
|
||||||
}
|
}
|
||||||
|
DNS => {
|
||||||
|
let (n, input) = decode::usize(input)?;
|
||||||
|
let (data, rest) = split_at(n, input)?;
|
||||||
|
Ok((Protocol::Dns(Cow::Borrowed(str::from_utf8(data)?)), rest))
|
||||||
|
}
|
||||||
DNS4 => {
|
DNS4 => {
|
||||||
let (n, input) = decode::usize(input)?;
|
let (n, input) = decode::usize(input)?;
|
||||||
let (data, rest) = split_at(n, input)?;
|
let (data, rest) = split_at(n, input)?;
|
||||||
@ -345,6 +356,12 @@ impl<'a> Protocol<'a> {
|
|||||||
w.write_all(encode::u32(SCTP, &mut buf))?;
|
w.write_all(encode::u32(SCTP, &mut buf))?;
|
||||||
w.write_u16::<BigEndian>(*port)?
|
w.write_u16::<BigEndian>(*port)?
|
||||||
}
|
}
|
||||||
|
Protocol::Dns(s) => {
|
||||||
|
w.write_all(encode::u32(DNS, &mut buf))?;
|
||||||
|
let bytes = s.as_bytes();
|
||||||
|
w.write_all(encode::usize(bytes.len(), &mut encode::usize_buffer()))?;
|
||||||
|
w.write_all(&bytes)?
|
||||||
|
}
|
||||||
Protocol::Dns4(s) => {
|
Protocol::Dns4(s) => {
|
||||||
w.write_all(encode::u32(DNS4, &mut buf))?;
|
w.write_all(encode::u32(DNS4, &mut buf))?;
|
||||||
let bytes = s.as_bytes();
|
let bytes = s.as_bytes();
|
||||||
@ -421,6 +438,7 @@ impl<'a> Protocol<'a> {
|
|||||||
use self::Protocol::*;
|
use self::Protocol::*;
|
||||||
match self {
|
match self {
|
||||||
Dccp(a) => Dccp(a),
|
Dccp(a) => Dccp(a),
|
||||||
|
Dns(cow) => Dns(Cow::Owned(cow.into_owned())),
|
||||||
Dns4(cow) => Dns4(Cow::Owned(cow.into_owned())),
|
Dns4(cow) => Dns4(Cow::Owned(cow.into_owned())),
|
||||||
Dns6(cow) => Dns6(Cow::Owned(cow.into_owned())),
|
Dns6(cow) => Dns6(Cow::Owned(cow.into_owned())),
|
||||||
Dnsaddr(cow) => Dnsaddr(Cow::Owned(cow.into_owned())),
|
Dnsaddr(cow) => Dnsaddr(Cow::Owned(cow.into_owned())),
|
||||||
@ -454,6 +472,7 @@ impl<'a> fmt::Display for Protocol<'a> {
|
|||||||
use self::Protocol::*;
|
use self::Protocol::*;
|
||||||
match self {
|
match self {
|
||||||
Dccp(port) => write!(f, "/dccp/{}", port),
|
Dccp(port) => write!(f, "/dccp/{}", port),
|
||||||
|
Dns(s) => write!(f, "/dns/{}", s),
|
||||||
Dns4(s) => write!(f, "/dns4/{}", s),
|
Dns4(s) => write!(f, "/dns4/{}", s),
|
||||||
Dns6(s) => write!(f, "/dns6/{}", s),
|
Dns6(s) => write!(f, "/dns6/{}", s),
|
||||||
Dnsaddr(s) => write!(f, "/dnsaddr/{}", s),
|
Dnsaddr(s) => write!(f, "/dnsaddr/{}", s),
|
||||||
|
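To make the new variant concrete, a small round-trip sketch (crate and protocol names as introduced above; everything else is illustrative):

    use parity_multiaddr::{Multiaddr, Protocol};
    use std::borrow::Cow;

    fn main() {
        let addr: Multiaddr = "/dns/example.com/tcp/443/wss".parse().unwrap();
        // The textual form survives the parse/display round trip ...
        assert_eq!(addr.to_string(), "/dns/example.com/tcp/443/wss");
        // ... and the first component is the new DNS variant, which resolvers
        // may map to either an IPv4 or an IPv6 address.
        assert_eq!(addr.iter().next(), Some(Protocol::Dns(Cow::Borrowed("example.com"))));
    }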
@ -76,36 +76,37 @@ struct Proto(Protocol<'static>);
|
|||||||
impl Arbitrary for Proto {
|
impl Arbitrary for Proto {
|
||||||
fn arbitrary<G: Gen>(g: &mut G) -> Self {
|
fn arbitrary<G: Gen>(g: &mut G) -> Self {
|
||||||
use Protocol::*;
|
use Protocol::*;
|
||||||
match g.gen_range(0, 24) { // TODO: Add Protocol::Quic
|
match g.gen_range(0, 25) { // TODO: Add Protocol::Quic
|
||||||
0 => Proto(Dccp(g.gen())),
|
0 => Proto(Dccp(g.gen())),
|
||||||
1 => Proto(Dns4(Cow::Owned(SubString::arbitrary(g).0))),
|
1 => Proto(Dns(Cow::Owned(SubString::arbitrary(g).0))),
|
||||||
2 => Proto(Dns6(Cow::Owned(SubString::arbitrary(g).0))),
|
2 => Proto(Dns4(Cow::Owned(SubString::arbitrary(g).0))),
|
||||||
3 => Proto(Http),
|
3 => Proto(Dns6(Cow::Owned(SubString::arbitrary(g).0))),
|
||||||
4 => Proto(Https),
|
4 => Proto(Http),
|
||||||
5 => Proto(Ip4(Ipv4Addr::arbitrary(g))),
|
5 => Proto(Https),
|
||||||
6 => Proto(Ip6(Ipv6Addr::arbitrary(g))),
|
6 => Proto(Ip4(Ipv4Addr::arbitrary(g))),
|
||||||
7 => Proto(P2pWebRtcDirect),
|
7 => Proto(Ip6(Ipv6Addr::arbitrary(g))),
|
||||||
8 => Proto(P2pWebRtcStar),
|
8 => Proto(P2pWebRtcDirect),
|
||||||
9 => Proto(P2pWebSocketStar),
|
9 => Proto(P2pWebRtcStar),
|
||||||
10 => Proto(Memory(g.gen())),
|
10 => Proto(P2pWebSocketStar),
|
||||||
|
11 => Proto(Memory(g.gen())),
|
||||||
// TODO: impl Arbitrary for Multihash:
|
// TODO: impl Arbitrary for Multihash:
|
||||||
11 => Proto(P2p(multihash("QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"))),
|
12 => Proto(P2p(multihash("QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"))),
|
||||||
12 => Proto(P2pCircuit),
|
13 => Proto(P2pCircuit),
|
||||||
13 => Proto(Quic),
|
14 => Proto(Quic),
|
||||||
14 => Proto(Sctp(g.gen())),
|
15 => Proto(Sctp(g.gen())),
|
||||||
15 => Proto(Tcp(g.gen())),
|
16 => Proto(Tcp(g.gen())),
|
||||||
16 => Proto(Udp(g.gen())),
|
17 => Proto(Udp(g.gen())),
|
||||||
17 => Proto(Udt),
|
18 => Proto(Udt),
|
||||||
18 => Proto(Unix(Cow::Owned(SubString::arbitrary(g).0))),
|
19 => Proto(Unix(Cow::Owned(SubString::arbitrary(g).0))),
|
||||||
19 => Proto(Utp),
|
20 => Proto(Utp),
|
||||||
20 => Proto(Ws("/".into())),
|
21 => Proto(Ws("/".into())),
|
||||||
21 => Proto(Wss("/".into())),
|
22 => Proto(Wss("/".into())),
|
||||||
22 => {
|
23 => {
|
||||||
let mut a = [0; 10];
|
let mut a = [0; 10];
|
||||||
g.fill(&mut a);
|
g.fill(&mut a);
|
||||||
Proto(Onion(Cow::Owned(a), g.gen_range(1, std::u16::MAX)))
|
Proto(Onion(Cow::Owned(a), g.gen_range(1, std::u16::MAX)))
|
||||||
},
|
},
|
||||||
23 => {
|
24 => {
|
||||||
let mut a = [0; 35];
|
let mut a = [0; 35];
|
||||||
g.fill_bytes(&mut a);
|
g.fill_bytes(&mut a);
|
||||||
Proto(Onion3((a, g.gen_range(1, std::u16::MAX)).into()))
|
Proto(Onion3((a, g.gen_range(1, std::u16::MAX)).into()))
|
||||||
|
@ -11,5 +11,5 @@ categories = ["network-programming", "asynchronous"]
|
|||||||
publish = false
|
publish = false
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
libp2p-core = { version = "0.18.0", path = "../../core" }
|
libp2p-core = { version = "0.19.0", path = "../../core" }
|
||||||
num_cpus = "1.8"
|
num_cpus = "1.8"
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
name = "libp2p-mplex"
|
name = "libp2p-mplex"
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
description = "Mplex multiplexing protocol for libp2p"
|
description = "Mplex multiplexing protocol for libp2p"
|
||||||
version = "0.18.0"
|
version = "0.19.0"
|
||||||
authors = ["Parity Technologies <admin@parity.io>"]
|
authors = ["Parity Technologies <admin@parity.io>"]
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
repository = "https://github.com/libp2p/rust-libp2p"
|
repository = "https://github.com/libp2p/rust-libp2p"
|
||||||
@ -14,11 +14,11 @@ bytes = "0.5"
|
|||||||
fnv = "1.0"
|
fnv = "1.0"
|
||||||
futures = "0.3.1"
|
futures = "0.3.1"
|
||||||
futures_codec = "0.3.4"
|
futures_codec = "0.3.4"
|
||||||
libp2p-core = { version = "0.18.0", path = "../../core" }
|
libp2p-core = { version = "0.19.0", path = "../../core" }
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
parking_lot = "0.10"
|
parking_lot = "0.10"
|
||||||
unsigned-varint = { version = "0.3", features = ["futures-codec"] }
|
unsigned-varint = { version = "0.3", features = ["futures-codec"] }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
async-std = "1.0"
|
async-std = "1.0"
|
||||||
libp2p-tcp = { version = "0.18.0", path = "../../transports/tcp" }
|
libp2p-tcp = { version = "0.19.0", path = "../../transports/tcp" }
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
name = "libp2p-yamux"
|
name = "libp2p-yamux"
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
description = "Yamux multiplexing protocol for libp2p"
|
description = "Yamux multiplexing protocol for libp2p"
|
||||||
version = "0.18.0"
|
version = "0.19.0"
|
||||||
authors = ["Parity Technologies <admin@parity.io>"]
|
authors = ["Parity Technologies <admin@parity.io>"]
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
repository = "https://github.com/libp2p/rust-libp2p"
|
repository = "https://github.com/libp2p/rust-libp2p"
|
||||||
@ -11,7 +11,7 @@ categories = ["network-programming", "asynchronous"]
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
futures = "0.3.1"
|
futures = "0.3.1"
|
||||||
libp2p-core = { version = "0.18.0", path = "../../core" }
|
libp2p-core = { version = "0.19.0", path = "../../core" }
|
||||||
parking_lot = "0.10"
|
parking_lot = "0.10"
|
||||||
thiserror = "1.0"
|
thiserror = "1.0"
|
||||||
yamux = "0.4.5"
|
yamux = "0.4.5"
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
name = "libp2p-deflate"
|
name = "libp2p-deflate"
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
description = "Deflate encryption protocol for libp2p"
|
description = "Deflate encryption protocol for libp2p"
|
||||||
version = "0.18.0"
|
version = "0.19.0"
|
||||||
authors = ["Parity Technologies <admin@parity.io>"]
|
authors = ["Parity Technologies <admin@parity.io>"]
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
repository = "https://github.com/libp2p/rust-libp2p"
|
repository = "https://github.com/libp2p/rust-libp2p"
|
||||||
@ -11,11 +11,11 @@ categories = ["network-programming", "asynchronous"]
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
futures = "0.3.1"
|
futures = "0.3.1"
|
||||||
libp2p-core = { version = "0.18.0", path = "../../core" }
|
libp2p-core = { version = "0.19.0", path = "../../core" }
|
||||||
flate2 = "1.0"
|
flate2 = "1.0"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
async-std = "1.0"
|
async-std = "1.0"
|
||||||
libp2p-tcp = { version = "0.18.0", path = "../../transports/tcp" }
|
libp2p-tcp = { version = "0.19.0", path = "../../transports/tcp" }
|
||||||
rand = "0.7"
|
rand = "0.7"
|
||||||
quickcheck = "0.9"
|
quickcheck = "0.9"
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
name = "libp2p-floodsub"
|
name = "libp2p-floodsub"
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
description = "Floodsub protocol for libp2p"
|
description = "Floodsub protocol for libp2p"
|
||||||
version = "0.18.0"
|
version = "0.19.0"
|
||||||
authors = ["Parity Technologies <admin@parity.io>"]
|
authors = ["Parity Technologies <admin@parity.io>"]
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
repository = "https://github.com/libp2p/rust-libp2p"
|
repository = "https://github.com/libp2p/rust-libp2p"
|
||||||
@ -13,8 +13,8 @@ categories = ["network-programming", "asynchronous"]
|
|||||||
cuckoofilter = "0.3.2"
|
cuckoofilter = "0.3.2"
|
||||||
fnv = "1.0"
|
fnv = "1.0"
|
||||||
futures = "0.3.1"
|
futures = "0.3.1"
|
||||||
libp2p-core = { version = "0.18.0", path = "../../core" }
|
libp2p-core = { version = "0.19.0", path = "../../core" }
|
||||||
libp2p-swarm = { version = "0.18.0", path = "../../swarm" }
|
libp2p-swarm = { version = "0.19.0", path = "../../swarm" }
|
||||||
prost = "0.6.1"
|
prost = "0.6.1"
|
||||||
rand = "0.7"
|
rand = "0.7"
|
||||||
smallvec = "1.0"
|
smallvec = "1.0"
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
name = "libp2p-gossipsub"
|
name = "libp2p-gossipsub"
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
description = "Gossipsub protocol for libp2p"
|
description = "Gossipsub protocol for libp2p"
|
||||||
version = "0.18.0"
|
version = "0.19.0"
|
||||||
authors = ["Age Manning <Age@AgeManning.com>"]
|
authors = ["Age Manning <Age@AgeManning.com>"]
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
repository = "https://github.com/libp2p/rust-libp2p"
|
repository = "https://github.com/libp2p/rust-libp2p"
|
||||||
@ -10,8 +10,8 @@ keywords = ["peer-to-peer", "libp2p", "networking"]
|
|||||||
categories = ["network-programming", "asynchronous"]
|
categories = ["network-programming", "asynchronous"]
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
libp2p-swarm = { version = "0.18.0", path = "../../swarm" }
|
libp2p-swarm = { version = "0.19.0", path = "../../swarm" }
|
||||||
libp2p-core = { version = "0.18.0", path = "../../core" }
|
libp2p-core = { version = "0.19.0", path = "../../core" }
|
||||||
bytes = "0.5.4"
|
bytes = "0.5.4"
|
||||||
byteorder = "1.3.2"
|
byteorder = "1.3.2"
|
||||||
fnv = "1.0.6"
|
fnv = "1.0.6"
|
||||||
@ -30,8 +30,8 @@ prost = "0.6.1"
|
|||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
async-std = "1.4.0"
|
async-std = "1.4.0"
|
||||||
env_logger = "0.7.1"
|
env_logger = "0.7.1"
|
||||||
libp2p-plaintext = { version = "0.18.0", path = "../plaintext" }
|
libp2p-plaintext = { version = "0.19.0", path = "../plaintext" }
|
||||||
libp2p-yamux = { version = "0.18.0", path = "../../muxers/yamux" }
|
libp2p-yamux = { version = "0.19.0", path = "../../muxers/yamux" }
|
||||||
quickcheck = "0.9.2"
|
quickcheck = "0.9.2"
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
|
@ -27,7 +27,7 @@ use libp2p_swarm::protocols_handler::{
     KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
 };
 use libp2p_swarm::NegotiatedSubstream;
-use log::{debug, trace, warn};
+use log::{debug, error, trace, warn};
 use smallvec::SmallVec;
 use std::{
     borrow::Cow,
@ -274,7 +274,13 @@ impl ProtocolsHandler for GossipsubHandler {
                         Some(OutboundSubstreamState::PendingFlush(substream))
                     }
                     Err(e) => {
-                        return Poll::Ready(ProtocolsHandlerEvent::Close(e));
+                        if let io::ErrorKind::PermissionDenied = e.kind() {
+                            error!("Message over the maximum transmission limit was not sent.");
+                            self.outbound_substream =
+                                Some(OutboundSubstreamState::WaitingOutput(substream));
+                        } else {
+                            return Poll::Ready(ProtocolsHandlerEvent::Close(e));
+                        }
                     }
                 }
             }
@ -2,7 +2,7 @@
 name = "libp2p-identify"
 edition = "2018"
 description = "Nodes identifcation protocol for libp2p"
-version = "0.18.0"
+version = "0.19.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@ -11,8 +11,8 @@ categories = ["network-programming", "asynchronous"]

 [dependencies]
 futures = "0.3.1"
-libp2p-core = { version = "0.18.0", path = "../../core" }
+libp2p-core = { version = "0.19.0", path = "../../core" }
-libp2p-swarm = { version = "0.18.0", path = "../../swarm" }
+libp2p-swarm = { version = "0.19.0", path = "../../swarm" }
 log = "0.4.1"
 prost = "0.6.1"
 smallvec = "1.0"
@ -20,9 +20,9 @@ wasm-timer = "0.2"

 [dev-dependencies]
 async-std = "1.0"
-libp2p-mplex = { version = "0.18.0", path = "../../muxers/mplex" }
+libp2p-mplex = { version = "0.19.0", path = "../../muxers/mplex" }
-libp2p-secio = { version = "0.18.0", path = "../../protocols/secio" }
+libp2p-secio = { version = "0.19.0", path = "../../protocols/secio" }
-libp2p-tcp = { version = "0.18.0", path = "../../transports/tcp" }
+libp2p-tcp = { version = "0.19.0", path = "../../transports/tcp" }

 [build-dependencies]
 prost-build = "0.6"
@ -2,7 +2,7 @@
 name = "libp2p-kad"
 edition = "2018"
 description = "Kademlia protocol for libp2p"
-version = "0.18.0"
+version = "0.19.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@ -17,9 +17,9 @@ fnv = "1.0"
 futures_codec = "0.3.4"
 futures = "0.3.1"
 log = "0.4"
-libp2p-core = { version = "0.18.0", path = "../../core" }
+libp2p-core = { version = "0.19.0", path = "../../core" }
-libp2p-swarm = { version = "0.18.0", path = "../../swarm" }
+libp2p-swarm = { version = "0.19.0", path = "../../swarm" }
-multihash = "0.10"
+multihash = "0.11.0"
 prost = "0.6.1"
 rand = "0.7.2"
 sha2 = "0.8.0"
@ -31,11 +31,11 @@ void = "1.0"
 bs58 = "0.3.0"
 derivative = "2.0.2"

-trust-graph = { git = "ssh://git@github.com/fluencelabs/arqada.git", branch = "memory_store_set" }
+trust-graph = { git = "https://github.com/fluencelabs/fluence", branch = "libp2p_0.19.0" }

 [dev-dependencies]
-libp2p-secio = { version = "0.18.0", path = "../secio" }
+libp2p-secio = { version = "0.19.0", path = "../secio" }
-libp2p-yamux = { version = "0.18.0", path = "../../muxers/yamux" }
+libp2p-yamux = { version = "0.19.0", path = "../../muxers/yamux" }
 quickcheck = "0.9.0"

 [build-dependencies]
File diff suppressed because it is too large
@ -152,10 +152,11 @@ fn bootstrap() {
         .collect::<Vec<_>>();
     let swarm_ids: Vec<_> = swarms.iter().map(Swarm::local_peer_id).cloned().collect();

-    swarms[0].bootstrap();
+    let qid = swarms[0].bootstrap().unwrap();

     // Expected known peers
     let expected_known = swarm_ids.iter().skip(1).cloned().collect::<HashSet<_>>();
+    let mut first = true;

     // Run test
     block_on(
@ -163,14 +164,23 @@ fn bootstrap() {
             for (i, swarm) in swarms.iter_mut().enumerate() {
                 loop {
                     match swarm.poll_next_unpin(ctx) {
-                        Poll::Ready(Some(KademliaEvent::BootstrapResult(Ok(ok)))) => {
+                        Poll::Ready(Some(KademliaEvent::QueryResult {
+                            id, result: QueryResult::Bootstrap(Ok(ok)), ..
+                        })) => {
+                            assert_eq!(id, qid);
                             assert_eq!(i, 0);
-                            assert_eq!(ok.peer, swarm_ids[0]);
-                            let known = swarm.kbuckets.iter()
-                                .map(|e| e.node.key.preimage().clone())
-                                .collect::<HashSet<_>>();
-                            assert_eq!(expected_known, known);
-                            return Poll::Ready(())
+                            if first {
+                                // Bootstrapping must start with a self-lookup.
+                                assert_eq!(ok.peer, swarm_ids[0]);
+                            }
+                            first = false;
+                            if ok.num_remaining == 0 {
+                                let known = swarm.kbuckets.iter()
+                                    .map(|e| e.node.key.preimage().clone())
+                                    .collect::<HashSet<_>>();
+                                assert_eq!(expected_known, known);
+                                return Poll::Ready(())
+                            }
                         }
                         // Ignore any other event.
                         Poll::Ready(Some(_)) => (),
@ -210,7 +220,17 @@ fn query_iter() {
     // propagate forwards through the list of peers.
     let search_target = PeerId::random();
     let search_target_key = kbucket::Key::new(search_target.clone());
-    swarms[0].get_closest_peers(search_target.clone());
+    let qid = swarms[0].get_closest_peers(search_target.clone());
+
+    match swarms[0].query(&qid) {
+        Some(q) => match q.info() {
+            QueryInfo::GetClosestPeers { key } => {
+                assert_eq!(&key[..], search_target.borrow() as &[u8])
+            },
+            i => panic!("Unexpected query info: {:?}", i)
+        }
+        None => panic!("Query not found: {:?}", qid)
+    }

     // Set up expectations.
     let expected_swarm_id = swarm_ids[0].clone();
@ -224,7 +244,10 @@ fn query_iter() {
             for (i, swarm) in swarms.iter_mut().enumerate() {
                 loop {
                     match swarm.poll_next_unpin(ctx) {
-                        Poll::Ready(Some(KademliaEvent::GetClosestPeersResult(Ok(ok)))) => {
+                        Poll::Ready(Some(KademliaEvent::QueryResult {
+                            id, result: QueryResult::GetClosestPeers(Ok(ok)), ..
+                        })) => {
+                            assert_eq!(id, qid);
                             assert_eq!(&ok.key[..], search_target.as_bytes());
                             assert_eq!(swarm_ids[i], expected_swarm_id);
                             assert_eq!(swarm.queries.size(), 0);
@ -275,7 +298,9 @@ fn unresponsive_not_returned_direct() {
             for (_, swarm) in &mut swarms {
                 loop {
                     match swarm.poll_next_unpin(ctx) {
-                        Poll::Ready(Some(KademliaEvent::GetClosestPeersResult(Ok(ok)))) => {
+                        Poll::Ready(Some(KademliaEvent::QueryResult {
+                            result: QueryResult::GetClosestPeers(Ok(ok)), ..
+                        })) => {
                             assert_eq!(&ok.key[..], search_target.as_bytes());
                             assert_eq!(ok.peers.len(), 0);
                             return Poll::Ready(());
@ -325,7 +350,9 @@ fn unresponsive_not_returned_indirect() {
             for swarm in &mut swarms {
                 loop {
                     match swarm.poll_next_unpin(ctx) {
-                        Poll::Ready(Some(KademliaEvent::GetClosestPeersResult(Ok(ok)))) => {
+                        Poll::Ready(Some(KademliaEvent::QueryResult {
+                            result: QueryResult::GetClosestPeers(Ok(ok)), ..
+                        })) => {
                             assert_eq!(&ok.key[..], search_target.as_bytes());
                             assert_eq!(ok.peers.len(), 1);
                             assert_eq!(ok.peers[0], first_peer_id);
@ -363,14 +390,17 @@ fn get_record_not_found() {
     let mut swarms = swarms.into_iter().map(|(_, _addr, swarm)| swarm).collect::<Vec<_>>();

     let target_key = record::Key::from(random_multihash());
-    swarms[0].get_record(&target_key, Quorum::One);
+    let qid = swarms[0].get_record(&target_key, Quorum::One);

     block_on(
         poll_fn(move |ctx| {
             for swarm in &mut swarms {
                 loop {
                     match swarm.poll_next_unpin(ctx) {
-                        Poll::Ready(Some(KademliaEvent::GetRecordResult(Err(e)))) => {
+                        Poll::Ready(Some(KademliaEvent::QueryResult {
+                            id, result: QueryResult::GetRecord(Err(e)), ..
+                        })) => {
+                            assert_eq!(id, qid);
                             if let GetRecordError::NotFound { key, closest_peers, } = e {
                                 assert_eq!(key, target_key);
                                 assert_eq!(closest_peers.len(), 2);
@ -436,8 +466,23 @@ fn put_record() {
         })
         .collect::<HashMap<_,_>>();

+    // Initiate put_record queries.
+    let mut qids = HashSet::new();
     for r in records.values() {
-        swarms[0].put_record(r.clone(), Quorum::All);
+        let qid = swarms[0].put_record(r.clone(), Quorum::All).unwrap();
+        match swarms[0].query(&qid) {
+            Some(q) => match q.info() {
+                QueryInfo::PutRecord { phase, record, .. } => {
+                    assert_eq!(phase, &PutRecordPhase::GetClosestPeers);
+                    assert_eq!(record.key, r.key);
+                    assert_eq!(record.value, r.value);
+                    assert!(record.expires.is_some());
+                    qids.insert(qid);
+                },
+                i => panic!("Unexpected query info: {:?}", i)
+            }
+            None => panic!("Query not found: {:?}", qid)
+        }
     }

     // Each test run republishes all records once.
@ -451,8 +496,17 @@ fn put_record() {
             for swarm in &mut swarms {
                 loop {
                     match swarm.poll_next_unpin(ctx) {
-                        Poll::Ready(Some(KademliaEvent::PutRecordResult(res))) |
-                        Poll::Ready(Some(KademliaEvent::RepublishRecordResult(res))) => {
+                        Poll::Ready(Some(KademliaEvent::QueryResult {
+                            id, result: QueryResult::PutRecord(res), stats
+                        })) |
+                        Poll::Ready(Some(KademliaEvent::QueryResult {
+                            id, result: QueryResult::RepublishRecord(res), stats
+                        })) => {
+                            assert!(qids.is_empty() || qids.remove(&id));
+                            assert!(stats.duration().is_some());
+                            assert!(stats.num_successes() >= replication_factor.get() as u32);
+                            assert!(stats.num_requests() >= stats.num_successes());
+                            assert_eq!(stats.num_failures(), 0);
                             match res {
                                 Err(e) => panic!("{:?}", e),
                                 Ok(ok) => {
@ -551,7 +605,7 @@ fn put_record() {
 }

 #[test]
-fn get_value() {
+fn get_record() {
     let mut swarms = build_nodes(3);

     // Let first peer know of second peer and second peer know of third peer.
@ -566,14 +620,17 @@ fn get_value() {
     let record = Record::new(random_multihash(), vec![4,5,6]);

     swarms[1].1.store.put(record.clone()).unwrap();
-    swarms[0].1.get_record(&record.key, Quorum::One);
+    let qid = swarms[0].1.get_record(&record.key, Quorum::One);

     block_on(
         poll_fn(move |ctx| {
             for (_, swarm) in &mut swarms {
                 loop {
                     match swarm.poll_next_unpin(ctx) {
-                        Poll::Ready(Some(KademliaEvent::GetRecordResult(Ok(ok)))) => {
+                        Poll::Ready(Some(KademliaEvent::QueryResult {
+                            id, result: QueryResult::GetRecord(Ok(ok)), ..
+                        })) => {
+                            assert_eq!(id, qid);
                             assert_eq!(ok.records.len(), 1);
                             assert_eq!(ok.records.first(), Some(&record));
                             return Poll::Ready(());
@ -592,7 +649,7 @@ fn get_value() {
 }

 #[test]
-fn get_value_many() {
+fn get_record_many() {
     // TODO: Randomise
     let num_nodes = 12;
     let mut swarms = build_connected_nodes(num_nodes, 3).into_iter()
@ -607,14 +664,17 @@ fn get_value_many() {
     }

     let quorum = Quorum::N(NonZeroUsize::new(num_results).unwrap());
-    swarms[0].get_record(&record.key, quorum);
+    let qid = swarms[0].get_record(&record.key, quorum);

     block_on(
         poll_fn(move |ctx| {
             for swarm in &mut swarms {
                 loop {
                     match swarm.poll_next_unpin(ctx) {
-                        Poll::Ready(Some(KademliaEvent::GetRecordResult(Ok(ok)))) => {
+                        Poll::Ready(Some(KademliaEvent::QueryResult {
+                            id, result: QueryResult::GetRecord(Ok(ok)), ..
+                        })) => {
+                            assert_eq!(id, qid);
                             assert_eq!(ok.records.len(), num_results);
                             assert_eq!(ok.records.first(), Some(&record));
                             return Poll::Ready(());
@ -672,8 +732,10 @@ fn add_provider() {
         let mut results = Vec::new();

         // Initiate the first round of publishing.
+        let mut qids = HashSet::new();
         for k in &keys {
-            swarms[0].start_providing(k.clone());
+            let qid = swarms[0].start_providing(k.clone()).unwrap();
+            qids.insert(qid);
         }

         block_on(
@ -682,8 +744,13 @@ fn add_provider() {
                 for swarm in &mut swarms {
                     loop {
                         match swarm.poll_next_unpin(ctx) {
-                            Poll::Ready(Some(KademliaEvent::StartProvidingResult(res))) |
-                            Poll::Ready(Some(KademliaEvent::RepublishProviderResult(res))) => {
+                            Poll::Ready(Some(KademliaEvent::QueryResult {
+                                id, result: QueryResult::StartProviding(res), ..
+                            })) |
+                            Poll::Ready(Some(KademliaEvent::QueryResult {
+                                id, result: QueryResult::RepublishProvider(res), ..
+                            })) => {
+                                assert!(qids.is_empty() || qids.remove(&id));
                                 match res {
                                     Err(e) => panic!(e),
                                     Ok(ok) => {
@ -784,7 +851,7 @@ fn exceed_jobs_max_queries() {
     let (_, _addr, mut swarm) = build_node();
     let num = JOBS_MAX_QUERIES + 1;
     for _ in 0 .. num {
-        swarm.bootstrap();
+        swarm.get_closest_peers(PeerId::random());
     }

     assert_eq!(swarm.queries.size(), num);
@ -794,8 +861,10 @@ fn exceed_jobs_max_queries() {
             for _ in 0 .. num {
                 // There are no other nodes, so the queries finish instantly.
                 if let Poll::Ready(Some(e)) = swarm.poll_next_unpin(ctx) {
-                    if let KademliaEvent::BootstrapResult(r) = e {
-                        assert!(r.is_ok(), "Unexpected error")
+                    if let KademliaEvent::QueryResult {
+                        result: QueryResult::GetClosestPeers(Ok(r)), ..
+                    } = e {
+                        assert!(r.peers.is_empty())
                     } else {
                         panic!("Unexpected event: {:?}", e)
                     }
@ -41,10 +41,33 @@ mod dht_proto {

 pub use addresses::Addresses;
 pub use behaviour::{
-    AddProviderError, AddProviderOk, AddProviderResult, BootstrapError, BootstrapOk,
-    BootstrapResult, GetClosestPeersError, GetClosestPeersOk, GetClosestPeersResult,
-    GetProvidersError, GetProvidersOk, GetProvidersResult, GetRecordError, GetRecordOk,
-    GetRecordResult, PutRecordError, PutRecordOk, PutRecordResult,
+    QueryResult,
+    QueryInfo,
+    QueryStats,
+
+    BootstrapResult,
+    BootstrapOk,
+    BootstrapError,
+
+    GetRecordResult,
+    GetRecordOk,
+    GetRecordError,
+
+    PutRecordResult,
+    PutRecordOk,
+    PutRecordError,
+
+    GetClosestPeersResult,
+    GetClosestPeersOk,
+    GetClosestPeersError,
+
+    AddProviderResult,
+    AddProviderOk,
+    AddProviderError,
+
+    GetProvidersResult,
+    GetProvidersOk,
+    GetProvidersError,
 };
 pub use behaviour::{Kademlia, KademliaConfig, KademliaEvent, Quorum};
 pub use protocol::KadConnectionType;
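The re-exports above reflect the unified query-result API introduced in this commit: instead of one `KademliaEvent` variant per operation, finished queries are now reported as `KademliaEvent::QueryResult { id, result, stats }`, exactly as exercised by the updated tests earlier in this diff. The following is only an illustrative sketch of consuming that event from downstream code; the surrounding `Swarm` setup is assumed and is not part of this commit.

```rust
use libp2p_kad::{KademliaEvent, QueryResult};

// Hedged sketch: matching the unified query-result event.
fn on_kad_event(event: KademliaEvent) {
    if let KademliaEvent::QueryResult { id, result, stats } = event {
        // `stats` is the new QueryStats carried with every finished query.
        println!("query {:?}: {} requests, {} ok", id, stats.num_requests(), stats.num_successes());
        match result {
            QueryResult::Bootstrap(Ok(ok)) =>
                println!("bootstrap step via {:?}, {} remaining", ok.peer, ok.num_remaining),
            QueryResult::GetRecord(Ok(ok)) =>
                println!("got {} record(s)", ok.records.len()),
            QueryResult::GetRecord(Err(e)) =>
                println!("get_record failed: {:?}", e),
            _ => {}
        }
    }
}
```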
@ -26,24 +26,22 @@
 //! to poll the underlying transport for incoming messages, and the `Sink` component
 //! is used to send messages to remote peers.

-use std::{borrow: Cow, convert::TryFrom, time::Duration};
-use std::{io, iter};
-
 use bytes::BytesMut;
 use codec::UviBytes;
-use futures::prelude::*;
-use futures_codec::Framed;
-use prost::Message;
-use unsigned_varint::codec;
-use derivative::Derivative;
-use std::time::Instant;
-
-use libp2p_core::{Multiaddr, PeerId};
-use libp2p_core::identity::ed25519::PublicKey;
-use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};

 use crate::dht_proto as proto;
 use crate::record::{self, Record};
+use futures::prelude::*;
+use futures_codec::Framed;
+use libp2p_core::{Multiaddr, PeerId};
+use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
+use prost::Message;
+use std::{borrow::Cow, convert::TryFrom, time::Duration};
+use std::{io, iter};
+use unsigned_varint::codec;
+use wasm_timer::Instant;
+
+use derivative::Derivative;
+use libp2p_core::identity::ed25519::PublicKey;

 /// The protocol name used for negotiating with multistream-select.
 pub const DEFAULT_PROTO_NAME: &[u8] = b"/ipfs/kad/1.0.0";
@ -89,16 +89,40 @@ impl<TInner> QueryPool<TInner> {
     /// Adds a query to the pool that contacts a fixed set of peers.
     pub fn add_fixed<I>(&mut self, peers: I, inner: TInner) -> QueryId
     where
-        I: IntoIterator<Item = Key<PeerId>>
+        I: IntoIterator<Item = PeerId>
     {
-        let peers = peers.into_iter().map(|k| k.into_preimage()).collect::<Vec<_>>();
+        let id = self.next_query_id();
+        self.continue_fixed(id, peers, inner);
+        id
+    }
+
+    /// Continues an earlier query with a fixed set of peers, reusing
+    /// the given query ID, which must be from a query that finished
+    /// earlier.
+    pub fn continue_fixed<I>(&mut self, id: QueryId, peers: I, inner: TInner)
+    where
+        I: IntoIterator<Item = PeerId>
+    {
+        assert!(!self.queries.contains_key(&id));
         let parallelism = self.config.replication_factor.get();
         let peer_iter = QueryPeerIter::Fixed(FixedPeersIter::new(peers, parallelism));
-        self.add(peer_iter, inner)
+        let query = Query::new(id, peer_iter, inner);
+        self.queries.insert(id, query);
     }

     /// Adds a query to the pool that iterates towards the closest peers to the target.
     pub fn add_iter_closest<T, I>(&mut self, target: T, peers: I, inner: TInner) -> QueryId
+    where
+        T: Into<KeyBytes>,
+        I: IntoIterator<Item = Key<PeerId>>
+    {
+        let id = self.next_query_id();
+        self.continue_iter_closest(id, target, peers, inner);
+        id
+    }
+
+    /// Adds a query to the pool that iterates towards the closest peers to the target.
+    pub fn continue_iter_closest<T, I>(&mut self, id: QueryId, target: T, peers: I, inner: TInner)
     where
         T: Into<KeyBytes>,
         I: IntoIterator<Item = Key<PeerId>>
@ -108,14 +132,13 @@ impl<TInner> QueryPool<TInner> {
             .. ClosestPeersIterConfig::default()
         };
         let peer_iter = QueryPeerIter::Closest(ClosestPeersIter::with_config(cfg, target, peers));
-        self.add(peer_iter, inner)
-    }
-
-    fn add(&mut self, peer_iter: QueryPeerIter, inner: TInner) -> QueryId {
-        let id = QueryId(self.next_id);
-        self.next_id = self.next_id.wrapping_add(1);
         let query = Query::new(id, peer_iter, inner);
         self.queries.insert(id, query);
+    }
+
+    fn next_query_id(&mut self) -> QueryId {
+        let id = QueryId(self.next_id);
+        self.next_id = self.next_id.wrapping_add(1);
         id
     }
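The new `continue_fixed` and `continue_iter_closest` methods let the behaviour chain query phases while keeping a single `QueryId`, so the caller's handle stays valid across, for example, the two phases of a `put_record`. A rough sketch of that intended use inside the `query` module follows; the function name and the `next_inner` argument are illustrative only, not part of this commit.

```rust
// Hedged sketch, assumed to live alongside QueryPool in query.rs:
// feed the peers found by a finished iterative phase into a fixed-set
// follow-up phase under the same QueryId.
fn start_second_phase<TInner>(
    pool: &mut QueryPool<TInner>,
    finished: Query<TInner>,
    next_inner: TInner,
) {
    let id = finished.id();
    let result = finished.into_result();
    // The finished query was already removed from the pool, so reusing
    // its id does not trip the assertion in `continue_fixed`.
    pool.continue_fixed(id, result.peers, next_inner);
}
```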
@ -136,7 +159,7 @@ impl<TInner> QueryPool<TInner> {
         let mut waiting = None;

         for (&query_id, query) in self.queries.iter_mut() {
-            query.started = query.started.or(Some(now));
+            query.stats.start = query.stats.start.or(Some(now));
             match query.next(now) {
                 PeersIterState::Finished => {
                     finished = Some(query_id);
@ -148,7 +171,7 @@ impl<TInner> QueryPool<TInner> {
                     break
                 }
                 PeersIterState::Waiting(None) | PeersIterState::WaitingAtCapacity => {
-                    let elapsed = now - query.started.unwrap_or(now);
+                    let elapsed = now - query.stats.start.unwrap_or(now);
                     if elapsed >= self.config.timeout {
                         timeout = Some(query_id);
                         break
@ -163,12 +186,14 @@ impl<TInner> QueryPool<TInner> {
         }

         if let Some(query_id) = finished {
-            let query = self.queries.remove(&query_id).expect("s.a.");
+            let mut query = self.queries.remove(&query_id).expect("s.a.");
+            query.stats.end = Some(now);
             return QueryPoolState::Finished(query)
         }

         if let Some(query_id) = timeout {
-            let query = self.queries.remove(&query_id).expect("s.a.");
+            let mut query = self.queries.remove(&query_id).expect("s.a.");
+            query.stats.end = Some(now);
             return QueryPoolState::Timeout(query)
         }
@ -206,9 +231,8 @@ pub struct Query<TInner> {
     id: QueryId,
     /// The peer iterator that drives the query state.
     peer_iter: QueryPeerIter,
-    /// The instant when the query started (i.e. began waiting for the first
-    /// result from a peer).
-    started: Option<Instant>,
+    /// Execution statistics of the query.
+    stats: QueryStats,
     /// The opaque inner query state.
     pub inner: TInner,
 }
@ -222,7 +246,7 @@ enum QueryPeerIter {
 impl<TInner> Query<TInner> {
     /// Creates a new query without starting it.
     fn new(id: QueryId, peer_iter: QueryPeerIter, inner: TInner) -> Self {
-        Query { id, inner, peer_iter, started: None }
+        Query { id, inner, peer_iter, stats: QueryStats::empty() }
     }

     /// Gets the unique ID of the query.
@ -230,11 +254,19 @@ impl<TInner> Query<TInner> {
         self.id
     }

+    /// Gets the current execution statistics of the query.
+    pub fn stats(&self) -> &QueryStats {
+        &self.stats
+    }
+
     /// Informs the query that the attempt to contact `peer` failed.
     pub fn on_failure(&mut self, peer: &PeerId) {
-        match &mut self.peer_iter {
+        let updated = match &mut self.peer_iter {
             QueryPeerIter::Closest(iter) => iter.on_failure(peer),
             QueryPeerIter::Fixed(iter) => iter.on_failure(peer)
+        };
+        if updated {
+            self.stats.failure += 1;
         }
     }
@ -245,9 +277,12 @@ impl<TInner> Query<TInner> {
     where
         I: IntoIterator<Item = PeerId>
     {
-        match &mut self.peer_iter {
+        let updated = match &mut self.peer_iter {
             QueryPeerIter::Closest(iter) => iter.on_success(peer, new_peers),
             QueryPeerIter::Fixed(iter) => iter.on_success(peer)
+        };
+        if updated {
+            self.stats.success += 1;
         }
     }
@ -261,10 +296,16 @@ impl<TInner> Query<TInner> {

     /// Advances the state of the underlying peer iterator.
     fn next(&mut self, now: Instant) -> PeersIterState {
-        match &mut self.peer_iter {
+        let state = match &mut self.peer_iter {
             QueryPeerIter::Closest(iter) => iter.next(now),
             QueryPeerIter::Fixed(iter) => iter.next()
+        };
+
+        if let PeersIterState::Waiting(Some(_)) = state {
+            self.stats.requests += 1;
         }
+
+        state
     }

     /// Finishes the query prematurely.
@ -278,13 +319,24 @@ impl<TInner> Query<TInner> {
         }
     }

+    /// Checks whether the query has finished.
+    ///
+    /// A finished query is eventually reported by `QueryPool::next()` and
+    /// removed from the pool.
+    pub fn is_finished(&self) -> bool {
+        match &self.peer_iter {
+            QueryPeerIter::Closest(iter) => iter.is_finished(),
+            QueryPeerIter::Fixed(iter) => iter.is_finished()
+        }
+    }
+
     /// Consumes the query, producing the final `QueryResult`.
     pub fn into_result(self) -> QueryResult<TInner, impl Iterator<Item = PeerId>> {
         let peers = match self.peer_iter {
             QueryPeerIter::Closest(iter) => Either::Left(iter.into_result()),
             QueryPeerIter::Fixed(iter) => Either::Right(iter.into_result())
         };
-        QueryResult { inner: self.inner, peers }
+        QueryResult { peers, inner: self.inner, stats: self.stats }
     }
 }
@ -293,6 +345,90 @@ pub struct QueryResult<TInner, TPeers> {
     /// The opaque inner query state.
     pub inner: TInner,
     /// The successfully contacted peers.
-    pub peers: TPeers
+    pub peers: TPeers,
+    /// The collected query statistics.
+    pub stats: QueryStats
 }
+
+/// Execution statistics of a query.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct QueryStats {
+    requests: u32,
+    success: u32,
+    failure: u32,
+    start: Option<Instant>,
+    end: Option<Instant>
+}
+
+impl QueryStats {
+    pub fn empty() -> Self {
+        QueryStats {
+            requests: 0,
+            success: 0,
+            failure: 0,
+            start: None,
+            end: None,
+        }
+    }
+
+    /// Gets the total number of requests initiated by the query.
+    pub fn num_requests(&self) -> u32 {
+        self.requests
+    }
+
+    /// Gets the number of successful requests.
+    pub fn num_successes(&self) -> u32 {
+        self.success
+    }
+
+    /// Gets the number of failed requests.
+    pub fn num_failures(&self) -> u32 {
+        self.failure
+    }
+
+    /// Gets the number of pending requests.
+    ///
+    /// > **Note**: A query can finish while still having pending
+    /// > requests, if the termination conditions are already met.
+    pub fn num_pending(&self) -> u32 {
+        self.requests - (self.success + self.failure)
+    }
+
+    /// Gets the duration of the query.
+    ///
+    /// If the query has not yet finished, the duration is measured from the
+    /// start of the query to the current instant.
+    ///
+    /// If the query did not yet start (i.e. yield the first peer to contact),
+    /// `None` is returned.
+    pub fn duration(&self) -> Option<Duration> {
+        if let Some(s) = self.start {
+            if let Some(e) = self.end {
+                Some(e - s)
+            } else {
+                Some(Instant::now() - s)
+            }
+        } else {
+            None
+        }
+    }
+
+    /// Merges these stats with the given stats of another query,
+    /// e.g. to accumulate statistics from a multi-phase query.
+    ///
+    /// Counters are merged cumulatively while the instants for
+    /// start and end of the queries are taken as the minimum and
+    /// maximum, respectively.
+    pub fn merge(self, other: QueryStats) -> Self {
+        QueryStats {
+            requests: self.requests + other.requests,
+            success: self.success + other.success,
+            failure: self.failure + other.failure,
+            start: match (self.start, other.start) {
+                (Some(a), Some(b)) => Some(std::cmp::min(a, b)),
+                (a, b) => a.or(b)
+            },
+            end: std::cmp::max(self.end, other.end)
+        }
+    }
+}
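This `QueryStats` value is what the behaviour attaches to every finished query and what the updated `put_record` test above inspects. A small sketch of reading it through the public accessors; the helper name is illustrative and not part of this commit.

```rust
use libp2p_kad::QueryStats;

// Hedged sketch: summarising the statistics of a finished query,
// e.g. the `stats` taken from a `KademliaEvent::QueryResult`.
fn summarize(stats: &QueryStats) -> String {
    format!(
        "{} requests ({} ok, {} failed, {} still pending), duration: {:?}",
        stats.num_requests(),
        stats.num_successes(),
        stats.num_failures(),
        stats.num_pending(),
        stats.duration(),
    )
}
```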
@ -132,8 +132,7 @@ impl ClosestPeersIter {
         }
     }

-    /// Callback for delivering the result of a successful request to a peer
-    /// that the iterator is waiting on.
+    /// Callback for delivering the result of a successful request to a peer.
     ///
     /// Delivering results of requests back to the iterator allows the iterator to make
     /// progress. The iterator is said to make progress either when the given
@ -141,18 +140,20 @@ impl ClosestPeersIter {
     /// or when the iterator did not yet accumulate `num_results` closest peers and
     /// `closer_peers` contains a new peer, regardless of its distance to the target.
     ///
-    /// After calling this function, `next` should eventually be called again
-    /// to advance the state of the iterator.
+    /// If the iterator is currently waiting for a result from `peer`,
+    /// the iterator state is updated and `true` is returned. In that
+    /// case, after calling this function, `next` should eventually be
+    /// called again to obtain the new state of the iterator.
     ///
     /// If the iterator is finished, it is not currently waiting for a
     /// result from `peer`, or a result for `peer` has already been reported,
-    /// calling this function has no effect.
-    pub fn on_success<I>(&mut self, peer: &PeerId, closer_peers: I)
+    /// calling this function has no effect and `false` is returned.
+    pub fn on_success<I>(&mut self, peer: &PeerId, closer_peers: I) -> bool
     where
         I: IntoIterator<Item = PeerId>
     {
         if let State::Finished = self.state {
-            return
+            return false
         }

         let key = Key::from(peer.clone());
@ -160,7 +161,7 @@ impl ClosestPeersIter {

         // Mark the peer as succeeded.
         match self.closest_peers.entry(distance) {
-            Entry::Vacant(..) => return,
+            Entry::Vacant(..) => return false,
             Entry::Occupied(mut e) => match e.get().state {
                 PeerState::Waiting(..) => {
                     debug_assert!(self.num_waiting > 0);
@ -172,7 +173,7 @@ impl ClosestPeersIter {
                 }
                 PeerState::NotContacted
                 | PeerState::Failed
-                | PeerState::Succeeded => return
+                | PeerState::Succeeded => return false
             }
         }
@ -209,28 +210,31 @@ impl ClosestPeersIter {
                 State::Stalled
             }
             State::Finished => State::Finished
-        }
+        };
+
+        true
     }

-    /// Callback for informing the iterator about a failed request to a peer
-    /// that the iterator is waiting on.
+    /// Callback for informing the iterator about a failed request to a peer.
     ///
-    /// After calling this function, `next` should eventually be called again
-    /// to advance the state of the iterator.
+    /// If the iterator is currently waiting for a result from `peer`,
+    /// the iterator state is updated and `true` is returned. In that
+    /// case, after calling this function, `next` should eventually be
+    /// called again to obtain the new state of the iterator.
     ///
     /// If the iterator is finished, it is not currently waiting for a
     /// result from `peer`, or a result for `peer` has already been reported,
-    /// calling this function has no effect.
-    pub fn on_failure(&mut self, peer: &PeerId) {
+    /// calling this function has no effect and `false` is returned.
+    pub fn on_failure(&mut self, peer: &PeerId) -> bool {
         if let State::Finished = self.state {
-            return
+            return false
         }

         let key = Key::from(peer.clone());
         let distance = key.distance(&self.target);

         match self.closest_peers.entry(distance) {
-            Entry::Vacant(_) => return,
+            Entry::Vacant(_) => return false,
             Entry::Occupied(mut e) => match e.get().state {
                 PeerState::Waiting(_) => {
                     debug_assert!(self.num_waiting > 0);
@ -240,9 +244,13 @@ impl ClosestPeersIter {
                 PeerState::Unresponsive => {
                     e.get_mut().set_state(PeerState::Failed);
                 }
-                _ => {}
+                PeerState::NotContacted
+                | PeerState::Failed
+                | PeerState::Succeeded => return false
             }
         }
+
+        true
     }

     /// Returns the list of peers for which the iterator is currently waiting
@ -398,7 +406,7 @@ impl ClosestPeersIter {
     }

     /// Checks whether the iterator has finished.
-    pub fn finished(&self) -> bool {
+    pub fn is_finished(&self) -> bool {
         self.state == State::Finished
     }
@ -523,25 +531,16 @@ mod tests {

     use multihash::Multihash;
     use quickcheck::*;
-    use rand::{Rng, thread_rng};
+    use rand::{Rng, rngs::StdRng, SeedableRng};

     use libp2p_core::PeerId;

     use super::*;

-    fn random_peers(n: usize) -> impl Iterator<Item = PeerId> + Clone {
-        (0 .. n).map(|_| PeerId::random())
-    }
-
-    fn random_iter<G: Rng>(g: &mut G) -> ClosestPeersIter {
-        let known_closest_peers = random_peers(g.gen_range(1, 60)).map(Key::from);
-        let target = Key::from(Into::<Multihash>::into(PeerId::random()));
-        let config = ClosestPeersIterConfig {
-            parallelism: g.gen_range(1, 10),
-            num_results: g.gen_range(1, 25),
-            peer_timeout: Duration::from_secs(g.gen_range(10, 30)),
-        };
-        ClosestPeersIter::with_config(config, target, known_closest_peers)
+    fn random_peers<R: Rng>(n: usize, g: &mut R) -> Vec<PeerId> {
+        (0 .. n).map(|_| PeerId::from_multihash(
+            multihash::wrap(multihash::Code::Sha2_256, &g.gen::<[u8; 32]>())
+        ).unwrap()).collect()
     }

     fn sorted<T: AsRef<KeyBytes>>(target: &T, peers: &Vec<Key<PeerId>>) -> bool {
@ -550,42 +549,63 @@ mod tests {

     impl Arbitrary for ClosestPeersIter {
         fn arbitrary<G: Gen>(g: &mut G) -> ClosestPeersIter {
-            random_iter(g)
+            let known_closest_peers = random_peers(g.gen_range(1, 60), g)
+                .into_iter()
+                .map(Key::from);
+            let target = Key::from(Into::<Multihash>::into(PeerId::random()));
+            let config = ClosestPeersIterConfig {
+                parallelism: g.gen_range(1, 10),
+                num_results: g.gen_range(1, 25),
+                peer_timeout: Duration::from_secs(g.gen_range(10, 30)),
+            };
+            ClosestPeersIter::with_config(config, target, known_closest_peers)
+        }
+    }
+
+    #[derive(Clone, Debug)]
+    struct Seed([u8; 32]);
+
+    impl Arbitrary for Seed {
+        fn arbitrary<G: Gen>(g: &mut G) -> Seed {
+            Seed(g.gen())
         }
     }

     #[test]
     fn new_iter() {
-        let iter = random_iter(&mut thread_rng());
+        fn prop(iter: ClosestPeersIter) {
             let target = iter.target.clone();

             let (keys, states): (Vec<_>, Vec<_>) = iter.closest_peers
                 .values()
                 .map(|e| (e.key.clone(), &e.state))
                 .unzip();

             let none_contacted = states
                 .iter()
                 .all(|s| match s {
                     PeerState::NotContacted => true,
                     _ => false
                 });

             assert!(none_contacted,
                 "Unexpected peer state in new iterator.");
             assert!(sorted(&target, &keys),
                 "Closest peers in new iterator not sorted by distance to target.");
             assert_eq!(iter.num_waiting(), 0,
                 "Unexpected peers in progress in new iterator.");
             assert_eq!(iter.into_result().count(), 0,
                 "Unexpected closest peers in new iterator");
+        }
+
+        QuickCheck::new().tests(10).quickcheck(prop as fn(_) -> _)
     }

     #[test]
     fn termination_and_parallelism() {
-        fn prop(mut iter: ClosestPeersIter) {
+        fn prop(mut iter: ClosestPeersIter, seed: Seed) {
             let now = Instant::now();
-            let mut rng = thread_rng();
+            let mut rng = StdRng::from_seed(seed.0);

             let mut expected = iter.closest_peers
                 .values()
@ -632,7 +652,7 @@ mod tests {
             for (i, k) in expected.iter().enumerate() {
                 if rng.gen_bool(0.75) {
                     let num_closer = rng.gen_range(0, iter.config.num_results + 1);
-                    let closer_peers = random_peers(num_closer).collect::<Vec<_>>();
+                    let closer_peers = random_peers(num_closer, &mut rng);
                     remaining.extend(closer_peers.iter().cloned().map(Key::from));
                     iter.on_success(k.preimage(), closer_peers);
                 } else {
@ -680,14 +700,16 @@ mod tests {
             }
         }

-        QuickCheck::new().tests(10).quickcheck(prop as fn(_) -> _)
+        QuickCheck::new().tests(10).quickcheck(prop as fn(_, _) -> _)
     }

     #[test]
     fn no_duplicates() {
-        fn prop(mut iter: ClosestPeersIter) -> bool {
+        fn prop(mut iter: ClosestPeersIter, seed: Seed) -> bool {
             let now = Instant::now();
-            let closer = random_peers(1).collect::<Vec<_>>();
+            let mut rng = StdRng::from_seed(seed.0);
+
+            let closer = random_peers(1, &mut rng);

             // A first peer reports a "closer" peer.
             let peer1 = match iter.next(now) {
@ -702,7 +724,7 @@ mod tests {
             match iter.next(now) {
                 PeersIterState::Waiting(Some(p)) => {
                     let peer2 = p.into_owned();
-                    iter.on_success(&peer2, closer.clone())
+                    assert!(iter.on_success(&peer2, closer.clone()))
                 }
                 PeersIterState::Finished => {}
                 _ => panic!("Unexpectedly iter state."),
@ -715,7 +737,7 @@ mod tests {
             true
         }

-        QuickCheck::new().tests(10).quickcheck(prop as fn(_) -> _)
+        QuickCheck::new().tests(10).quickcheck(prop as fn(_, _) -> _)
     }

     #[test]
@ -742,7 +764,7 @@ mod tests {
             Peer { state, .. } => panic!("Unexpected peer state: {:?}", state)
         }

-        let finished = iter.finished();
+        let finished = iter.is_finished();
         iter.on_success(&peer, iter::empty());
         let closest = iter.into_result().collect::<Vec<_>>();
@ -39,6 +39,7 @@ pub struct FixedPeersIter {
     state: State,
 }

+#[derive(Debug, PartialEq, Eq)]
 enum State {
     Waiting { num_waiting: usize },
     Finished
@ -57,7 +58,12 @@ enum PeerState {
 }

 impl FixedPeersIter {
-    pub fn new(peers: Vec<PeerId>, parallelism: usize) -> Self {
+    pub fn new<I>(peers: I, parallelism: usize) -> Self
+    where
+        I: IntoIterator<Item = PeerId>
+    {
+        let peers = peers.into_iter().collect::<Vec<_>>();
+
         Self {
             parallelism,
             peers: FnvHashMap::default(),
@ -66,21 +72,46 @@ impl FixedPeersIter {
         }
     }

-    pub fn on_success(&mut self, peer: &PeerId) {
+    /// Callback for delivering the result of a successful request to a peer.
+    ///
+    /// If the iterator is currently waiting for a result from `peer`,
+    /// the iterator state is updated and `true` is returned. In that
+    /// case, after calling this function, `next` should eventually be
+    /// called again to obtain the new state of the iterator.
+    ///
+    /// If the iterator is finished, it is not currently waiting for a
+    /// result from `peer`, or a result for `peer` has already been reported,
+    /// calling this function has no effect and `false` is returned.
+    pub fn on_success(&mut self, peer: &PeerId) -> bool {
         if let State::Waiting { num_waiting } = &mut self.state {
             if let Some(state @ PeerState::Waiting) = self.peers.get_mut(peer) {
                 *state = PeerState::Succeeded;
                 *num_waiting -= 1;
+                return true
             }
         }
+        false
     }

-    pub fn on_failure(&mut self, peer: &PeerId) {
-        if let State::Waiting { .. } = &self.state {
+    /// Callback for informing the iterator about a failed request to a peer.
+    ///
+    /// If the iterator is currently waiting for a result from `peer`,
+    /// the iterator state is updated and `true` is returned. In that
+    /// case, after calling this function, `next` should eventually be
+    /// called again to obtain the new state of the iterator.
+    ///
+    /// If the iterator is finished, it is not currently waiting for a
+    /// result from `peer`, or a result for `peer` has already been reported,
+    /// calling this function has no effect and `false` is returned.
+    pub fn on_failure(&mut self, peer: &PeerId) -> bool {
+        if let State::Waiting { num_waiting } = &mut self.state {
             if let Some(state @ PeerState::Waiting) = self.peers.get_mut(peer) {
                 *state = PeerState::Failed;
+                *num_waiting -= 1;
+                return true
             }
         }
+        false
     }

     pub fn is_waiting(&self, peer: &PeerId) -> bool {
@ -93,6 +124,11 @@ impl FixedPeersIter {
         }
     }

+    /// Checks whether the iterator has finished.
+    pub fn is_finished(&self) -> bool {
+        self.state == State::Finished
+    }
+
     pub fn next(&mut self) -> PeersIterState {
         match &mut self.state {
             State::Finished => return PeersIterState::Finished,
@ -133,3 +169,30 @@ impl FixedPeersIter {
         }
     }
 }
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn decrease_num_waiting_on_failure() {
+        let mut iter = FixedPeersIter::new(vec![PeerId::random(), PeerId::random()], 1);
+
+        match iter.next() {
+            PeersIterState::Waiting(Some(peer)) => {
+                let peer = peer.into_owned();
+                iter.on_failure(&peer);
+            },
+            _ => panic!("Expected iterator to yield peer."),
+        }
+
+        match iter.next() {
+            PeersIterState::Waiting(Some(_)) => {},
+            PeersIterState::WaitingAtCapacity => panic!(
+                "Expected iterator to return another peer given that the \
+                previous `on_failure` call should have allowed another peer \
+                to be queried.",
+            ),
+            _ => panic!("Expected iterator to yield peer."),
+        }
+    }
+}
@ -70,7 +70,7 @@ impl From<Multihash> for Key {
 }

 /// A record stored in the DHT.
-#[derive(Clone, Debug, Eq, PartialEq, Hash)]
+#[derive(Clone, Debug, Eq, PartialEq)]
 pub struct Record {
     /// Key of the record.
     pub key: Key,
@ -1,7 +1,7 @@
 [package]
 name = "libp2p-mdns"
 edition = "2018"
-version = "0.18.0"
+version = "0.19.0"
 description = "Implementation of the libp2p mDNS discovery method"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
@ -16,8 +16,8 @@ dns-parser = "0.8"
 either = "1.5.3"
 futures = "0.3.1"
 lazy_static = "1.2"
-libp2p-core = { version = "0.18.0", path = "../../core" }
+libp2p-core = { version = "0.19.0", path = "../../core" }
-libp2p-swarm = { version = "0.18.0", path = "../../swarm" }
+libp2p-swarm = { version = "0.19.0", path = "../../swarm" }
 log = "0.4"
 net2 = "0.2"
 rand = "0.7"
@ -1,7 +1,7 @@
 [package]
 name = "libp2p-noise"
 description = "Cryptographic handshake protocol using the noise framework."
-version = "0.18.0"
+version = "0.19.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@ -11,7 +11,7 @@ edition = "2018"
 curve25519-dalek = "2.0.0"
 futures = "0.3.1"
 lazy_static = "1.2"
-libp2p-core = { version = "0.18.0", path = "../../core" }
+libp2p-core = { version = "0.19.0", path = "../../core" }
 log = "0.4"
 prost = "0.6.1"
 rand = "0.7.2"
@ -28,7 +28,7 @@ snow = { version = "0.6.1", features = ["default-resolver"], default-features =

 [dev-dependencies]
 env_logger = "0.7.1"
-libp2p-tcp = { version = "0.18.0", path = "../../transports/tcp" }
+libp2p-tcp = { version = "0.19.0", path = "../../transports/tcp" }
 quickcheck = "0.9.0"
 sodiumoxide = "^0.2.5"
@ -365,10 +365,10 @@ where

let mut payload_buf = vec![0; len];
state.io.read_exact(&mut payload_buf).await?;
let pb = payload_proto::Identity::decode(&payload_buf[..])?;
let pb = payload_proto::NoiseHandshakePayload::decode(&payload_buf[..])?;

if !pb.pubkey.is_empty() {
if !pb.identity_key.is_empty() {
let pk = identity::PublicKey::from_protobuf_encoding(&pb.pubkey)
let pk = identity::PublicKey::from_protobuf_encoding(&pb.identity_key)
.map_err(|_| NoiseError::InvalidKey)?;
if let Some(ref k) = state.id_remote_pubkey {
if k != &pk {
@ -377,8 +377,8 @@ where
}
state.id_remote_pubkey = Some(pk);
}
if !pb.signature.is_empty() {
if !pb.identity_sig.is_empty() {
state.dh_remote_pubkey_sig = Some(pb.signature);
state.dh_remote_pubkey_sig = Some(pb.identity_sig);
}

Ok(())
@ -389,12 +389,12 @@ async fn send_identity<T>(state: &mut State<T>) -> Result<(), NoiseError>
where
T: AsyncWrite + Unpin,
{
let mut pb = payload_proto::Identity::default();
let mut pb = payload_proto::NoiseHandshakePayload::default();
if state.send_identity {
pb.pubkey = state.identity.public.clone().into_protobuf_encoding()
pb.identity_key = state.identity.public.clone().into_protobuf_encoding()
}
if let Some(ref sig) = state.identity.signature {
pb.signature = sig.clone()
pb.identity_sig = sig.clone()
}
let mut buf = Vec::with_capacity(pb.encoded_len());
pb.encode(&mut buf).expect("Vec<u8> provides capacity as needed");
@ -4,8 +4,8 @@ package payload.proto;

// Payloads for Noise handshake messages.

message Identity {
message NoiseHandshakePayload {
bytes pubkey = 1;
bytes identity_key = 1;
bytes signature = 2;
bytes identity_sig = 2;
bytes data = 3;
}
@ -27,6 +27,9 @@
//! implementations for various noise handshake patterns (currently `IK`, `IX`, and `XX`)
//! over a particular choice of Diffie–Hellman key agreement (currently only X25519).
//!
//! > **Note**: Only the `XX` handshake pattern is currently guaranteed to provide
//! > interoperability with other libp2p implementations.
//!
//! All upgrades produce as output a pair, consisting of the remote's static public key
//! and a `NoiseOutput` which represents the established cryptographic session with the
//! remote, implementing `futures::io::AsyncRead` and `futures::io::AsyncWrite`.
@ -38,11 +41,11 @@
//! ```
//! use libp2p_core::{identity, Transport, upgrade};
//! use libp2p_tcp::TcpConfig;
//! use libp2p_noise::{Keypair, X25519, NoiseConfig};
//! use libp2p_noise::{Keypair, X25519Spec, NoiseConfig};
//!
//! # fn main() {
//! let id_keys = identity::Keypair::generate_ed25519();
//! let dh_keys = Keypair::<X25519>::new().into_authentic(&id_keys).unwrap();
//! let dh_keys = Keypair::<X25519Spec>::new().into_authentic(&id_keys).unwrap();
//! let noise = NoiseConfig::xx(dh_keys).into_authenticated();
//! let builder = TcpConfig::new().upgrade(upgrade::Version::V1).authenticate(noise);
//! // let transport = builder.multiplex(...);
@ -60,7 +63,8 @@ pub use io::NoiseOutput;
pub use io::handshake;
pub use io::handshake::{Handshake, RemoteIdentity, IdentityExchange};
pub use protocol::{Keypair, AuthenticKeypair, KeypairIdentity, PublicKey, SecretKey};
pub use protocol::{Protocol, ProtocolParams, x25519::X25519, IX, IK, XX};
pub use protocol::{Protocol, ProtocolParams, IX, IK, XX};
pub use protocol::{x25519::X25519, x25519_spec::X25519Spec};

use futures::prelude::*;
use libp2p_core::{identity, PeerId, UpgradeInfo, InboundUpgrade, OutboundUpgrade};
@ -21,6 +21,7 @@
//! Components of a Noise protocol.

pub mod x25519;
pub mod x25519_spec;

use crate::NoiseError;
use libp2p_core::identity;
@ -71,6 +72,7 @@ pub trait Protocol<C> {
///
/// The trivial case is when the keys are byte for byte identical.
#[allow(unused_variables)]
#[deprecated]
fn linked(id_pk: &identity::PublicKey, dh_pk: &PublicKey<C>) -> bool {
false
}
@ -87,6 +89,7 @@ pub trait Protocol<C> {
/// without a signature, otherwise a signature over the static DH public key
/// must be given and is verified with the public identity key, establishing
/// the authenticity of the static DH public key w.r.t. the public identity key.
#[allow(deprecated)]
fn verify(id_pk: &identity::PublicKey, dh_pk: &PublicKey<C>, sig: &Option<Vec<u8>>) -> bool
where
C: AsRef<[u8]>
@ -95,6 +98,13 @@ pub trait Protocol<C> {
||
sig.as_ref().map_or(false, |s| id_pk.verify(dh_pk.as_ref(), s))
}

fn sign(id_keys: &identity::Keypair, dh_pk: &PublicKey<C>) -> Result<Vec<u8>, NoiseError>
where
C: AsRef<[u8]>
{
Ok(id_keys.sign(dh_pk.as_ref())?)
}
}

/// DH keypair.
@ -151,9 +161,10 @@ impl<T: Zeroize> Keypair<T> {
/// is authentic w.r.t. the given identity keypair, by signing the DH public key.
pub fn into_authentic(self, id_keys: &identity::Keypair) -> Result<AuthenticKeypair<T>, NoiseError>
where
T: AsRef<[u8]>
T: AsRef<[u8]>,
T: Protocol<T>
{
let sig = id_keys.sign(self.public.as_ref())?;
let sig = T::sign(id_keys, &self.public)?;

let identity = KeypairIdentity {
public: id_keys.public(),
@ -18,7 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! Noise protocols based on X25519.
//! Legacy Noise protocols based on X25519.
//!
//! **Note**: This set of protocols is not interoperable with other
//! libp2p implementations.

use crate::{NoiseConfig, NoiseError, Protocol, ProtocolParams};
use curve25519_dalek::edwards::CompressedEdwardsY;
@ -92,7 +95,11 @@ impl<R> UpgradeInfo for NoiseConfig<IK, X25519, R> {
}
}

/// Noise protocols for X25519.
/// Legacy Noise protocol for X25519.
///
/// **Note**: This `Protocol` provides no configuration that
/// is interoperable with other libp2p implementations.
/// See [`crate::X25519Spec`] instead.
impl Protocol<X25519> for X25519 {
fn params_ik() -> ProtocolParams {
PARAMS_IK.clone()
150
protocols/noise/src/protocol/x25519_spec.rs
Normal file
@ -0,0 +1,150 @@
// Copyright 2019 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! [libp2p-noise-spec] compliant Noise protocols based on X25519.
//!
//! [libp2p-noise-spec]: https://github.com/libp2p/specs/tree/master/noise

use crate::{NoiseConfig, NoiseError, Protocol, ProtocolParams};
use libp2p_core::UpgradeInfo;
use libp2p_core::identity;
use rand::Rng;
use x25519_dalek::{X25519_BASEPOINT_BYTES, x25519};
use zeroize::Zeroize;

use super::{*, x25519::X25519};

/// Prefix of static key signatures for domain separation.
const STATIC_KEY_DOMAIN: &str = "noise-libp2p-static-key:";

/// A X25519 key.
#[derive(Clone)]
pub struct X25519Spec([u8; 32]);

impl AsRef<[u8]> for X25519Spec {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}

impl Zeroize for X25519Spec {
fn zeroize(&mut self) {
self.0.zeroize()
}
}

impl Keypair<X25519Spec> {
/// Create a new X25519 keypair.
pub fn new() -> Keypair<X25519Spec> {
let mut sk_bytes = [0u8; 32];
rand::thread_rng().fill(&mut sk_bytes);
let sk = SecretKey(X25519Spec(sk_bytes)); // Copy
sk_bytes.zeroize();
Self::from(sk)
}
}

/// Promote a X25519 secret key into a keypair.
impl From<SecretKey<X25519Spec>> for Keypair<X25519Spec> {
fn from(secret: SecretKey<X25519Spec>) -> Keypair<X25519Spec> {
let public = PublicKey(X25519Spec(x25519((secret.0).0, X25519_BASEPOINT_BYTES)));
Keypair { secret, public }
}
}

impl UpgradeInfo for NoiseConfig<XX, X25519Spec> {
type Info = &'static [u8];
type InfoIter = std::iter::Once<Self::Info>;

fn protocol_info(&self) -> Self::InfoIter {
std::iter::once(b"/noise")
}
}

/// Noise protocols for X25519 with libp2p-spec compliant signatures.
///
/// **Note**: Only the XX handshake pattern is currently guaranteed to be
/// interoperable with other libp2p implementations.
impl Protocol<X25519Spec> for X25519Spec {
fn params_ik() -> ProtocolParams {
X25519::params_ik()
}

fn params_ix() -> ProtocolParams {
X25519::params_ix()
}

fn params_xx() -> ProtocolParams {
X25519::params_xx()
}

fn public_from_bytes(bytes: &[u8]) -> Result<PublicKey<X25519Spec>, NoiseError> {
if bytes.len() != 32 {
return Err(NoiseError::InvalidKey)
}
let mut pk = [0u8; 32];
pk.copy_from_slice(bytes);
Ok(PublicKey(X25519Spec(pk)))
}

fn verify(id_pk: &identity::PublicKey, dh_pk: &PublicKey<X25519Spec>, sig: &Option<Vec<u8>>) -> bool
{
sig.as_ref().map_or(false, |s| {
id_pk.verify(&[STATIC_KEY_DOMAIN.as_bytes(), dh_pk.as_ref()].concat(), s)
})
}

fn sign(id_keys: &identity::Keypair, dh_pk: &PublicKey<X25519Spec>) -> Result<Vec<u8>, NoiseError> {
Ok(id_keys.sign(&[STATIC_KEY_DOMAIN.as_bytes(), dh_pk.as_ref()].concat())?)
}
}

#[doc(hidden)]
impl snow::types::Dh for Keypair<X25519Spec> {
fn name(&self) -> &'static str { "25519" }
fn pub_len(&self) -> usize { 32 }
fn priv_len(&self) -> usize { 32 }
fn pubkey(&self) -> &[u8] { self.public.as_ref() }
fn privkey(&self) -> &[u8] { self.secret.as_ref() }

fn set(&mut self, sk: &[u8]) {
let mut secret = [0u8; 32];
secret.copy_from_slice(&sk[..]);
self.secret = SecretKey(X25519Spec(secret)); // Copy
self.public = PublicKey(X25519Spec(x25519(secret, X25519_BASEPOINT_BYTES)));
secret.zeroize();
}

fn generate(&mut self, rng: &mut dyn snow::types::Random) {
let mut secret = [0u8; 32];
rng.fill_bytes(&mut secret);
self.secret = SecretKey(X25519Spec(secret)); // Copy
self.public = PublicKey(X25519Spec(x25519(secret, X25519_BASEPOINT_BYTES)));
secret.zeroize();
}

fn dh(&self, pk: &[u8], shared_secret: &mut [u8]) -> Result<(), ()> {
let mut p = [0; 32];
p.copy_from_slice(&pk[.. 32]);
let ss = x25519((self.secret.0).0, p);
shared_secret[.. 32].copy_from_slice(&ss[..]);
Ok(())
}
}
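The spec-compliant protocol above signs the static DH key under a fixed domain-separation prefix instead of signing the raw key bytes. A minimal sketch of that signing and verification step, using only `libp2p_core::identity` keys (the `dh_public` array below is a placeholder, not a real X25519 key):

```rust
use libp2p_core::identity;

fn main() {
    // Domain-separation prefix used by the libp2p-noise-spec signatures.
    const STATIC_KEY_DOMAIN: &str = "noise-libp2p-static-key:";

    let id_keys = identity::Keypair::generate_ed25519();
    let dh_public: [u8; 32] = [0u8; 32]; // placeholder for a static X25519 public key

    // Sign "noise-libp2p-static-key:" || dh_public with the identity key ...
    let msg = [STATIC_KEY_DOMAIN.as_bytes(), dh_public.as_ref()].concat();
    let sig = id_keys.sign(&msg).expect("signing with an ed25519 key does not fail");

    // ... and check it against the identity public key, as `X25519Spec::verify` does.
    assert!(id_keys.public().verify(&msg, &sig));
}
```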
@ -22,7 +22,7 @@ use futures::{future::{self, Either}, prelude::*};
use libp2p_core::identity;
use libp2p_core::upgrade::{self, Negotiated, apply_inbound, apply_outbound};
use libp2p_core::transport::{Transport, ListenerEvent};
use libp2p_noise::{Keypair, X25519, NoiseConfig, RemoteIdentity, NoiseError, NoiseOutput};
use libp2p_noise::{Keypair, X25519, X25519Spec, NoiseConfig, RemoteIdentity, NoiseError, NoiseOutput};
use libp2p_tcp::{TcpConfig, TcpTransStream};
use log::info;
use quickcheck::QuickCheck;
@ -38,6 +38,37 @@ fn core_upgrade_compat() {
let _ = TcpConfig::new().upgrade(upgrade::Version::V1).authenticate(noise);
}

#[test]
fn xx_spec() {
let _ = env_logger::try_init();
fn prop(mut messages: Vec<Message>) -> bool {
messages.truncate(5);
let server_id = identity::Keypair::generate_ed25519();
let client_id = identity::Keypair::generate_ed25519();

let server_id_public = server_id.public();
let client_id_public = client_id.public();

let server_dh = Keypair::<X25519Spec>::new().into_authentic(&server_id).unwrap();
let server_transport = TcpConfig::new()
.and_then(move |output, endpoint| {
upgrade::apply(output, NoiseConfig::xx(server_dh), endpoint, upgrade::Version::V1)
})
.and_then(move |out, _| expect_identity(out, &client_id_public));

let client_dh = Keypair::<X25519Spec>::new().into_authentic(&client_id).unwrap();
let client_transport = TcpConfig::new()
.and_then(move |output, endpoint| {
upgrade::apply(output, NoiseConfig::xx(client_dh), endpoint, upgrade::Version::V1)
})
.and_then(move |out, _| expect_identity(out, &server_id_public));

run(server_transport, client_transport, messages);
true
}
QuickCheck::new().max_tests(30).quickcheck(prop as fn(Vec<Message>) -> bool)
}

#[test]
fn xx() {
let _ = env_logger::try_init();
@ -144,15 +175,15 @@ fn ik_xx() {
QuickCheck::new().max_tests(30).quickcheck(prop as fn(Vec<Message>) -> bool)
}

type Output = (RemoteIdentity<X25519>, NoiseOutput<Negotiated<TcpTransStream>>);
type Output<C> = (RemoteIdentity<C>, NoiseOutput<Negotiated<TcpTransStream>>);

fn run<T, U, I>(server_transport: T, client_transport: U, messages: I)
fn run<T, U, I, C>(server_transport: T, client_transport: U, messages: I)
where
T: Transport<Output = Output>,
T: Transport<Output = Output<C>>,
T::Dial: Send + 'static,
T::Listener: Send + Unpin + 'static,
T::ListenerUpgrade: Send + 'static,
U: Transport<Output = Output>,
U: Transport<Output = Output<C>>,
U::Dial: Send + 'static,
U::Listener: Send + 'static,
U::ListenerUpgrade: Send + 'static,
@ -218,8 +249,8 @@ where
})
}

fn expect_identity(output: Output, pk: &identity::PublicKey)
fn expect_identity<C>(output: Output<C>, pk: &identity::PublicKey)
-> impl Future<Output = Result<Output, NoiseError>>
-> impl Future<Output = Result<Output<C>, NoiseError>>
{
match output.0 {
RemoteIdentity::IdentityKey(ref k) if k == pk => future::ok(output),
@ -2,7 +2,7 @@
name = "libp2p-ping"
edition = "2018"
description = "Ping protocol for libp2p"
version = "0.18.0"
version = "0.19.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -11,8 +11,8 @@ categories = ["network-programming", "asynchronous"]

[dependencies]
futures = "0.3.1"
libp2p-core = { version = "0.18.0", path = "../../core" }
libp2p-core = { version = "0.19.0", path = "../../core" }
libp2p-swarm = { version = "0.18.0", path = "../../swarm" }
libp2p-swarm = { version = "0.19.0", path = "../../swarm" }
log = "0.4.1"
rand = "0.7.2"
void = "1.0"
@ -20,7 +20,7 @@ wasm-timer = "0.2"

[dev-dependencies]
async-std = "1.0"
libp2p-tcp = { version = "0.18.0", path = "../../transports/tcp" }
libp2p-tcp = { version = "0.19.0", path = "../../transports/tcp" }
libp2p-secio = { version = "0.18.0", path = "../../protocols/secio" }
libp2p-secio = { version = "0.19.0", path = "../../protocols/secio" }
libp2p-yamux = { version = "0.18.0", path = "../../muxers/yamux" }
libp2p-yamux = { version = "0.19.0", path = "../../muxers/yamux" }
quickcheck = "0.9.0"
@ -83,7 +83,7 @@ where
type Future = BoxFuture<'static, Result<Duration, io::Error>>;

fn upgrade_outbound(self, mut socket: TSocket, _: Self::Info) -> Self::Future {
let payload: [u8; 32] = thread_rng().sample(distributions::Standard);
let payload: [u8; PING_SIZE] = thread_rng().sample(distributions::Standard);
debug!("Preparing ping payload {:?}", payload);
async move {
socket.write_all(&payload).await?;
@ -2,7 +2,7 @@
name = "libp2p-plaintext"
edition = "2018"
description = "Plaintext encryption dummy protocol for libp2p"
version = "0.18.0"
version = "0.19.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"]
bytes = "0.5"
futures = "0.3.1"
futures_codec = "0.3.4"
libp2p-core = { version = "0.18.0", path = "../../core" }
libp2p-core = { version = "0.19.0", path = "../../core" }
log = "0.4.8"
prost = "0.6.1"
rw-stream-sink = "0.2.0"
@ -2,7 +2,7 @@
name = "libp2p-pnet"
edition = "2018"
description = "Private swarm support for libp2p"
version = "0.18.0"
version = "0.19.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -55,6 +55,11 @@ const FINGERPRINT_SIZE: usize = 16;
pub struct PreSharedKey([u8; KEY_SIZE]);

impl PreSharedKey {
/// Create a new pre shared key from raw bytes
pub fn new(data: [u8; KEY_SIZE]) -> Self {
Self(data)
}

/// Compute PreSharedKey fingerprint identical to the go-libp2p fingerprint.
/// The computation of the fingerprint is not specified in the spec.
///
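A short sketch of the new constructor; the 32 zero bytes stand in for a real swarm key obtained out of band, and the `fingerprint()` call refers to the method whose documentation appears above:

```rust
use libp2p_pnet::{PnetConfig, PreSharedKey};

fn main() {
    // Placeholder key material; a real deployment would load 32 secret bytes
    // from a swarm.key file or another out-of-band channel.
    let raw: [u8; 32] = [0u8; 32];
    let psk = PreSharedKey::new(raw);
    println!("fingerprint: {}", psk.fingerprint());
    // The key can then be fed into the private-network transport upgrade.
    let _pnet = PnetConfig::new(psk);
}
```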
@ -2,7 +2,7 @@
name = "libp2p-secio"
edition = "2018"
description = "Secio encryption protocol for libp2p"
version = "0.18.0"
version = "0.19.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -16,7 +16,7 @@ ctr = "0.3"
futures = "0.3.1"
hmac = "0.7.0"
lazy_static = "1.2.0"
libp2p-core = { version = "0.18.0", path = "../../core" }
libp2p-core = { version = "0.19.0", path = "../../core" }
log = "0.4.6"
prost = "0.6.1"
pin-project = "0.4.6"
@ -48,8 +48,8 @@ aes-all = ["aesni"]
[dev-dependencies]
async-std = "1.0"
criterion = "0.3"
libp2p-mplex = { version = "0.18.0", path = "../../muxers/mplex" }
libp2p-mplex = { version = "0.19.0", path = "../../muxers/mplex" }
libp2p-tcp = { version = "0.18.0", path = "../../transports/tcp" }
libp2p-tcp = { version = "0.19.0", path = "../../transports/tcp" }

[[bench]]
name = "bench"
30
src/lib.rs
@ -85,7 +85,7 @@
//! Example ([`secio`] + [`yamux`] Protocol Upgrade):
//!
//! ```rust
//! # #[cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp", feature = "secio", feature = "yamux"))] {
//! # #[cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "secio", feature = "yamux"))] {
//! use libp2p::{Transport, core::upgrade, tcp::TcpConfig, secio::SecioConfig, identity::Keypair, yamux};
//! let tcp = TcpConfig::new();
//! let secio = SecioConfig::new(Keypair::generate_ed25519());
@ -217,8 +217,8 @@ pub use libp2p_plaintext as plaintext;
pub use libp2p_secio as secio;
#[doc(inline)]
pub use libp2p_swarm as swarm;
#[cfg(feature = "tcp")]
#[cfg(any(feature = "tcp-async-std", feature = "tcp-tokio-std"))]
#[cfg_attr(docsrs, doc(cfg(feature = "tcp")))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "tcp-async-std", feature = "tcp-tokio-std"))))]
#[cfg(not(any(target_os = "emscripten", target_os = "unknown")))]
#[doc(inline)]
pub use libp2p_tcp as tcp;
@ -262,16 +262,14 @@ pub use self::simple::SimpleProtocol;
pub use self::swarm::Swarm;
pub use self::transport_ext::TransportExt;

use std::{error, io, time::Duration};

/// Builds a `Transport` that supports the most commonly-used protocols that libp2p supports.
///
/// > **Note**: This `Transport` is not suitable for production usage, as its implementation
/// > reserves the right to support additional protocols or remove deprecated protocols.
#[cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))]
#[cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))]
#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))))]
#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))))]
pub fn build_development_transport(keypair: identity::Keypair)
-> io::Result<impl Transport<Output = (PeerId, impl core::muxing::StreamMuxer<OutboundSubstream = impl Send, Substream = impl Send, Error = impl Into<io::Error>> + Send + Sync), Error = impl error::Error + Send, Listener = impl Send, Dial = impl Send, ListenerUpgrade = impl Send> + Clone>
-> std::io::Result<impl Transport<Output = (PeerId, impl core::muxing::StreamMuxer<OutboundSubstream = impl Send, Substream = impl Send, Error = impl Into<std::io::Error>> + Send + Sync), Error = impl std::error::Error + Send, Listener = impl Send, Dial = impl Send, ListenerUpgrade = impl Send> + Clone>
{
build_tcp_ws_secio_mplex_yamux(keypair)
}
@ -282,10 +280,10 @@ pub fn build_development_transport(keypair: identity::Keypair)
/// and mplex or yamux as the multiplexing layer.
///
/// > **Note**: If you ever need to express the type of this `Transport`.
#[cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))]
#[cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))]
#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))))]
#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))))]
pub fn build_tcp_ws_secio_mplex_yamux(keypair: identity::Keypair)
-> io::Result<impl Transport<Output = (PeerId, impl core::muxing::StreamMuxer<OutboundSubstream = impl Send, Substream = impl Send, Error = impl Into<io::Error>> + Send + Sync), Error = impl error::Error + Send, Listener = impl Send, Dial = impl Send, ListenerUpgrade = impl Send> + Clone>
-> std::io::Result<impl Transport<Output = (PeerId, impl core::muxing::StreamMuxer<OutboundSubstream = impl Send, Substream = impl Send, Error = impl Into<std::io::Error>> + Send + Sync), Error = impl std::error::Error + Send, Listener = impl Send, Dial = impl Send, ListenerUpgrade = impl Send> + Clone>
{
let transport = {
let tcp = tcp::TcpConfig::new().nodelay(true);
@ -299,7 +297,7 @@ pub fn build_tcp_ws_secio_mplex_yamux(keypair: identity::Keypair)
.authenticate(secio::SecioConfig::new(keypair))
.multiplex(core::upgrade::SelectUpgrade::new(yamux::Config::default(), mplex::MplexConfig::new()))
.map(|(peer, muxer), _| (peer, core::muxing::StreamMuxerBox::new(muxer)))
.timeout(Duration::from_secs(20)))
.timeout(std::time::Duration::from_secs(20)))
}

/// Builds an implementation of `Transport` that is suitable for usage with the `Swarm`.
@ -308,10 +306,10 @@ pub fn build_tcp_ws_secio_mplex_yamux(keypair: identity::Keypair)
/// and mplex or yamux as the multiplexing layer.
///
/// > **Note**: If you ever need to express the type of this `Transport`.
#[cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux", feature = "pnet"))]
#[cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux", feature = "pnet"))]
#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux", feature = "pnet"))))]
#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux", feature = "pnet"))))]
pub fn build_tcp_ws_pnet_secio_mplex_yamux(keypair: identity::Keypair, psk: PreSharedKey)
-> io::Result<impl Transport<Output = (PeerId, impl core::muxing::StreamMuxer<OutboundSubstream = impl Send, Substream = impl Send, Error = impl Into<io::Error>> + Send + Sync), Error = impl error::Error + Send, Listener = impl Send, Dial = impl Send, ListenerUpgrade = impl Send> + Clone>
-> std::io::Result<impl Transport<Output = (PeerId, impl core::muxing::StreamMuxer<OutboundSubstream = impl Send, Substream = impl Send, Error = impl Into<std::io::Error>> + Send + Sync), Error = impl std::error::Error + Send, Listener = impl Send, Dial = impl Send, ListenerUpgrade = impl Send> + Clone>
{
let transport = {
let tcp = tcp::TcpConfig::new().nodelay(true);
@ -326,5 +324,5 @@ pub fn build_tcp_ws_pnet_secio_mplex_yamux(keypair: identity::Keypair, psk: PreS
.authenticate(secio::SecioConfig::new(keypair))
.multiplex(core::upgrade::SelectUpgrade::new(yamux::Config::default(), mplex::MplexConfig::new()))
.map(|(peer, muxer), _| (peer, core::muxing::StreamMuxerBox::new(muxer)))
.timeout(Duration::from_secs(20)))
.timeout(std::time::Duration::from_secs(20)))
}
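For reference, a minimal sketch of calling the development transport helper shown above; it assumes the feature set that enables `tcp-async-std`, `websocket`, `secio`, `mplex` and `yamux`:

```rust
use libp2p::identity;

fn main() -> std::io::Result<()> {
    // A fresh identity keypair; a real node would typically load this from disk.
    let keys = identity::Keypair::generate_ed25519();
    // Build the TCP/WS + secio + mplex/yamux development transport.
    let _transport = libp2p::build_development_transport(keys)?;
    Ok(())
}
```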
@ -2,7 +2,7 @@
name = "libp2p-swarm"
edition = "2018"
description = "The libp2p swarm"
version = "0.18.1"
version = "0.19.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -11,7 +11,7 @@ categories = ["network-programming", "asynchronous"]

[dependencies]
futures = "0.3.1"
libp2p-core = { version = "0.18.0", path = "../core" }
libp2p-core = { version = "0.19.0", path = "../core" }
log = "0.4"
rand = "0.7"
smallvec = "1.0"
@ -19,6 +19,6 @@ wasm-timer = "0.2"
void = "1"

[dev-dependencies]
libp2p-mplex = { version = "0.18.0", path = "../muxers/mplex" }
libp2p-mplex = { version = "0.19.0", path = "../muxers/mplex" }
quickcheck = "0.9.0"
rand = "0.7.2"
@ -291,12 +291,10 @@ pub enum DialPeerCondition {
/// If there is an ongoing dialing attempt, the addresses reported by
/// [`NetworkBehaviour::addresses_of_peer`] are added to the ongoing
/// dialing attempt, ignoring duplicates.
///
/// This condition implies [`DialPeerCondition::Disconnected`].
NotDialing,
// TODO: Once multiple dialing attempts per peer are permitted.
/// A new dialing attempt is always initiated, only subject to the
// See https://github.com/libp2p/rust-libp2p/pull/1506.
/// configured connection limits.
// Always,
Always,
}

impl Default for DialPeerCondition {
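The new `Always` variant replaces the old TODO: a behaviour can now request a dial regardless of an existing attempt. A small illustrative sketch (the `force_redial` flag is hypothetical and not part of this diff):

```rust
use libp2p_swarm::DialPeerCondition;

// Pick a dial condition for a behaviour-initiated dial. `Always` starts a new
// attempt even while another one is in flight, bounded only by connection limits.
fn dial_condition(force_redial: bool) -> DialPeerCondition {
    if force_redial {
        DialPeerCondition::Always
    } else {
        DialPeerCondition::NotDialing
    }
}

fn main() {
    match dial_condition(true) {
        DialPeerCondition::Always => println!("redialing unconditionally"),
        _ => println!("dialing only if not already dialing"),
    }
}
```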
167
swarm/src/lib.rs
@ -115,7 +115,6 @@ use libp2p_core::{
NetworkInfo,
NetworkEvent,
NetworkConfig,
Peer,
peer::ConnectedPeer,
},
upgrade::ProtocolName,
@ -124,7 +123,7 @@ use registry::{Addresses, AddressIntoIter};
use smallvec::SmallVec;
use std::{error, fmt, hash::Hash, io, ops::{Deref, DerefMut}, pin::Pin, task::{Context, Poll}};
use std::collections::HashSet;
use std::num::NonZeroU32;
use std::num::{NonZeroU32, NonZeroUsize};
use upgrade::UpgradeInfoSend as _;

/// Contains the state of the network, plus the way it should behave.
@ -379,70 +378,31 @@ where TBehaviour: NetworkBehaviour<ProtocolsHandler = THandler>,
///
/// If a new dialing attempt has been initiated, `Ok(true)` is returned.
///
/// If there is an ongoing dialing attempt, the current addresses of the
/// peer, as reported by [`NetworkBehaviour::addresses_of_peer`] are added
/// to the ongoing dialing attempt, ignoring duplicates. In this case no
/// new dialing attempt is initiated.
///
/// If no new dialing attempt has been initiated, meaning there is an ongoing
/// dialing attempt or `addresses_of_peer` reports no addresses, `Ok(false)`
/// is returned.
pub fn dial(me: &mut Self, peer_id: &PeerId) -> Result<bool, ConnectionLimit> {
pub fn dial(me: &mut Self, peer_id: &PeerId) -> Result<(), DialError> {
let mut addrs = me.behaviour.addresses_of_peer(peer_id).into_iter();
match me.network.peer(peer_id.clone()) {
Peer::Disconnected(peer) => {
if let Some(first) = addrs.next() {
let handler = me.behaviour.new_handler().into_node_handler_builder();
match peer.connect(first, addrs, handler) {
Ok(_) => return Ok(true),
Err(error) => {
log::debug!(
"New dialing attempt to disconnected peer {:?} failed: {:?}.",
peer_id, error);
me.behaviour.inject_dial_failure(&peer_id);
return Err(error)
}
}
} else {
log::debug!(
"New dialing attempt to disconnected peer {:?} failed: no address.",
peer_id
);
me.behaviour.inject_dial_failure(&peer_id);
}
Ok(false)
},
Peer::Connected(peer) => {
if let Some(first) = addrs.next() {
let handler = me.behaviour.new_handler().into_node_handler_builder();
match peer.connect(first, addrs, handler) {
Ok(_) => return Ok(true),
Err(error) => {
log::debug!(
"New dialing attempt to connected peer {:?} failed: {:?}.",
peer_id, error);
me.behaviour.inject_dial_failure(&peer_id);
return Err(error)
}
}
} else {
log::debug!(
"New dialing attempt to disconnected peer {:?} failed: no address.",
peer_id
);
me.behaviour.inject_dial_failure(&peer_id);
}
Ok(false)
}
Peer::Dialing(mut peer) => {
peer.connection().add_addresses(addrs);
Ok(false)
},
Peer::Local => {
me.behaviour.inject_dial_failure(&peer_id);
Err(ConnectionLimit { current: 0, limit: 0 })
}
let peer = me.network.peer(peer_id.clone());

let result =
if let Some(first) = addrs.next() {
let handler = me.behaviour.new_handler().into_node_handler_builder();
peer.dial(first, addrs, handler)
.map(|_| ())
.map_err(DialError::ConnectionLimit)
} else {
Err(DialError::NoAddresses)
};

if let Err(error) = &result {
log::debug!(
"New dialing attempt to peer {:?} failed: {:?}.",
peer_id, error);
me.behaviour.inject_dial_failure(&peer_id);
}

result
}

/// Returns an iterator that produces the list of addresses we're listening on.
@ -721,18 +681,22 @@ where TBehaviour: NetworkBehaviour<ProtocolsHandler = THandler>,
if !this.network.is_dialing(&peer_id) => true,
_ => false
};

if condition_matched {
if let Ok(true) = ExpandedSwarm::dial(this, &peer_id) {
if ExpandedSwarm::dial(this, &peer_id).is_ok() {
return Poll::Ready(SwarmEvent::Dialing(peer_id));
return Poll::Ready(SwarmEvent::Dialing(peer_id))
}

} else {
// Even if the condition for a _new_ dialing attempt is not met,
// we always add any potentially new addresses of the peer to an
// ongoing dialing attempt, if there is one.
log::trace!("Condition for new dialing attempt to {:?} not met: {:?}",
peer_id, condition);
if let Some(mut peer) = this.network.peer(peer_id.clone()).into_dialing() {
let addrs = this.behaviour.addresses_of_peer(peer.id());
peer.connection().add_addresses(addrs);
let mut attempt = peer.some_attempt();
for addr in addrs {
attempt.add_address(addr);
}
}
}
}
@ -1038,6 +1002,48 @@ where TBehaviour: NetworkBehaviour,
self
}

/// Configures the number of events from the [`NetworkBehaviour`] in
/// destination to the [`ProtocolsHandler`] that can be buffered before
/// the [`Swarm`] has to wait. An individual buffer with this number of
/// events exists for each individual connection.
///
/// The ideal value depends on the executor used, the CPU speed, and the
/// volume of events. If this value is too low, then the [`Swarm`] will
/// be sleeping more often than necessary. Increasing this value increases
/// the overall memory usage.
pub fn notify_handler_buffer_size(mut self, n: NonZeroUsize) -> Self {
self.network_config.set_notify_handler_buffer_size(n);
self
}

/// Configures the number of extra events from the [`ProtocolsHandler`] in
/// destination to the [`NetworkBehaviour`] that can be buffered before
/// the [`ProtocolsHandler`] has to go to sleep.
///
/// There exists a buffer of events received from [`ProtocolsHandler`]s
/// that the [`NetworkBehaviour`] has yet to process. This buffer is
/// shared between all instances of [`ProtocolsHandler`]. Each instance of
/// [`ProtocolsHandler`] is guaranteed one slot in this buffer, meaning
/// that delivering an event for the first time is guaranteed to be
/// instantaneous. Any extra event delivery, however, must wait for that
/// first event to be delivered or for an "extra slot" to be available.
///
/// This option configures the number of such "extra slots" in this
/// shared buffer. These extra slots are assigned in a first-come,
/// first-served basis.
///
/// The ideal value depends on the executor used, the CPU speed, the
/// average number of connections, and the volume of events. If this value
/// is too low, then the [`ProtocolsHandler`]s will be sleeping more often
/// than necessary. Increasing this value increases the overall memory
/// usage, and more importantly the latency between the moment when an
/// event is emitted and the moment when it is received by the
/// [`NetworkBehaviour`].
pub fn connection_event_buffer_size(mut self, n: usize) -> Self {
self.network_config.set_connection_event_buffer_size(n);
self
}

/// Configures a limit for the number of simultaneous incoming
/// connection attempts.
pub fn incoming_connection_limit(mut self, n: usize) -> Self {
@ -1104,6 +1110,35 @@ where TBehaviour: NetworkBehaviour,
}
}

/// The possible failures of [`ExpandedSwarm::dial`].
#[derive(Debug)]
pub enum DialError {
/// The configured limit for simultaneous outgoing connections
/// has been reached.
ConnectionLimit(ConnectionLimit),
/// [`NetworkBehaviour::addresses_of_peer`] returned no addresses
/// for the peer to dial.
NoAddresses
}

impl fmt::Display for DialError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DialError::ConnectionLimit(err) => write!(f, "Dial error: {}", err),
DialError::NoAddresses => write!(f, "Dial error: no addresses for peer.")
}
}
}

impl error::Error for DialError {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match self {
DialError::ConnectionLimit(err) => Some(err),
DialError::NoAddresses => None
}
}
}
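A brief sketch of how a caller might interpret the new error type; the `report_dial_result` helper is illustrative only, with the result in practice coming from `Swarm::dial`:

```rust
use libp2p_core::PeerId;
use libp2p_swarm::DialError;

// Illustrative helper: log the outcome of a dialing attempt,
// e.g. `report_dial_result(&peer_id, Swarm::dial(&mut swarm, &peer_id))`.
fn report_dial_result(peer: &PeerId, result: Result<(), DialError>) {
    match result {
        Ok(()) => log::debug!("Dialing {:?}", peer),
        Err(DialError::NoAddresses) => log::warn!("No known addresses for {:?}", peer),
        Err(DialError::ConnectionLimit(limit)) => log::warn!("Outgoing connection limit reached: {}", limit),
    }
}
```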
/// Dummy implementation of [`NetworkBehaviour`] that doesn't do anything.
#[derive(Clone, Default)]
pub struct DummyBehaviour {
@ -2,7 +2,7 @@
name = "libp2p-dns"
edition = "2018"
description = "DNS transport implementation for libp2p"
version = "0.18.0"
version = "0.19.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -10,6 +10,6 @@ keywords = ["peer-to-peer", "libp2p", "networking"]
categories = ["network-programming", "asynchronous"]

[dependencies]
libp2p-core = { version = "0.18.0", path = "../../core" }
libp2p-core = { version = "0.19.0", path = "../../core" }
log = "0.4.1"
futures = "0.3.1"
@ -124,6 +124,7 @@ where
// As an optimization, we immediately pass through if no component of the address contain
// a DNS protocol.
let contains_dns = addr.iter().any(|cmp| match cmp {
Protocol::Dns(_) => true,
Protocol::Dns4(_) => true,
Protocol::Dns6(_) => true,
_ => false,
@ -139,7 +140,7 @@ where
trace!("Dialing address with DNS: {}", addr);
let resolve_futs = addr.iter()
.map(|cmp| match cmp {
Protocol::Dns4(ref name) | Protocol::Dns6(ref name) => {
Protocol::Dns(ref name) | Protocol::Dns4(ref name) | Protocol::Dns6(ref name) => {
let name = name.to_string();
let to_resolve = format!("{}:0", name);
let (tx, rx) = oneshot::channel();
@ -151,7 +152,12 @@ where
});
});

let is_dns4 = if let Protocol::Dns4(_) = cmp { true } else { false };
let (dns4, dns6) = match cmp {
Protocol::Dns(_) => (true, true),
Protocol::Dns4(_) => (true, false),
Protocol::Dns6(_) => (false, true),
_ => unreachable!(),
};

async move {
let list = rx.await
@ -166,7 +172,7 @@ where

list.into_iter()
.filter_map(|addr| {
if (is_dns4 && addr.is_ipv4()) || (!is_dns4 && addr.is_ipv6()) {
if (dns4 && addr.is_ipv4()) || (dns6 && addr.is_ipv6()) {
Some(Protocol::from(addr))
} else {
None
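A small sketch of what the new `/dns` component means for address filtering: a `/dns` name may yield both IPv4 and IPv6 results, while `/dns4` and `/dns6` stay restricted to one family. The address string is only an example:

```rust
use parity_multiaddr::{Multiaddr, Protocol};

fn main() {
    let addr: Multiaddr = "/dns/example.com/tcp/443".parse().expect("valid multiaddr");
    for proto in addr.iter() {
        // Mirror the (dns4, dns6) flags computed in the transport above.
        let (dns4, dns6) = match proto {
            Protocol::Dns(_) => (true, true),
            Protocol::Dns4(_) => (true, false),
            Protocol::Dns6(_) => (false, true),
            _ => continue,
        };
        println!("accept A records: {}, accept AAAA records: {}", dns4, dns6);
    }
}
```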
@ -2,7 +2,7 @@
name = "libp2p-tcp"
edition = "2018"
description = "TCP/IP transport protocol for libp2p"
version = "0.18.0"
version = "0.19.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -15,9 +15,7 @@ futures = "0.3.1"
futures-timer = "3.0"
get_if_addrs = "0.5.3"
ipnet = "2.0.0"
libp2p-core = { version = "0.18.0", path = "../../core" }
libp2p-core = { version = "0.19.0", path = "../../core" }
log = "0.4.1"
socket2 = "0.3.12"
tokio = { version = "0.2", default-features = false, features = ["tcp"], optional = true }

[features]
default = ["async-std"]
@@ -39,8 +39,10 @@ use libp2p_core::{
     transport::{ListenerEvent, TransportError}
 };
 use log::{debug, trace};
+use socket2::{Socket, Domain, Type};
 use std::{
     collections::VecDeque,
+    convert::TryFrom,
     io,
     iter::{self, FromIterator},
     net::{IpAddr, SocketAddr},
@@ -108,7 +110,22 @@ impl Transport for $tcp_config {
         async fn do_listen(cfg: $tcp_config, socket_addr: SocketAddr)
             -> Result<impl Stream<Item = Result<ListenerEvent<Ready<Result<$tcp_trans_stream, io::Error>>, io::Error>, io::Error>>, io::Error>
         {
-            let listener = <$tcp_listener>::bind(&socket_addr).await?;
+            let socket = if socket_addr.is_ipv4() {
+                Socket::new(Domain::ipv4(), Type::stream(), Some(socket2::Protocol::tcp()))?
+            } else {
+                let s = Socket::new(Domain::ipv6(), Type::stream(), Some(socket2::Protocol::tcp()))?;
+                s.set_only_v6(true)?;
+                s
+            };
+            if cfg!(target_family = "unix") {
+                socket.set_reuse_address(true)?;
+            }
+            socket.bind(&socket_addr.into())?;
+            socket.listen(1024)?; // we may want to make this configurable
+
+            let listener = <$tcp_listener>::try_from(socket.into_tcp_listener())
+                .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+
             let local_addr = listener.local_addr()?;
             let port = local_addr.port();

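Read in isolation, the new listener setup amounts to the following standalone sketch, assuming socket2 0.3. The helper `make_listener` is hypothetical and returns a blocking `std::net::TcpListener`, whereas the macro above converts the socket into the async listener type via `try_from`.

use socket2::{Domain, Protocol, Socket, Type};
use std::net::{SocketAddr, TcpListener};

// Build the socket manually so SO_REUSEADDR and IPV6_V6ONLY can be set
// before binding, then hand it over as an ordinary std listener.
fn make_listener(addr: SocketAddr) -> std::io::Result<TcpListener> {
    let socket = if addr.is_ipv4() {
        Socket::new(Domain::ipv4(), Type::stream(), Some(Protocol::tcp()))?
    } else {
        let s = Socket::new(Domain::ipv6(), Type::stream(), Some(Protocol::tcp()))?;
        s.set_only_v6(true)?; // keep the IPv4 and IPv6 listeners independent
        s
    };
    if cfg!(target_family = "unix") {
        socket.set_reuse_address(true)?; // allow rebinding right after a restart
    }
    socket.bind(&addr.into())?;
    socket.listen(1024)?;
    Ok(socket.into_tcp_listener())
}

fn main() -> std::io::Result<()> {
    let listener = make_listener("127.0.0.1:0".parse().unwrap())?;
    println!("listening on {}", listener.local_addr()?);
    Ok(())
}

Creating the socket by hand, rather than calling the async runtime's `bind`, is what makes it possible to set these options before the bind happens.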
@@ -485,42 +502,45 @@ mod tests {
     #[test]
     #[cfg(feature = "async-std")]
     fn wildcard_expansion() {
-        let mut listener = TcpConfig::new()
-            .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap())
-            .expect("listener");
+        fn test(addr: Multiaddr) {
+            let mut listener = TcpConfig::new().listen_on(addr).expect("listener");

             // Get the first address.
             let addr = futures::executor::block_on_stream(listener.by_ref())
                 .next()
                 .expect("some event")
                 .expect("no error")
                 .into_new_address()
                 .expect("listen address");

             // Process all initial `NewAddress` events and make sure they
             // do not contain wildcard address or port.
             let server = listener
                 .take_while(|event| match event.as_ref().unwrap() {
                     ListenerEvent::NewAddress(a) => {
                         let mut iter = a.iter();
                         match iter.next().expect("ip address") {
                             Protocol::Ip4(ip) => assert!(!ip.is_unspecified()),
                             Protocol::Ip6(ip) => assert!(!ip.is_unspecified()),
                             other => panic!("Unexpected protocol: {}", other)
+                        }
+                        if let Protocol::Tcp(port) = iter.next().expect("port") {
+                            assert_ne!(0, port)
+                        } else {
+                            panic!("No TCP port in address: {}", a)
+                        }
+                        futures::future::ready(true)
                     }
-                    if let Protocol::Tcp(port) = iter.next().expect("port") {
-                        assert_ne!(0, port)
-                    } else {
-                        panic!("No TCP port in address: {}", a)
-                    }
-                    futures::future::ready(true)
-                }
-                _ => futures::future::ready(false)
-            })
-            .for_each(|_| futures::future::ready(()));
+                    _ => futures::future::ready(false)
+                })
+                .for_each(|_| futures::future::ready(()));

             let client = TcpConfig::new().dial(addr).expect("dialer");
             async_std::task::block_on(futures::future::join(server, client)).1.unwrap();
+        }
+
+        test("/ip4/0.0.0.0/tcp/0".parse().unwrap());
+        test("/ip6/::1/tcp/0".parse().unwrap());
     }

     #[test]
@@ -575,43 +595,47 @@ mod tests {
     #[test]
     #[cfg(feature = "async-std")]
     fn communicating_between_dialer_and_listener() {
-        let (ready_tx, ready_rx) = futures::channel::oneshot::channel();
-        let mut ready_tx = Some(ready_tx);
+        fn test(addr: Multiaddr) {
+            let (ready_tx, ready_rx) = futures::channel::oneshot::channel();
+            let mut ready_tx = Some(ready_tx);

             async_std::task::spawn(async move {
-                let addr = "/ip4/127.0.0.1/tcp/0".parse::<Multiaddr>().unwrap();
                 let tcp = TcpConfig::new();
                 let mut listener = tcp.listen_on(addr).unwrap();

                 loop {
                     match listener.next().await.unwrap().unwrap() {
                         ListenerEvent::NewAddress(listen_addr) => {
                             ready_tx.take().unwrap().send(listen_addr).unwrap();
                         },
                         ListenerEvent::Upgrade { upgrade, .. } => {
                             let mut upgrade = upgrade.await.unwrap();
                             let mut buf = [0u8; 3];
                             upgrade.read_exact(&mut buf).await.unwrap();
                             assert_eq!(buf, [1, 2, 3]);
                             upgrade.write_all(&[4, 5, 6]).await.unwrap();
                         },
                         _ => unreachable!()
                     }
                 }
             });

             async_std::task::block_on(async move {
                 let addr = ready_rx.await.unwrap();
                 let tcp = TcpConfig::new();

                 // Obtain a future socket through dialing
                 let mut socket = tcp.dial(addr.clone()).unwrap().await.unwrap();
                 socket.write_all(&[0x1, 0x2, 0x3]).await.unwrap();

                 let mut buf = [0u8; 3];
                 socket.read_exact(&mut buf).await.unwrap();
                 assert_eq!(buf, [4, 5, 6]);
             });
+        }
+
+        test("/ip4/127.0.0.1/tcp/0".parse().unwrap());
+        test("/ip6/::1/tcp/0".parse().unwrap());
     }

     #[test]
@@ -2,7 +2,7 @@
 name = "libp2p-uds"
 edition = "2018"
 description = "Unix domain sockets transport for libp2p"
-version = "0.18.0"
+version = "0.19.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -11,7 +11,7 @@ categories = ["network-programming", "asynchronous"]

 [target.'cfg(all(unix, not(any(target_os = "emscripten", target_os = "unknown"))))'.dependencies]
 async-std = { version = "1.0", optional = true }
-libp2p-core = { version = "0.18.0", path = "../../core" }
+libp2p-core = { version = "0.19.0", path = "../../core" }
 log = "0.4.1"
 futures = "0.3.1"
 tokio = { version = "0.2", default-features = false, features = ["uds"], optional = true }
@@ -1,6 +1,6 @@
 [package]
 name = "libp2p-wasm-ext"
-version = "0.18.0"
+version = "0.19.0"
 authors = ["Pierre Krieger <pierre.krieger1708@gmail.com>"]
 edition = "2018"
 description = "Allows passing in an external transport in a WASM environment"
@@ -12,7 +12,7 @@ categories = ["network-programming", "asynchronous"]
 [dependencies]
 futures = "0.3.1"
 js-sys = "0.3.19"
-libp2p-core = { version = "0.18.0", path = "../../core" }
+libp2p-core = { version = "0.19.0", path = "../../core" }
 parity-send-wrapper = "0.1.0"
 wasm-bindgen = "0.2.42"
 wasm-bindgen-futures = "0.4.4"
@@ -2,7 +2,7 @@
 name = "libp2p-websocket"
 edition = "2018"
 description = "WebSocket transport for libp2p"
-version = "0.18.0"
+version = "0.19.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -14,7 +14,7 @@ async-tls = "0.7.0"
 bytes = "0.5"
 either = "1.5.3"
 futures = "0.3.1"
-libp2p-core = { version = "0.18.0", path = "../../core" }
+libp2p-core = { version = "0.19.0", path = "../../core" }
 log = "0.4.8"
 quicksink = "0.1"
 rustls = "0.17.0"
@@ -25,4 +25,4 @@ webpki = "0.21"
 webpki-roots = "0.18"

 [dev-dependencies]
-libp2p-tcp = { version = "0.18.0", path = "../tcp" }
+libp2p-tcp = { version = "0.19.0", path = "../tcp" }
@@ -353,6 +353,8 @@ fn host_and_dnsname<T>(addr: &Multiaddr) -> Result<(String, Option<webpki::DNSNa
             Ok((format!("{}:{}", ip, port), None)),
         (Some(Protocol::Ip6(ip)), Some(Protocol::Tcp(port))) =>
             Ok((format!("{}:{}", ip, port), None)),
+        (Some(Protocol::Dns(h)), Some(Protocol::Tcp(port))) =>
+            Ok((format!("{}:{}", &h, port), Some(tls::dns_name_ref(&h)?.to_owned()))),
         (Some(Protocol::Dns4(h)), Some(Protocol::Tcp(port))) =>
            Ok((format!("{}:{}", &h, port), Some(tls::dns_name_ref(&h)?.to_owned()))),
         (Some(Protocol::Dns6(h)), Some(Protocol::Tcp(port))) =>
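The extra match arm matters because only name-based components can produce a TLS server name; IP literals cannot. A small hedged check of that distinction, assuming webpki 0.21 as listed in the Cargo.toml above:

use webpki::DNSNameRef;

fn main() {
    // A `/dns/example.com/tcp/443/wss` style address yields a usable DNS name...
    assert!(DNSNameRef::try_from_ascii_str("example.com").is_ok());
    // ...whereas an IP literal is rejected, which is why the `/ip4` and `/ip6`
    // arms above return `None` for the name.
    assert!(DNSNameRef::try_from_ascii_str("127.0.0.1").is_err());
}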
@@ -371,7 +373,7 @@ fn location_to_multiaddr<T>(location: &str) -> Result<Multiaddr, Error<T>> {
     let mut a = Multiaddr::empty();
     match url.host() {
         Some(url::Host::Domain(h)) => {
-            a.push(Protocol::Dns4(h.into()))
+            a.push(Protocol::Dns(h.into()))
         }
         Some(url::Host::Ipv4(ip)) => {
             a.push(Protocol::Ip4(ip))
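A compact sketch of the URL-to-multiaddr direction shown above, assuming the `url` crate and a parity-multiaddr release with `/dns` support (0.9); the helper name `url_host_to_multiaddr` is invented for this example:

use parity_multiaddr::{Multiaddr, Protocol};
use url::Url;

// A domain host now becomes `/dns/...` (leaving the IPv4-vs-IPv6 choice to
// the dialer) instead of `/dns4/...`; IP hosts map directly to `/ip4`/`/ip6`.
fn url_host_to_multiaddr(location: &str) -> Option<Multiaddr> {
    let url = Url::parse(location).ok()?;
    let mut a = Multiaddr::empty();
    match url.host()? {
        url::Host::Domain(h) => a.push(Protocol::Dns(h.into())),
        url::Host::Ipv4(ip) => a.push(Protocol::Ip4(ip)),
        url::Host::Ipv6(ip) => a.push(Protocol::Ip6(ip)),
    }
    a.push(Protocol::Tcp(url.port_or_known_default()?));
    Some(a)
}

fn main() {
    // Prints a multiaddr starting with /dns/example.com/tcp/443
    println!("{}", url_host_to_multiaddr("https://example.com").unwrap());
}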