diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6a904536..f77c5f10 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,26 +11,26 @@ jobs: name: Build and test runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 - - name: Cache cargo registry - uses: actions/cache@v1 - with: - path: ~/.cargo/registry - key: cargo-registry-${{ hashFiles('Cargo.toml') }} - - name: Cache cargo index - uses: actions/cache@v1 - with: - path: ~/.cargo/git - key: cargo-index-${{ hashFiles('Cargo.toml') }} - - name: Cache cargo build - uses: actions/cache@v1 - with: - path: target - key: cargo-build-target-${{ hashFiles('Cargo.toml') }} - - name: Run tests, with no feature - run: cargo test --workspace --no-default-features - - name: Run tests, with all features - run: cargo test --workspace --all-features + - uses: actions/checkout@v1 + - name: Cache cargo registry + uses: actions/cache@v1 + with: + path: ~/.cargo/registry + key: cargo-registry-${{ hashFiles('Cargo.toml') }} + - name: Cache cargo index + uses: actions/cache@v1 + with: + path: ~/.cargo/git + key: cargo-index-${{ hashFiles('Cargo.toml') }} + - name: Cache cargo build + uses: actions/cache@v1 + with: + path: target + key: cargo-build-target-${{ hashFiles('Cargo.toml') }} + - name: Run tests, with no feature + run: cargo test --workspace --no-default-features + - name: Run tests, with all features + run: cargo test --workspace --all-features test-wasm: name: Build on WASM @@ -40,40 +40,40 @@ jobs: env: CC: clang-10 steps: - - uses: actions/checkout@v1 - - name: Install Rust - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - target: wasm32-unknown-unknown - override: true - - name: Install a recent version of clang - run: | - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - - echo "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-10 main" >> /etc/apt/sources.list - apt-get update - apt-get install -y clang-10 - - name: Install CMake - 
run: apt-get install -y cmake - - name: Cache cargo registry - uses: actions/cache@v1 - with: - path: ~/.cargo/registry - key: wasm-cargo-registry-${{ hashFiles('Cargo.toml') }} - - name: Cache cargo index - uses: actions/cache@v1 - with: - path: ~/.cargo/git - key: wasm-cargo-index-${{ hashFiles('Cargo.toml') }} - - name: Cache cargo build - uses: actions/cache@v1 - with: - path: target - key: wasm-cargo-build-target-${{ hashFiles('Cargo.toml') }} - - name: Build on WASM - # TODO: also run `cargo test` - # TODO: ideally we would build `--workspace`, but not all crates compile for WASM - run: cargo build --target=wasm32-unknown-unknown + - uses: actions/checkout@v1 + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: wasm32-unknown-unknown + override: true + - name: Install a recent version of clang + run: | + wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - + echo "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-10 main" >> /etc/apt/sources.list + apt-get update + apt-get install -y clang-10 + - name: Install CMake + run: apt-get install -y cmake + - name: Cache cargo registry + uses: actions/cache@v1 + with: + path: ~/.cargo/registry + key: wasm-cargo-registry-${{ hashFiles('Cargo.toml') }} + - name: Cache cargo index + uses: actions/cache@v1 + with: + path: ~/.cargo/git + key: wasm-cargo-index-${{ hashFiles('Cargo.toml') }} + - name: Cache cargo build + uses: actions/cache@v1 + with: + path: target + key: wasm-cargo-build-target-${{ hashFiles('Cargo.toml') }} + - name: Build on WASM + # TODO: also run `cargo test` + # TODO: ideally we would build `--workspace`, but not all crates compile for WASM + run: cargo build --target=wasm32-unknown-unknown check-rustdoc-links: name: Check rustdoc intra-doc links @@ -81,13 +81,13 @@ jobs: container: image: rust steps: - - uses: actions/checkout@v1 - - name: Install nightly Rust - # TODO: intra-doc links are available on nightly only - # see 
https://doc.rust-lang.org/nightly/rustdoc/lints.html#intra_doc_link_resolution_failure - run: rustup default nightly-2020-05-20 - - name: Check rustdoc links - run: RUSTDOCFLAGS="--deny intra_doc_link_resolution_failure" cargo doc --verbose --workspace --no-deps --document-private-items + - uses: actions/checkout@v1 + - name: Install nightly Rust + # TODO: intra-doc links are available on nightly only + # see https://doc.rust-lang.org/nightly/rustdoc/lints.html#intra_doc_link_resolution_failure + run: rustup default nightly-2020-05-20 + - name: Check rustdoc links + run: RUSTDOCFLAGS="--deny intra_doc_link_resolution_failure" cargo doc --verbose --workspace --no-deps --document-private-items integration-test: name: Integration tests @@ -95,21 +95,21 @@ jobs: container: image: rust steps: - - uses: actions/checkout@v1 - - name: Cache cargo registry - uses: actions/cache@v1 - with: - path: ~/.cargo/registry - key: cargo-registry-${{ hashFiles('Cargo.toml') }} - - name: Cache cargo index - uses: actions/cache@v1 - with: - path: ~/.cargo/git - key: cargo-index-${{ hashFiles('Cargo.toml') }} - - name: Cache cargo build - uses: actions/cache@v1 - with: - path: target - key: cargo-build-target-${{ hashFiles('Cargo.toml') }} - - name: Run ipfs-kad example - run: RUST_LOG=libp2p_swarm=debug,libp2p_kad=trace,libp2p_tcp=debug cargo run --example ipfs-kad + - uses: actions/checkout@v1 + - name: Cache cargo registry + uses: actions/cache@v1 + with: + path: ~/.cargo/registry + key: cargo-registry-${{ hashFiles('Cargo.toml') }} + - name: Cache cargo index + uses: actions/cache@v1 + with: + path: ~/.cargo/git + key: cargo-index-${{ hashFiles('Cargo.toml') }} + - name: Cache cargo build + uses: actions/cache@v1 + with: + path: target + key: cargo-build-target-${{ hashFiles('Cargo.toml') }} + - name: Run ipfs-kad example + run: RUST_LOG=libp2p_swarm=debug,libp2p_kad=trace,libp2p_tcp=debug cargo run --example ipfs-kad diff --git a/CHANGELOG.md b/CHANGELOG.md index f2e73fe1..2d3e9f51 
100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,32 @@ -# Version ??? +- [`libp2p-core` CHANGELOG](core/CHANGELOG.md) +- [`libp2p-deflate` CHANGELOG](protocols/deflate/CHANGELOG.md) +- [`libp2p-floodsub` CHANGELOG](protocols/floodsub/CHANGELOG.md) +- [`libp2p-gossipsub` CHANGELOG](protocols/gossipsub/CHANGELOG.md) +- [`libp2p-identify` CHANGELOG](protocols/identify/CHANGELOG.md) +- [`libp2p-kad` CHANGELOG](protocols/kad/CHANGELOG.md) +- [`libp2p-mdns` CHANGELOG](protocols/mdns/CHANGELOG.md) +- [`libp2p-mplex` CHANGELOG](muxers/mplex/CHANGELOG.md) +- [`libp2p-noise` CHANGELOG](protocols/noise/CHANGELOG.md) +- [`libp2p-ping` CHANGELOG](protocols/ping/CHANGELOG.md) +- [`libp2p-plaintext` CHANGELOG](protocols/plaintext/CHANGELOG.md) +- [`libp2p-pnet` CHANGELOG](protocols/pnet/CHANGELOG.md) +- [`libp2p-request-response` CHANGELOG](protocols/request-response/CHANGELOG.md) +- [`libp2p-secio` CHANGELOG](protocols/secio/CHANGELOG.md) +- [`libp2p-swarm` CHANGELOG](swarm/CHANGELOG.md) +- [`libp2p-tcp` CHANGELOG](transports/tcp/CHANGELOG.md) +- [`libp2p-uds` CHANGELOG](transports/uds/CHANGELOG.md) +- [`libp2p-websocket` CHANGELOG](transports/websocket/CHANGELOG.md) +- [`libp2p-yamux` CHANGELOG](muxers/yamux/CHANGELOG.md) +- [`multistream-select` CHANGELOG](misc/multistream-select/CHANGELOG.md) +- [`parity-multiaddr` CHANGELOG](misc/multiaddr/CHANGELOG.md) +# Version 0.19.1 (2020-05-25) + +- Temporarily pin all `async-std` dependencies to `< 1.6`. 
+ [PR 1589](https://github.com/libp2p/rust-libp2p/pull/1589) + +- `libp2p-core-derive`: Fully qualified std::result::Result in macro + [PR 1587](https://github.com/libp2p/rust-libp2p/pull/1587) # Version 0.19.0 (2020-05-18) diff --git a/Cargo.toml b/Cargo.toml index f5e6abf4..230f095f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p" edition = "2018" description = "Peer-to-peer networking library" -version = "0.19.0" +version = "0.20.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -23,6 +23,7 @@ default = [ "ping", "plaintext", "pnet", + "request-response", "secio", "secp256k1", "tcp-async-std", @@ -43,6 +44,7 @@ noise = ["libp2p-noise"] ping = ["libp2p-ping"] plaintext = ["libp2p-plaintext"] pnet = ["libp2p-pnet"] +request-response = ["libp2p-request-response"] secio = ["libp2p-secio"] tcp-async-std = ["libp2p-tcp", "libp2p-tcp/async-std"] tcp-tokio = ["libp2p-tcp", "libp2p-tcp/tokio"] @@ -52,42 +54,46 @@ websocket = ["libp2p-websocket"] yamux = ["libp2p-yamux"] secp256k1 = ["libp2p-core/secp256k1", "libp2p-secio/secp256k1"] +[package.metadata.docs.rs] +all-features = true + [dependencies] bytes = "0.5" futures = "0.3.1" -multiaddr = { package = "parity-multiaddr", version = "0.9.0", path = "misc/multiaddr" } -multihash = "0.11.0" lazy_static = "1.2" -libp2p-mplex = { version = "0.19.0", path = "muxers/mplex", optional = true } -libp2p-identify = { version = "0.19.0", path = "protocols/identify", optional = true } -libp2p-kad = { version = "0.19.0", path = "protocols/kad", optional = true } -libp2p-floodsub = { version = "0.19.0", path = "protocols/floodsub", optional = true } -libp2p-gossipsub = { version = "0.19.0", path = "./protocols/gossipsub", optional = true } -libp2p-ping = { version = "0.19.0", path = "protocols/ping", optional = true } -libp2p-plaintext = { version = "0.19.0", path = "protocols/plaintext", optional = true } -libp2p-pnet = { version = "0.19.0", path = 
"protocols/pnet", optional = true } -libp2p-core = { version = "0.19.0", path = "core" } -libp2p-core-derive = { version = "0.19.0", path = "misc/core-derive" } -libp2p-secio = { version = "0.19.0", path = "protocols/secio", default-features = false, optional = true } -libp2p-swarm = { version = "0.19.0", path = "swarm" } -libp2p-uds = { version = "0.19.0", path = "transports/uds", optional = true } +libp2p-core = { version = "0.19.2", path = "core" } +libp2p-core-derive = { version = "0.19.1", path = "misc/core-derive" } +libp2p-floodsub = { version = "0.19.1", path = "protocols/floodsub", optional = true } +libp2p-gossipsub = { version = "0.19.3", path = "./protocols/gossipsub", optional = true } +libp2p-identify = { version = "0.19.2", path = "protocols/identify", optional = true } +libp2p-kad = { version = "0.20.1", path = "protocols/kad", optional = true } +libp2p-mplex = { version = "0.19.2", path = "muxers/mplex", optional = true } +libp2p-noise = { version = "0.19.1", path = "protocols/noise", optional = true } +libp2p-ping = { version = "0.19.3", path = "protocols/ping", optional = true } +libp2p-plaintext = { version = "0.19.1", path = "protocols/plaintext", optional = true } +libp2p-pnet = { version = "0.19.1", path = "protocols/pnet", optional = true } +libp2p-request-response = { version = "0.1.0", path = "protocols/request-response", optional = true } +libp2p-secio = { version = "0.19.2", path = "protocols/secio", default-features = false, optional = true } +libp2p-swarm = { version = "0.19.1", path = "swarm" } +libp2p-uds = { version = "0.19.2", path = "transports/uds", optional = true } libp2p-wasm-ext = { version = "0.19.0", path = "transports/wasm-ext", optional = true } -libp2p-yamux = { version = "0.19.0", path = "muxers/yamux", optional = true } -libp2p-noise = { version = "0.19.0", path = "protocols/noise", optional = true } +libp2p-yamux = { version = "0.19.1", path = "muxers/yamux", optional = true } +multiaddr = { package = 
"parity-multiaddr", version = "0.9.1", path = "misc/multiaddr" } +multihash = "0.11.0" parking_lot = "0.10.0" -pin-project = "0.4.6" +pin-project = "0.4.17" smallvec = "1.0" wasm-timer = "0.2.4" -[target.'cfg(not(any(target_os = "emscripten", target_os = "unknown")))'.dependencies] -libp2p-deflate = { version = "0.19.0", path = "protocols/deflate", optional = true } +[target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies] +libp2p-deflate = { version = "0.19.2", path = "protocols/deflate", optional = true } libp2p-dns = { version = "0.19.0", path = "transports/dns", optional = true } -libp2p-mdns = { version = "0.19.0", path = "protocols/mdns", optional = true } -libp2p-tcp = { version = "0.19.0", path = "transports/tcp", optional = true } -libp2p-websocket = { version = "0.19.0", path = "transports/websocket", optional = true } +libp2p-mdns = { version = "0.19.2", path = "protocols/mdns", optional = true } +libp2p-tcp = { version = "0.19.2", path = "transports/tcp", optional = true } +libp2p-websocket = { version = "0.20.0", path = "transports/websocket", optional = true } [dev-dependencies] -async-std = "~1.5.0" +async-std = "1.6.2" env_logger = "0.7.1" [workspace] @@ -107,6 +113,7 @@ members = [ "protocols/noise", "protocols/ping", "protocols/plaintext", + "protocols/request-response", "protocols/secio", "swarm", "transports/dns", diff --git a/README.md b/README.md index e3bb8519..3e84e060 100644 --- a/README.md +++ b/README.md @@ -32,3 +32,5 @@ Where to ask questions? - https://github.com/sigp/lighthouse - https://github.com/golemfactory/golem-libp2p - https://github.com/comit-network/comit-rs +- https://github.com/rs-ipfs/rust-ipfs +- https://github.com/marcopoloprotocol/marcopolo diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md new file mode 100644 index 00000000..48d04a6f --- /dev/null +++ b/core/CHANGELOG.md @@ -0,0 +1,18 @@ +# 0.20.0 [????-??-??] 
+ +- Rename `StreamMuxer::poll_inbound` to `poll_event` and change the +return value to `StreamMuxerEvent`. This new `StreamMuxerEvent` makes +it possible for the multiplexing layer to notify the upper layers of +a change in the address of the underlying connection. + +- Add `ConnectionHandler::inject_address_change`. + +# 0.19.2 [2020-06-22] + +- Add PartialOrd and Ord for PeerId + ([PR 1594](https://github.com/libp2p/rust-libp2p/pull/1594)). + +- Updated dependencies. + +- Deprecate `StreamMuxer::is_remote_acknowledged` + ([PR 1616](https://github.com/libp2p/rust-libp2p/pull/1616)). diff --git a/core/Cargo.toml b/core/Cargo.toml index 3b11a7fc..82704f0b 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-core" edition = "2018" description = "Core traits and structs of libp2p" -version = "0.19.0" +version = "0.19.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -20,30 +20,30 @@ futures-timer = "3" lazy_static = "1.2" libsecp256k1 = { version = "0.3.1", optional = true } log = "0.4" -multiaddr = { package = "parity-multiaddr", version = "0.9.0", path = "../misc/multiaddr" } +multiaddr = { package = "parity-multiaddr", version = "0.9.1", path = "../misc/multiaddr" } multihash = "0.11.0" -multistream-select = { version = "0.8.0", path = "../misc/multistream-select" } +multistream-select = { version = "0.8.2", path = "../misc/multistream-select" } parking_lot = "0.10.0" -pin-project = "0.4.6" +pin-project = "0.4.17" prost = "0.6.1" rand = "0.7" rw-stream-sink = "0.2.0" sha2 = "0.8.0" smallvec = "1.0" thiserror = "1.0" -unsigned-varint = "0.3" +unsigned-varint = "0.4" void = "1" zeroize = "1" serde = { version = "1.0.114", default-features = false } -[target.'cfg(not(any(target_os = "emscripten", target_os = "unknown")))'.dependencies] +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] ring = { version = "0.16.9", features = ["alloc", "std"], default-features = false } 
[dev-dependencies] -async-std = "~1.5.0" -libp2p-mplex = { version = "0.19.0", path = "../muxers/mplex" } -libp2p-secio = { version = "0.19.0", path = "../protocols/secio" } -libp2p-tcp = { version = "0.19.0", path = "../transports/tcp" } +async-std = "1.6.2" +libp2p-mplex = { path = "../muxers/mplex" } +libp2p-secio = { path = "../protocols/secio" } +libp2p-tcp = { path = "../transports/tcp", features = ["async-std"] } quickcheck = "0.9.0" wasm-timer = "0.2" diff --git a/core/src/connection.rs b/core/src/connection.rs index 82747f60..d160aeb2 100644 --- a/core/src/connection.rs +++ b/core/src/connection.rs @@ -132,6 +132,16 @@ impl ConnectedPoint { ConnectedPoint::Listener { .. } => true } } + + /// Modifies the address of the remote stored in this struct. + /// + /// For `Dialer`, this modifies `address`. For `Listener`, this modifies `send_back_addr`. + pub fn set_remote_address(&mut self, new_address: Multiaddr) { + match self { + ConnectedPoint::Dialer { address } => *address = new_address, + ConnectedPoint::Listener { send_back_addr, .. } => *send_back_addr = new_address, + } + } } /// Information about a successfully established connection. @@ -169,6 +179,15 @@ impl ConnectionInfo for PeerId { } } +/// Event generated by a [`Connection`]. +#[derive(Debug, Clone)] +pub enum Event { + /// Event generated by the [`ConnectionHandler`]. + Handler(T), + /// Address of the remote has changed. + AddressChange(Multiaddr), +} + /// A multiplexed connection to a peer with an associated `ConnectionHandler`. pub struct Connection where @@ -230,14 +249,6 @@ where self.handler.inject_event(event); } - /// Returns `true` if the remote has shown any sign of activity - /// since the connection has been established. - /// - /// See also [`StreamMuxer::is_remote_acknowledged`]. 
- pub fn is_remote_acknowledged(&self) -> bool { - self.muxing.is_remote_acknowledged() - } - /// Begins an orderly shutdown of the connection, returning a /// `Future` that resolves when connection shutdown is complete. pub fn close(self) -> Close { @@ -247,7 +258,7 @@ where /// Polls the connection for events produced by the associated handler /// as a result of I/O activity on the substream multiplexer. pub fn poll(mut self: Pin<&mut Self>, cx: &mut Context) - -> Poll>> + -> Poll, ConnectionError>> { loop { let mut io_pending = false; @@ -263,6 +274,10 @@ where let endpoint = SubstreamEndpoint::Dialer(user_data); self.handler.inject_substream(substream, endpoint) } + Poll::Ready(Ok(SubstreamEvent::AddressChange(address))) => { + self.handler.inject_address_change(&address); + return Poll::Ready(Ok(Event::AddressChange(address))); + } Poll::Ready(Err(err)) => return Poll::Ready(Err(ConnectionError::IO(err))), } @@ -277,7 +292,7 @@ where self.muxing.open_substream(user_data); } Poll::Ready(Ok(ConnectionHandlerEvent::Custom(event))) => { - return Poll::Ready(Ok(event)); + return Poll::Ready(Ok(Event::Handler(event))); } Poll::Ready(Err(err)) => return Poll::Ready(Err(ConnectionError::Handler(err))), } diff --git a/core/src/connection/handler.rs b/core/src/connection/handler.rs index 0379ace1..07006f8c 100644 --- a/core/src/connection/handler.rs +++ b/core/src/connection/handler.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::PeerId; +use crate::{Multiaddr, PeerId}; use std::{task::Context, task::Poll}; use super::{Connected, SubstreamEndpoint}; @@ -58,6 +58,9 @@ pub trait ConnectionHandler { /// Notifies the handler of an event. fn inject_event(&mut self, event: Self::InEvent); + /// Notifies the handler of a change in the address of the remote. + fn inject_address_change(&mut self, new_address: &Multiaddr); + /// Polls the handler for events. 
/// /// Returning an error will close the connection to the remote. diff --git a/core/src/connection/manager.rs b/core/src/connection/manager.rs index 9dbb644a..30d7554a 100644 --- a/core/src/connection/manager.rs +++ b/core/src/connection/manager.rs @@ -32,11 +32,13 @@ use std::{ collections::hash_map, error, fmt, + mem, pin::Pin, task::{Context, Poll}, }; use super::{ Connected, + ConnectedPoint, Connection, ConnectionError, ConnectionHandler, @@ -220,7 +222,17 @@ pub enum Event<'a, I, O, H, TE, HE, C> { entry: EstablishedEntry<'a, I, C>, /// The produced event. event: O - } + }, + + /// A connection to a node has changed its address. + AddressChange { + /// The entry associated with the connection that changed address. + entry: EstablishedEntry<'a, I, C>, + /// The former [`ConnectedPoint`]. + old_endpoint: ConnectedPoint, + /// The new [`ConnectedPoint`]. + new_endpoint: ConnectedPoint, + }, } impl Manager { @@ -369,6 +381,23 @@ impl Manager { let _ = task.remove(); Event::PendingConnectionError { id, error, handler } } + task::Event::AddressChange { id: _, new_address } => { + let (new, old) = if let TaskState::Established(c) = &mut task.get_mut().state { + let mut new_endpoint = c.endpoint.clone(); + new_endpoint.set_remote_address(new_address); + let old_endpoint = mem::replace(&mut c.endpoint, new_endpoint.clone()); + (new_endpoint, old_endpoint) + } else { + unreachable!( + "`Event::AddressChange` implies (2) occurred on that task and thus (3)." + ) + }; + Event::AddressChange { + entry: EstablishedEntry { task }, + old_endpoint: old, + new_endpoint: new, + } + }, task::Event::Error { id, error } => { let id = ConnectionId(id); let task = task.remove(); diff --git a/core/src/connection/manager/task.rs b/core/src/connection/manager/task.rs index 4272722d..7ecd7145 100644 --- a/core/src/connection/manager/task.rs +++ b/core/src/connection/manager/task.rs @@ -19,8 +19,10 @@ // DEALINGS IN THE SOFTWARE. 
use crate::{ + Multiaddr, muxing::StreamMuxer, connection::{ + self, Close, Connected, Connection, @@ -55,8 +57,10 @@ pub enum Event { Error { id: TaskId, error: ConnectionError }, /// A pending connection failed. Failed { id: TaskId, error: PendingConnectionError, handler: H }, + /// A node we are connected to has changed its address. + AddressChange { id: TaskId, new_address: Multiaddr }, /// Notify the manager of an event from the connection. - Notify { id: TaskId, event: T } + Notify { id: TaskId, event: T }, } impl Event { @@ -64,8 +68,9 @@ impl Event { match self { Event::Established { id, .. } => id, Event::Error { id, .. } => id, - Event::Notify { id, .. } => id, Event::Failed { id, .. } => id, + Event::AddressChange { id, .. } => id, + Event::Notify { id, .. } => id, } } } @@ -245,13 +250,20 @@ where this.state = State::EstablishedPending(connection); return Poll::Pending } - Poll::Ready(Ok(event)) => { + Poll::Ready(Ok(connection::Event::Handler(event))) => { this.state = State::EstablishedReady { connection: Some(connection), event: Event::Notify { id, event } }; continue 'poll } + Poll::Ready(Ok(connection::Event::AddressChange(new_address))) => { + this.state = State::EstablishedReady { + connection: Some(connection), + event: Event::AddressChange { id, new_address } + }; + continue 'poll + } Poll::Ready(Err(error)) => { // Notify the manager of the error via an event, // dropping the connection. diff --git a/core/src/connection/pool.rs b/core/src/connection/pool.rs index b319ca76..c4a7b83d 100644 --- a/core/src/connection/pool.rs +++ b/core/src/connection/pool.rs @@ -125,6 +125,16 @@ pub enum PoolEvent<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TC /// The produced event. event: TOutEvent, }, + + /// The connection to a node has changed its address. + AddressChange { + /// The connection that has changed address. + connection: EstablishedConnection<'a, TInEvent, TConnInfo, TPeerId>, + /// The new endpoint. 
+ new_endpoint: ConnectedPoint, + /// The old endpoint. + old_endpoint: ConnectedPoint, + }, } impl<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr, TConnInfo, TPeerId> fmt::Debug @@ -162,6 +172,13 @@ where .field("event", event) .finish() }, + PoolEvent::AddressChange { ref connection, ref new_endpoint, ref old_endpoint } => { + f.debug_struct("PoolEvent::AddressChange") + .field("conn_info", connection.info()) + .field("new_endpoint", new_endpoint) + .field("old_endpoint", old_endpoint) + .finish() + }, } } } @@ -639,7 +656,27 @@ where }), _ => unreachable!("since `entry` is an `EstablishedEntry`.") } - } + }, + manager::Event::AddressChange { entry, new_endpoint, old_endpoint } => { + let id = entry.id(); + + match self.established.get_mut(entry.connected().peer_id()) { + Some(list) => *list.get_mut(&id) + .expect("state inconsistency: entry is `EstablishedEntry` but absent \ + from `established`") = new_endpoint.clone(), + None => unreachable!("since `entry` is an `EstablishedEntry`.") + }; + + match self.get(id) { + Some(PoolConnection::Established(connection)) => + return Poll::Ready(PoolEvent::AddressChange { + connection, + new_endpoint, + old_endpoint, + }), + _ => unreachable!("since `entry` is an `EstablishedEntry`.") + } + }, } } } diff --git a/core/src/connection/substream.rs b/core/src/connection/substream.rs index f496e43a..cbba375c 100644 --- a/core/src/connection/substream.rs +++ b/core/src/connection/substream.rs @@ -18,8 +18,9 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::muxing::{StreamMuxer, SubstreamRef, substream_from_ref}; +use crate::muxing::{StreamMuxer, StreamMuxerEvent, SubstreamRef, substream_from_ref}; use futures::prelude::*; +use multiaddr::Multiaddr; use smallvec::SmallVec; use std::sync::Arc; use std::{fmt, io::Error as IoError, pin::Pin, task::Context, task::Poll}; @@ -95,6 +96,12 @@ where /// destroyed or `close_graceful` is called. 
substream: Substream, }, + + /// Address to the remote has changed. The previous one is now obsolete. + /// + /// > **Note**: This can for example happen when using the QUIC protocol, where the two nodes + /// > can change their IP address while retaining the same QUIC connection. + AddressChange(Multiaddr), } /// Identifier for a substream being opened. @@ -123,13 +130,6 @@ where self.outbound_substreams.push((user_data, raw)); } - /// Returns `true` if the remote has shown any sign of activity after the muxer has been open. - /// - /// See `StreamMuxer::is_remote_acknowledged`. - pub fn is_remote_acknowledged(&self) -> bool { - self.inner.is_remote_acknowledged() - } - /// Destroys the node stream and returns all the pending outbound substreams, plus an object /// that signals the remote that we shut down the connection. #[must_use] @@ -152,13 +152,15 @@ where /// Provides an API similar to `Future`. pub fn poll(&mut self, cx: &mut Context) -> Poll, IoError>> { // Polling inbound substream. - match self.inner.poll_inbound(cx) { - Poll::Ready(Ok(substream)) => { + match self.inner.poll_event(cx) { + Poll::Ready(Ok(StreamMuxerEvent::InboundSubstream(substream))) => { let substream = substream_from_ref(self.inner.clone(), substream); return Poll::Ready(Ok(SubstreamEvent::InboundSubstream { substream, })); } + Poll::Ready(Ok(StreamMuxerEvent::AddressChange(addr))) => + return Poll::Ready(Ok(SubstreamEvent::AddressChange(addr))), Poll::Ready(Err(err)) => return Poll::Ready(Err(err.into())), Poll::Pending => {} } @@ -260,6 +262,11 @@ where .field("substream", substream) .finish() }, + SubstreamEvent::AddressChange(address) => { + f.debug_struct("SubstreamEvent::AddressChange") + .field("address", address) + .finish() + }, } } } diff --git a/core/src/either.rs b/core/src/either.rs index 1c3a09cc..3d97c192 100644 --- a/core/src/either.rs +++ b/core/src/either.rs @@ -19,13 +19,13 @@ // DEALINGS IN THE SOFTWARE. 
use crate::{ - muxing::StreamMuxer, + muxing::{StreamMuxer, StreamMuxerEvent}, ProtocolName, transport::{Transport, ListenerEvent, TransportError}, Multiaddr }; use futures::{prelude::*, io::{IoSlice, IoSliceMut}}; -use pin_project::{pin_project, project}; +use pin_project::pin_project; use std::{fmt, io::{Error as IoError}, pin::Pin, task::Context, task::Poll}; #[derive(Debug, Copy, Clone)] @@ -62,7 +62,7 @@ where /// Implements `AsyncRead` and `AsyncWrite` and dispatches all method calls to /// either `First` or `Second`. -#[pin_project] +#[pin_project(project = EitherOutputProj)] #[derive(Debug, Copy, Clone)] pub enum EitherOutput { First(#[pin] A), @@ -74,23 +74,19 @@ where A: AsyncRead, B: AsyncRead, { - #[project] fn poll_read(self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8]) -> Poll> { - #[project] match self.project() { - EitherOutput::First(a) => AsyncRead::poll_read(a, cx, buf), - EitherOutput::Second(b) => AsyncRead::poll_read(b, cx, buf), + EitherOutputProj::First(a) => AsyncRead::poll_read(a, cx, buf), + EitherOutputProj::Second(b) => AsyncRead::poll_read(b, cx, buf), } } - #[project] fn poll_read_vectored(self: Pin<&mut Self>, cx: &mut Context, bufs: &mut [IoSliceMut]) -> Poll> { - #[project] match self.project() { - EitherOutput::First(a) => AsyncRead::poll_read_vectored(a, cx, bufs), - EitherOutput::Second(b) => AsyncRead::poll_read_vectored(b, cx, bufs), + EitherOutputProj::First(a) => AsyncRead::poll_read_vectored(a, cx, bufs), + EitherOutputProj::Second(b) => AsyncRead::poll_read_vectored(b, cx, bufs), } } } @@ -100,41 +96,33 @@ where A: AsyncWrite, B: AsyncWrite, { - #[project] fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { - #[project] match self.project() { - EitherOutput::First(a) => AsyncWrite::poll_write(a, cx, buf), - EitherOutput::Second(b) => AsyncWrite::poll_write(b, cx, buf), + EitherOutputProj::First(a) => AsyncWrite::poll_write(a, cx, buf), + EitherOutputProj::Second(b) => AsyncWrite::poll_write(b, 
cx, buf), } } - #[project] fn poll_write_vectored(self: Pin<&mut Self>, cx: &mut Context, bufs: &[IoSlice]) -> Poll> { - #[project] match self.project() { - EitherOutput::First(a) => AsyncWrite::poll_write_vectored(a, cx, bufs), - EitherOutput::Second(b) => AsyncWrite::poll_write_vectored(b, cx, bufs), + EitherOutputProj::First(a) => AsyncWrite::poll_write_vectored(a, cx, bufs), + EitherOutputProj::Second(b) => AsyncWrite::poll_write_vectored(b, cx, bufs), } } - #[project] fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - #[project] match self.project() { - EitherOutput::First(a) => AsyncWrite::poll_flush(a, cx), - EitherOutput::Second(b) => AsyncWrite::poll_flush(b, cx), + EitherOutputProj::First(a) => AsyncWrite::poll_flush(a, cx), + EitherOutputProj::Second(b) => AsyncWrite::poll_flush(b, cx), } } - #[project] fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - #[project] match self.project() { - EitherOutput::First(a) => AsyncWrite::poll_close(a, cx), - EitherOutput::Second(b) => AsyncWrite::poll_close(b, cx), + EitherOutputProj::First(a) => AsyncWrite::poll_close(a, cx), + EitherOutputProj::Second(b) => AsyncWrite::poll_close(b, cx), } } } @@ -146,13 +134,11 @@ where { type Item = Result>; - #[project] fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - #[project] match self.project() { - EitherOutput::First(a) => TryStream::try_poll_next(a, cx) + EitherOutputProj::First(a) => TryStream::try_poll_next(a, cx) .map(|v| v.map(|r| r.map_err(EitherError::A))), - EitherOutput::Second(b) => TryStream::try_poll_next(b, cx) + EitherOutputProj::Second(b) => TryStream::try_poll_next(b, cx) .map(|v| v.map(|r| r.map_err(EitherError::B))), } } @@ -165,39 +151,31 @@ where { type Error = EitherError; - #[project] fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - #[project] match self.project() { - EitherOutput::First(a) => Sink::poll_ready(a, cx).map_err(EitherError::A), - EitherOutput::Second(b) => 
Sink::poll_ready(b, cx).map_err(EitherError::B), + EitherOutputProj::First(a) => Sink::poll_ready(a, cx).map_err(EitherError::A), + EitherOutputProj::Second(b) => Sink::poll_ready(b, cx).map_err(EitherError::B), } } - #[project] fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { - #[project] match self.project() { - EitherOutput::First(a) => Sink::start_send(a, item).map_err(EitherError::A), - EitherOutput::Second(b) => Sink::start_send(b, item).map_err(EitherError::B), + EitherOutputProj::First(a) => Sink::start_send(a, item).map_err(EitherError::A), + EitherOutputProj::Second(b) => Sink::start_send(b, item).map_err(EitherError::B), } } - #[project] fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - #[project] match self.project() { - EitherOutput::First(a) => Sink::poll_flush(a, cx).map_err(EitherError::A), - EitherOutput::Second(b) => Sink::poll_flush(b, cx).map_err(EitherError::B), + EitherOutputProj::First(a) => Sink::poll_flush(a, cx).map_err(EitherError::A), + EitherOutputProj::Second(b) => Sink::poll_flush(b, cx).map_err(EitherError::B), } } - #[project] fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - #[project] match self.project() { - EitherOutput::First(a) => Sink::poll_close(a, cx).map_err(EitherError::A), - EitherOutput::Second(b) => Sink::poll_close(b, cx).map_err(EitherError::B), + EitherOutputProj::First(a) => Sink::poll_close(a, cx).map_err(EitherError::A), + EitherOutputProj::Second(b) => Sink::poll_close(b, cx).map_err(EitherError::B), } } } @@ -211,10 +189,26 @@ where type OutboundSubstream = EitherOutbound; type Error = IoError; - fn poll_inbound(&self, cx: &mut Context) -> Poll> { + fn poll_event(&self, cx: &mut Context) -> Poll, Self::Error>> { match self { - EitherOutput::First(inner) => inner.poll_inbound(cx).map(|p| p.map(EitherOutput::First)).map_err(|e| e.into()), - EitherOutput::Second(inner) => inner.poll_inbound(cx).map(|p| p.map(EitherOutput::Second)).map_err(|e| e.into()), + 
EitherOutput::First(inner) => inner.poll_event(cx).map(|result| { + result.map_err(|e| e.into()).map(|event| { + match event { + StreamMuxerEvent::AddressChange(addr) => StreamMuxerEvent::AddressChange(addr), + StreamMuxerEvent::InboundSubstream(substream) => + StreamMuxerEvent::InboundSubstream(EitherOutput::First(substream)) + } + }) + }), + EitherOutput::Second(inner) => inner.poll_event(cx).map(|result| { + result.map_err(|e| e.into()).map(|event| { + match event { + StreamMuxerEvent::AddressChange(addr) => StreamMuxerEvent::AddressChange(addr), + StreamMuxerEvent::InboundSubstream(substream) => + StreamMuxerEvent::InboundSubstream(EitherOutput::Second(substream)) + } + }) + }), } } @@ -319,13 +313,6 @@ where } } - fn is_remote_acknowledged(&self) -> bool { - match self { - EitherOutput::First(inner) => inner.is_remote_acknowledged(), - EitherOutput::Second(inner) => inner.is_remote_acknowledged() - } - } - fn close(&self, cx: &mut Context) -> Poll> { match self { EitherOutput::First(inner) => inner.close(cx).map_err(|e| e.into()), @@ -349,7 +336,7 @@ pub enum EitherOutbound { } /// Implements `Stream` and dispatches all method calls to either `First` or `Second`. 
-#[pin_project] +#[pin_project(project = EitherListenStreamProj)] #[derive(Debug, Copy, Clone)] #[must_use = "futures do nothing unless polled"] pub enum EitherListenStream { @@ -364,17 +351,15 @@ where { type Item = Result, EitherError>, EitherError>; - #[project] fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - #[project] match self.project() { - EitherListenStream::First(a) => match TryStream::try_poll_next(a, cx) { + EitherListenStreamProj::First(a) => match TryStream::try_poll_next(a, cx) { Poll::Pending => Poll::Pending, Poll::Ready(None) => Poll::Ready(None), Poll::Ready(Some(Ok(le))) => Poll::Ready(Some(Ok(le.map(EitherFuture::First).map_err(EitherError::A)))), Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(EitherError::A(err)))), }, - EitherListenStream::Second(a) => match TryStream::try_poll_next(a, cx) { + EitherListenStreamProj::Second(a) => match TryStream::try_poll_next(a, cx) { Poll::Pending => Poll::Pending, Poll::Ready(None) => Poll::Ready(None), Poll::Ready(Some(Ok(le))) => Poll::Ready(Some(Ok(le.map(EitherFuture::Second).map_err(EitherError::B)))), @@ -385,7 +370,7 @@ where } /// Implements `Future` and dispatches all method calls to either `First` or `Second`. 
-#[pin_project] +#[pin_project(project = EitherFutureProj)] #[derive(Debug, Copy, Clone)] #[must_use = "futures do nothing unless polled"] pub enum EitherFuture { @@ -400,19 +385,17 @@ where { type Output = Result, EitherError>; - #[project] fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - #[project] match self.project() { - EitherFuture::First(a) => TryFuture::try_poll(a, cx) + EitherFutureProj::First(a) => TryFuture::try_poll(a, cx) .map_ok(EitherOutput::First).map_err(EitherError::A), - EitherFuture::Second(a) => TryFuture::try_poll(a, cx) + EitherFutureProj::Second(a) => TryFuture::try_poll(a, cx) .map_ok(EitherOutput::Second).map_err(EitherError::B), } } } -#[pin_project] +#[pin_project(project = EitherFuture2Proj)] #[derive(Debug, Copy, Clone)] #[must_use = "futures do nothing unless polled"] pub enum EitherFuture2 { A(#[pin] A), B(#[pin] B) } @@ -424,13 +407,11 @@ where { type Output = Result, EitherError>; - #[project] fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - #[project] match self.project() { - EitherFuture2::A(a) => TryFuture::try_poll(a, cx) + EitherFuture2Proj::A(a) => TryFuture::try_poll(a, cx) .map_ok(EitherOutput::First).map_err(EitherError::A), - EitherFuture2::B(a) => TryFuture::try_poll(a, cx) + EitherFuture2Proj::B(a) => TryFuture::try_poll(a, cx) .map_ok(EitherOutput::Second).map_err(EitherError::B), } } diff --git a/core/src/identity.rs b/core/src/identity.rs index da5ecb56..4b65f1a5 100644 --- a/core/src/identity.rs +++ b/core/src/identity.rs @@ -21,7 +21,7 @@ //! A node's network identity keys. pub mod ed25519; -#[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] +#[cfg(not(target_arch = "wasm32"))] pub mod rsa; #[cfg(feature = "secp256k1")] pub mod secp256k1; @@ -52,7 +52,7 @@ use crate::{PeerId, keys_proto}; pub enum Keypair { /// An Ed25519 keypair. 
Ed25519(ed25519::Keypair), - #[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] + #[cfg(not(target_arch = "wasm32"))] /// An RSA keypair. Rsa(rsa::Keypair), /// A Secp256k1 keypair. @@ -76,7 +76,7 @@ impl Keypair { /// format (i.e. unencrypted) as defined in [RFC5208]. /// /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 - #[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] + #[cfg(not(target_arch = "wasm32"))] pub fn rsa_from_pkcs8(pkcs8_der: &mut [u8]) -> Result { rsa::Keypair::from_pkcs8(pkcs8_der).map(Keypair::Rsa) } @@ -97,7 +97,7 @@ impl Keypair { use Keypair::*; match self { Ed25519(ref pair) => Ok(pair.sign(msg)), - #[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] + #[cfg(not(target_arch = "wasm32"))] Rsa(ref pair) => pair.sign(msg), #[cfg(feature = "secp256k1")] Secp256k1(ref pair) => pair.secret().sign(msg) @@ -109,7 +109,7 @@ impl Keypair { use Keypair::*; match self { Ed25519(pair) => PublicKey::Ed25519(pair.public()), - #[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] + #[cfg(not(target_arch = "wasm32"))] Rsa(pair) => PublicKey::Rsa(pair.public()), #[cfg(feature = "secp256k1")] Secp256k1(pair) => PublicKey::Secp256k1(pair.public().clone()), @@ -122,7 +122,7 @@ impl Keypair { pub enum PublicKey { /// A public Ed25519 key. Ed25519(ed25519::PublicKey), - #[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] + #[cfg(not(target_arch = "wasm32"))] /// A public RSA key. 
Rsa(rsa::PublicKey), #[cfg(feature = "secp256k1")] @@ -139,7 +139,7 @@ impl PublicKey { use PublicKey::*; match self { Ed25519(pk) => pk.verify(msg, sig), - #[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] + #[cfg(not(target_arch = "wasm32"))] Rsa(pk) => pk.verify(msg, sig), #[cfg(feature = "secp256k1")] Secp256k1(pk) => pk.verify(msg, sig) @@ -157,7 +157,7 @@ impl PublicKey { r#type: keys_proto::KeyType::Ed25519 as i32, data: key.encode().to_vec() }, - #[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] + #[cfg(not(target_arch = "wasm32"))] PublicKey::Rsa(key) => keys_proto::PublicKey { r#type: keys_proto::KeyType::Rsa as i32, @@ -192,11 +192,11 @@ impl PublicKey { keys_proto::KeyType::Ed25519 => { ed25519::PublicKey::decode(&pubkey.data).map(PublicKey::Ed25519) }, - #[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] + #[cfg(not(target_arch = "wasm32"))] keys_proto::KeyType::Rsa => { rsa::PublicKey::decode_x509(&pubkey.data).map(PublicKey::Rsa) } - #[cfg(any(target_os = "emscripten", target_os = "unknown"))] + #[cfg(target_arch = "wasm32")] keys_proto::KeyType::Rsa => { log::debug!("support for RSA was disabled at compile-time"); Err(DecodingError::new("Unsupported")) diff --git a/core/src/muxing.rs b/core/src/muxing.rs index 64a93051..e4c2e63c 100644 --- a/core/src/muxing.rs +++ b/core/src/muxing.rs @@ -53,6 +53,7 @@ use fnv::FnvHashMap; use futures::{future, prelude::*, task::Context, task::Poll}; +use multiaddr::Multiaddr; use parking_lot::Mutex; use std::{io, ops::Deref, fmt, pin::Pin, sync::atomic::{AtomicUsize, Ordering}}; @@ -64,11 +65,11 @@ mod singleton; /// /// The state of a muxer, as exposed by this API, is the following: /// -/// - A connection to the remote. The `is_remote_acknowledged`, `flush_all` and `close` methods -/// operate on this. -/// - A list of substreams that are open. 
The `poll_inbound`, `poll_outbound`, `read_substream`, -/// `write_substream`, `flush_substream`, `shutdown_substream` and `destroy_substream` methods -/// allow controlling these entries. +/// - A connection to the remote. The `poll_event`, `flush_all` and `close` methods operate +/// on this. +/// - A list of substreams that are open. The `poll_outbound`, `read_substream`, `write_substream`, +/// `flush_substream`, `shutdown_substream` and `destroy_substream` methods allow controlling +/// these entries. /// - A list of outbound substreams being opened. The `open_outbound`, `poll_outbound` and /// `destroy_outbound` methods allow controlling these entries. /// @@ -82,7 +83,7 @@ pub trait StreamMuxer { /// Error type of the muxer type Error: Into; - /// Polls for an inbound substream. + /// Polls for a connection-wide event. /// /// This function behaves the same as a `Stream`. /// @@ -91,7 +92,7 @@ pub trait StreamMuxer { /// Only the latest task that was used to call this method may be notified. /// /// An error can be generated if the connection has been closed. - fn poll_inbound(&self, cx: &mut Context) -> Poll>; + fn poll_event(&self, cx: &mut Context) -> Poll, Self::Error>>; /// Opens a new outgoing substream, and produces the equivalent to a future that will be /// resolved when it becomes available. @@ -180,7 +181,10 @@ pub trait StreamMuxer { /// allowed to assume that the handshake has succeeded when it didn't in fact succeed. This /// method can be called in order to determine whether the remote has accepted our handshake or /// has potentially not received it yet. - fn is_remote_acknowledged(&self) -> bool; + #[deprecated(note = "This method is unused and will be removed in the future")] + fn is_remote_acknowledged(&self) -> bool { + true + } /// Closes this `StreamMuxer`. 
/// @@ -204,18 +208,49 @@ pub trait StreamMuxer { fn flush_all(&self, cx: &mut Context) -> Poll>; } -/// Polls for an inbound from the muxer but wraps the output in an object that -/// implements `Read`/`Write`/`AsyncRead`/`AsyncWrite`. -pub fn inbound_from_ref_and_wrap

( +/// Event about a connection, reported by an implementation of [`StreamMuxer`]. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum StreamMuxerEvent { + /// Remote has opened a new substream. Contains the substream in question. + InboundSubstream(T), + + /// Address to the remote has changed. The previous one is now obsolete. + /// + /// > **Note**: This can for example happen when using the QUIC protocol, where the two nodes + /// > can change their IP address while retaining the same QUIC connection. + AddressChange(Multiaddr), +} + +impl StreamMuxerEvent { + /// If `self` is a [`StreamMuxerEvent::InboundSubstream`], returns the content. Otherwise + /// returns `None`. + pub fn into_inbound_substream(self) -> Option { + if let StreamMuxerEvent::InboundSubstream(s) = self { + Some(s) + } else { + None + } + } +} + +/// Polls for an event from the muxer and, if an inbound substream, wraps this substream in an +/// object that implements `Read`/`Write`/`AsyncRead`/`AsyncWrite`. +pub fn event_from_ref_and_wrap

( muxer: P, -) -> impl Future, ::Error>> +) -> impl Future>, ::Error>> where P: Deref + Clone, P::Target: StreamMuxer, { let muxer2 = muxer.clone(); - future::poll_fn(move |cx| muxer.poll_inbound(cx)) - .map_ok(|substream| substream_from_ref(muxer2, substream)) + future::poll_fn(move |cx| muxer.poll_event(cx)) + .map_ok(|event| { + match event { + StreamMuxerEvent::InboundSubstream(substream) => + StreamMuxerEvent::InboundSubstream(substream_from_ref(muxer2, substream)), + StreamMuxerEvent::AddressChange(addr) => StreamMuxerEvent::AddressChange(addr), + } + }) } /// Same as `outbound_from_ref`, but wraps the output in an object that @@ -476,8 +511,8 @@ impl StreamMuxer for StreamMuxerBox { type Error = io::Error; #[inline] - fn poll_inbound(&self, cx: &mut Context) -> Poll> { - self.inner.poll_inbound(cx) + fn poll_event(&self, cx: &mut Context) -> Poll, Self::Error>> { + self.inner.poll_event(cx) } #[inline] @@ -525,11 +560,6 @@ impl StreamMuxer for StreamMuxerBox { self.inner.close(cx) } - #[inline] - fn is_remote_acknowledged(&self) -> bool { - self.inner.is_remote_acknowledged() - } - #[inline] fn flush_all(&self, cx: &mut Context) -> Poll> { self.inner.flush_all(cx) @@ -553,16 +583,18 @@ where type Error = io::Error; #[inline] - fn poll_inbound(&self, cx: &mut Context) -> Poll> { - let substream = match self.inner.poll_inbound(cx) { + fn poll_event(&self, cx: &mut Context) -> Poll, Self::Error>> { + let substream = match self.inner.poll_event(cx) { Poll::Pending => return Poll::Pending, - Poll::Ready(Ok(s)) => s, + Poll::Ready(Ok(StreamMuxerEvent::AddressChange(a))) => + return Poll::Ready(Ok(StreamMuxerEvent::AddressChange(a))), + Poll::Ready(Ok(StreamMuxerEvent::InboundSubstream(s))) => s, Poll::Ready(Err(err)) => return Poll::Ready(Err(err.into())), }; let id = self.next_substream.fetch_add(1, Ordering::Relaxed); self.substreams.lock().insert(id, substream); - Poll::Ready(Ok(id)) + Poll::Ready(Ok(StreamMuxerEvent::InboundSubstream(id))) } #[inline] @@ 
-631,11 +663,6 @@ where self.inner.close(cx).map_err(|e| e.into()) } - #[inline] - fn is_remote_acknowledged(&self) -> bool { - self.inner.is_remote_acknowledged() - } - #[inline] fn flush_all(&self, cx: &mut Context) -> Poll> { self.inner.flush_all(cx).map_err(|e| e.into()) diff --git a/core/src/muxing/singleton.rs b/core/src/muxing/singleton.rs index bc2521ad..e3a6c3b2 100644 --- a/core/src/muxing/singleton.rs +++ b/core/src/muxing/singleton.rs @@ -18,7 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{connection::Endpoint, muxing::StreamMuxer}; +use crate::{connection::Endpoint, muxing::{StreamMuxer, StreamMuxerEvent}}; + use futures::prelude::*; use parking_lot::Mutex; use std::{io, pin::Pin, sync::atomic::{AtomicBool, Ordering}, task::Context, task::Poll}; @@ -35,8 +36,6 @@ pub struct SingletonMuxer { substream_extracted: AtomicBool, /// Our local endpoint. Always the same value as was passed to `new`. endpoint: Endpoint, - /// If true, we have received data from the remote. 
- remote_acknowledged: AtomicBool, } impl SingletonMuxer { @@ -49,7 +48,6 @@ impl SingletonMuxer { inner: Mutex::new(inner), substream_extracted: AtomicBool::new(false), endpoint, - remote_acknowledged: AtomicBool::new(false), } } } @@ -67,14 +65,14 @@ where type OutboundSubstream = OutboundSubstream; type Error = io::Error; - fn poll_inbound(&self, _: &mut Context) -> Poll> { + fn poll_event(&self, _: &mut Context) -> Poll, io::Error>> { match self.endpoint { Endpoint::Dialer => return Poll::Pending, Endpoint::Listener => {} } if !self.substream_extracted.swap(true, Ordering::Relaxed) { - Poll::Ready(Ok(Substream {})) + Poll::Ready(Ok(StreamMuxerEvent::InboundSubstream(Substream {}))) } else { Poll::Pending } @@ -101,11 +99,7 @@ where } fn read_substream(&self, cx: &mut Context, _: &mut Self::Substream, buf: &mut [u8]) -> Poll> { - let res = AsyncRead::poll_read(Pin::new(&mut *self.inner.lock()), cx, buf); - if let Poll::Ready(Ok(_)) = res { - self.remote_acknowledged.store(true, Ordering::Release); - } - res + AsyncRead::poll_read(Pin::new(&mut *self.inner.lock()), cx, buf) } fn write_substream(&self, cx: &mut Context, _: &mut Self::Substream, buf: &[u8]) -> Poll> { @@ -123,10 +117,6 @@ where fn destroy_substream(&self, _: Self::Substream) { } - fn is_remote_acknowledged(&self) -> bool { - self.remote_acknowledged.load(Ordering::Acquire) - } - fn close(&self, cx: &mut Context) -> Poll> { // The `StreamMuxer` trait requires that `close()` implies `flush_all()`. 
self.flush_all(cx) diff --git a/core/src/network.rs b/core/src/network.rs index 1e89dcd7..f57f25f3 100644 --- a/core/src/network.rs +++ b/core/src/network.rs @@ -416,7 +416,14 @@ where Poll::Ready(PoolEvent::ConnectionEvent { connection, event }) => { NetworkEvent::ConnectionEvent { connection, - event + event, + } + } + Poll::Ready(PoolEvent::AddressChange { connection, new_endpoint, old_endpoint }) => { + NetworkEvent::AddressChange { + connection, + new_endpoint, + old_endpoint, } } }; diff --git a/core/src/network/event.rs b/core/src/network/event.rs index a63dc479..5e06be51 100644 --- a/core/src/network/event.rs +++ b/core/src/network/event.rs @@ -155,6 +155,16 @@ where /// Event that was produced by the node. event: TOutEvent, }, + + /// An established connection has changed its address. + AddressChange { + /// The connection whose address has changed. + connection: EstablishedConnection<'a, TInEvent, TConnInfo, TPeerId>, + /// New endpoint of this connection. + new_endpoint: ConnectedPoint, + /// Old endpoint of this connection. + old_endpoint: ConnectedPoint, + }, } impl fmt::Debug for @@ -240,6 +250,13 @@ where .field("event", event) .finish() } + NetworkEvent::AddressChange { connection, new_endpoint, old_endpoint } => { + f.debug_struct("AddressChange") + .field("connection", connection) + .field("new_endpoint", new_endpoint) + .field("old_endpoint", old_endpoint) + .finish() + } } } } diff --git a/core/src/peer_id.rs b/core/src/peer_id.rs index 0c203a44..c5a84afb 100644 --- a/core/src/peer_id.rs +++ b/core/src/peer_id.rs @@ -23,7 +23,7 @@ use bs58; use thiserror::Error; use multihash::{self, Code, Sha2_256}; use rand::Rng; -use std::{convert::TryFrom, borrow::Borrow, fmt, hash, str::FromStr}; +use std::{convert::TryFrom, borrow::Borrow, fmt, hash, str::FromStr, cmp}; /// Public keys with byte-lengths smaller than `MAX_INLINE_KEY_LENGTH` will be /// automatically used as the peer id using an identity multihash. 
@@ -57,6 +57,21 @@ impl fmt::Display for PeerId { } } +impl cmp::PartialOrd for PeerId { + fn partial_cmp(&self, other: &Self) -> Option { + Some(Ord::cmp(self, other)) + } +} + +impl cmp::Ord for PeerId { + fn cmp(&self, other: &Self) -> cmp::Ordering { + // must use borrow, because as_bytes is not consistent with equality + let lhs: &[u8] = self.borrow(); + let rhs: &[u8] = other.borrow(); + lhs.cmp(rhs) + } +} + impl PeerId { /// Builds a `PeerId` from a public key. pub fn from_public_key(key: PublicKey) -> PeerId { diff --git a/core/src/upgrade.rs b/core/src/upgrade.rs index dbe3a5b7..9798ae6c 100644 --- a/core/src/upgrade.rs +++ b/core/src/upgrade.rs @@ -87,12 +87,12 @@ pub use self::{ /// /// # Context /// -/// In situations where we provide a list of protocols that we support, the elements of that list are required to -/// implement the [`ProtocolName`] trait. +/// In situations where we provide a list of protocols that we support, +/// the elements of that list are required to implement the [`ProtocolName`] trait. /// -/// Libp2p will call the [`ProtocolName::protocol_name`] trait method on each element of that list, and transmit the -/// returned value on the network. If the remote accepts a given protocol, the element serves as the return value of -/// the function that performed the negotiation. +/// Libp2p will call [`ProtocolName::protocol_name`] on each element of that list, and transmit the +/// returned value on the network. If the remote accepts a given protocol, the element +/// serves as the return value of the function that performed the negotiation. /// /// # Example /// @@ -118,6 +118,9 @@ pub use self::{ /// pub trait ProtocolName { /// The protocol name as bytes. Transmitted on the network. + /// + /// **Note:** Valid protocol names must start with `/` and + /// not exceed 140 bytes in length. 
fn protocol_name(&self) -> &[u8]; } diff --git a/core/tests/util.rs b/core/tests/util.rs index 95accf1b..40a60ccc 100644 --- a/core/tests/util.rs +++ b/core/tests/util.rs @@ -2,15 +2,15 @@ #![allow(dead_code)] use futures::prelude::*; -use libp2p_core::muxing::StreamMuxer; use libp2p_core::{ + Multiaddr, connection::{ ConnectionHandler, ConnectionHandlerEvent, Substream, SubstreamEndpoint, }, - muxing::StreamMuxerBox, + muxing::{StreamMuxer, StreamMuxerBox}, }; use std::{io, pin::Pin, task::Context, task::Poll}; @@ -29,6 +29,9 @@ impl ConnectionHandler for TestHandler { fn inject_event(&mut self, _: Self::InEvent) {} + fn inject_address_change(&mut self, _: &Multiaddr) + {} + fn poll(&mut self, _: &mut Context) -> Poll, Self::Error>> { diff --git a/examples/distributed-key-value-store.rs b/examples/distributed-key-value-store.rs index 8266744e..71773c00 100644 --- a/examples/distributed-key-value-store.rs +++ b/examples/distributed-key-value-store.rs @@ -1,210 +1,209 @@ -// // Copyright 20l9 Parity Technologies (UK) Ltd. -// // -// // Permission is hereby granted, free of charge, to any person obtaining a -// // copy of this software and associated documentation files (the "Software"), -// // to deal in the Software without restriction, including without limitation -// // the rights to use, copy, modify, merge, publish, distribute, sublicense, -// // and/or sell copies of the Software, and to permit persons to whom the -// // Software is furnished to do so, subject to the following conditions: -// // -// // The above copyright notice and this permission notice shall be included in -// // all copies or substantial portions of the Software. -// // -// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// // DEALINGS IN THE SOFTWARE. +// Copyright 20l9 Parity Technologies (UK) Ltd. // -// //! A basic key value store demonstrating libp2p and the mDNS and Kademlia protocols. -// //! -// //! 1. Using two terminal windows, start two instances. If you local network -// //! allows mDNS, they will automatically connect. -// //! -// //! 2. Type `PUT my-key my-value` in terminal one and hit return. -// //! -// //! 3. Type `GET my-key` in terminal two and hit return. -// //! -// //! 4. Close with Ctrl-c. +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: // -// use async_std::{io, task}; -// use futures::prelude::*; -// use libp2p::kad::record::store::MemoryStore; -// use libp2p::kad::{ -// record::Key, -// Kademlia, -// KademliaEvent, -// PutRecordOk, -// QueryResult, -// Quorum, -// Record -// }; -// use libp2p::{ -// NetworkBehaviour, -// PeerId, -// Swarm, -// build_development_transport, -// identity, -// mdns::{Mdns, MdnsEvent}, -// swarm::NetworkBehaviourEventProcess -// }; -// use std::{error::Error, task::{Context, Poll}}; +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. // -// fn main() -> Result<(), Box> { -// env_logger::init(); -// -// // Create a random key for ourselves. 
-// let local_key = identity::Keypair::generate_ed25519(); -// let local_peer_id = PeerId::from(local_key.public()); -// -// // Set up a an encrypted DNS-enabled TCP Transport over the Mplex protocol. -// let transport = build_development_transport(local_key)?; -// -// // We create a custom network behaviour that combines Kademlia and mDNS. -// #[derive(NetworkBehaviour)] -// struct MyBehaviour { -// kademlia: Kademlia, -// mdns: Mdns -// } -// -// impl NetworkBehaviourEventProcess for MyBehaviour { -// // Called when `mdns` produces an event. -// fn inject_event(&mut self, event: MdnsEvent) { -// if let MdnsEvent::Discovered(list) = event { -// for (peer_id, multiaddr) in list { -// self.kademlia.add_address(&peer_id, multiaddr); -// } -// } -// } -// } -// -// impl NetworkBehaviourEventProcess for MyBehaviour { -// // Called when `kademlia` produces an event. -// fn inject_event(&mut self, message: KademliaEvent) { -// match message { -// KademliaEvent::QueryResult { result, .. } => match result { -// QueryResult::GetRecord(Ok(ok)) => { -// for Record { key, value, .. } in ok.records { -// println!( -// "Got record {:?} {:?}", -// std::str::from_utf8(key.as_ref()).unwrap(), -// std::str::from_utf8(&value).unwrap(), -// ); -// } -// } -// QueryResult::GetRecord(Err(err)) => { -// eprintln!("Failed to get record: {:?}", err); -// } -// QueryResult::PutRecord(Ok(PutRecordOk { key })) => { -// println!( -// "Successfully put record {:?}", -// std::str::from_utf8(key.as_ref()).unwrap() -// ); -// } -// QueryResult::PutRecord(Err(err)) => { -// eprintln!("Failed to put record: {:?}", err); -// } -// _ => {} -// } -// _ => {} -// } -// } -// } -// -// // Create a swarm to manage peers and events. -// let mut swarm = { -// // Create a Kademlia behaviour. 
-// let store = MemoryStore::new(local_peer_id.clone()); -// let kademlia = Kademlia::new(local_peer_id.clone(), store); -// let mdns = Mdns::new()?; -// let behaviour = MyBehaviour { kademlia, mdns }; -// Swarm::new(transport, behaviour, local_peer_id) -// }; -// -// // Read full lines from stdin -// let mut stdin = io::BufReader::new(io::stdin()).lines(); -// -// // Listen on all interfaces and whatever port the OS assigns. -// Swarm::listen_on(&mut swarm, "/ip4/0.0.0.0/tcp/0".parse()?)?; -// -// // Kick it off. -// let mut listening = false; -// task::block_on(future::poll_fn(move |cx: &mut Context| { -// loop { -// match stdin.try_poll_next_unpin(cx)? { -// Poll::Ready(Some(line)) => handle_input_line(&mut swarm.kademlia, line), -// Poll::Ready(None) => panic!("Stdin closed"), -// Poll::Pending => break -// } -// } -// loop { -// match swarm.poll_next_unpin(cx) { -// Poll::Ready(Some(event)) => println!("{:?}", event), -// Poll::Ready(None) => return Poll::Ready(Ok(())), -// Poll::Pending => { -// if !listening { -// if let Some(a) = Swarm::listeners(&swarm).next() { -// println!("Listening on {:?}", a); -// listening = true; -// } -// } -// break -// } -// } -// } -// Poll::Pending -// })) -// } -// -// fn handle_input_line(kademlia: &mut Kademlia, line: String) { -// let mut args = line.split(" "); -// -// match args.next() { -// Some("GET") => { -// let key = { -// match args.next() { -// Some(key) => Key::new(&key), -// None => { -// eprintln!("Expected key"); -// return; -// } -// } -// }; -// kademlia.get_record(&key, Quorum::One); -// } -// Some("PUT") => { -// let key = { -// match args.next() { -// Some(key) => Key::new(&key), -// None => { -// eprintln!("Expected key"); -// return; -// } -// } -// }; -// let value = { -// match args.next() { -// Some(value) => value.as_bytes().to_vec(), -// None => { -// eprintln!("Expected value"); -// return; -// } -// } -// }; -// let record = Record { -// key, -// value, -// publisher: None, -// expires: None, -// 
}; -// kademlia.put_record(record, Quorum::One).expect("Failed to store record locally."); -// } -// _ => { -// eprintln!("expected GET or PUT"); -// } -// } -// } +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. -fn main() {} +//! A basic key value store demonstrating libp2p and the mDNS and Kademlia protocols. +//! +//! 1. Using two terminal windows, start two instances. If you local network +//! allows mDNS, they will automatically connect. +//! +//! 2. Type `PUT my-key my-value` in terminal one and hit return. +//! +//! 3. Type `GET my-key` in terminal two and hit return. +//! +//! 4. Close with Ctrl-c. + +use async_std::{io, task}; +use futures::prelude::*; +use libp2p::kad::record::store::MemoryStore; +use libp2p::kad::{ + Kademlia, + KademliaEvent, + PeerRecord, + PutRecordOk, + QueryResult, + Quorum, + Record, + record::Key, +}; +use libp2p::{ + NetworkBehaviour, + PeerId, + Swarm, + build_development_transport, + identity, + mdns::{Mdns, MdnsEvent}, + swarm::NetworkBehaviourEventProcess +}; +use std::{error::Error, task::{Context, Poll}}; + +fn main() -> Result<(), Box> { + env_logger::init(); + + // Create a random key for ourselves. + let local_key = identity::Keypair::generate_ed25519(); + let local_peer_id = PeerId::from(local_key.public()); + + // Set up a an encrypted DNS-enabled TCP Transport over the Mplex protocol. + let transport = build_development_transport(local_key)?; + + // We create a custom network behaviour that combines Kademlia and mDNS. 
+ #[derive(NetworkBehaviour)] + struct MyBehaviour { + kademlia: Kademlia, + mdns: Mdns + } + + impl NetworkBehaviourEventProcess for MyBehaviour { + // Called when `mdns` produces an event. + fn inject_event(&mut self, event: MdnsEvent) { + if let MdnsEvent::Discovered(list) = event { + for (peer_id, multiaddr) in list { + self.kademlia.add_address(&peer_id, multiaddr); + } + } + } + } + + impl NetworkBehaviourEventProcess for MyBehaviour { + // Called when `kademlia` produces an event. + fn inject_event(&mut self, message: KademliaEvent) { + match message { + KademliaEvent::QueryResult { result, .. } => match result { + QueryResult::GetRecord(Ok(ok)) => { + for PeerRecord { record: Record { key, value, .. }, ..} in ok.records { + println!( + "Got record {:?} {:?}", + std::str::from_utf8(key.as_ref()).unwrap(), + std::str::from_utf8(&value).unwrap(), + ); + } + } + QueryResult::GetRecord(Err(err)) => { + eprintln!("Failed to get record: {:?}", err); + } + QueryResult::PutRecord(Ok(PutRecordOk { key })) => { + println!( + "Successfully put record {:?}", + std::str::from_utf8(key.as_ref()).unwrap() + ); + } + QueryResult::PutRecord(Err(err)) => { + eprintln!("Failed to put record: {:?}", err); + } + _ => {} + } + _ => {} + } + } + } + + // Create a swarm to manage peers and events. + let mut swarm = { + // Create a Kademlia behaviour. + let store = MemoryStore::new(local_peer_id.clone()); + let kademlia = Kademlia::new(local_peer_id.clone(), store); + let mdns = Mdns::new()?; + let behaviour = MyBehaviour { kademlia, mdns }; + Swarm::new(transport, behaviour, local_peer_id) + }; + + // Read full lines from stdin + let mut stdin = io::BufReader::new(io::stdin()).lines(); + + // Listen on all interfaces and whatever port the OS assigns. + Swarm::listen_on(&mut swarm, "/ip4/0.0.0.0/tcp/0".parse()?)?; + + // Kick it off. + let mut listening = false; + task::block_on(future::poll_fn(move |cx: &mut Context| { + loop { + match stdin.try_poll_next_unpin(cx)? 
{ + Poll::Ready(Some(line)) => handle_input_line(&mut swarm.kademlia, line), + Poll::Ready(None) => panic!("Stdin closed"), + Poll::Pending => break + } + } + loop { + match swarm.poll_next_unpin(cx) { + Poll::Ready(Some(event)) => println!("{:?}", event), + Poll::Ready(None) => return Poll::Ready(Ok(())), + Poll::Pending => { + if !listening { + if let Some(a) = Swarm::listeners(&swarm).next() { + println!("Listening on {:?}", a); + listening = true; + } + } + break + } + } + } + Poll::Pending + })) +} + +fn handle_input_line(kademlia: &mut Kademlia, line: String) { + let mut args = line.split(" "); + + match args.next() { + Some("GET") => { + let key = { + match args.next() { + Some(key) => Key::new(&key), + None => { + eprintln!("Expected key"); + return; + } + } + }; + kademlia.get_record(&key, Quorum::One); + } + Some("PUT") => { + let key = { + match args.next() { + Some(key) => Key::new(&key), + None => { + eprintln!("Expected key"); + return; + } + } + }; + let value = { + match args.next() { + Some(value) => value.as_bytes().to_vec(), + None => { + eprintln!("Expected value"); + return; + } + } + }; + let record = Record { + key, + value, + publisher: None, + expires: None, + }; + kademlia.put_record(record, Quorum::One).expect("Failed to store record locally."); + } + _ => { + eprintln!("expected GET or PUT"); + } + } +} diff --git a/misc/core-derive/Cargo.toml b/misc/core-derive/Cargo.toml index b5521838..41ac18b1 100644 --- a/misc/core-derive/Cargo.toml +++ b/misc/core-derive/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-core-derive" edition = "2018" description = "Procedural macros of libp2p-core" -version = "0.19.0" +version = "0.19.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -17,4 +17,4 @@ syn = { version = "1.0.8", default-features = false, features = ["clone-impls", quote = "1.0" [dev-dependencies] -libp2p = { version = "0.19.0", path = "../.." } +libp2p = { path = "../.." 
} diff --git a/misc/core-derive/src/lib.rs b/misc/core-derive/src/lib.rs index ba9b0411..7ce859fe 100644 --- a/misc/core-derive/src/lib.rs +++ b/misc/core-derive/src/lib.rs @@ -168,6 +168,19 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { }) }; + // Build the list of statements to put in the body of `inject_address_change()`. + let inject_address_change_stmts = { + data_struct.fields.iter().enumerate().filter_map(move |(field_n, field)| { + if is_ignored(&field) { + return None; + } + Some(match field.ident { + Some(ref i) => quote!{ self.#i.inject_address_change(peer_id, connection_id, old, new); }, + None => quote!{ self.#field_n.inject_address_change(peer_id, connection_id, old, new); }, + }) + }) + }; + // Build the list of statements to put in the body of `inject_connection_closed()`. let inject_connection_closed_stmts = { data_struct.fields.iter().enumerate().filter_map(move |(field_n, field)| { @@ -441,6 +454,10 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { #(#inject_connection_established_stmts);* } + fn inject_address_change(&mut self, peer_id: &#peer_id, connection_id: &#connection_id, old: &#connected_point, new: &#connected_point) { + #(#inject_address_change_stmts);* + } + fn inject_connection_closed(&mut self, peer_id: &#peer_id, connection_id: &#connection_id, endpoint: &#connected_point) { #(#inject_connection_closed_stmts);* } @@ -469,7 +486,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { #(#inject_listener_error_stmts);* } - fn inject_listener_closed(&mut self, id: #listener_id, reason: Result<(), &std::io::Error>) { + fn inject_listener_closed(&mut self, id: #listener_id, reason: std::result::Result<(), &std::io::Error>) { #(#inject_listener_closed_stmts);* } diff --git a/misc/multiaddr/CHANGELOG.md b/misc/multiaddr/CHANGELOG.md new file mode 100644 index 00000000..b73ac170 --- /dev/null +++ b/misc/multiaddr/CHANGELOG.md @@ -0,0 +1,3 @@ +# 0.9.1 
[2020-06-22] + +Updated dependencies. diff --git a/misc/multiaddr/Cargo.toml b/misc/multiaddr/Cargo.toml index 38f920e6..620dc637 100644 --- a/misc/multiaddr/Cargo.toml +++ b/misc/multiaddr/Cargo.toml @@ -6,7 +6,7 @@ description = "Implementation of the multiaddr format" homepage = "https://github.com/libp2p/rust-libp2p" keywords = ["multiaddr", "ipfs"] license = "MIT" -version = "0.9.0" +version = "0.9.1" [dependencies] arrayref = "0.3" @@ -17,7 +17,7 @@ multihash = "0.11.0" percent-encoding = "2.1.0" serde = "1.0.70" static_assertions = "1.1" -unsigned-varint = "0.3" +unsigned-varint = "0.4" url = { version = "2.1.0", default-features = false } [dev-dependencies] diff --git a/misc/multistream-select/CHANGELOG.md b/misc/multistream-select/CHANGELOG.md new file mode 100644 index 00000000..30ee6e2f --- /dev/null +++ b/misc/multistream-select/CHANGELOG.md @@ -0,0 +1,3 @@ +# 0.8.2 [2020-06-22] + +Updated dependencies. diff --git a/misc/multistream-select/Cargo.toml b/misc/multistream-select/Cargo.toml index fa7233e5..e30081cf 100644 --- a/misc/multistream-select/Cargo.toml +++ b/misc/multistream-select/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "multistream-select" description = "Multistream-select negotiation protocol for libp2p" -version = "0.8.0" +version = "0.8.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,12 +13,12 @@ edition = "2018" bytes = "0.5" futures = "0.3" log = "0.4" -pin-project = "0.4.8" +pin-project = "0.4.17" smallvec = "1.0" -unsigned-varint = "0.3.2" +unsigned-varint = "0.4" [dev-dependencies] -async-std = "~1.5.0" +async-std = "1.6.2" quickcheck = "0.9.0" rand = "0.7.2" rw-stream-sink = "0.2.1" diff --git a/misc/multistream-select/src/negotiated.rs b/misc/multistream-select/src/negotiated.rs index f5b368c6..f309ba9e 100644 --- a/misc/multistream-select/src/negotiated.rs +++ b/misc/multistream-select/src/negotiated.rs @@ -22,7 +22,7 @@ use crate::protocol::{Protocol, 
MessageReader, Message, Version, ProtocolError}; use bytes::{BytesMut, Buf}; use futures::{prelude::*, io::{IoSlice, IoSliceMut}, ready}; -use pin_project::{pin_project, project}; +use pin_project::pin_project; use std::{error::Error, fmt, io, mem, pin::Pin, task::{Context, Poll}}; /// An I/O stream that has settled on an (application-layer) protocol to use. @@ -87,7 +87,6 @@ impl Negotiated { } /// Polls the `Negotiated` for completion. - #[project] fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> where TInner: AsyncRead + AsyncWrite + Unpin @@ -107,9 +106,8 @@ impl Negotiated { let mut this = self.project(); - #[project] match this.state.as_mut().project() { - State::Completed { remaining, .. } => { + StateProj::Completed { remaining, .. } => { debug_assert!(remaining.is_empty()); return Poll::Ready(Ok(())) } @@ -163,7 +161,7 @@ impl Negotiated { } /// The states of a `Negotiated` I/O stream. -#[pin_project] +#[pin_project(project = StateProj)] #[derive(Debug)] enum State { /// In this state, a `Negotiated` is still expecting to @@ -193,14 +191,12 @@ impl AsyncRead for Negotiated where TInner: AsyncRead + AsyncWrite + Unpin { - #[project] fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8]) -> Poll> { loop { - #[project] match self.as_mut().project().state.project() { - State::Completed { io, remaining } => { + StateProj::Completed { io, remaining } => { // If protocol negotiation is complete and there is no // remaining data to be flushed, commence with reading. if remaining.is_empty() { @@ -229,14 +225,12 @@ where } }*/ - #[project] fn poll_read_vectored(mut self: Pin<&mut Self>, cx: &mut Context, bufs: &mut [IoSliceMut]) -> Poll> { loop { - #[project] match self.as_mut().project().state.project() { - State::Completed { io, remaining } => { + StateProj::Completed { io, remaining } => { // If protocol negotiation is complete and there is no // remaining data to be flushed, commence with reading. 
if remaining.is_empty() { @@ -261,11 +255,9 @@ impl AsyncWrite for Negotiated where TInner: AsyncWrite + AsyncRead + Unpin { - #[project] fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { - #[project] match self.project().state.project() { - State::Completed { mut io, remaining } => { + StateProj::Completed { mut io, remaining } => { while !remaining.is_empty() { let n = ready!(io.as_mut().poll_write(cx, &remaining)?); if n == 0 { @@ -275,16 +267,14 @@ where } io.poll_write(cx, buf) }, - State::Expecting { io, .. } => io.poll_write(cx, buf), - State::Invalid => panic!("Negotiated: Invalid state"), + StateProj::Expecting { io, .. } => io.poll_write(cx, buf), + StateProj::Invalid => panic!("Negotiated: Invalid state"), } } - #[project] fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - #[project] match self.project().state.project() { - State::Completed { mut io, remaining } => { + StateProj::Completed { mut io, remaining } => { while !remaining.is_empty() { let n = ready!(io.as_mut().poll_write(cx, &remaining)?); if n == 0 { @@ -294,12 +284,11 @@ where } io.poll_flush(cx) }, - State::Expecting { io, .. } => io.poll_flush(cx), - State::Invalid => panic!("Negotiated: Invalid state"), + StateProj::Expecting { io, .. } => io.poll_flush(cx), + StateProj::Invalid => panic!("Negotiated: Invalid state"), } } - #[project] fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { // Ensure all data has been flushed and expected negotiation messages // have been received. @@ -307,21 +296,18 @@ where ready!(self.as_mut().poll_flush(cx).map_err(Into::::into)?); // Continue with the shutdown of the underlying I/O stream. - #[project] match self.project().state.project() { - State::Completed { io, .. } => io.poll_close(cx), - State::Expecting { io, .. } => io.poll_close(cx), - State::Invalid => panic!("Negotiated: Invalid state"), + StateProj::Completed { io, .. } => io.poll_close(cx), + StateProj::Expecting { io, .. 
} => io.poll_close(cx), + StateProj::Invalid => panic!("Negotiated: Invalid state"), } } - #[project] fn poll_write_vectored(self: Pin<&mut Self>, cx: &mut Context, bufs: &[IoSlice]) -> Poll> { - #[project] match self.project().state.project() { - State::Completed { mut io, remaining } => { + StateProj::Completed { mut io, remaining } => { while !remaining.is_empty() { let n = ready!(io.as_mut().poll_write(cx, &remaining)?); if n == 0 { @@ -331,8 +317,8 @@ where } io.poll_write_vectored(cx, bufs) }, - State::Expecting { io, .. } => io.poll_write_vectored(cx, bufs), - State::Invalid => panic!("Negotiated: Invalid state"), + StateProj::Expecting { io, .. } => io.poll_write_vectored(cx, bufs), + StateProj::Invalid => panic!("Negotiated: Invalid state"), } } } @@ -460,4 +446,3 @@ mod tests { quickcheck(prop as fn(_,_,_,_) -> _) } } - diff --git a/muxers/mplex/CHANGELOG.md b/muxers/mplex/CHANGELOG.md new file mode 100644 index 00000000..8b19702a --- /dev/null +++ b/muxers/mplex/CHANGELOG.md @@ -0,0 +1,6 @@ +# 0.19.2 [2020-06-22] + +- Deprecated method `Multiplex::is_remote_acknowledged` has been removed + as part of [PR 1616](https://github.com/libp2p/rust-libp2p/pull/1616). + +- Updated dependencies. 
diff --git a/muxers/mplex/Cargo.toml b/muxers/mplex/Cargo.toml index 1a00aeb8..8791e9d0 100644 --- a/muxers/mplex/Cargo.toml +++ b/muxers/mplex/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-mplex" edition = "2018" description = "Mplex multiplexing protocol for libp2p" -version = "0.19.0" +version = "0.19.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,12 +13,12 @@ categories = ["network-programming", "asynchronous"] bytes = "0.5" fnv = "1.0" futures = "0.3.1" -futures_codec = "0.3.4" -libp2p-core = { version = "0.19.0", path = "../../core" } +futures_codec = "0.4" +libp2p-core = { version = "0.19.2", path = "../../core" } log = "0.4" parking_lot = "0.10" -unsigned-varint = { version = "0.3", features = ["futures-codec"] } +unsigned-varint = { version = "0.4", features = ["futures-codec"] } [dev-dependencies] -async-std = "~1.5.0" -libp2p-tcp = { version = "0.19.0", path = "../../transports/tcp" } +async-std = "1.6.2" +libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"] } diff --git a/muxers/mplex/src/lib.rs b/muxers/mplex/src/lib.rs index d8350606..812aab03 100644 --- a/muxers/mplex/src/lib.rs +++ b/muxers/mplex/src/lib.rs @@ -28,6 +28,7 @@ use bytes::Bytes; use libp2p_core::{ Endpoint, StreamMuxer, + muxing::StreamMuxerEvent, upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}, }; use log::{debug, trace}; @@ -110,7 +111,6 @@ impl MplexConfig { to_wake: Mutex::new(Default::default()), }), is_shutdown: false, - is_acknowledged: false, }) } } @@ -175,6 +175,9 @@ where } /// Multiplexer. Implements the `StreamMuxer` trait. +/// +/// This implementation isn't capable of detecting when the underlying socket changes its address, +/// and no [`StreamMuxerEvent::AddressChange`] event is ever emitted. pub struct Multiplex { inner: Mutex>, } @@ -203,8 +206,6 @@ struct MultiplexInner { /// If true, the connection has been shut down. 
We need to be careful not to accidentally /// call `Sink::poll_complete` or `Sink::start_send` after `Sink::close`. is_shutdown: bool, - /// If true, the remote has sent data to us. - is_acknowledged: bool, } struct Notifier { @@ -295,7 +296,6 @@ where C: AsyncRead + AsyncWrite + Unpin, }; trace!("Received message: {:?}", elem); - inner.is_acknowledged = true; // Handle substreams opening/closing. match elem { @@ -366,7 +366,7 @@ where C: AsyncRead + AsyncWrite + Unpin type OutboundSubstream = OutboundSubstream; type Error = IoError; - fn poll_inbound(&self, cx: &mut Context) -> Poll> { + fn poll_event(&self, cx: &mut Context) -> Poll, IoError>> { let mut inner = self.inner.lock(); if inner.opened_substreams.len() >= inner.config.max_substreams { @@ -388,13 +388,13 @@ where C: AsyncRead + AsyncWrite + Unpin }; debug!("Successfully opened inbound substream {}", num); - Poll::Ready(Ok(Substream { + Poll::Ready(Ok(StreamMuxerEvent::InboundSubstream(Substream { current_data: Bytes::new(), num, endpoint: Endpoint::Listener, local_open: true, remote_open: true, - })) + }))) } fn open_outbound(&self) -> Self::OutboundSubstream { @@ -587,10 +587,6 @@ where C: AsyncRead + AsyncWrite + Unpin }) } - fn is_remote_acknowledged(&self) -> bool { - self.inner.lock().is_acknowledged - } - fn close(&self, cx: &mut Context) -> Poll> { let inner = &mut *self.inner.lock(); if inner.is_shutdown { diff --git a/muxers/mplex/tests/async_write.rs b/muxers/mplex/tests/async_write.rs index e0b708e3..1414db14 100644 --- a/muxers/mplex/tests/async_write.rs +++ b/muxers/mplex/tests/async_write.rs @@ -52,7 +52,7 @@ fn async_write() { .unwrap() .unwrap() .into_upgrade().unwrap().0.await.unwrap(); - + let mut outbound = muxing::outbound_from_ref_and_wrap(Arc::new(client)).await.unwrap(); let mut buf = Vec::new(); @@ -64,9 +64,14 @@ fn async_write() { let mplex = libp2p_mplex::MplexConfig::new(); let transport = TcpConfig::new().and_then(move |c, e| upgrade::apply(c, mplex, e, 
upgrade::Version::V1)); - - let client = transport.dial(rx.await.unwrap()).unwrap().await.unwrap(); - let mut inbound = muxing::inbound_from_ref_and_wrap(Arc::new(client)).await.unwrap(); + + let client = Arc::new(transport.dial(rx.await.unwrap()).unwrap().await.unwrap()); + let mut inbound = loop { + if let Some(s) = muxing::event_from_ref_and_wrap(client.clone()).await.unwrap() + .into_inbound_substream() { + break s; + } + }; inbound.write_all(b"hello world").await.unwrap(); // The test consists in making sure that this flushes the substream. diff --git a/muxers/mplex/tests/two_peers.rs b/muxers/mplex/tests/two_peers.rs index 51293a37..54b939a5 100644 --- a/muxers/mplex/tests/two_peers.rs +++ b/muxers/mplex/tests/two_peers.rs @@ -52,7 +52,7 @@ fn client_to_server_outbound() { .unwrap() .unwrap() .into_upgrade().unwrap().0.await.unwrap(); - + let mut outbound = muxing::outbound_from_ref_and_wrap(Arc::new(client)).await.unwrap(); let mut buf = Vec::new(); @@ -64,9 +64,14 @@ fn client_to_server_outbound() { let mplex = libp2p_mplex::MplexConfig::new(); let transport = TcpConfig::new().and_then(move |c, e| upgrade::apply(c, mplex, e, upgrade::Version::V1)); - - let client = transport.dial(rx.await.unwrap()).unwrap().await.unwrap(); - let mut inbound = muxing::inbound_from_ref_and_wrap(Arc::new(client)).await.unwrap(); + + let client = Arc::new(transport.dial(rx.await.unwrap()).unwrap().await.unwrap()); + let mut inbound = loop { + if let Some(s) = muxing::event_from_ref_and_wrap(client.clone()).await.unwrap() + .into_inbound_substream() { + break s; + } + }; inbound.write_all(b"hello world").await.unwrap(); inbound.close().await.unwrap(); @@ -98,13 +103,18 @@ fn client_to_server_inbound() { tx.send(addr).unwrap(); - let client = listener + let client = Arc::new(listener .next().await .unwrap() .unwrap() - .into_upgrade().unwrap().0.await.unwrap(); - - let mut inbound = muxing::inbound_from_ref_and_wrap(Arc::new(client)).await.unwrap(); + 
.into_upgrade().unwrap().0.await.unwrap()); + + let mut inbound = loop { + if let Some(s) = muxing::event_from_ref_and_wrap(client.clone()).await.unwrap() + .into_inbound_substream() { + break s; + } + }; let mut buf = Vec::new(); inbound.read_to_end(&mut buf).await.unwrap(); @@ -115,7 +125,7 @@ fn client_to_server_inbound() { let mplex = libp2p_mplex::MplexConfig::new(); let transport = TcpConfig::new().and_then(move |c, e| upgrade::apply(c, mplex, e, upgrade::Version::V1)); - + let client = transport.dial(rx.await.unwrap()).unwrap().await.unwrap(); let mut outbound = muxing::outbound_from_ref_and_wrap(Arc::new(client)).await.unwrap(); outbound.write_all(b"hello world").await.unwrap(); diff --git a/muxers/yamux/CHANGELOG.md b/muxers/yamux/CHANGELOG.md new file mode 100644 index 00000000..6c20cb21 --- /dev/null +++ b/muxers/yamux/CHANGELOG.md @@ -0,0 +1,4 @@ +# 0.19.1 [2020-06-22] + +Deprecated method `Yamux::is_remote_acknowledged` has been removed +as part of [PR 1616](https://github.com/libp2p/rust-libp2p/pull/1616). diff --git a/muxers/yamux/Cargo.toml b/muxers/yamux/Cargo.toml index 2901556a..c1f80c7c 100644 --- a/muxers/yamux/Cargo.toml +++ b/muxers/yamux/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-yamux" edition = "2018" description = "Yamux multiplexing protocol for libp2p" -version = "0.19.0" +version = "0.19.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,7 +11,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.19.0", path = "../../core" } +libp2p-core = { version = "0.19.2", path = "../../core" } parking_lot = "0.10" thiserror = "1.0" yamux = "0.4.5" diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs index fb25c803..13143abe 100644 --- a/muxers/yamux/src/lib.rs +++ b/muxers/yamux/src/lib.rs @@ -22,6 +22,7 @@ //! [specification](https://github.com/hashicorp/yamux/blob/master/spec.md). 
use futures::{future, prelude::*, ready, stream::{BoxStream, LocalBoxStream}}; +use libp2p_core::muxing::StreamMuxerEvent; use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use parking_lot::Mutex; use std::{fmt, io, iter, ops::{Deref, DerefMut}, pin::Pin, task::Context}; @@ -30,6 +31,9 @@ use thiserror::Error; pub use yamux::WindowUpdateMode; /// A Yamux connection. +/// +/// This implementation isn't capable of detecting when the underlying socket changes its address, +/// and no [`StreamMuxerEvent::AddressChange`] event is ever emitted. pub struct Yamux(Mutex>); impl fmt::Debug for Yamux { @@ -43,8 +47,6 @@ struct Inner { incoming: S, /// Handle to control the connection. control: yamux::Control, - /// True, once we have received an inbound substream. - acknowledged: bool } /// A token to poll for an outbound substream. @@ -66,7 +68,6 @@ where _marker: std::marker::PhantomData }, control: ctrl, - acknowledged: false }; Yamux(Mutex::new(inner)) } @@ -87,7 +88,6 @@ where _marker: std::marker::PhantomData }, control: ctrl, - acknowledged: false }; Yamux(Mutex::new(inner)) } @@ -103,13 +103,10 @@ where type OutboundSubstream = OpenSubstreamToken; type Error = YamuxError; - fn poll_inbound(&self, c: &mut Context) -> Poll { + fn poll_event(&self, c: &mut Context) -> Poll> { let mut inner = self.0.lock(); match ready!(inner.incoming.poll_next_unpin(c)) { - Some(Ok(s)) => { - inner.acknowledged = true; - Poll::Ready(Ok(s)) - } + Some(Ok(s)) => Poll::Ready(Ok(StreamMuxerEvent::InboundSubstream(s))), Some(Err(e)) => Poll::Ready(Err(e)), None => Poll::Ready(Err(yamux::ConnectionError::Closed.into())) } @@ -146,10 +143,6 @@ where fn destroy_substream(&self, _: Self::Substream) { } - fn is_remote_acknowledged(&self) -> bool { - self.0.lock().acknowledged - } - fn close(&self, c: &mut Context) -> Poll<()> { let mut inner = self.0.lock(); if let std::task::Poll::Ready(x) = Pin::new(&mut inner.control).poll_close(c) { diff --git 
a/protocols/deflate/CHANGELOG.md b/protocols/deflate/CHANGELOG.md new file mode 100644 index 00000000..7bb1e881 --- /dev/null +++ b/protocols/deflate/CHANGELOG.md @@ -0,0 +1,3 @@ +# 0.19.2 [2020-06-22] + +Updated dependencies. diff --git a/protocols/deflate/Cargo.toml b/protocols/deflate/Cargo.toml index dc7b5938..c3983d24 100644 --- a/protocols/deflate/Cargo.toml +++ b/protocols/deflate/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-deflate" edition = "2018" description = "Deflate encryption protocol for libp2p" -version = "0.19.0" +version = "0.19.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,11 +11,11 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.19.0", path = "../../core" } +libp2p-core = { version = "0.19.2", path = "../../core" } flate2 = "1.0" [dev-dependencies] -async-std = "~1.5.0" -libp2p-tcp = { version = "0.19.0", path = "../../transports/tcp" } -rand = "0.7" +async-std = "1.6.2" +libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"] } quickcheck = "0.9" +rand = "0.7" diff --git a/protocols/floodsub/CHANGELOG.md b/protocols/floodsub/CHANGELOG.md new file mode 100644 index 00000000..0d5b7cf2 --- /dev/null +++ b/protocols/floodsub/CHANGELOG.md @@ -0,0 +1,3 @@ +# 0.19.1 [2020-06-22] + +Updated dependencies. 
diff --git a/protocols/floodsub/Cargo.toml b/protocols/floodsub/Cargo.toml index 45701703..4f605aec 100644 --- a/protocols/floodsub/Cargo.toml +++ b/protocols/floodsub/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-floodsub" edition = "2018" description = "Floodsub protocol for libp2p" -version = "0.19.0" +version = "0.19.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,8 +13,8 @@ categories = ["network-programming", "asynchronous"] cuckoofilter = "0.3.2" fnv = "1.0" futures = "0.3.1" -libp2p-core = { version = "0.19.0", path = "../../core" } -libp2p-swarm = { version = "0.19.0", path = "../../swarm" } +libp2p-core = { version = "0.19.2", path = "../../core" } +libp2p-swarm = { version = "0.19.1", path = "../../swarm" } prost = "0.6.1" rand = "0.7" smallvec = "1.0" diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs index 4bb6aa08..c1d66572 100644 --- a/protocols/floodsub/src/layer.rs +++ b/protocols/floodsub/src/layer.rs @@ -289,7 +289,7 @@ impl NetworkBehaviour for Floodsub { _connection: ConnectionId, event: InnerMessage, ) { - // We ignore successful sends event. + // We ignore successful sends or timeouts. let event = match event { InnerMessage::Rx(event) => event, InnerMessage::Sent => return, diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md new file mode 100644 index 00000000..28578ba2 --- /dev/null +++ b/protocols/gossipsub/CHANGELOG.md @@ -0,0 +1,7 @@ +# 0.19.3 [2020-06-23] + +Maintenance release fixing linter warnings. + +# 0.19.2 [2020-06-22] + +Updated dependencies. 
diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 9944963d..183686d2 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-gossipsub" edition = "2018" description = "Gossipsub protocol for libp2p" -version = "0.19.0" +version = "0.19.3" authors = ["Age Manning "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -10,16 +10,16 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -libp2p-swarm = { version = "0.19.0", path = "../../swarm" } -libp2p-core = { version = "0.19.0", path = "../../core" } +libp2p-swarm = { version = "0.19.1", path = "../../swarm" } +libp2p-core = { version = "0.19.2", path = "../../core" } bytes = "0.5.4" byteorder = "1.3.2" fnv = "1.0.6" futures = "0.3.1" rand = "0.7.3" -futures_codec = "0.3.4" +futures_codec = "0.4.0" wasm-timer = "0.2.4" -unsigned-varint = { version = "0.3.0", features = ["futures-codec"] } +unsigned-varint = { version = "0.4.0", features = ["futures-codec"] } log = "0.4.8" sha2 = "0.8.1" base64 = "0.11.0" @@ -28,10 +28,10 @@ smallvec = "1.1.0" prost = "0.6.1" [dev-dependencies] -async-std = "~1.5.0" +async-std = "1.6.2" env_logger = "0.7.1" -libp2p-plaintext = { version = "0.19.0", path = "../plaintext" } -libp2p-yamux = { version = "0.19.0", path = "../../muxers/yamux" } +libp2p-plaintext = { path = "../plaintext" } +libp2p-yamux = { path = "../../muxers/yamux" } quickcheck = "0.9.2" [build-dependencies] diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 63611df9..2a17efaf 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -363,7 +363,7 @@ impl Gossipsub { &self.topic_peers, topic_hash, self.config.mesh_n - added_peers.len(), - { |_| true }, + |_| true, ); added_peers.extend_from_slice(&new_peers); // add them to the mesh diff --git 
a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index e2073151..c9336594 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -571,22 +571,22 @@ mod tests { gs.topic_peers.insert(topic_hash.clone(), peers.clone()); let random_peers = - Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 5, { |_| true }); + Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 5, |_| true); assert!(random_peers.len() == 5, "Expected 5 peers to be returned"); let random_peers = - Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 30, { |_| true }); + Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 30, |_| true); assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); assert!(random_peers == peers, "Expected no shuffling"); let random_peers = - Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 20, { |_| true }); + Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 20, |_| true); assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); assert!(random_peers == peers, "Expected no shuffling"); let random_peers = - Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 0, { |_| true }); + Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 0, |_| true); assert!(random_peers.len() == 0, "Expected 0 peers to be returned"); // test the filter let random_peers = - Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 5, { |_| false }); + Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 5, |_| false); assert!(random_peers.len() == 0, "Expected 0 peers to be returned"); let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 10, { diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md new file mode 100644 index 00000000..7bb1e881 --- /dev/null +++ b/protocols/identify/CHANGELOG.md @@ -0,0 +1,3 @@ +# 0.19.2 [2020-06-22] + +Updated dependencies. 
diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index 5b680848..e1d7415e 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-identify" edition = "2018" description = "Nodes identifcation protocol for libp2p" -version = "0.19.0" +version = "0.19.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,18 +11,18 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.19.0", path = "../../core" } -libp2p-swarm = { version = "0.19.0", path = "../../swarm" } +libp2p-core = { version = "0.19.2", path = "../../core" } +libp2p-swarm = { version = "0.19.1", path = "../../swarm" } log = "0.4.1" prost = "0.6.1" smallvec = "1.0" wasm-timer = "0.2" [dev-dependencies] -async-std = "~1.5.0" -libp2p-mplex = { version = "0.19.0", path = "../../muxers/mplex" } -libp2p-secio = { version = "0.19.0", path = "../../protocols/secio" } -libp2p-tcp = { version = "0.19.0", path = "../../transports/tcp" } +async-std = "1.6.2" +libp2p-mplex = { path = "../../muxers/mplex" } +libp2p-secio = { path = "../../protocols/secio" } +libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"] } [build-dependencies] prost-build = "0.6" diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md new file mode 100644 index 00000000..04a50300 --- /dev/null +++ b/protocols/kad/CHANGELOG.md @@ -0,0 +1,26 @@ +# 0.21.0 [????-??-??] + +- More control and insight for k-buckets + ([PR 1628](https://github.com/libp2p/rust-libp2p/pull/1628)). + In particular, `Kademlia::kbuckets_entries` has been removed and + replaced by `Kademlia::kbuckets`/`Kademlia::kbucket` which provide + more information than just the peer IDs. 
Furthermore `Kademlia::add_address` + now returns a result and two new events, `KademliaEvent::RoutablePeer` + and `KademliaEvent::PendingRoutablePeer` are introduced (but are not + required to be acted upon in order to retain existing behaviour). + For more details, see the PR description. + +# 0.20.1 [2020-06-23] + +Maintenance release ([PR 1623](https://github.com/libp2p/rust-libp2p/pull/1623)). + +# 0.20.0 [2020-06-22] + +- Optionally require iterative queries to use disjoint paths based + on S/Kademlia for increased resiliency in the presence of potentially + adversarial nodes ([PR 1473](https://github.com/libp2p/rust-libp2p/pull/1473)). + +- Re-export several types + ([PR 1595](https://github.com/libp2p/rust-libp2p/pull/1595)). + +- Updated dependencies. diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 9431fc20..05d2e07f 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-kad" edition = "2018" description = "Kademlia protocol for libp2p" -version = "0.19.0" +version = "0.20.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,11 +14,11 @@ arrayvec = "0.5.1" bytes = "0.5" either = "1.5" fnv = "1.0" -futures_codec = "0.3.4" +futures_codec = "0.4" futures = "0.3.1" log = "0.4" -libp2p-core = { version = "0.19.0", path = "../../core" } -libp2p-swarm = { version = "0.19.0", path = "../../swarm" } +libp2p-core = { version = "0.19.2", path = "../../core" } +libp2p-swarm = { version = "0.19.1", path = "../../swarm" } multihash = "0.11.0" prost = "0.6.1" rand = "0.7.2" @@ -26,7 +26,7 @@ sha2 = "0.8.0" smallvec = "1.0" wasm-timer = "0.2" uint = "0.8" -unsigned-varint = { version = "0.3", features = ["futures-codec"] } +unsigned-varint = { version = "0.4", features = ["futures-codec"] } void = "1.0" bs58 = "0.3.0" derivative = "2.0.2" @@ -35,8 +35,9 @@ trust-graph = { git = "https://github.com/fluencelabs/fluence", branch = "master 
prometheus = "0.9.0" [dev-dependencies] -libp2p-secio = { version = "0.19.0", path = "../secio" } -libp2p-yamux = { version = "0.19.0", path = "../../muxers/yamux" } +futures-timer = "3.0" +libp2p-secio = { path = "../secio" } +libp2p-yamux = { path = "../../muxers/yamux" } quickcheck = "0.9.0" env_logger = "0.7.1" diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index efcc3e76..087b8377 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -59,11 +59,15 @@ use crate::metrics::Metrics; pub use crate::query::QueryStats; -/// Network behaviour that handles Kademlia. +/// `Kademlia` is a `NetworkBehaviour` that implements the libp2p +/// Kademlia protocol. pub struct Kademlia { /// The Kademlia routing table. kbuckets: KBucketsTable, Contact>, + /// The k-bucket insertion strategy. + kbucket_inserts: KademliaBucketInserts, + /// Configuration of the wire protocol. protocol_config: KademliaProtocolConfig, @@ -106,6 +110,30 @@ pub struct Kademlia { // TODO: how substrate uses bootstrap? is there a periodic maintenance job? } +/// The configurable strategies for the insertion of peers +/// and their addresses into the k-buckets of the Kademlia +/// routing table. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum KademliaBucketInserts { + /// Whenever a connection to a peer is established as a + /// result of a dialing attempt and that peer is not yet + /// in the routing table, it is inserted as long as there + /// is a free slot in the corresponding k-bucket. If the + /// k-bucket is full but still has a free pending slot, + /// it may be inserted into the routing table at a later time if an unresponsive + /// disconnected peer is evicted from the bucket. + OnConnected, + /// New peers and addresses are only added to the routing table via + /// explicit calls to [`Kademlia::add_address`]. 
+ /// + /// > **Note**: Even though peers can only get into the + /// > routing table as a result of [`Kademlia::add_address`], + /// > routing table entries are still updated as peers + /// > connect and disconnect (i.e. the order of the entries + /// > as well as the network addresses). + Manual, +} + /// The configuration for the `Kademlia` behaviour. /// /// The configuration is consumed by [`Kademlia::new`]. @@ -120,6 +148,7 @@ pub struct KademliaConfig { provider_record_ttl: Option, provider_publication_interval: Option, connection_idle_timeout: Duration, + kbucket_inserts: KademliaBucketInserts, } impl Default for KademliaConfig { @@ -134,6 +163,7 @@ impl Default for KademliaConfig { provider_publication_interval: Some(Duration::from_secs(12 * 60 * 60)), provider_record_ttl: Some(Duration::from_secs(24 * 60 * 60)), connection_idle_timeout: Duration::from_secs(10), + kbucket_inserts: KademliaBucketInserts::OnConnected, } } } @@ -141,8 +171,9 @@ impl Default for KademliaConfig { impl KademliaConfig { /// Sets a custom protocol name. /// - /// Kademlia nodes only communicate with other nodes using the same protocol name. Using a - /// custom name therefore allows to segregate the DHT from others, if that is desired. + /// Kademlia nodes only communicate with other nodes using the same protocol + /// name. Using a custom name therefore allows to segregate the DHT from + /// others, if that is desired. pub fn set_protocol_name(&mut self, name: impl Into>) -> &mut Self { self.protocol_config.set_protocol_name(name); self @@ -168,10 +199,41 @@ impl KademliaConfig { self } + /// Sets the allowed level of parallelism for iterative queries. + /// + /// The `α` parameter in the Kademlia paper. The maximum number of peers + /// that an iterative query is allowed to wait for in parallel while + /// iterating towards the closest nodes to a target. Defaults to + /// `ALPHA_VALUE`. 
+ /// + /// This only controls the level of parallelism of an iterative query, not + /// the level of parallelism of a query to a fixed set of peers. + /// + /// When used with [`KademliaConfig::disjoint_query_paths`] it equals + /// the amount of disjoint paths used. + pub fn set_parallelism(&mut self, parallelism: NonZeroUsize) -> &mut Self { + self.query_config.parallelism = parallelism; + self + } + + /// Require iterative queries to use disjoint paths for increased resiliency + /// in the presence of potentially adversarial nodes. + /// + /// When enabled the number of disjoint paths used equals the configured + /// parallelism. + /// + /// See the S/Kademlia paper for more information on the high level design + /// as well as its security improvements. + pub fn disjoint_query_paths(&mut self, enabled: bool) -> &mut Self { + self.query_config.disjoint_query_paths = enabled; + self + } + /// Sets the TTL for stored records. /// /// The TTL should be significantly longer than the (re-)publication - /// interval, to avoid premature expiration of records. The default is 36 hours. + /// interval, to avoid premature expiration of records. The default is 36 + /// hours. /// /// `None` means records never expire. /// @@ -205,10 +267,10 @@ impl KademliaConfig { /// Sets the (re-)publication interval of stored records. /// - /// Records persist in the DHT until they expire. By default, published records - /// are re-published in regular intervals for as long as the record exists - /// in the local storage of the original publisher, thereby extending the - /// records lifetime. + /// Records persist in the DHT until they expire. By default, published + /// records are re-published in regular intervals for as long as the record + /// exists in the local storage of the original publisher, thereby extending + /// the records lifetime. /// /// This interval should be significantly shorter than the record TTL, to /// ensure records do not expire prematurely. 
The default is 24 hours. @@ -234,7 +296,8 @@ impl KademliaConfig { /// Sets the interval at which provider records for keys provided /// by the local node are re-published. /// - /// `None` means that stored provider records are never automatically re-published. + /// `None` means that stored provider records are never automatically + /// re-published. /// /// Must be significantly less than the provider record TTL. pub fn set_provider_publication_interval(&mut self, interval: Option) -> &mut Self { @@ -250,18 +313,25 @@ impl KademliaConfig { /// Modifies the maximum allowed size of individual Kademlia packets. /// - /// It might be necessary to increase this value if trying to put large records. + /// It might be necessary to increase this value if trying to put large + /// records. pub fn set_max_packet_size(&mut self, size: usize) -> &mut Self { self.protocol_config.set_max_packet_size(size); self } + + /// Sets the k-bucket insertion strategy for the Kademlia routing table. + pub fn set_kbucket_inserts(&mut self, inserts: KademliaBucketInserts) -> &mut Self { + self.kbucket_inserts = inserts; + self + } } impl Kademlia where for<'a> TStore: RecordStore<'a> { - /// Creates a new `Kademlia` network behaviour with the given configuration. + /// Creates a new `Kademlia` network behaviour with a default configuration. pub fn new(kp: Keypair, id: PeerId, store: TStore, trust: TrustGraph) -> Self { Self::with_config(kp, id, store, Default::default(), trust) } @@ -292,6 +362,7 @@ where Kademlia { store, kbuckets: KBucketsTable::new(kp, local_key, config.kbucket_pending_timeout), + kbucket_inserts: config.kbucket_inserts, protocol_config: config.protocol_config, queued_events: VecDeque::with_capacity(config.query_config.replication_factor.get()), queries: QueryPool::new(config.query_config), @@ -368,7 +439,7 @@ where /// /// If the routing table has been updated as a result of this operation, /// a [`KademliaEvent::RoutingUpdated`] event is emitted. 
- pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr, public_key: PublicKey) { + pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr, public_key: PublicKey) -> RoutingUpdate { let key = kbucket::Key::new(peer.clone()); match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, _) => { @@ -381,9 +452,11 @@ where } )) } + RoutingUpdate::Success } kbucket::Entry::Pending(mut entry, _) => { entry.value().insert(address); + RoutingUpdate::Pending } kbucket::Entry::Absent(entry) => { debug!( @@ -401,18 +474,88 @@ where } else { NodeStatus::Disconnected }; - Self::insert_new_peer(entry, contact, status, &self.connected_peers, &self.trust); + let (status, events) = Self::insert_new_peer(entry, contact, status, &self.connected_peers, &self.trust); + events.into_iter().for_each(|e| self.queued_events.push_back(e)); + status }, - kbucket::Entry::SelfEntry => {}, + kbucket::Entry::SelfEntry => RoutingUpdate::Failed, } self.print_bucket_table(); } - /// Returns an iterator over all peer IDs of nodes currently contained in a bucket - /// of the Kademlia routing table. - pub fn kbuckets_entries(&mut self) -> impl Iterator { - self.kbuckets.iter().map(|entry| entry.node.key.preimage()) + /// Removes an address of a peer from the routing table. + /// + /// If the given address is the last address of the peer in the + /// routing table, the peer is removed from the routing table + /// and `Some` is returned with a view of the removed entry. + /// The same applies if the peer is currently pending insertion + /// into the routing table. + /// + /// If the given peer or address is not in the routing table, + /// this is a no-op. 
+ pub fn remove_address(&mut self, peer: &PeerId, address: &Multiaddr) + -> Option, Addresses>> + { + let key = kbucket::Key::new(peer.clone()); + match self.kbuckets.entry(&key) { + kbucket::Entry::Present(mut entry, _) => { + if entry.value().remove(address).is_err() { + Some(entry.remove()) // it is the last address, thus remove the peer. + } else { + None + } + } + kbucket::Entry::Pending(mut entry, _) => { + if entry.value().remove(address).is_err() { + Some(entry.remove()) // it is the last address, thus remove the peer. + } else { + None + } + } + kbucket::Entry::Absent(..) | kbucket::Entry::SelfEntry => { + None + } + } + } + + /// Removes a peer from the routing table. + /// + /// Returns `None` if the peer was not in the routing table, + /// not even pending insertion. + pub fn remove_peer(&mut self, peer: &PeerId) + -> Option, Addresses>> + { + let key = kbucket::Key::new(peer.clone()); + match self.kbuckets.entry(&key) { + kbucket::Entry::Present(entry, _) => { + Some(entry.remove()) + } + kbucket::Entry::Pending(entry, _) => { + Some(entry.remove()) + } + kbucket::Entry::Absent(..) | kbucket::Entry::SelfEntry => { + None + } + } + } + + /// Returns an iterator over all non-empty buckets in the routing table. + pub fn kbuckets(&mut self) + -> impl Iterator, Addresses>> + { + self.kbuckets.iter().filter(|b| !b.is_empty()) + } + + /// Returns the k-bucket for the distance to the given key. + /// + /// Returns `None` if the given key refers to the local key. + pub fn kbucket(&mut self, key: K) + -> Option, Addresses>> + where + K: Borrow<[u8]> + Clone + { + self.kbuckets.bucket(&kbucket::Key::new(key)) } /// Initiates an iterative query for the closest peers to the given key. 
@@ -443,7 +586,7 @@ where self.store.remove(key); self.metrics.record_removed(); } else { - records.push(record.into_owned()); + records.push(PeerRecord{ peer: None, record: record.into_owned()}); } } @@ -669,18 +812,6 @@ where let local_id = self.kbuckets.local_key().preimage().clone(); let others_iter = peers.filter(|p| p.node_id != local_id); - - for peer in others_iter.clone() { - self.queued_events.push_back(NetworkBehaviourAction::GenerateEvent( - KademliaEvent::Discovered { - peer_id: peer.node_id.clone(), - addresses: peer.multiaddrs.clone(), - ty: peer.connection_ty, - public_key: peer.public_key.clone() - } - )); - } - let trust = &self.trust; if let Some(query) = self.queries.get_mut(query_id) { @@ -777,7 +908,7 @@ where self.queries.add_iter_closest(target.clone(), peers, inner); } - /// Updates the connection status of a peer in the Kademlia routing table. + /// Updates the routing table with a new connection status and address of a peer. fn connection_updated(&mut self, peer: PeerId, contact: Option, new_status: NodeStatus) { let key = kbucket::Key::new(peer.clone()); match self.kbuckets.entry(&key) { @@ -810,17 +941,30 @@ where kbucket::Entry::Absent(entry) => { // Only connected nodes with a known address are newly inserted. 
- if new_status == NodeStatus::Connected { - if let Some(contact) = contact { - Self::insert_new_peer(entry, contact, new_status, &self.connected_peers, &self.trust) - .map(|e| - self.queued_events.push_back(e) - ); - } else { + if new_status != NodeStatus::Connected { + return + } + + match (contact, self.kbucket_inserts) { + (None, _) => { self.queued_events.push_back(NetworkBehaviourAction::GenerateEvent( KademliaEvent::UnroutablePeer { peer } )); } + (Some(c), KademliaBucketInserts::Manual) => { + self.queued_events.push_back(NetworkBehaviourAction::GenerateEvent( + KademliaEvent::RoutablePeer { peer, address: c.addresses.last } // TODO + )); + } + (Some(contact), KademliaBucketInserts::OnConnected) => { + // Only connected nodes with a known address are newly inserted. + Self::insert_new_peer(entry, contact, new_status, &self.connected_peers, &self.trust) + .1 + .into_iter() + .for_each(|e| + self.queued_events.push_back(e) + ); + } } }, _ => {} @@ -835,7 +979,7 @@ where status: NodeStatus, connected_peers: &FnvHashSet, trust: &TrustGraph - ) -> Option, KademliaEvent>> + ) -> (RoutingUpdate, Vec, KademliaEvent>>) { let addresses = contact.addresses.clone(); let peer = entry.key().preimage().clone(); @@ -848,28 +992,43 @@ where ); match entry.insert(contact, status, weight) { kbucket::InsertResult::Inserted => { - Some( - NetworkBehaviourAction::GenerateEvent( - KademliaEvent::RoutingUpdated { - peer, - addresses, - old_peer: None, - } - ) + ( + RoutingUpdate::Success, + vec![ + NetworkBehaviourAction::GenerateEvent( + KademliaEvent::RoutingUpdated { + peer, + addresses, + old_peer: None, + } + ) + ] ) }, kbucket::InsertResult::Full => { - // TODO: excess peer.clone() debug!("Bucket full. 
Peer not added to routing table: {}", peer); - None + ( + RoutingUpdate::Failed, + vec![NetworkBehaviourAction::GenerateEvent( + KademliaEvent::RoutablePeer { peer, address: addresses.last } // TODO: fix compilation + )] + ) }, kbucket::InsertResult::Pending { disconnected } => { // least recently connected peer is returned debug_assert!(!connected_peers.contains(disconnected.preimage())); - Some( - NetworkBehaviourAction::DialPeer { // will try to dial that peer in order to check if it's online - peer_id: disconnected.into_preimage(), - condition: DialPeerCondition::Disconnected, - } + let address = addresses.first().clone(); + ( + RoutingUpdate::Pending, + vec![ + // TODO: 'A connection to a peer has been established' isn't true at this point + NetworkBehaviourAction::GenerateEvent( + KademliaEvent::PendingRoutablePeer { peer, address } + ), + NetworkBehaviourAction::DialPeer { + peer_id: disconnected.into_preimage(), + condition: DialPeerCondition::Disconnected + }, + ] ) }, } @@ -894,8 +1053,8 @@ where // a bucket refresh should be performed for every bucket farther away than // the first non-empty bucket (which are most likely no more than the last // few, i.e. farthest, buckets). - self.kbuckets.buckets() - .skip_while(|b| b.num_entries() == 0) + self.kbuckets.iter() + .skip_while(|b| b.is_empty()) .skip(1) // Skip the bucket with the closest neighbour. .map(|b| { // Try to find a key that falls into the bucket. While such keys can @@ -1033,7 +1192,7 @@ where if let Some(cache_key) = cache_at { // Cache the record at the closest node to the key that // did not return the record. 
- let record = records.first().expect("[not empty]").clone(); + let record = records.first().expect("[not empty]").record.clone(); let quorum = NonZeroUsize::new(1).expect("1 > 0"); let context = PutRecordContext::Cache; let info = QueryInfo::PutRecord { @@ -1041,7 +1200,7 @@ where record, quorum, phase: PutRecordPhase::PutRecord { - num_results: 0, + success: vec![], get_closest_peers_stats: QueryStats::empty() } }; @@ -1084,7 +1243,7 @@ where record, quorum, phase: PutRecordPhase::PutRecord { - num_results: 0, + success: vec![], get_closest_peers_stats: result.stats } }; @@ -1110,13 +1269,13 @@ where context, record, quorum, - phase: PutRecordPhase::PutRecord { num_results, get_closest_peers_stats } + phase: PutRecordPhase::PutRecord { success, get_closest_peers_stats } } => { let mk_result = |key: record::Key| { - if num_results >= quorum.get() { + if success.len() >= quorum.get() { Ok(PutRecordOk { key }) } else { - Err(PutRecordError::QuorumFailed { key, quorum, num_results }) + Err(PutRecordError::QuorumFailed { key, quorum, success }) } }; match context { @@ -1213,9 +1372,9 @@ where let err = Err(PutRecordError::Timeout { key: record.key, quorum, - num_results: match phase { - PutRecordPhase::GetClosestPeers => 0, - PutRecordPhase::PutRecord { num_results, .. } => num_results + success: match phase { + PutRecordPhase::GetClosestPeers => vec![], + PutRecordPhase::PutRecord { ref success, .. 
} => success.clone(), } }); match context { @@ -1261,7 +1420,7 @@ where id: query_id, stats: result.stats, result: QueryResult::GetRecord(Err( - GetRecordError::Timeout { key, records, quorum } + GetRecordError::Timeout { key, records, quorum }, )) }), @@ -1399,14 +1558,6 @@ where }); } - self.queued_events.push_back(NetworkBehaviourAction::GenerateEvent( - KademliaEvent::Discovered { - peer_id: provider.node_id.clone(), - addresses: provider.multiaddrs.clone(), - ty: provider.connection_ty, - public_key: provider.public_key.clone() - })); - if &provider.node_id != self.kbuckets.local_key().preimage() { // TODO: calculate weight let record = ProviderRecord { @@ -1753,9 +1904,24 @@ where key, records, quorum, cache_at } = &mut query.inner.info { if let Some(record) = record { - records.push(record); - if records.len() >= quorum.get() { - query.finish() + records.push(PeerRecord{ peer: Some(source.clone()), record }); + + let quorum = quorum.get(); + if records.len() >= quorum { + // Desired quorum reached. The query may finish. See + // [`Query::try_finish`] for details. + let peers = records.iter() + .filter_map(|PeerRecord{ peer, .. }| peer.as_ref()) + .cloned() + .collect::>(); + let finished = query.try_finish(peers.iter()); + if !finished { + debug!( + "GetRecord query ({:?}) reached quorum ({}/{}) with \ + response from peer {} but could not yet finish.", + user_data, peers.len(), quorum, source, + ); + } } } else if quorum.get() == 1 { // It is a "standard" Kademlia query, for which the @@ -1791,11 +1957,21 @@ where if let Some(query) = self.queries.get_mut(&user_data) { query.on_success(&source, vec![]); if let QueryInfo::PutRecord { - phase: PutRecordPhase::PutRecord { num_results, .. }, quorum, .. + phase: PutRecordPhase::PutRecord { success, .. }, quorum, .. 
} = &mut query.inner.info { - *num_results += 1; - if *num_results >= quorum.get() { - query.finish() + success.push(source.clone()); + + let quorum = quorum.get(); + if success.len() >= quorum { + let peers = success.clone(); + let finished = query.try_finish(peers.iter()); + if !finished { + debug!( + "PutRecord query ({:?}) reached quorum ({}/{}) with response \ + from peer {} but could not yet finish.", + user_data, peers.len(), quorum, source, + ); + } } } } @@ -1947,6 +2123,16 @@ impl Quorum { } } +/// A record either received by the given peer or retrieved from the local +/// record store. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PeerRecord { + /// The peer from whom the record was received. `None` if the record was + /// retrieved from local storage. + pub peer: Option, + pub record: Record, +} + ////////////////////////////////////////////////////////////////////////////// // Events @@ -1966,25 +2152,12 @@ pub enum KademliaEvent { stats: QueryStats }, - /// A peer has been discovered during a query. - Discovered { - /// The ID of the discovered peer. - peer_id: PeerId, - /// The known addresses of the discovered peer. - addresses: Vec, - /// The connection status reported by the discovered peer - /// towards the local peer. - ty: KadConnectionType, - /// PublicKey of the discovered peer - #[derivative(Debug="ignore")] - public_key: PublicKey - }, - - /// The routing table has been updated. + /// The routing table has been updated with a new peer and / or + /// address, thereby possibly evicting another peer. RoutingUpdated { /// The ID of the peer that was added or updated. peer: PeerId, - /// The list of known addresses of `peer`. + /// The full list of known addresses of `peer`. addresses: Addresses, /// The ID of the peer that was evicted from the routing table to make /// room for the new peer, if any. @@ -1993,10 +2166,42 @@ pub enum KademliaEvent { /// A peer has connected for whom no listen address is known. 
/// - /// If the peer is to be added to the local node's routing table, a known + /// If the peer is to be added to the routing table, a known /// listen address for the peer must be provided via [`Kademlia::add_address`]. UnroutablePeer { peer: PeerId + }, + + /// A connection to a peer has been established for whom a listen address + /// is known but the peer has not been added to the routing table either + /// because [`KademliaBucketInserts::Manual`] is configured or because + /// the corresponding bucket is full. + /// + /// If the peer is to be included in the routing table, it must + /// must be explicitly added via [`Kademlia::add_address`], possibly after + /// removing another peer. + /// + /// See [`Kademlia::kbucket`] for insight into the contents of + /// the k-bucket of `peer`. + RoutablePeer { + peer: PeerId, + address: Multiaddr, + }, + + /// A connection to a peer has been established for whom a listen address + /// is known but the peer is only pending insertion into the routing table + /// if the least-recently disconnected peer is unresponsive, i.e. the peer + /// may not make it into the routing table. + /// + /// If the peer is to be unconditionally included in the routing table, + /// it should be explicitly added via [`Kademlia::add_address`] after + /// removing another peer. + /// + /// See [`Kademlia::kbucket`] for insight into the contents of + /// the k-bucket of `peer`. + PendingRoutablePeer { + peer: PeerId, + address: Multiaddr, } } @@ -2034,7 +2239,7 @@ pub type GetRecordResult = Result; /// The successful result of [`Kademlia::get_record`]. #[derive(Debug, Clone)] pub struct GetRecordOk { - pub records: Vec + pub records: Vec } /// The error result of [`Kademlia::get_record`]. 
@@ -2046,12 +2251,12 @@ pub enum GetRecordError { }, QuorumFailed { key: record::Key, - records: Vec, + records: Vec, quorum: NonZeroUsize }, Timeout { key: record::Key, - records: Vec, + records: Vec, quorum: NonZeroUsize } } @@ -2091,12 +2296,14 @@ pub struct PutRecordOk { pub enum PutRecordError { QuorumFailed { key: record::Key, - num_results: usize, + /// [`PeerId`]s of the peers the record was successfully stored on. + success: Vec, quorum: NonZeroUsize }, Timeout { key: record::Key, - num_results: usize, + /// [`PeerId`]s of the peers the record was successfully stored on. + success: Vec, quorum: NonZeroUsize }, } @@ -2376,8 +2583,9 @@ pub enum QueryInfo { GetRecord { /// The key to look for. key: record::Key, - /// The records found so far. - records: Vec, + /// The records with the id of the peer that returned them. `None` when + /// the record was found in the local store. + records: Vec, /// The number of records to look for. quorum: NonZeroUsize, /// The closest peer to `key` that did not return a record. @@ -2471,8 +2679,8 @@ pub enum PutRecordPhase { /// The query is replicating the record to the closest nodes to the key. PutRecord { - /// The number of successful replication requests so far. - num_results: usize, + /// A list of peers the given record has been successfully replicated to. + success: Vec, /// Query statistics from the finished `GetClosestPeers` phase. get_closest_peers_stats: QueryStats, }, @@ -2543,3 +2751,22 @@ impl fmt::Display for NoKnownPeers { } impl std::error::Error for NoKnownPeers {} + +/// The possible outcomes of [`Kademlia::add_address`]. +pub enum RoutingUpdate { + /// The given peer and address has been added to the routing + /// table. + Success, + /// The peer and address is pending insertion into + /// the routing table, if a disconnected peer fails + /// to respond. If the given peer and address ends up + /// in the routing table, [`KademliaEvent::RoutingUpdated`] + /// is eventually emitted. 
+ Pending, + /// The routing table update failed, either because the + /// corresponding bucket for the peer is full and the + /// pending slot(s) are occupied, or because the given + /// peer ID is deemed invalid (e.g. refers to the local + /// peer ID). + Failed, +} diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index e84e705c..88a601bc 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -22,14 +22,15 @@ use super::*; -use crate::{ALPHA_VALUE, K_VALUE}; +use crate::K_VALUE; use crate::kbucket::Distance; -use crate::record::store::MemoryStore; +use crate::record::{Key, store::MemoryStore}; use futures::{ prelude::*, executor::block_on, future::poll_fn, }; +use futures_timer::Delay; use libp2p_core::{ PeerId, Transport, @@ -43,8 +44,8 @@ use libp2p_secio::SecioConfig; use libp2p_swarm::Swarm; use libp2p_yamux as yamux; use quickcheck::*; -use rand::{Rng, random, thread_rng}; -use std::{collections::{HashSet, HashMap}, io, num::NonZeroUsize, u64}; +use rand::{Rng, random, thread_rng, rngs::StdRng, SeedableRng}; +use std::{collections::{HashSet, HashMap}, time::Duration, io, num::NonZeroUsize, u64}; use multihash::{wrap, Code, Multihash}; use libp2p_core::identity::ed25519; @@ -136,21 +137,45 @@ fn random_multihash() -> Multihash { wrap(Code::Sha2_256, &thread_rng().gen::<[u8; 32]>()) } +#[derive(Clone, Debug)] +struct Seed([u8; 32]); + +impl Arbitrary for Seed { + fn arbitrary(g: &mut G) -> Seed { + Seed(g.gen()) + } +} + #[test] fn bootstrap() { - fn run(rng: &mut impl Rng) { - let num_total = rng.gen_range(2, 20); - // When looking for the closest node to a key, Kademlia considers ALPHA_VALUE nodes to query - // at initialization. If `num_groups` is larger than ALPHA_VALUE the remaining locally known - // nodes will not be considered. Given that no other node is aware of them, they would be - // lost entirely. 
To prevent the above restrict `num_groups` to be equal or smaller than - // ALPHA_VALUE. - let num_group = rng.gen_range(1, (num_total % ALPHA_VALUE.get()) + 2); + fn prop(seed: Seed) { + let mut rng = StdRng::from_seed(seed.0); - let mut swarms = build_connected_nodes(num_total, num_group).into_iter() + let num_total = rng.gen_range(2, 20); + // When looking for the closest node to a key, Kademlia considers + // K_VALUE nodes to query at initialization. If `num_group` is larger + // than K_VALUE the remaining locally known nodes will not be + // considered. Given that no other node is aware of them, they would be + // lost entirely. To prevent the above restrict `num_group` to be equal + // or smaller than K_VALUE. + let num_group = rng.gen_range(1, (num_total % K_VALUE.get()) + 2); + + let mut cfg = KademliaConfig::default(); + if rng.gen() { + cfg.disjoint_query_paths(true); + } + + let mut swarms = build_connected_nodes_with_config( + num_total, + num_group, + cfg, + ).into_iter() .map(|(_, _a, s)| s) .collect::>(); - let swarm_ids: Vec<_> = swarms.iter().map(Swarm::local_peer_id).cloned().collect(); + let swarm_ids: Vec<_> = swarms.iter() + .map(Swarm::local_peer_id) + .cloned() + .collect(); let qid = swarms[0].bootstrap().unwrap(); @@ -175,9 +200,12 @@ fn bootstrap() { } first = false; if ok.num_remaining == 0 { - let known = swarm.kbuckets.iter() - .map(|e| e.node.key.preimage().clone()) - .collect::>(); + let mut known = HashSet::new(); + for b in swarm.kbuckets.iter() { + for e in b.iter() { + known.insert(e.node.key.preimage().clone()); + } + } assert_eq!(expected_known, known); return Poll::Ready(()) } @@ -194,10 +222,7 @@ fn bootstrap() { ) } - let mut rng = thread_rng(); - for _ in 0 .. 
10 { - run(&mut rng) - } + QuickCheck::new().tests(10).quickcheck(prop as fn(_) -> _) } #[test] @@ -424,16 +449,22 @@ fn get_record_not_found() { ) } -/// A node joining a fully connected network via a single bootnode should be able to put a record to -/// the X closest nodes of the network where X is equal to the configured replication factor. +/// A node joining a fully connected network via three (ALPHA_VALUE) bootnodes +/// should be able to put a record to the X closest nodes of the network where X +/// is equal to the configured replication factor. #[test] fn put_record() { - fn prop(replication_factor: usize, records: Vec) { - let replication_factor = NonZeroUsize::new(replication_factor % (K_VALUE.get() / 2) + 1).unwrap(); - let num_total = replication_factor.get() * 2; + fn prop(records: Vec, seed: Seed) { + let mut rng = StdRng::from_seed(seed.0); + let replication_factor = NonZeroUsize::new(rng.gen_range(1, (K_VALUE.get() / 2) + 1)).unwrap(); + // At least 4 nodes, 1 under test + 3 bootnodes. + let num_total = usize::max(4, replication_factor.get() * 2); let mut config = KademliaConfig::default(); config.set_replication_factor(replication_factor); + if rng.gen() { + config.disjoint_query_paths(true); + } let mut swarms = { let mut fully_connected_swarms = build_fully_connected_nodes_with_config( @@ -442,11 +473,14 @@ fn put_record() { ); let mut single_swarm = build_node_with_config(config); - single_swarm.2.add_address( - Swarm::local_peer_id(&fully_connected_swarms[0].2), - fully_connected_swarms[0].1.clone(), - fully_connected_swarms[0].0.public(), - ); + // Connect `single_swarm` to three bootnodes. 
+ for i in 0..3 { + single_swarm.2.add_address( + Swarm::local_peer_id(&fully_connected_swarms[0].2), + fully_connected_swarms[i].1.clone(), + fully_connected_swarms[i].0.public(), + ); + } let mut swarms = vec![single_swarm]; swarms.append(&mut fully_connected_swarms); @@ -628,11 +662,13 @@ fn get_record() { loop { match swarm.poll_next_unpin(ctx) { Poll::Ready(Some(KademliaEvent::QueryResult { - id, result: QueryResult::GetRecord(Ok(ok)), .. + id, + result: QueryResult::GetRecord(Ok(GetRecordOk { records })), + .. })) => { assert_eq!(id, qid); - assert_eq!(ok.records.len(), 1); - assert_eq!(ok.records.first(), Some(&record)); + assert_eq!(records.len(), 1); + assert_eq!(records.first().unwrap().record, record); return Poll::Ready(()); } // Ignore any other event. @@ -672,11 +708,13 @@ fn get_record_many() { loop { match swarm.poll_next_unpin(ctx) { Poll::Ready(Some(KademliaEvent::QueryResult { - id, result: QueryResult::GetRecord(Ok(ok)), .. + id, + result: QueryResult::GetRecord(Ok(GetRecordOk { records })), + .. })) => { assert_eq!(id, qid); - assert_eq!(ok.records.len(), num_results); - assert_eq!(ok.records.first(), Some(&record)); + assert_eq!(records.len(), num_results); + assert_eq!(records.first().unwrap().record, record); return Poll::Ready(()); } // Ignore any other event. @@ -691,17 +729,22 @@ fn get_record_many() { ) } -/// A node joining a fully connected network via a single bootnode should be able to add itself as a -/// provider to the X closest nodes of the network where X is equal to the configured replication -/// factor. +/// A node joining a fully connected network via three (ALPHA_VALUE) bootnodes +/// should be able to add itself as a provider to the X closest nodes of the +/// network where X is equal to the configured replication factor. 
#[test] fn add_provider() { - fn prop(replication_factor: usize, keys: Vec) { - let replication_factor = NonZeroUsize::new(replication_factor % (K_VALUE.get() / 2) + 1).unwrap(); - let num_total = replication_factor.get() * 2; + fn prop(keys: Vec, seed: Seed) { + let mut rng = StdRng::from_seed(seed.0); + let replication_factor = NonZeroUsize::new(rng.gen_range(1, (K_VALUE.get() / 2) + 1)).unwrap(); + // At least 4 nodes, 1 under test + 3 bootnodes. + let num_total = usize::max(4, replication_factor.get() * 2); let mut config = KademliaConfig::default(); config.set_replication_factor(replication_factor); + if rng.gen() { + config.disjoint_query_paths(true); + } let mut swarms = { let mut fully_connected_swarms = build_fully_connected_nodes_with_config( @@ -710,11 +753,14 @@ fn add_provider() { ); let mut single_swarm = build_node_with_config(config); - single_swarm.2.add_address( - Swarm::local_peer_id(&fully_connected_swarms[0].2), - fully_connected_swarms[0].1.clone(), - fully_connected_swarms[0].0.public(), - ); + // Connect `single_swarm` to three bootnodes. + for i in 0..3 { + single_swarm.2.add_address( + Swarm::local_peer_id(&fully_connected_swarms[0].2), + fully_connected_swarms[i].1.clone(), + fully_connected_swarms[i].0.public(), + ); + } let mut swarms = vec![single_swarm]; swarms.append(&mut fully_connected_swarms); @@ -888,3 +934,179 @@ fn exp_decr_expiration_overflow() { quickcheck(prop_no_panic as fn(_, _)) } + +#[test] +fn disjoint_query_does_not_finish_before_all_paths_did() { + let mut config = KademliaConfig::default(); + config.disjoint_query_paths(true); + // I.e. setting the amount disjoint paths to be explored to 2. + config.set_parallelism(NonZeroUsize::new(2).unwrap()); + + let mut alice = build_node_with_config(config); + let mut trudy = build_node(); // Trudy the intrudor, an adversary. 
+ let mut bob = build_node(); + + let key = Key::new(&multihash::Sha2_256::digest(&thread_rng().gen::<[u8; 32]>())); + let record_bob = Record::new(key.clone(), b"bob".to_vec()); + let record_trudy = Record::new(key.clone(), b"trudy".to_vec()); + + // Make `bob` and `trudy` aware of their version of the record searched by + // `alice`. + bob.1.store.put(record_bob.clone()).unwrap(); + trudy.1.store.put(record_trudy.clone()).unwrap(); + + // Make `trudy` and `bob` known to `alice`. + alice.1.add_address(Swarm::local_peer_id(&trudy.1), trudy.0.clone()); + alice.1.add_address(Swarm::local_peer_id(&bob.1), bob.0.clone()); + + // Drop the swarm addresses. + let (mut alice, mut bob, mut trudy) = (alice.1, bob.1, trudy.1); + + // Have `alice` query the Dht for `key` with a quorum of 1. + alice.get_record(&key, Quorum::One); + + // The default peer timeout is 10 seconds. Choosing 1 seconds here should + // give enough head room to prevent connections to `bob` to time out. + let mut before_timeout = Delay::new(Duration::from_secs(1)); + + // Poll only `alice` and `trudy` expecting `alice` not yet to return a query + // result as it is not able to connect to `bob` just yet. + block_on( + poll_fn(|ctx| { + for (i, swarm) in [&mut alice, &mut trudy].iter_mut().enumerate() { + loop { + match swarm.poll_next_unpin(ctx) { + Poll::Ready(Some(KademliaEvent::QueryResult{ + result: QueryResult::GetRecord(result), + .. + })) => { + if i != 0 { + panic!("Expected `QueryResult` from Alice.") + } + + match result { + Ok(_) => panic!( + "Expected query not to finish until all \ + disjoint paths have been explored.", + ), + Err(e) => panic!("{:?}", e), + } + } + // Ignore any other event. + Poll::Ready(Some(_)) => (), + Poll::Ready(None) => panic!("Expected Kademlia behaviour not to finish."), + Poll::Pending => break, + } + } + } + + // Make sure not to wait until connections to `bob` time out. 
+ before_timeout.poll_unpin(ctx) + }) + ); + + // Make sure `alice` has exactly one query with `trudy`'s record only. + assert_eq!(1, alice.queries.iter().count()); + alice.queries.iter().for_each(|q| { + match &q.inner.info { + QueryInfo::GetRecord{ records, .. } => { + assert_eq!( + *records, + vec![PeerRecord { + peer: Some(Swarm::local_peer_id(&trudy).clone()), + record: record_trudy.clone(), + }], + ); + }, + i @ _ => panic!("Unexpected query info: {:?}", i), + } + }); + + // Poll `alice` and `bob` expecting `alice` to return a successful query + // result as it is now able to explore the second disjoint path. + let records = block_on( + poll_fn(|ctx| { + for (i, swarm) in [&mut alice, &mut bob].iter_mut().enumerate() { + loop { + match swarm.poll_next_unpin(ctx) { + Poll::Ready(Some(KademliaEvent::QueryResult{ + result: QueryResult::GetRecord(result), + .. + })) => { + if i != 0 { + panic!("Expected `QueryResult` from Alice.") + } + + match result { + Ok(ok) => return Poll::Ready(ok.records), + Err(e) => unreachable!("{:?}", e), + } + } + // Ignore any other event. + Poll::Ready(Some(_)) => (), + Poll::Ready(None) => panic!( + "Expected Kademlia behaviour not to finish.", + ), + Poll::Pending => break, + } + } + } + + Poll::Pending + }) + ); + + assert_eq!(2, records.len()); + assert!(records.contains(&PeerRecord { + peer: Some(Swarm::local_peer_id(&bob).clone()), + record: record_bob, + })); + assert!(records.contains(&PeerRecord { + peer: Some(Swarm::local_peer_id(&trudy).clone()), + record: record_trudy, + })); +} + +/// Tests that peers are not automatically inserted into +/// the routing table with `KademliaBucketInserts::Manual`. +#[test] +fn manual_bucket_inserts() { + let mut cfg = KademliaConfig::default(); + cfg.set_kbucket_inserts(KademliaBucketInserts::Manual); + // 1 -> 2 -> [3 -> ...] + let mut swarms = build_connected_nodes_with_config(3, 1, cfg); + // The peers and their addresses for which we expect `RoutablePeer` events. 
+ let mut expected = swarms.iter().skip(2) + .map(|(a, s)| (a.clone(), Swarm::local_peer_id(s).clone())) + .collect::>(); + // We collect the peers for which a `RoutablePeer` event + // was received in here to check at the end of the test + // that none of them was inserted into a bucket. + let mut routable = Vec::new(); + // Start an iterative query from the first peer. + swarms[0].1.get_closest_peers(PeerId::random()); + block_on(poll_fn(move |ctx| { + for (_, swarm) in swarms.iter_mut() { + loop { + match swarm.poll_next_unpin(ctx) { + Poll::Ready(Some(KademliaEvent::RoutablePeer { + peer, address + })) => { + assert_eq!(peer, expected.remove(&address).expect("Unexpected address")); + routable.push(peer); + if expected.is_empty() { + for peer in routable.iter() { + let bucket = swarm.kbucket(peer.clone()).unwrap(); + assert!(bucket.iter().all(|e| e.node.key.preimage() != peer)); + } + return Poll::Ready(()) + } + } + Poll::Ready(..) => {}, + Poll::Pending => break + } + } + } + Poll::Pending + })); +} diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index d7facddc..e538ca82 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -632,7 +632,7 @@ where } (None, Some(event), _) => { if self.substreams.is_empty() { - self.keep_alive = KeepAlive::Until(Instant::now() + Duration::from_secs(10)); + self.keep_alive = KeepAlive::Until(Instant::now() + self.config.idle_timeout); } return Poll::Ready(event); } @@ -653,7 +653,7 @@ where if self.substreams.is_empty() { // We destroyed all substreams in this function. 
- self.keep_alive = KeepAlive::Until(Instant::now() + Duration::from_secs(10)); + self.keep_alive = KeepAlive::Until(Instant::now() + self.config.idle_timeout); } else { self.keep_alive = KeepAlive::Yes; } diff --git a/protocols/kad/src/kbucket.rs b/protocols/kad/src/kbucket.rs index 0923eb2a..1ce3aa7a 100644 --- a/protocols/kad/src/kbucket.rs +++ b/protocols/kad/src/kbucket.rs @@ -210,10 +210,11 @@ where } /// Returns a by-reference iterator over all buckets. + /// Returns an iterator over all buckets. /// /// The buckets are ordered by proximity to the `local_key`, i.e. the first /// bucket is the closest bucket (containing at most one key). - pub fn buckets<'a>(&'a mut self) -> impl Iterator> + 'a { + pub fn iter<'a>(&'a mut self) -> impl Iterator> + 'a { let applied_pending = &mut self.applied_pending; self.buckets.iter_mut().enumerate().map(move |(i, b)| { applied_pending.extend(b.apply_pending()); @@ -224,6 +225,25 @@ where }) } + /// Returns the bucket for the distance to the given key. + /// + /// Returns `None` if the given key refers to the local key. + pub fn bucket(&mut self, key: &K) -> Option> + where + K: AsRef, + { + let d = self.local_key.as_ref().distance(key); + if let Some(index) = BucketIndex::new(&d) { + let bucket = &mut self.buckets[index.0]; + if let Some(applied) = bucket.apply_pending() { + self.applied_pending.push_back(applied) + } + Some(KBucketRef { bucket, index }) + } else { + None + } + } + /// Consumes the next applied pending entry, if any. /// /// When an entry is attempted to be inserted and the respective bucket is full, @@ -492,17 +512,22 @@ where } } -/// A reference to a bucket in a `KBucketsTable`. -pub struct KBucketRef<'a, TPeerId, TVal> { +/// A reference to a bucket in a [`KBucketsTable`]. 
+pub struct KBucketRef<'a, TKey, TVal> { pub index: BucketIndex, - pub bucket: &'a mut KBucket, + pub bucket: &'a mut KBucket, } -impl KBucketRef<'_, TKey, TVal> +impl<'a, TKey, TVal> KBucketRef<'a, TKey, TVal> where TKey: Clone + AsRef, TVal: Clone, { + /// Checks whether the bucket is empty. + pub fn is_empty(&self) -> bool { + self.num_entries() == 0 + } + /// Returns the number of entries in the bucket. pub fn num_entries(&self) -> usize { self.bucket.num_entries() @@ -527,6 +552,19 @@ where pub fn rand_distance(&self, rng: &mut impl rand::Rng) -> Distance { self.index.rand_distance(rng) } + + /// Returns an iterator over the entries in the bucket. + pub fn iter(&'a self) -> impl Iterator> { + self.bucket.iter().map(move |(n, status)| { + EntryRefView { + node: NodeRefView { + key: &n.key, + value: &n.value + }, + status + } + }) + } } #[cfg(test)] diff --git a/protocols/kad/src/kbucket/bucket.rs b/protocols/kad/src/kbucket/bucket.rs index b933684c..9812d3f4 100644 --- a/protocols/kad/src/kbucket/bucket.rs +++ b/protocols/kad/src/kbucket/bucket.rs @@ -207,6 +207,11 @@ where .collect() } + /// Removes the pending node from the bucket, if any. + pub fn remove_pending(&mut self) -> Option> { + self.pending.take() + } + /// Updates the status of the node referred to by the given key, if it is /// in the bucket. pub fn update(&mut self, key: &TKey, new_status: NodeStatus) { @@ -261,9 +266,10 @@ where } } - fn pending_active(&self, weighted: bool) -> bool { - if weighted { - self.weighted.pending_active() + /// Returns the status of the node at the given position. 
+ pub fn status(&self, pos: Position) -> NodeStatus { + if self.first_connected_pos.map_or(false, |i| pos.0 >= i) { + NodeStatus::Connected } else { self.swamp.pending_active() } diff --git a/protocols/kad/src/kbucket/entry.rs b/protocols/kad/src/kbucket/entry.rs index f420de21..b96beacc 100644 --- a/protocols/kad/src/kbucket/entry.rs +++ b/protocols/kad/src/kbucket/entry.rs @@ -186,7 +186,7 @@ where .0 .bucket .get_mut(self.0.key) - .expect("We can only build a ConnectedEntry if the entry is in the bucket; QED") + .expect("We can only build a PresentEntry if the entry is in the bucket; QED") .value } @@ -195,6 +195,14 @@ where self.0.bucket.update(self.0.key, status); Self::new(self.0.bucket, self.0.key) } + + /// Removes the entry from the bucket. + pub fn remove(self) -> EntryView { + let (node, status, _pos) = self.0.bucket + .remove(&self.0.key) + .expect("We can only build a PresentEntry if the entry is in the bucket; QED"); + EntryView { node, status } + } } /// An entry waiting for a slot to be available in a bucket. @@ -229,6 +237,17 @@ where self.0.bucket.update_pending(self.0.key, status); PendingEntry::new(self.0.bucket, self.0.key) } + + /// Removes the pending entry from the bucket. + pub fn remove(self) -> EntryView { + let pending = self.0.bucket + .remove_pending() + .expect("We can only build a PendingEntry if the entry is pending insertion + into the bucket; QED"); + let status = pending.status(); + let node = pending.into_node(); + EntryView { node, status } + } } /// An entry that is not present in any bucket. 
diff --git a/protocols/kad/src/lib.rs b/protocols/kad/src/lib.rs index ec526faf..6a4f5eaf 100644 --- a/protocols/kad/src/lib.rs +++ b/protocols/kad/src/lib.rs @@ -41,11 +41,17 @@ mod dht_proto { } pub use addresses::Addresses; +pub use behaviour::{Kademlia, KademliaBucketInserts, KademliaConfig, KademliaEvent, Quorum}; pub use behaviour::{ + QueryRef, + QueryMut, + QueryResult, QueryInfo, QueryStats, + PeerRecord, + BootstrapResult, BootstrapOk, BootstrapError, @@ -54,6 +60,8 @@ pub use behaviour::{ GetRecordOk, GetRecordError, + PutRecordPhase, + PutRecordContext, PutRecordResult, PutRecordOk, PutRecordError, @@ -62,6 +70,8 @@ pub use behaviour::{ GetClosestPeersOk, GetClosestPeersError, + AddProviderPhase, + AddProviderContext, AddProviderResult, AddProviderOk, AddProviderError, diff --git a/protocols/kad/src/query.rs b/protocols/kad/src/query.rs index c334aba1..6862af4c 100644 --- a/protocols/kad/src/query.rs +++ b/protocols/kad/src/query.rs @@ -21,10 +21,10 @@ mod peers; use peers::PeersIterState; -use peers::closest::{ClosestPeersIter, ClosestPeersIterConfig}; +use peers::closest::{ClosestPeersIterConfig, ClosestPeersIter, disjoint::ClosestDisjointPeersIter}; use peers::fixed::FixedPeersIter; -use crate::K_VALUE; +use crate::{ALPHA_VALUE, K_VALUE}; use crate::kbucket::{Key, KeyBytes}; use either::Either; use fnv::FnvHashMap; @@ -113,7 +113,7 @@ impl QueryPool { { assert!(!self.queries.contains_key(&id)); // TODO: why not alpha? - let parallelism = self.config.replication_factor.get(); + let parallelism = self.config.replication_factor; let (swamp, weighted) = peers.into_iter().partition::, _>(|p| p.weight == 0); let swamp = swamp.into_iter().map(|p| p.peer_id.into_preimage()); @@ -143,7 +143,8 @@ impl QueryPool { I: IntoIterator { let cfg = ClosestPeersIterConfig { - num_results: self.config.replication_factor.get(), + num_results: self.config.replication_factor, + parallelism: self.config.parallelism, .. 
ClosestPeersIterConfig::default() }; @@ -151,8 +152,15 @@ impl QueryPool { let swamp = swamp.into_iter().map(|p| p.peer_id); let weighted = weighted.into_iter().map(|p| p.peer_id); - let weighted_iter = QueryPeerIter::Closest(ClosestPeersIter::with_config(cfg.clone(), target.clone(), weighted)); - let swamp_iter = QueryPeerIter::Closest(ClosestPeersIter::with_config(cfg, target, swamp)); + let weighted_iter = if self.config.disjoint_query_paths { + QueryPeerIter::ClosestDisjoint( + ClosestDisjointPeersIter::with_config(cfg, target, peers), + ) + } else { + QueryPeerIter::Closest(ClosestPeersIter::with_config(cfg.clone(), target.clone(), weighted)); + let swamp_iter = QueryPeerIter::Closest(ClosestPeersIter::with_config(cfg, target, swamp)) + }; + let query = Query::new(id, weighted_iter, swamp_iter, inner); self.queries.insert(id, query); } @@ -233,15 +241,34 @@ pub struct QueryId(usize); /// The configuration for queries in a `QueryPool`. #[derive(Debug, Clone)] pub struct QueryConfig { + /// Timeout of a single query. + /// + /// See [`crate::behaviour::KademliaConfig::set_query_timeout`] for details. pub timeout: Duration, + + /// The replication factor to use. + /// + /// See [`crate::behaviour::KademliaConfig::set_replication_factor`] for details. pub replication_factor: NonZeroUsize, + + /// Allowed level of parallelism for iterative queries. + /// + /// See [`crate::behaviour::KademliaConfig::set_parallelism`] for details. + pub parallelism: NonZeroUsize, + + /// Whether to use disjoint paths on iterative lookups. + /// + /// See [`crate::behaviour::KademliaConfig::disjoint_query_paths`] for details. 
+ pub disjoint_query_paths: bool, } impl Default for QueryConfig { fn default() -> Self { QueryConfig { timeout: Duration::from_secs(60), - replication_factor: NonZeroUsize::new(K_VALUE.get()).expect("K_VALUE > 0") + replication_factor: NonZeroUsize::new(K_VALUE.get()).expect("K_VALUE > 0"), + parallelism: ALPHA_VALUE, + disjoint_query_paths: false, } } } @@ -263,6 +290,7 @@ pub struct Query { /// The peer selection strategies that can be used by queries. enum QueryPeerIter { Closest(ClosestPeersIter), + ClosestDisjoint(ClosestDisjointPeersIter), Fixed(FixedPeersIter) } @@ -291,6 +319,7 @@ impl Query { let updated_weighted = match &mut self.weighted_iter { QueryPeerIter::Closest(iter) => iter.on_failure(peer), + QueryPeerIter::ClosestDisjoint(iter) => iter.on_failure(peer), QueryPeerIter::Fixed(iter) => iter.on_failure(peer), }; @@ -310,6 +339,7 @@ impl Query { let updated_swamp = match &mut self.swamp_iter { QueryPeerIter::Closest(iter) => iter.on_success(peer, swamp.into_iter().map(|p| p.peer_id)), + QueryPeerIter::ClosestDisjoint(iter) => iter.on_success(peer, new_peers), QueryPeerIter::Fixed(iter) => iter.on_success(peer), }; @@ -332,6 +362,7 @@ impl Query { let swamp_waiting = match &self.swamp_iter { QueryPeerIter::Closest(iter) => iter.is_waiting(peer), + QueryPeerIter::ClosestDisjoint(iter) => iter.is_waiting(peer), QueryPeerIter::Fixed(iter) => iter.is_waiting(peer) }; @@ -347,6 +378,7 @@ impl Query { // First query weighted iter let weighted_state = match &mut self.weighted_iter { QueryPeerIter::Closest(iter) => iter.next(now), + QueryPeerIter::ClosestDisjoint(iter) => iter.next(now), QueryPeerIter::Fixed(iter) => iter.next() }; @@ -377,6 +409,34 @@ impl Query { } } + /// Tries to (gracefully) finish the query prematurely, providing the peers + /// that are no longer of interest for further progress of the query. + /// + /// A query may require that in order to finish gracefully a certain subset + /// of peers must be contacted. E.g. 
in the case of disjoint query paths a + /// query may only finish gracefully if every path contacted a peer whose + /// response permits termination of the query. The given peers are those for + /// which this is considered to be the case, i.e. for which a termination + /// condition is satisfied. + /// + /// Returns `true` if the query did indeed finish, `false` otherwise. In the + /// latter case, a new attempt at finishing the query may be made with new + /// `peers`. + /// + /// A finished query immediately stops yielding new peers to contact and + /// will be reported by [`QueryPool::poll`] via + /// [`QueryPoolState::Finished`]. + pub fn try_finish<'a, I>(&mut self, peers: I) -> bool + where + I: IntoIterator + { + match &mut self.peer_iter { + QueryPeerIter::Closest(iter) => { iter.finish(); true }, + QueryPeerIter::ClosestDisjoint(iter) => iter.finish_paths(peers), + QueryPeerIter::Fixed(iter) => { iter.finish(); true } + } + } + /// Finishes the query prematurely. /// /// A finished query immediately stops yielding new peers to contact and will be @@ -384,6 +444,7 @@ impl Query { pub fn finish(&mut self) { match &mut self.weighted_iter { QueryPeerIter::Closest(iter) => iter.finish(), + QueryPeerIter::ClosestDisjoint(iter) => iter.finish(), QueryPeerIter::Fixed(iter) => iter.finish() }; @@ -405,6 +466,7 @@ impl Query { let swamp_finished = match &self.swamp_iter { QueryPeerIter::Closest(iter) => iter.is_finished(), + QueryPeerIter::ClosestDisjoint(iter) => iter.is_finished(), QueryPeerIter::Fixed(iter) => iter.is_finished() }; @@ -414,7 +476,8 @@ impl Query { /// Consumes the query, producing the final `QueryResult`. 
pub fn into_result(self) -> QueryResult> { let weighted = match self.weighted_iter { - QueryPeerIter::Closest(iter) => Either::Left(iter.into_result()), + QueryPeerIter::Closest(iter) => Either::Left(Either::Left(iter.into_result())), + QueryPeerIter::ClosestDisjoint(iter) => Either::Left(Either::Right(iter.into_result())), QueryPeerIter::Fixed(iter) => Either::Right(iter.into_result()) }; diff --git a/protocols/kad/src/query/peers.rs b/protocols/kad/src/query/peers.rs index 049ffe58..964068aa 100644 --- a/protocols/kad/src/query/peers.rs +++ b/protocols/kad/src/query/peers.rs @@ -65,4 +65,3 @@ pub enum PeersIterState<'a> { /// The iterator finished. Finished } - diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs index 8805911d..4ac734c2 100644 --- a/protocols/kad/src/query/peers/closest.rs +++ b/protocols/kad/src/query/peers/closest.rs @@ -18,20 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::{iter::FromIterator, time::Duration}; -use std::collections::btree_map::{BTreeMap, Entry}; +use super::*; +use crate::{K_VALUE, ALPHA_VALUE}; +use crate::kbucket::{Key, KeyBytes, Distance}; +use libp2p_core::PeerId; +use std::{time::Duration, iter::FromIterator, num::NonZeroUsize}; +use std::collections::btree_map::{BTreeMap, Entry}; use log::trace; use wasm_timer::Instant; - -use libp2p_core::PeerId; - -use crate::{ALPHA_VALUE, K_VALUE}; -use crate::kbucket::{Distance, Key, KeyBytes}; - use derivative::Derivative; -use super::*; +pub mod disjoint; /// A peer iterator for a dynamically changing list of peers, sorted by increasing /// distance to a chosen target. @@ -63,13 +61,13 @@ pub struct ClosestPeersIterConfig { /// The `α` parameter in the Kademlia paper. The maximum number of peers that /// the iterator is allowed to wait for in parallel while iterating towards the closest /// nodes to a target. Defaults to `ALPHA_VALUE`. 
- pub parallelism: usize, + pub parallelism: NonZeroUsize, /// Number of results (closest peers) to search for. /// /// The number of closest peers for which the iterator must obtain successful results /// in order to finish successfully. Defaults to `K_VALUE`. - pub num_results: usize, + pub num_results: NonZeroUsize, /// The timeout for a single peer. /// @@ -83,8 +81,8 @@ pub struct ClosestPeersIterConfig { impl Default for ClosestPeersIterConfig { fn default() -> Self { ClosestPeersIterConfig { - parallelism: ALPHA_VALUE.get(), - num_results: K_VALUE.get(), + parallelism: ALPHA_VALUE, + num_results: K_VALUE, peer_timeout: Duration::from_secs(10), } } @@ -195,14 +193,14 @@ impl ClosestPeersIter { // than any peer seen so far (i.e. is the first entry), or the iterator did // not yet accumulate enough closest peers. progress = self.closest_peers.keys().next() == Some(&distance) - || num_closest < self.config.num_results; + || num_closest < self.config.num_results.get(); } // Update the iterator state. self.state = match self.state { State::Iterating { no_progress } => { let no_progress = if progress { 0 } else { no_progress + 1 }; - if no_progress >= self.config.parallelism { + if no_progress >= self.config.parallelism.get() { State::Stalled } else { State::Iterating { no_progress } @@ -335,7 +333,7 @@ impl ClosestPeersIter { *cnt += 1; // If `num_results` successful results have been delivered for the // closest peers, the iterator is done. - if *cnt >= self.config.num_results { + if *cnt >= self.config.num_results.get() { trace!( "ClosestPeerIter: target = {}; {} peers responded, finished.", bs58::encode(&self.target).into_string(), @@ -414,7 +412,7 @@ impl ClosestPeersIter { self.state == State::Finished } - /// Consumes the iterator, returning the target and the closest peers. + /// Consumes the iterator, returning the closest peers. 
pub fn into_result(self) -> impl Iterator { self.closest_peers .into_iter() @@ -425,7 +423,7 @@ impl ClosestPeersIter { None } }) - .take(self.config.num_results) + .take(self.config.num_results.get()) } /// Checks if the iterator is at capacity w.r.t. the permitted parallelism. @@ -437,9 +435,9 @@ impl ClosestPeersIter { fn at_capacity(&self) -> bool { match self.state { State::Stalled => self.num_waiting >= usize::max( - self.config.num_results, self.config.parallelism + self.config.num_results.get(), self.config.parallelism.get() ), - State::Iterating { .. } => self.num_waiting >= self.config.parallelism, + State::Iterating { .. } => self.num_waiting >= self.config.parallelism.get(), State::Finished => true } } @@ -560,8 +558,8 @@ mod tests { .map(Key::from); let target = Key::from(Into::::into(PeerId::random())); let config = ClosestPeersIterConfig { - parallelism: g.gen_range(1, 10), - num_results: g.gen_range(1, 25), + parallelism: NonZeroUsize::new(g.gen_range(1, 10)).unwrap(), + num_results: NonZeroUsize::new(g.gen_range(1, 25)).unwrap(), peer_timeout: Duration::from_secs(g.gen_range(10, 30)), }; ClosestPeersIter::with_config(config, target, known_closest_peers) @@ -618,7 +616,7 @@ mod tests { .map(|e| e.key.clone()) .collect::>(); let num_known = expected.len(); - let max_parallelism = usize::min(iter.config.parallelism, num_known); + let max_parallelism = usize::min(iter.config.parallelism.get(), num_known); let target = iter.target.clone(); let mut remaining; @@ -657,7 +655,7 @@ mod tests { // peers or an error, thus finishing the "in-flight requests". 
for (i, k) in expected.iter().enumerate() { if rng.gen_bool(0.75) { - let num_closer = rng.gen_range(0, iter.config.num_results + 1); + let num_closer = rng.gen_range(0, iter.config.num_results.get() + 1); let closer_peers = random_peers(num_closer, &mut rng) .into_iter() .map(|p| p.into()) @@ -696,16 +694,16 @@ mod tests { assert!(sorted(&target, &closest)); - if closest.len() < num_results { + if closest.len() < num_results.get() { // The iterator returned fewer results than requested. Therefore // either the initial number of known peers must have been // less than the desired number of results, or there must // have been failures. - assert!(num_known < num_results || num_failures > 0); + assert!(num_known < num_results.get() || num_failures > 0); // All peers must have been contacted. assert!(all_contacted, "Not all peers have been contacted."); } else { - assert_eq!(num_results, closest.len(), "Too many results."); + assert_eq!(num_results.get(), closest.len(), "Too many results."); } } @@ -820,7 +818,7 @@ mod tests { fn prop(mut iter: ClosestPeersIter) { iter.state = State::Stalled; - for i in 0..usize::max(iter.config.parallelism, iter.config.num_results) { + for i in 0..usize::max(iter.config.parallelism.get(), iter.config.num_results.get()) { iter.num_waiting = i; assert!( !iter.at_capacity(), @@ -829,7 +827,10 @@ mod tests { ) } - iter.num_waiting = usize::max(iter.config.parallelism, iter.config.num_results); + iter.num_waiting = usize::max( + iter.config.parallelism.get(), + iter.config.num_results.get(), + ); assert!( iter.at_capacity(), "Iterator should be at capacity if `max(parallelism, num_results)` requests are \ diff --git a/protocols/kad/src/query/peers/closest/disjoint.rs b/protocols/kad/src/query/peers/closest/disjoint.rs new file mode 100644 index 00000000..480000fe --- /dev/null +++ b/protocols/kad/src/query/peers/closest/disjoint.rs @@ -0,0 +1,971 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use super::*; +use crate::kbucket::{Key, KeyBytes}; +use libp2p_core::PeerId; +use std::{ + collections::HashMap, + iter::{Cycle, Map, Peekable}, + ops::{Index, IndexMut, Range}, +}; +use wasm_timer::Instant; + +/// Wraps around a set of [`ClosestPeersIter`], enforcing a disjoint discovery +/// path per configured parallelism according to the S/Kademlia paper. +pub struct ClosestDisjointPeersIter { + config: ClosestPeersIterConfig, + target: KeyBytes, + + /// The set of wrapped [`ClosestPeersIter`]. + iters: Vec, + /// Order in which to query the iterators ensuring fairness across + /// [`ClosestPeersIter::next`] calls. + iter_order: Cycle, fn(usize) -> IteratorIndex>>, + + /// Mapping of contacted peers by their [`PeerId`] to [`PeerState`] + /// containing the corresponding iterator indices as well as the response + /// state. + /// + /// Used to track which iterator contacted which peer. 
See [`PeerState`] + /// for details. + contacted_peers: HashMap, +} + +impl ClosestDisjointPeersIter { + /// Creates a new iterator with a default configuration. + pub fn new(target: KeyBytes, known_closest_peers: I) -> Self + where + I: IntoIterator>, + { + Self::with_config( + ClosestPeersIterConfig::default(), + target, + known_closest_peers, + ) + } + + /// Creates a new iterator with the given configuration. + pub fn with_config( + config: ClosestPeersIterConfig, + target: T, + known_closest_peers: I, + ) -> Self + where + I: IntoIterator>, + T: Into + Clone, + { + let peers = known_closest_peers.into_iter().take(K_VALUE.get()).collect::>(); + let iters = (0..config.parallelism.get()) + // NOTE: All [`ClosestPeersIter`] share the same set of peers at + // initialization. The [`ClosestDisjointPeersIter.contacted_peers`] + // mapping ensures that a successful response from a peer is only + // ever passed to a single [`ClosestPeersIter`]. See + // [`ClosestDisjointPeersIter::on_success`] for details. + .map(|_| ClosestPeersIter::with_config(config.clone(), target.clone(), peers.clone())) + .collect::>(); + + let iters_len = iters.len(); + + ClosestDisjointPeersIter { + config, + target: target.into(), + iters, + iter_order: (0..iters_len).map(IteratorIndex as fn(usize) -> IteratorIndex).cycle(), + contacted_peers: HashMap::new(), + } + } + + /// Callback for informing the iterator about a failed request to a peer. + /// + /// If the iterator is currently waiting for a result from `peer`, + /// the iterator state is updated and `true` is returned. In that + /// case, after calling this function, `next` should eventually be + /// called again to obtain the new state of the iterator. + /// + /// If the iterator is finished, it is not currently waiting for a + /// result from `peer`, or a result for `peer` has already been reported, + /// calling this function has no effect and `false` is returned. 
+ pub fn on_failure(&mut self, peer: &PeerId) -> bool { + let mut updated = false; + + if let Some(PeerState{ initiated_by, response }) = self.contacted_peers.get_mut(peer) { + updated = self.iters[*initiated_by].on_failure(peer); + + if updated { + *response = ResponseState::Failed; + } + + for (i, iter) in &mut self.iters.iter_mut().enumerate() { + if IteratorIndex(i) != *initiated_by { + // This iterator never triggered an actual request to the + // given peer - thus ignore the returned boolean. + iter.on_failure(peer); + } + } + } + + updated + } + + /// Callback for delivering the result of a successful request to a peer. + /// + /// Delivering results of requests back to the iterator allows the iterator + /// to make progress. The iterator is said to make progress either when the + /// given `closer_peers` contain a peer closer to the target than any peer + /// seen so far, or when the iterator did not yet accumulate `num_results` + /// closest peers and `closer_peers` contains a new peer, regardless of its + /// distance to the target. + /// + /// If the iterator is currently waiting for a result from `peer`, + /// the iterator state is updated and `true` is returned. In that + /// case, after calling this function, `next` should eventually be + /// called again to obtain the new state of the iterator. + /// + /// If the iterator is finished, it is not currently waiting for a + /// result from `peer`, or a result for `peer` has already been reported, + /// calling this function has no effect and `false` is returned. + pub fn on_success(&mut self, peer: &PeerId, closer_peers: I) -> bool + where + I: IntoIterator, + { + let mut updated = false; + + if let Some(PeerState{ initiated_by, response }) = self.contacted_peers.get_mut(peer) { + // Pass the new `closer_peers` to the iterator that first yielded + // the peer. 
+ updated = self.iters[*initiated_by].on_success(peer, closer_peers); + + if updated { + // Mark the response as succeeded for future iterators yielding + // this peer. There is no need to keep the `closer_peers` + // around, given that they are only passed to the first + // iterator. + *response = ResponseState::Succeeded; + } + + for (i, iter) in &mut self.iters.iter_mut().enumerate() { + if IteratorIndex(i) != *initiated_by { + // Only report the success to all remaining not-first + // iterators. Do not pass the `closer_peers` in order to + // uphold the S/Kademlia disjoint paths guarantee. + // + // This iterator never triggered an actual request to the + // given peer - thus ignore the returned boolean. + iter.on_success(peer, std::iter::empty()); + } + } + } + + updated + } + + pub fn is_waiting(&self, peer: &PeerId) -> bool { + self.iters.iter().any(|i| i.is_waiting(peer)) + } + + pub fn next(&mut self, now: Instant) -> PeersIterState { + let mut state = None; + + // Ensure querying each iterator at most once. + for _ in 0 .. self.iters.len() { + let i = self.iter_order.next().expect("Cycle never ends."); + let iter = &mut self.iters[i]; + + loop { + match iter.next(now) { + PeersIterState::Waiting(None) => { + match state { + Some(PeersIterState::Waiting(Some(_))) => { + // [`ClosestDisjointPeersIter::next`] returns immediately once a + // [`ClosestPeersIter`] yielded a peer. Thus this state is + // unreachable. + unreachable!(); + }, + Some(PeersIterState::Waiting(None)) => {} + Some(PeersIterState::WaitingAtCapacity) => { + // At least one ClosestPeersIter is no longer at capacity, thus the + // composite ClosestDisjointPeersIter is no longer at capacity. + state = Some(PeersIterState::Waiting(None)) + } + Some(PeersIterState::Finished) => { + // `state` is never set to `Finished`. 
+ unreachable!(); + } + None => state = Some(PeersIterState::Waiting(None)), + + }; + + break; + } + PeersIterState::Waiting(Some(peer)) => { + match self.contacted_peers.get_mut(&*peer) { + Some(PeerState{ response, .. }) => { + // Another iterator already contacted this peer. + let peer = peer.into_owned(); + + match response { + // The iterator will be notified later whether the given node + // was successfully contacted or not. See + // [`ClosestDisjointPeersIter::on_success`] for details. + ResponseState::Waiting => {}, + ResponseState::Succeeded => { + // Given that iterator was not the first to contact the peer + // it will not be made aware of the closer peers discovered + // to uphold the S/Kademlia disjoint paths guarantee. See + // [`ClosestDisjointPeersIter::on_success`] for details. + iter.on_success(&peer, std::iter::empty()); + }, + ResponseState::Failed => { + iter.on_failure(&peer); + }, + } + }, + None => { + // The iterator is the first to contact this peer. + self.contacted_peers.insert( + peer.clone().into_owned(), + PeerState::new(i), + ); + return PeersIterState::Waiting(Some(Cow::Owned(peer.into_owned()))); + }, + } + } + PeersIterState::WaitingAtCapacity => { + match state { + Some(PeersIterState::Waiting(Some(_))) => { + // [`ClosestDisjointPeersIter::next`] returns immediately once a + // [`ClosestPeersIter`] yielded a peer. Thus this state is + // unreachable. + unreachable!(); + }, + Some(PeersIterState::Waiting(None)) => {} + Some(PeersIterState::WaitingAtCapacity) => {} + Some(PeersIterState::Finished) => { + // `state` is never set to `Finished`. + unreachable!(); + }, + None => state = Some(PeersIterState::WaitingAtCapacity), + }; + + break; + } + PeersIterState::Finished => break, + } + } + } + + state.unwrap_or(PeersIterState::Finished) + } + + /// Finishes all paths containing one of the given peers. + /// + /// See [`crate::query::Query::try_finish`] for details. 
+ pub fn finish_paths<'a, I>(&mut self, peers: I) -> bool + where + I: IntoIterator<Item = &'a PeerId> + { + for peer in peers { + if let Some(PeerState{ initiated_by, .. }) = self.contacted_peers.get_mut(peer) { + self.iters[*initiated_by].finish(); + } + } + + self.is_finished() + } + + /// Immediately transitions the iterator to [`PeersIterState::Finished`]. + pub fn finish(&mut self) { + for iter in &mut self.iters { + iter.finish(); + } + } + + /// Checks whether the iterator has finished. + pub fn is_finished(&self) -> bool { + self.iters.iter().all(|i| i.is_finished()) + } + + /// Note: In the case of no adversarial peers or connectivity issues along + /// any path, all paths return the same result, deduplicated through + /// the `ResultIter`, thus overall `into_result` returns + /// `num_results`. In the case of adversarial peers or connectivity + /// issues `ClosestDisjointPeersIter` tries to return the + /// `num_results` closest benign peers, but as it can not + /// differentiate benign from faulty paths it as well returns faulty + /// peers and thus overall returns more than `num_results` peers. + pub fn into_result(self) -> impl Iterator<Item = PeerId> { + let result_per_path = self.iters.into_iter() + .map(|iter| iter.into_result().map(Key::from)); + + ResultIter::new(self.target, result_per_path).map(Key::into_preimage) + } +} + +/// Index into the [`ClosestDisjointPeersIter`] `iters` vector. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +struct IteratorIndex(usize); + +impl Index<IteratorIndex> for Vec<ClosestPeersIter> { + type Output = ClosestPeersIter; + + fn index(&self, index: IteratorIndex) -> &Self::Output { + &self[index.0] + } +} + +impl IndexMut<IteratorIndex> for Vec<ClosestPeersIter> { + fn index_mut(&mut self, index: IteratorIndex) -> &mut Self::Output { + &mut self[index.0] + } +} + +/// State tracking the iterator that yielded (i.e. tried to contact) a peer. See +/// [`ClosestDisjointPeersIter::on_success`] for details. +#[derive(Debug, PartialEq, Eq)] +struct PeerState { + /// First iterator to yield the peer. 
Will be notified both of the outcome + /// (success/failure) as well as the closer peers. + initiated_by: IteratorIndex, + /// Keeping track of the response state. In case other iterators later on + /// yield the same peer, they can be notified of the response outcome. + response: ResponseState, +} + +impl PeerState { + fn new(initiated_by: IteratorIndex) -> Self { + PeerState { + initiated_by, + response: ResponseState::Waiting, + } + } +} + +#[derive(Debug, PartialEq, Eq)] +enum ResponseState { + Waiting, + Succeeded, + Failed, +} + +/// Iterator combining the result of multiple [`ClosestPeersIter`] into a single +/// deduplicated ordered iterator. +// +// Note: This operates under the assumption that `I` is ordered. +#[derive(Clone, Debug)] +struct ResultIter<I> where + I: Iterator<Item = Key<PeerId>>, +{ + target: KeyBytes, + iters: Vec<Peekable<I>>, +} + +impl<I: Iterator<Item = Key<PeerId>>> ResultIter<I> { + fn new(target: KeyBytes, iters: impl Iterator<Item = I>) -> Self { + ResultIter{ + target, + iters: iters.map(Iterator::peekable).collect(), + } + } +} + +impl<I: Iterator<Item = Key<PeerId>>> Iterator for ResultIter<I> { + type Item = I::Item; + + fn next(&mut self) -> Option<Self::Item> { + let target = &self.target; + + self.iters.iter_mut() + // Find the iterator with the next closest peer. + .fold( + Option::<&mut Peekable<_>>::None, + |iter_a, iter_b| { + let iter_a = match iter_a { + Some(iter_a) => iter_a, + None => return Some(iter_b), + }; + + match (iter_a.peek(), iter_b.peek()) { + (Some(next_a), Some(next_b)) => { + if next_a == next_b { + // Remove from one for deduplication. + iter_b.next(); + return Some(iter_a) + } + + if target.distance(next_a) < target.distance(next_b) { + Some(iter_a) + } else { + Some(iter_b) + } + }, + (Some(_), None) => Some(iter_a), + (None, Some(_)) => Some(iter_b), + (None, None) => None, + } + }, + ) + // Pop off the next closest peer from that iterator. 
+ .and_then(Iterator::next) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use crate::K_VALUE; + use quickcheck::*; + use rand::{Rng, seq::SliceRandom}; + use std::collections::HashSet; + use std::iter; + + impl Arbitrary for ResultIter>> { + fn arbitrary(g: &mut G) -> Self { + let target = Target::arbitrary(g).0; + let num_closest_iters = g.gen_range(0, 20 + 1); + let peers = random_peers( + g.gen_range(0, 20 * num_closest_iters + 1), + g, + ); + + let iters: Vec<_> = (0..num_closest_iters) + .map(|_| { + let num_peers = g.gen_range(0, 20 + 1); + let mut peers = peers.choose_multiple(g, num_peers) + .cloned() + .map(Key::from) + .collect::>(); + + peers.sort_unstable_by(|a, b| { + target.distance(a).cmp(&target.distance(b)) + }); + + peers.into_iter() + }) + .collect(); + + ResultIter::new(target, iters.into_iter()) + } + + fn shrink(&self) -> Box> { + let peers = self.iters + .clone() + .into_iter() + .flatten() + .collect::>() + .into_iter() + .collect::>(); + + let iters = self.iters.clone() + .into_iter() + .map(|iter| iter.collect::>()) + .collect(); + + Box::new(ResultIterShrinker { + target: self.target.clone(), + peers, + iters, + }) + } + } + + struct ResultIterShrinker { + target: KeyBytes, + peers: Vec>, + iters: Vec>>, + } + + impl Iterator for ResultIterShrinker { + type Item = ResultIter>>; + + /// Return an iterator of [`ResultIter`]s with each of them missing a + /// different peer from the original set. + fn next(&mut self) -> Option { + // The peer that should not be included. 
+ let peer = self.peers.pop()?; + + let iters = self.iters.clone().into_iter() + .filter_map(|mut iter| { + iter.retain(|p| p != &peer); + if iter.is_empty() { + return None; + } + Some(iter.into_iter()) + }).collect::>(); + + Some(ResultIter::new(self.target.clone(), iters.into_iter())) + } + } + + #[derive(Clone, Debug)] + struct Target(KeyBytes); + + impl Arbitrary for Target { + fn arbitrary(g: &mut G) -> Self { + Target(Key::from(random_peers(1, g).pop().unwrap()).into()) + } + } + + fn random_peers(n: usize, g: &mut R) -> Vec { + (0 .. n).map(|_| PeerId::from_multihash( + multihash::wrap(multihash::Code::Sha2_256, &g.gen::<[u8; 32]>()) + ).unwrap()).collect() + } + + #[test] + fn result_iter_returns_deduplicated_ordered_peer_id_stream() { + fn prop(result_iter: ResultIter>>) { + let expected = { + let mut deduplicated = result_iter.clone() + .iters + .into_iter() + .flatten() + .collect::>() + .into_iter() + .map(Key::from) + .collect::>(); + + deduplicated.sort_unstable_by(|a, b| { + result_iter.target.distance(a).cmp(&result_iter.target.distance(b)) + }); + + deduplicated + }; + + assert_eq!(expected, result_iter.collect::>()); + } + + QuickCheck::new().quickcheck(prop as fn(_)) + } + + #[derive(Debug, Clone)] + struct Parallelism(NonZeroUsize); + + impl Arbitrary for Parallelism{ + fn arbitrary(g: &mut G) -> Self { + Parallelism(NonZeroUsize::new(g.gen_range(1, 10)).unwrap()) + } + } + + #[derive(Debug, Clone)] + struct NumResults(NonZeroUsize); + + impl Arbitrary for NumResults{ + fn arbitrary(g: &mut G) -> Self { + NumResults(NonZeroUsize::new(g.gen_range(1, K_VALUE.get())).unwrap()) + } + } + + impl Arbitrary for ClosestPeersIterConfig { + fn arbitrary(g: &mut G) -> Self { + ClosestPeersIterConfig { + parallelism: Parallelism::arbitrary(g).0, + num_results: NumResults::arbitrary(g).0, + peer_timeout: Duration::from_secs(1), + } + } + } + + #[derive(Debug, Clone)] + struct PeerVec(pub Vec>); + + impl Arbitrary for PeerVec { + fn arbitrary(g: &mut G) -> 
Self { + PeerVec( + (0..g.gen_range(1, 60)) + .map(|_| PeerId::random()) + .map(Key::from) + .collect(), + ) + } + } + + #[test] + fn s_kademlia_disjoint_paths() { + let now = Instant::now(); + let target: KeyBytes = Key::from(PeerId::random()).into(); + + let mut pool = [0; 12].iter() + .map(|_| Key::from(PeerId::random())) + .collect::>(); + + pool.sort_unstable_by(|a, b| { + target.distance(a).cmp(&target.distance(b)) + }); + + let known_closest_peers = pool.split_off(pool.len() - 3); + + let config = ClosestPeersIterConfig { + parallelism: NonZeroUsize::new(3).unwrap(), + num_results: NonZeroUsize::new(3).unwrap(), + ..ClosestPeersIterConfig::default() + }; + + let mut peers_iter = ClosestDisjointPeersIter::with_config( + config.clone(), + target, + known_closest_peers.clone(), + ); + + //////////////////////////////////////////////////////////////////////// + // First round. + + for _ in 0..3 { + if let PeersIterState::Waiting(Some(Cow::Owned(peer))) = peers_iter.next(now) { + assert!(known_closest_peers.contains(&Key::from(peer))); + } else { + panic!("Expected iterator to return peer to query."); + } + } + + assert_eq!( + PeersIterState::WaitingAtCapacity, + peers_iter.next(now), + ); + + let response_2 = pool.split_off(pool.len() - 3); + let response_3 = pool.split_off(pool.len() - 3); + // Keys are closer than any of the previous two responses from honest + // node 1 and 2. + let malicious_response_1 = pool.split_off(pool.len() - 3); + + // Response from malicious peer 1. + peers_iter.on_success( + known_closest_peers[0].preimage(), + malicious_response_1.clone().into_iter().map(|k| k.preimage().clone()), + ); + + // Response from peer 2. + peers_iter.on_success( + known_closest_peers[1].preimage(), + response_2.clone().into_iter().map(|k| k.preimage().clone()), + ); + + // Response from peer 3. 
+ peers_iter.on_success( + known_closest_peers[2].preimage(), + response_3.clone().into_iter().map(|k| k.preimage().clone()), + ); + + //////////////////////////////////////////////////////////////////////// + // Second round. + + let mut next_to_query = vec![]; + for _ in 0..3 { + if let PeersIterState::Waiting(Some(Cow::Owned(peer))) = peers_iter.next(now) { + next_to_query.push(peer) + } else { + panic!("Expected iterator to return peer to query."); + } + }; + + // Expect a peer from each disjoint path. + assert!(next_to_query.contains(malicious_response_1[0].preimage())); + assert!(next_to_query.contains(response_2[0].preimage())); + assert!(next_to_query.contains(response_3[0].preimage())); + + for peer in next_to_query { + peers_iter.on_success(&peer, vec![]); + } + + // Mark all remaining peers as succeeded. + for _ in 0..6 { + if let PeersIterState::Waiting(Some(Cow::Owned(peer))) = peers_iter.next(now) { + peers_iter.on_success(&peer, vec![]); + } else { + panic!("Expected iterator to return peer to query."); + } + } + + assert_eq!( + PeersIterState::Finished, + peers_iter.next(now), + ); + + let final_peers: Vec<_> = peers_iter.into_result().collect(); + + // Expect final result to contain peer from each disjoint path, even + // though not all are among the best ones. + assert!(final_peers.contains(malicious_response_1[0].preimage())); + assert!(final_peers.contains(response_2[0].preimage())); + assert!(final_peers.contains(response_3[0].preimage())); + } + + #[derive(Clone)] + struct Graph(HashMap); + + impl std::fmt::Debug for Graph { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.debug_list().entries(self.0.iter().map(|(id, _)| id)).finish() + } + } + + impl Arbitrary for Graph { + fn arbitrary(g: &mut G) -> Self { + let mut peer_ids = random_peers(g.gen_range(K_VALUE.get(), 200), g) + .into_iter() + .map(|peer_id| (peer_id.clone(), Key::from(peer_id))) + .collect::>(); + + // Make each peer aware of its direct neighborhood. 
+ let mut peers = peer_ids.clone().into_iter() + .map(|(peer_id, key)| { + peer_ids.sort_unstable_by(|(_, a), (_, b)| { + key.distance(a).cmp(&key.distance(b)) + }); + + assert_eq!(peer_id, peer_ids[0].0); + + let known_peers = peer_ids.iter() + // Skip itself. + .skip(1) + .take(K_VALUE.get()) + .cloned() + .collect::>(); + + (peer_id, Peer{ known_peers }) + }) + .collect::>(); + + // Make each peer aware of a random set of other peers within the graph. + for (peer_id, peer) in peers.iter_mut() { + peer_ids.shuffle(g); + + let num_peers = g.gen_range(K_VALUE.get(), peer_ids.len() + 1); + let mut random_peer_ids = peer_ids.choose_multiple(g, num_peers) + // Make sure not to include itself. + .filter(|(id, _)| peer_id != id) + .cloned() + .collect::>(); + + peer.known_peers.append(&mut random_peer_ids); + peer.known_peers = std::mem::replace(&mut peer.known_peers, vec![]) + // Deduplicate peer ids. + .into_iter().collect::>().into_iter().collect(); + } + + Graph(peers) + } + } + + impl Graph { + fn get_closest_peer(&self, target: &KeyBytes) -> PeerId { + self.0.iter() + .map(|(peer_id, _)| (target.distance(&Key::new(peer_id.clone())), peer_id)) + .fold(None, |acc, (distance_b, peer_id_b)| { + match acc { + None => Some((distance_b, peer_id_b)), + Some((distance_a, peer_id_a)) => if distance_a < distance_b { + Some((distance_a, peer_id_a)) + } else { + Some((distance_b, peer_id_b)) + } + } + + }) + .expect("Graph to have at least one peer.") + .1.clone() + } + } + + #[derive(Debug, Clone)] + struct Peer { + known_peers: Vec<(PeerId, Key)>, + } + + impl Peer { + fn get_closest_peers(&mut self, target: &KeyBytes) -> Vec { + self.known_peers.sort_unstable_by(|(_, a), (_, b)| { + target.distance(a).cmp(&target.distance(b)) + }); + + self.known_peers.iter().take(K_VALUE.get()).map(|(id, _)| id).cloned().collect() + } + } + + enum PeerIterator { + Disjoint(ClosestDisjointPeersIter), + Closest(ClosestPeersIter), + } + + impl PeerIterator { + fn next(&mut self, now: Instant) 
-> PeersIterState { + match self { + PeerIterator::Disjoint(iter) => iter.next(now), + PeerIterator::Closest(iter) => iter.next(now), + } + } + + fn on_success(&mut self, peer: &PeerId, closer_peers: Vec) { + match self { + PeerIterator::Disjoint(iter) => iter.on_success(peer, closer_peers), + PeerIterator::Closest(iter) => iter.on_success(peer, closer_peers), + }; + } + + fn into_result(self) -> Vec { + match self { + PeerIterator::Disjoint(iter) => iter.into_result().collect(), + PeerIterator::Closest(iter) => iter.into_result().collect(), + } + } + } + + /// Ensure [`ClosestPeersIter`] and [`ClosestDisjointPeersIter`] yield same closest peers. + #[test] + fn closest_and_disjoint_closest_yield_same_result() { + fn prop( + target: Target, + graph: Graph, + parallelism: Parallelism, + num_results: NumResults, + ) -> TestResult { + if parallelism.0 > num_results.0 { + return TestResult::discard(); + } + + let target: KeyBytes = target.0; + let closest_peer = graph.get_closest_peer(&target); + + let mut known_closest_peers = graph.0.iter() + .take(K_VALUE.get()) + .map(|(key, _peers)| Key::new(key.clone())) + .collect::>(); + known_closest_peers.sort_unstable_by(|a, b| { + target.distance(a).cmp(&target.distance(b)) + }); + + let cfg = ClosestPeersIterConfig{ + parallelism: parallelism.0, + num_results: num_results.0, + ..ClosestPeersIterConfig::default() + }; + + let closest = drive_to_finish( + PeerIterator::Closest(ClosestPeersIter::with_config( + cfg.clone(), + target.clone(), + known_closest_peers.clone(), + )), + graph.clone(), + &target, + ); + + let disjoint = drive_to_finish( + PeerIterator::Disjoint(ClosestDisjointPeersIter::with_config( + cfg, + target.clone(), + known_closest_peers.clone(), + )), + graph.clone(), + &target, + ); + + assert!( + closest.contains(&closest_peer), + "Expected `ClosestPeersIter` to find closest peer.", + ); + assert!( + disjoint.contains(&closest_peer), + "Expected `ClosestDisjointPeersIter` to find closest peer.", + ); + + 
assert!( + closest.len() == num_results.0.get(), + "Expected `ClosestPeersIter` to find `num_results` closest \ + peers." + ); + assert!( + disjoint.len() >= num_results.0.get(), + "Expected `ClosestDisjointPeersIter` to find at least \ + `num_results` closest peers." + ); + + if closest.len() > disjoint.len() { + let closest_only = closest.difference(&disjoint).collect::>(); + + panic!( + "Expected `ClosestDisjointPeersIter` to find all peers \ + found by `ClosestPeersIter`, but it did not find {:?}.", + closest_only, + ); + }; + + TestResult::passed() + } + + fn drive_to_finish( + mut iter: PeerIterator, + mut graph: Graph, + target: &KeyBytes, + ) -> HashSet { + let now = Instant::now(); + loop { + match iter.next(now) { + PeersIterState::Waiting(Some(peer_id)) => { + let peer_id = peer_id.clone().into_owned(); + let closest_peers = graph.0.get_mut(&peer_id) + .unwrap() + .get_closest_peers(&target); + iter.on_success(&peer_id, closest_peers); + } , + PeersIterState::WaitingAtCapacity | PeersIterState::Waiting(None) => + panic!("There is never more than one request in flight."), + PeersIterState::Finished => break, + } + } + + let mut result = iter.into_result().into_iter().map(Key::new).collect::>(); + result.sort_unstable_by(|a, b| { + target.distance(a).cmp(&target.distance(b)) + }); + result.into_iter().map(|k| k.into_preimage()).collect() + } + + QuickCheck::new().tests(10).quickcheck(prop as fn(_, _, _, _) -> _) + } + + #[test] + fn failure_can_not_overwrite_previous_success() { + let now = Instant::now(); + let peer = PeerId::random(); + let mut iter = ClosestDisjointPeersIter::new( + Key::from(PeerId::random()).into(), + iter::once(Key::from(peer.clone())), + ); + + assert!(matches!(iter.next(now), PeersIterState::Waiting(Some(_)))); + + // Expect peer to be marked as succeeded. 
+ assert!(iter.on_success(&peer, iter::empty())); + assert_eq!(iter.contacted_peers.get(&peer), Some(&PeerState { + initiated_by: IteratorIndex(0), + response: ResponseState::Succeeded, + })); + + // Expect peer to stay marked as succeeded. + assert!(!iter.on_failure(&peer)); + assert_eq!(iter.contacted_peers.get(&peer), Some(&PeerState { + initiated_by: IteratorIndex(0), + response: ResponseState::Succeeded, + })); + } +} diff --git a/protocols/kad/src/query/peers/fixed.rs b/protocols/kad/src/query/peers/fixed.rs index edb86ef4..723ce414 100644 --- a/protocols/kad/src/query/peers/fixed.rs +++ b/protocols/kad/src/query/peers/fixed.rs @@ -22,12 +22,12 @@ use super::*; use fnv::FnvHashMap; use libp2p_core::PeerId; -use std::{vec, collections::hash_map::Entry}; +use std::{vec, collections::hash_map::Entry, num::NonZeroUsize}; /// A peer iterator for a fixed set of peers. pub struct FixedPeersIter { /// Ther permitted parallelism, i.e. number of pending results. - parallelism: usize, + parallelism: NonZeroUsize, /// The state of peers emitted by the iterator. 
peers: FnvHashMap, @@ -58,7 +58,7 @@ enum PeerState { } impl FixedPeersIter { - pub fn new(peers: I, parallelism: usize) -> Self + pub fn new(peers: I, parallelism: NonZeroUsize) -> Self where I: IntoIterator { @@ -133,7 +133,7 @@ impl FixedPeersIter { match &mut self.state { State::Finished => return PeersIterState::Finished, State::Waiting { num_waiting } => { - if *num_waiting >= self.parallelism { + if *num_waiting >= self.parallelism.get() { return PeersIterState::WaitingAtCapacity } loop { @@ -175,7 +175,10 @@ mod test { #[test] fn decrease_num_waiting_on_failure() { - let mut iter = FixedPeersIter::new(vec![PeerId::random(), PeerId::random()], 1); + let mut iter = FixedPeersIter::new( + vec![PeerId::random(), PeerId::random()], + NonZeroUsize::new(1).unwrap(), + ); match iter.next() { PeersIterState::Waiting(Some(peer)) => { diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md new file mode 100644 index 00000000..7bb1e881 --- /dev/null +++ b/protocols/mdns/CHANGELOG.md @@ -0,0 +1,3 @@ +# 0.19.2 [2020-06-22] + +Updated dependencies. 
diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index 66890a8c..ec538b49 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "libp2p-mdns" edition = "2018" -version = "0.19.0" +version = "0.19.2" description = "Implementation of the libp2p mDNS discovery method" authors = ["Parity Technologies "] license = "MIT" @@ -10,14 +10,14 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-std = "~1.5.0" +async-std = "1.6.2" data-encoding = "2.0" dns-parser = "0.8" either = "1.5.3" futures = "0.3.1" lazy_static = "1.2" -libp2p-core = { version = "0.19.0", path = "../../core" } -libp2p-swarm = { version = "0.19.0", path = "../../swarm" } +libp2p-core = { version = "0.19.2", path = "../../core" } +libp2p-swarm = { version = "0.19.1", path = "../../swarm" } log = "0.4" net2 = "0.2" rand = "0.7" diff --git a/protocols/noise/CHANGELOG.md b/protocols/noise/CHANGELOG.md new file mode 100644 index 00000000..76abe1b2 --- /dev/null +++ b/protocols/noise/CHANGELOG.md @@ -0,0 +1,6 @@ +# 0.19.1 [2020-06-22] + +- Re-add noise upgrades for IK and IX + ([PR 1580](https://github.com/libp2p/rust-libp2p/pull/1580)). + +- Updated dependencies. diff --git a/protocols/noise/Cargo.toml b/protocols/noise/Cargo.toml index 1f812f1e..4936b793 100644 --- a/protocols/noise/Cargo.toml +++ b/protocols/noise/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "libp2p-noise" description = "Cryptographic handshake protocol using the noise framework." 
-version = "0.19.0" +version = "0.19.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,7 +11,7 @@ edition = "2018" curve25519-dalek = "2.0.0" futures = "0.3.1" lazy_static = "1.2" -libp2p-core = { version = "0.19.0", path = "../../core" } +libp2p-core = { version = "0.19.2", path = "../../core" } log = "0.4" prost = "0.6.1" rand = "0.7.2" @@ -20,17 +20,17 @@ static_assertions = "1" x25519-dalek = "0.6.0" zeroize = "1" -[target.'cfg(not(target_os = "unknown"))'.dependencies] -snow = { version = "0.6.1", features = ["ring-resolver"], default-features = false } +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +snow = { version = "0.7.0", features = ["ring-resolver"], default-features = false } -[target.'cfg(target_os = "unknown")'.dependencies] -snow = { version = "0.6.1", features = ["default-resolver"], default-features = false } +[target.'cfg(target_arch = "wasm32")'.dependencies] +snow = { version = "0.7.0", features = ["default-resolver"], default-features = false } [dev-dependencies] env_logger = "0.7.1" -libp2p-tcp = { version = "0.19.0", path = "../../transports/tcp" } +libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"] } quickcheck = "0.9.0" -sodiumoxide = "^0.2.5" +sodiumoxide = "0.2.5" [build-dependencies] prost-build = "0.6" diff --git a/protocols/noise/src/protocol.rs b/protocols/noise/src/protocol.rs index 5844dbc8..7c61274a 100644 --- a/protocols/noise/src/protocol.rs +++ b/protocols/noise/src/protocol.rs @@ -229,22 +229,22 @@ impl snow::resolvers::CryptoResolver for Resolver { } fn resolve_hash(&self, choice: &snow::params::HashChoice) -> Option> { - #[cfg(target_os = "unknown")] + #[cfg(target_arch = "wasm32")] { snow::resolvers::DefaultResolver.resolve_hash(choice) } - #[cfg(not(target_os = "unknown"))] + #[cfg(not(target_arch = "wasm32"))] { snow::resolvers::RingResolver.resolve_hash(choice) } } fn resolve_cipher(&self, choice: &snow::params::CipherChoice) 
-> Option> { - #[cfg(target_os = "unknown")] + #[cfg(target_arch = "wasm32")] { snow::resolvers::DefaultResolver.resolve_cipher(choice) } - #[cfg(not(target_os = "unknown"))] + #[cfg(not(target_arch = "wasm32"))] { snow::resolvers::RingResolver.resolve_cipher(choice) } diff --git a/protocols/noise/src/protocol/x25519_spec.rs b/protocols/noise/src/protocol/x25519_spec.rs index 446ff7cc..c2f32095 100644 --- a/protocols/noise/src/protocol/x25519_spec.rs +++ b/protocols/noise/src/protocol/x25519_spec.rs @@ -78,6 +78,26 @@ impl UpgradeInfo for NoiseConfig { } } +/// **Note**: This is not currently a standardised upgrade. +impl UpgradeInfo for NoiseConfig { + type Info = &'static [u8]; + type InfoIter = std::iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + std::iter::once(b"/noise/ix/25519/chachapoly/sha256/0.1.0") + } +} + +/// **Note**: This is not currently a standardised upgrade. +impl UpgradeInfo for NoiseConfig { + type Info = &'static [u8]; + type InfoIter = std::iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + std::iter::once(b"/noise/ik/25519/chachapoly/sha256/0.1.0") + } +} + /// Noise protocols for X25519 with libp2p-spec compliant signatures. /// /// **Note**: Only the XX handshake pattern is currently guaranteed to be diff --git a/protocols/ping/CHANGELOG.md b/protocols/ping/CHANGELOG.md new file mode 100644 index 00000000..f760124a --- /dev/null +++ b/protocols/ping/CHANGELOG.md @@ -0,0 +1,9 @@ +# 0.19.3 [2020-06-22] + +Updated dependencies. + +# 0.19.2 [2020-06-18] + +- Close substream in inbound upgrade + [PR 1606](https://github.com/libp2p/rust-libp2p/pull/1606). 
+ diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index 73a13183..c243f57f 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-ping" edition = "2018" description = "Ping protocol for libp2p" -version = "0.19.0" +version = "0.19.3" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,16 +11,16 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.19.0", path = "../../core" } -libp2p-swarm = { version = "0.19.0", path = "../../swarm" } +libp2p-core = { version = "0.19.2", path = "../../core" } +libp2p-swarm = { version = "0.19.1", path = "../../swarm" } log = "0.4.1" rand = "0.7.2" void = "1.0" wasm-timer = "0.2" [dev-dependencies] -async-std = "~1.5.0" -libp2p-tcp = { version = "0.19.0", path = "../../transports/tcp" } -libp2p-secio = { version = "0.19.0", path = "../../protocols/secio" } -libp2p-yamux = { version = "0.19.0", path = "../../muxers/yamux" } +async-std = "1.6.2" +libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"] } +libp2p-secio = { path = "../../protocols/secio" } +libp2p-yamux = { path = "../../muxers/yamux" } quickcheck = "0.9.0" diff --git a/protocols/ping/src/protocol.rs b/protocols/ping/src/protocol.rs index fab16b36..8e7e963b 100644 --- a/protocols/ping/src/protocol.rs +++ b/protocols/ping/src/protocol.rs @@ -69,6 +69,7 @@ where while let Ok(_) = socket.read_exact(&mut payload).await { socket.write_all(&payload).await?; } + socket.close().await?; Ok(()) }.boxed() } @@ -128,7 +129,7 @@ mod tests { } else { panic!("MemoryTransport not listening on an address!"); }; - + async_std::task::spawn(async move { let listener_event = listener.next().await.unwrap(); let (listener_upgrade, _) = listener_event.unwrap().into_upgrade().unwrap(); diff --git a/protocols/plaintext/CHANGELOG.md b/protocols/plaintext/CHANGELOG.md new file mode 100644 index 
00000000..0d5b7cf2 --- /dev/null +++ b/protocols/plaintext/CHANGELOG.md @@ -0,0 +1,3 @@ +# 0.19.1 [2020-06-22] + +Updated dependencies. diff --git a/protocols/plaintext/Cargo.toml b/protocols/plaintext/Cargo.toml index e00b5ac4..2304ca75 100644 --- a/protocols/plaintext/Cargo.toml +++ b/protocols/plaintext/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-plaintext" edition = "2018" description = "Plaintext encryption dummy protocol for libp2p" -version = "0.19.0" +version = "0.19.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,12 +12,12 @@ categories = ["network-programming", "asynchronous"] [dependencies] bytes = "0.5" futures = "0.3.1" -futures_codec = "0.3.4" -libp2p-core = { version = "0.19.0", path = "../../core" } +futures_codec = "0.4.0" +libp2p-core = { version = "0.19.2", path = "../../core" } log = "0.4.8" prost = "0.6.1" rw-stream-sink = "0.2.0" -unsigned-varint = { version = "0.3", features = ["futures-codec"] } +unsigned-varint = { version = "0.4.0", features = ["futures-codec"] } void = "1.0.2" [dev-dependencies] diff --git a/protocols/pnet/CHANGELOG.md b/protocols/pnet/CHANGELOG.md new file mode 100644 index 00000000..5dde110e --- /dev/null +++ b/protocols/pnet/CHANGELOG.md @@ -0,0 +1,3 @@ +# 0.19.1 [2020-06-22] + +- Updated dependencies. 
diff --git a/protocols/pnet/Cargo.toml b/protocols/pnet/Cargo.toml index de5ea5cf..e2700834 100644 --- a/protocols/pnet/Cargo.toml +++ b/protocols/pnet/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-pnet" edition = "2018" description = "Private swarm support for libp2p" -version = "0.19.0" +version = "0.19.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -15,7 +15,7 @@ log = "0.4.8" salsa20 = "0.3.0" sha3 = "0.8" rand = "0.7" -pin-project = "0.4.6" +pin-project = "0.4.17" [dev-dependencies] quickcheck = "0.9.0" diff --git a/protocols/request-response/CHANGELOG.md b/protocols/request-response/CHANGELOG.md new file mode 100644 index 00000000..49462a2f --- /dev/null +++ b/protocols/request-response/CHANGELOG.md @@ -0,0 +1,4 @@ +# 0.1.0 + +Initial release. + diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml new file mode 100644 index 00000000..023b83dc --- /dev/null +++ b/protocols/request-response/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "libp2p-request-response" +edition = "2018" +description = "Generic Request/Response Protocols" +version = "0.1.0" +authors = ["Parity Technologies "] +license = "MIT" +repository = "https://github.com/libp2p/rust-libp2p" +keywords = ["peer-to-peer", "libp2p", "networking"] +categories = ["network-programming", "asynchronous"] + +[dependencies] +async-trait = "0.1" +futures = "0.3.1" +libp2p-core = { version = "0.19.2", path = "../../core" } +libp2p-swarm = { version = "0.19.1", path = "../../swarm" } +smallvec = "1.4" +wasm-timer = "0.2" + +[dev-dependencies] +async-std = "1.6.2" +libp2p-noise = { path = "../noise" } +libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"] } +libp2p-yamux = { path = "../../muxers/yamux" } +rand = "0.7" diff --git a/protocols/request-response/src/codec.rs b/protocols/request-response/src/codec.rs new file mode 100644 index 00000000..da85b277 --- /dev/null +++ 
b/protocols/request-response/src/codec.rs @@ -0,0 +1,66 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +pub use libp2p_core::ProtocolName; + +use async_trait::async_trait; +use futures::prelude::*; +use std::io; + +/// A `RequestResponseCodec` defines the request and response types +/// for a [`RequestResponse`](crate::RequestResponse) protocol or +/// protocol family and how they are encoded / decoded on an I/O stream. +#[async_trait] +pub trait RequestResponseCodec { + /// The type of protocol(s) or protocol versions being negotiated. + type Protocol: ProtocolName + Send + Clone; + /// The type of inbound and outbound requests. + type Request: Send; + /// The type of inbound and outbound responses. + type Response: Send; + + /// Reads a request from the given I/O stream according to the + /// negotiated protocol. 
+ async fn read_request(&mut self, protocol: &Self::Protocol, io: &mut T) + -> io::Result + where + T: AsyncRead + Unpin + Send; + + /// Reads a response from the given I/O stream according to the + /// negotiated protocol. + async fn read_response(&mut self, protocol: &Self::Protocol, io: &mut T) + -> io::Result + where + T: AsyncRead + Unpin + Send; + + /// Writes a request to the given I/O stream according to the + /// negotiated protocol. + async fn write_request(&mut self, protocol: &Self::Protocol, io: &mut T, req: Self::Request) + -> io::Result<()> + where + T: AsyncWrite + Unpin + Send; + + /// Writes a response to the given I/O stream according to the + /// negotiated protocol. + async fn write_response(&mut self, protocol: &Self::Protocol, io: &mut T, res: Self::Response) + -> io::Result<()> + where + T: AsyncWrite + Unpin + Send; +} diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs new file mode 100644 index 00000000..3a491a69 --- /dev/null +++ b/protocols/request-response/src/handler.rs @@ -0,0 +1,326 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +mod protocol; + +use crate::{EMPTY_QUEUE_SHRINK_THRESHOLD, RequestId}; +use crate::codec::RequestResponseCodec; + +pub use protocol::{RequestProtocol, ResponseProtocol, ProtocolSupport}; + +use futures::{ + channel::oneshot, + future::BoxFuture, + prelude::*, + stream::FuturesUnordered +}; +use libp2p_core::{ + upgrade::{UpgradeError, NegotiationError}, +}; +use libp2p_swarm::{ + SubstreamProtocol, + protocols_handler::{ + KeepAlive, + ProtocolsHandler, + ProtocolsHandlerEvent, + ProtocolsHandlerUpgrErr, + } +}; +use smallvec::SmallVec; +use std::{ + collections::VecDeque, + io, + time::Duration, + task::{Context, Poll} +}; +use wasm_timer::Instant; + +/// A connection handler of a `RequestResponse` protocol. +#[doc(hidden)] +pub struct RequestResponseHandler +where + TCodec: RequestResponseCodec, +{ + /// The supported inbound protocols. + inbound_protocols: SmallVec<[TCodec::Protocol; 2]>, + /// The request/response message codec. + codec: TCodec, + /// The keep-alive timeout of idle connections. A connection is considered + /// idle if there are no outbound substreams. + keep_alive_timeout: Duration, + /// The timeout for inbound and outbound substreams (i.e. request + /// and response processing). + substream_timeout: Duration, + /// The current connection keep-alive. + keep_alive: KeepAlive, + /// A pending fatal error that results in the connection being closed. + pending_error: Option>, + /// Queue of events to emit in `poll()`. + pending_events: VecDeque>, + /// Outbound upgrades waiting to be emitted as an `OutboundSubstreamRequest`. + outbound: VecDeque>, + /// Inbound upgrades waiting for the incoming request. 
+ inbound: FuturesUnordered), + oneshot::Canceled + >>>, +} + +impl RequestResponseHandler +where + TCodec: RequestResponseCodec, +{ + pub(super) fn new( + inbound_protocols: SmallVec<[TCodec::Protocol; 2]>, + codec: TCodec, + keep_alive_timeout: Duration, + substream_timeout: Duration, + ) -> Self { + Self { + inbound_protocols, + codec, + keep_alive: KeepAlive::Yes, + keep_alive_timeout, + substream_timeout, + outbound: VecDeque::new(), + inbound: FuturesUnordered::new(), + pending_events: VecDeque::new(), + pending_error: None, + } + } +} + +/// The events emitted by the [`RequestResponseHandler`]. +#[doc(hidden)] +pub enum RequestResponseHandlerEvent +where + TCodec: RequestResponseCodec +{ + /// An inbound request. + Request { + request: TCodec::Request, + sender: oneshot::Sender + }, + /// An inbound response. + Response { + request_id: RequestId, + response: TCodec::Response + }, + /// An outbound upgrade (i.e. request) timed out. + OutboundTimeout(RequestId), + /// An outbound request failed to negotiate a mutually supported protocol. + OutboundUnsupportedProtocols(RequestId), + /// An inbound request timed out. + InboundTimeout, + /// An inbound request failed to negotiate a mutually supported protocol. + InboundUnsupportedProtocols, +} + +impl ProtocolsHandler for RequestResponseHandler +where + TCodec: RequestResponseCodec + Send + Clone + 'static, +{ + type InEvent = RequestProtocol; + type OutEvent = RequestResponseHandlerEvent; + type Error = ProtocolsHandlerUpgrErr; + type InboundProtocol = ResponseProtocol; + type OutboundProtocol = RequestProtocol; + type OutboundOpenInfo = RequestId; + + fn listen_protocol(&self) -> SubstreamProtocol { + // A channel for notifying the handler when the inbound + // upgrade received the request. + let (rq_send, rq_recv) = oneshot::channel(); + + // A channel for notifying the inbound upgrade when the + // response is sent. 
+ let (rs_send, rs_recv) = oneshot::channel(); + + // By keeping all I/O inside the `ResponseProtocol` and thus the + // inbound substream upgrade via above channels, we ensure that it + // is all subject to the configured timeout without extra bookkeeping + // for inbound substreams as well as their timeouts and also make the + // implementation of inbound and outbound upgrades symmetric in + // this sense. + let proto = ResponseProtocol { + protocols: self.inbound_protocols.clone(), + codec: self.codec.clone(), + request_sender: rq_send, + response_receiver: rs_recv, + }; + + // The handler waits for the request to come in. It then emits + // `RequestResponseHandlerEvent::Request` together with a + // `ResponseChannel`. + self.inbound.push(rq_recv.map_ok(move |rq| (rq, rs_send)).boxed()); + + SubstreamProtocol::new(proto).with_timeout(self.substream_timeout) + } + + fn inject_fully_negotiated_inbound( + &mut self, + (): (), + ) { + // Nothing to do, as the response has already been sent + // as part of the upgrade. + } + + fn inject_fully_negotiated_outbound( + &mut self, + response: TCodec::Response, + request_id: RequestId, + ) { + self.pending_events.push_back( + RequestResponseHandlerEvent::Response { + request_id, response + }); + } + + fn inject_event(&mut self, request: Self::InEvent) { + self.keep_alive = KeepAlive::Yes; + self.outbound.push_back(request); + } + + fn inject_dial_upgrade_error( + &mut self, + info: RequestId, + error: ProtocolsHandlerUpgrErr, + ) { + match error { + ProtocolsHandlerUpgrErr::Timeout => { + self.pending_events.push_back( + RequestResponseHandlerEvent::OutboundTimeout(info)); + } + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => { + // The remote merely doesn't support the protocol(s) we requested. + // This is no reason to close the connection, which may + // successfully communicate with other protocols already. 
+ // An event is reported to permit user code to react to the fact that + // the remote peer does not support the requested protocol(s). + self.pending_events.push_back( + RequestResponseHandlerEvent::OutboundUnsupportedProtocols(info)); + } + _ => { + // Anything else is considered a fatal error or misbehaviour of + // the remote peer and results in closing the connection. + self.pending_error = Some(error); + } + } + } + + fn inject_listen_upgrade_error( + &mut self, + error: ProtocolsHandlerUpgrErr + ) { + match error { + ProtocolsHandlerUpgrErr::Timeout => { + self.pending_events.push_back( + RequestResponseHandlerEvent::InboundTimeout); + } + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => { + // The local peer merely doesn't support the protocol(s) requested. + // This is no reason to close the connection, which may + // successfully communicate with other protocols already. + // An event is reported to permit user code to react to the fact that + // the local peer does not support the requested protocol(s). + self.pending_events.push_back( + RequestResponseHandlerEvent::InboundUnsupportedProtocols); + } + _ => { + // Anything else is considered a fatal error or misbehaviour of + // the remote peer and results in closing the connection. + self.pending_error = Some(error); + } + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + self.keep_alive + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent, RequestId, Self::OutEvent, Self::Error>, + > { + // Check for a pending (fatal) error. + if let Some(err) = self.pending_error.take() { + // The handler will not be polled again by the `Swarm`. + return Poll::Ready(ProtocolsHandlerEvent::Close(err)) + } + + // Drain pending events. 
+ if let Some(event) = self.pending_events.pop_front() { + return Poll::Ready(ProtocolsHandlerEvent::Custom(event)) + } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { + self.pending_events.shrink_to_fit(); + } + + // Check for inbound requests. + while let Poll::Ready(Some(result)) = self.inbound.poll_next_unpin(cx) { + match result { + Ok((rq, rs_sender)) => { + // We received an inbound request. + self.keep_alive = KeepAlive::Yes; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + RequestResponseHandlerEvent::Request { + request: rq, sender: rs_sender + })) + } + Err(oneshot::Canceled) => { + // The inbound upgrade has errored or timed out reading + // or waiting for the request. The handler is informed + // via `inject_listen_upgrade_error`. + } + } + } + + // Emit outbound requests. + if let Some(request) = self.outbound.pop_front() { + let info = request.request_id; + return Poll::Ready( + ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(request) + .with_timeout(self.substream_timeout), + info, + }, + ) + } + + debug_assert!(self.outbound.is_empty()); + + if self.outbound.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { + self.outbound.shrink_to_fit(); + } + + if self.inbound.is_empty() { + // No new inbound or outbound requests. However, we may just have + // started the latest inbound or outbound upgrade(s), so make sure + // the keep-alive timeout is preceded by the substream timeout. + let until = Instant::now() + self.substream_timeout + self.keep_alive_timeout; + self.keep_alive = KeepAlive::Until(until); + } + + Poll::Pending + } +} + diff --git a/protocols/request-response/src/handler/protocol.rs b/protocols/request-response/src/handler/protocol.rs new file mode 100644 index 00000000..c0dcdaf9 --- /dev/null +++ b/protocols/request-response/src/handler/protocol.rs @@ -0,0 +1,165 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! The definition of a request/response protocol via inbound +//! and outbound substream upgrades. The inbound upgrade +//! receives a request and sends a response, whereas the +//! outbound upgrade send a request and receives a response. + +use crate::RequestId; +use crate::codec::RequestResponseCodec; + +use futures::{ + channel::oneshot, + future::BoxFuture, + prelude::*, +}; +use libp2p_core::{ + upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}, +}; +use libp2p_swarm::{ + NegotiatedSubstream, +}; +use smallvec::SmallVec; +use std::io; + +/// The level of support for a particular protocol. +#[derive(Debug, Clone)] +pub enum ProtocolSupport { + /// The protocol is only supported for inbound requests. + Inbound, + /// The protocol is only supported for outbound requests. + Outbound, + /// The protocol is supported for inbound and outbound requests. 
+ Full +} + +impl ProtocolSupport { + /// Whether inbound requests are supported. + pub fn inbound(&self) -> bool { + match self { + ProtocolSupport::Inbound | ProtocolSupport::Full => true, + ProtocolSupport::Outbound => false, + } + } + + /// Whether outbound requests are supported. + pub fn outbound(&self) -> bool { + match self { + ProtocolSupport::Outbound | ProtocolSupport::Full => true, + ProtocolSupport::Inbound => false, + } + } +} + +/// Response substream upgrade protocol. +/// +/// Receives a request and sends a response. +#[derive(Debug)] +pub struct ResponseProtocol +where + TCodec: RequestResponseCodec +{ + pub(crate) codec: TCodec, + pub(crate) protocols: SmallVec<[TCodec::Protocol; 2]>, + pub(crate) request_sender: oneshot::Sender, + pub(crate) response_receiver: oneshot::Receiver +} + +impl UpgradeInfo for ResponseProtocol +where + TCodec: RequestResponseCodec +{ + type Info = TCodec::Protocol; + type InfoIter = smallvec::IntoIter<[Self::Info; 2]>; + + fn protocol_info(&self) -> Self::InfoIter { + self.protocols.clone().into_iter() + } +} + +impl InboundUpgrade for ResponseProtocol +where + TCodec: RequestResponseCodec + Send + 'static, +{ + type Output = (); + type Error = io::Error; + type Future = BoxFuture<'static, Result>; + + fn upgrade_inbound(mut self, mut io: NegotiatedSubstream, protocol: Self::Info) -> Self::Future { + async move { + let read = self.codec.read_request(&protocol, &mut io); + let request = read.await?; + if let Ok(()) = self.request_sender.send(request) { + if let Ok(response) = self.response_receiver.await { + let write = self.codec.write_response(&protocol, &mut io, response); + write.await?; + } + } + Ok(()) + }.boxed() + } +} + +/// Request substream upgrade protocol. +/// +/// Sends a request and receives a response. 
+#[derive(Debug, Clone)] +pub struct RequestProtocol +where + TCodec: RequestResponseCodec +{ + pub(crate) codec: TCodec, + pub(crate) protocols: SmallVec<[TCodec::Protocol; 2]>, + pub(crate) request_id: RequestId, + pub(crate) request: TCodec::Request, +} + +impl UpgradeInfo for RequestProtocol +where + TCodec: RequestResponseCodec +{ + type Info = TCodec::Protocol; + type InfoIter = smallvec::IntoIter<[Self::Info; 2]>; + + fn protocol_info(&self) -> Self::InfoIter { + self.protocols.clone().into_iter() + } +} + +impl OutboundUpgrade for RequestProtocol +where + TCodec: RequestResponseCodec + Send + 'static, +{ + type Output = TCodec::Response; + type Error = io::Error; + type Future = BoxFuture<'static, Result>; + + fn upgrade_outbound(mut self, mut io: NegotiatedSubstream, protocol: Self::Info) -> Self::Future { + async move { + let write = self.codec.write_request(&protocol, &mut io, self.request); + write.await?; + let read = self.codec.read_response(&protocol, &mut io); + let response = read.await?; + Ok(response) + }.boxed() + } +} + diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs new file mode 100644 index 00000000..c2192934 --- /dev/null +++ b/protocols/request-response/src/lib.rs @@ -0,0 +1,607 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! Generic request/response protocols. +//! +//! ## General Usage +//! +//! [`RequestResponse`] is a `NetworkBehaviour` that implements a generic +//! request/response protocol or protocol family, whereby each request is +//! sent over a new substream on a connection. `RequestResponse` is generic +//! over the actual messages being sent, which are defined in terms of a +//! [`RequestResponseCodec`]. Creating a request/response protocol thus amounts +//! to providing an implementation of this trait which can then be +//! given to [`RequestResponse::new`]. Further configuration options are +//! available via the [`RequestResponseConfig`]. +//! +//! Requests are sent using [`RequestResponse::send_request`] and the +//! responses received as [`RequestResponseMessage::Response`] via +//! [`RequestResponseEvent::Message`]. +//! +//! Responses are sent using [`RequestResponse::send_response`] upon +//! receiving a [`RequestResponseMessage::Request`] via +//! [`RequestResponseEvent::Message`]. +//! +//! ## Protocol Families +//! +//! A single [`RequestResponse`] instance can be used with an entire +//! protocol family that share the same request and response types. +//! For that purpose, [`RequestResponseCodec::Protocol`] is typically +//! instantiated with a sum type. +//! +//! ## One-Way Protocols +//! +//! The implementation supports one-way protocols that do not +//! have responses. In these cases the [`RequestResponseCodec::Response`] can +//! 
be defined as `()` and [`RequestResponseCodec::read_response`] as well as +//! [`RequestResponseCodec::write_response`] given the obvious implementations. +//! Note that `RequestResponseMessage::Response` will still be emitted, +//! immediately after the request has been sent, since `RequestResponseCodec::read_response` +//! will not actually read anything from the given I/O stream. +//! [`RequestResponse::send_response`] need not be called for one-way protocols, +//! i.e. the [`ResponseChannel`] may just be dropped. +//! +//! ## Limited Protocol Support +//! +//! It is possible to only support inbound or outbound requests for +//! a particular protocol. This is achieved by instantiating `RequestResponse` +//! with protocols using [`ProtocolSupport::Inbound`] or +//! [`ProtocolSupport::Outbound`]. Any subset of protocols of a protocol +//! family can be configured in this way. Such protocols will not be +//! advertised during inbound respectively outbound protocol negotiation +//! on the substreams. + +pub mod codec; +pub mod handler; + +pub use codec::{RequestResponseCodec, ProtocolName}; +pub use handler::ProtocolSupport; + +use futures::{ + channel::oneshot, +}; +use handler::{ + RequestProtocol, + RequestResponseHandler, + RequestResponseHandlerEvent, +}; +use libp2p_core::{ + ConnectedPoint, + Multiaddr, + PeerId, + connection::ConnectionId, +}; +use libp2p_swarm::{ + DialPeerCondition, + NetworkBehaviour, + NetworkBehaviourAction, + NotifyHandler, + PollParameters, +}; +use smallvec::SmallVec; +use std::{ + collections::{VecDeque, HashMap}, + time::Duration, + task::{Context, Poll} +}; + +/// An inbound request or response. +#[derive(Debug)] +pub enum RequestResponseMessage { + /// A request message. + Request { + /// The request message. + request: TRequest, + /// The sender of the request who is awaiting a response. + /// + /// See [`RequestResponse::send_response`]. + channel: ResponseChannel, + }, + /// A response message. 
+ Response { + /// The ID of the request that produced this response. + /// + /// See [`RequestResponse::send_request`]. + request_id: RequestId, + /// The response message. + response: TResponse + }, +} + +/// The events emitted by a [`RequestResponse`] protocol. +#[derive(Debug)] +pub enum RequestResponseEvent { + /// An incoming message (request or response). + Message { + /// The peer who sent the message. + peer: PeerId, + /// The incoming message. + message: RequestResponseMessage + }, + /// An outbound request failed. + OutboundFailure { + /// The peer to whom the request was sent. + peer: PeerId, + /// The (local) ID of the failed request. + request_id: RequestId, + /// The error that occurred. + error: OutboundFailure, + }, + /// An inbound request failed. + InboundFailure { + /// The peer from whom the request was received. + peer: PeerId, + /// The error that occurred. + error: InboundFailure, + }, +} + +/// Possible failures occurring in the context of sending +/// an outbound request and receiving the response. +#[derive(Debug)] +pub enum OutboundFailure { + /// The request could not be sent because a dialing attempt failed. + DialFailure, + /// The request timed out before a response was received. + /// + /// It is not known whether the request may have been + /// received (and processed) by the remote peer. + Timeout, + /// The connection closed before a response was received. + /// + /// It is not known whether the request may have been + /// received (and processed) by the remote peer. + ConnectionClosed, + /// The remote supports none of the requested protocols. + UnsupportedProtocols, +} + +/// Possible failures occurring in the context of receiving an +/// inbound request and sending a response. +#[derive(Debug)] +pub enum InboundFailure { + /// The inbound request timed out, either while reading the + /// incoming request or before a response is sent, i.e. if + /// [`RequestResponse::send_response`] is not called in a + /// timely manner. 
+ Timeout, + /// The local peer supports none of the requested protocols. + UnsupportedProtocols, +} + +/// A channel for sending a response to an inbound request. +/// +/// See [`RequestResponse::send_response`]. +#[derive(Debug)] +pub struct ResponseChannel { + peer: PeerId, + sender: oneshot::Sender, +} + +impl ResponseChannel { + /// Checks whether the response channel is still open, i.e. + /// the `RequestResponse` behaviour is still waiting for a + /// a response to be sent via [`RequestResponse::send_response`] + /// and this response channel. + /// + /// If the response channel is no longer open then the inbound + /// request timed out waiting for the response. + pub fn is_open(&self) -> bool { + !self.sender.is_canceled() + } +} + +/// The (local) ID of an outgoing request. +/// +/// See [`RequestResponse::send_request`]. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct RequestId(u64); + +/// The configuration for a `RequestResponse` protocol. +#[derive(Debug, Clone)] +pub struct RequestResponseConfig { + request_timeout: Duration, + connection_keep_alive: Duration, +} + +impl Default for RequestResponseConfig { + fn default() -> Self { + Self { + connection_keep_alive: Duration::from_secs(10), + request_timeout: Duration::from_secs(10), + } + } +} + +impl RequestResponseConfig { + /// Sets the keep-alive timeout of idle connections. + pub fn set_connection_keep_alive(&mut self, v: Duration) -> &mut Self { + self.connection_keep_alive = v; + self + } + + /// Sets the timeout for inbound and outbound requests. + pub fn set_request_timeout(&mut self, v: Duration) -> &mut Self { + self.request_timeout = v; + self + } +} + +/// A request/response protocol for some message codec. +pub struct RequestResponse +where + TCodec: RequestResponseCodec, +{ + /// The supported inbound protocols. + inbound_protocols: SmallVec<[TCodec::Protocol; 2]>, + /// The supported outbound protocols. 
+ outbound_protocols: SmallVec<[TCodec::Protocol; 2]>, + /// The next (local) request ID. + next_request_id: RequestId, + /// The protocol configuration. + config: RequestResponseConfig, + /// The protocol codec for reading and writing requests and responses. + codec: TCodec, + /// Pending events to return from `poll`. + pending_events: VecDeque< + NetworkBehaviourAction< + RequestProtocol, + RequestResponseEvent>>, + /// The currently connected peers and their known, reachable addresses, if any. + connected: HashMap>, + /// Externally managed addresses via `add_address` and `remove_address`. + addresses: HashMap>, + /// Requests that have not yet been sent and are waiting for a connection + /// to be established. + pending_requests: HashMap; 10]>>, + /// Responses that have not yet been received. + pending_responses: HashMap, +} + +impl RequestResponse +where + TCodec: RequestResponseCodec + Clone, +{ + /// Creates a new `RequestResponse` behaviour for the given + /// protocols, codec and configuration. + pub fn new(codec: TCodec, protocols: I, cfg: RequestResponseConfig) -> Self + where + I: IntoIterator + { + let mut inbound_protocols = SmallVec::new(); + let mut outbound_protocols = SmallVec::new(); + for (p, s) in protocols { + if s.inbound() { + inbound_protocols.push(p.clone()); + } + if s.outbound() { + outbound_protocols.push(p.clone()); + } + } + RequestResponse { + inbound_protocols, + outbound_protocols, + next_request_id: RequestId(1), + config: cfg, + codec, + pending_events: VecDeque::new(), + connected: HashMap::new(), + pending_requests: HashMap::new(), + pending_responses: HashMap::new(), + addresses: HashMap::new(), + } + } + + /// Initiates sending a request. + /// + /// If the targeted peer is currently not connected, a dialing + /// attempt is initiated and the request is sent as soon as a + /// connection is established. 
+ /// + /// > **Note**: In order for such a dialing attempt to succeed, + /// > the `RequestResonse` protocol must either be embedded + /// > in another `NetworkBehaviour` that provides peer and + /// > address discovery, or known addresses of peers must be + /// > managed via [`RequestResponse::add_address`] and + /// > [`RequestResponse::remove_address`]. + pub fn send_request(&mut self, peer: &PeerId, request: TCodec::Request) -> RequestId { + let request_id = self.next_request_id(); + let request = RequestProtocol { + request_id, + codec: self.codec.clone(), + protocols: self.outbound_protocols.clone(), + request, + }; + + if let Some(request) = self.try_send_request(peer, request) { + self.pending_events.push_back(NetworkBehaviourAction::DialPeer { + peer_id: peer.clone(), + condition: DialPeerCondition::Disconnected, + }); + self.pending_requests.entry(peer.clone()).or_default().push(request); + } + + request_id + } + + /// Initiates sending a response to an inbound request. + /// + /// If the `ResponseChannel` is already closed due to a timeout, + /// the response is discarded and eventually [`RequestResponseEvent::InboundFailure`] + /// is emitted by `RequestResponse::poll`. + /// + /// The provided `ResponseChannel` is obtained from a + /// [`RequestResponseMessage::Request`]. + pub fn send_response(&mut self, ch: ResponseChannel, rs: TCodec::Response) { + // Fails only if the inbound upgrade timed out waiting for the response, + // in which case the handler emits `RequestResponseHandlerEvent::InboundTimeout` + // which in turn results in `RequestResponseEvent::InboundFailure`. + let _ = ch.sender.send(rs); + } + + /// Adds a known address for a peer that can be used for + /// dialing attempts by the `Swarm`, i.e. is returned + /// by [`NetworkBehaviour::addresses_of_peer`]. + /// + /// Addresses added in this way are only removed by `remove_address`. 
+ pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) { + self.addresses.entry(peer.clone()).or_default().push(address); + } + + /// Removes an address of a peer previously added via `add_address`. + pub fn remove_address(&mut self, peer: &PeerId, address: &Multiaddr) { + let mut last = false; + if let Some(addresses) = self.addresses.get_mut(peer) { + addresses.retain(|a| a != address); + last = addresses.is_empty(); + } + if last { + self.addresses.remove(peer); + } + } + + /// Checks whether a peer is currently connected. + pub fn is_connected(&self, peer: &PeerId) -> bool { + self.connected.contains_key(peer) + } + + /// Checks whether an outbound request initiated by + /// [`RequestResponse::send_request`] is still pending, i.e. waiting + /// for a response. + pub fn is_pending(&self, req_id: &RequestId) -> bool { + self.pending_responses.contains_key(req_id) + } + + /// Returns the next request ID. + fn next_request_id(&mut self) -> RequestId { + let request_id = self.next_request_id; + self.next_request_id.0 += 1; + request_id + } + + /// Tries to send a request by queueing an appropriate event to be + /// emitted to the `Swarm`. If the peer is not currently connected, + /// the given request is return unchanged. 
+ fn try_send_request(&mut self, peer: &PeerId, request: RequestProtocol) + -> Option> + { + if let Some(connections) = self.connected.get(peer) { + let ix = (request.request_id.0 as usize) % connections.len(); + let conn = connections[ix].id; + self.pending_responses.insert(request.request_id, (peer.clone(), conn)); + self.pending_events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer.clone(), + handler: NotifyHandler::One(conn), + event: request + }); + None + } else { + Some(request) + } + } +} + +impl NetworkBehaviour for RequestResponse +where + TCodec: RequestResponseCodec + Send + Clone + 'static, +{ + type ProtocolsHandler = RequestResponseHandler; + type OutEvent = RequestResponseEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + RequestResponseHandler::new( + self.inbound_protocols.clone(), + self.codec.clone(), + self.config.connection_keep_alive, + self.config.request_timeout, + ) + } + + fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec { + let mut addresses = Vec::new(); + if let Some(connections) = self.connected.get(peer) { + addresses.extend(connections.iter().filter_map(|c| c.address.clone())) + } + if let Some(more) = self.addresses.get(peer) { + addresses.extend(more.into_iter().cloned()); + } + addresses + } + + fn inject_connected(&mut self, peer: &PeerId) { + if let Some(pending) = self.pending_requests.remove(peer) { + for request in pending { + let request = self.try_send_request(peer, request); + assert!(request.is_none()); + } + } + } + + fn inject_connection_established(&mut self, peer: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + let address = match endpoint { + ConnectedPoint::Dialer { address } => Some(address.clone()), + ConnectedPoint::Listener { .. 
} => None + }; + let connections = self.connected.entry(peer.clone()).or_default(); + connections.push(Connection { id: *conn, address }) + } + + fn inject_connection_closed(&mut self, peer: &PeerId, conn: &ConnectionId, _: &ConnectedPoint) { + if let Some(connections) = self.connected.get_mut(peer) { + if let Some(pos) = connections.iter().position(|c| &c.id == conn) { + connections.remove(pos); + } + } + + // Any pending responses of requests sent over this connection + // must be considered failed. + let failed = self.pending_responses.iter() + .filter_map(|(r, (p, c))| + if conn == c { + Some((p.clone(), *r)) + } else { + None + }) + .collect::>(); + + for (peer, request_id) in failed { + self.pending_responses.remove(&request_id); + self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent( + RequestResponseEvent::OutboundFailure { + peer, + request_id, + error: OutboundFailure::ConnectionClosed + } + )); + } + } + + fn inject_disconnected(&mut self, peer: &PeerId) { + self.connected.remove(peer); + } + + fn inject_dial_failure(&mut self, peer: &PeerId) { + // If there are pending outgoing requests when a dial failure occurs, + // it is implied that we are not connected to the peer, since pending + // outgoing requests are drained when a connection is established and + // only created when a peer is not connected when a request is made. + // Thus these requests must be considered failed, even if there is + // another, concurrent dialing attempt ongoing. 
+ if let Some(pending) = self.pending_requests.remove(peer) { + for request in pending { + self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent( + RequestResponseEvent::OutboundFailure { + peer: peer.clone(), + request_id: request.request_id, + error: OutboundFailure::DialFailure + } + )); + } + } + } + + fn inject_event( + &mut self, + peer: PeerId, + _: ConnectionId, + event: RequestResponseHandlerEvent, + ) { + match event { + RequestResponseHandlerEvent::Response { request_id, response } => { + self.pending_responses.remove(&request_id); + let message = RequestResponseMessage::Response { request_id, response }; + self.pending_events.push_back( + NetworkBehaviourAction::GenerateEvent( + RequestResponseEvent::Message { peer, message })); + } + RequestResponseHandlerEvent::Request { request, sender } => { + let channel = ResponseChannel { peer: peer.clone(), sender }; + let message = RequestResponseMessage::Request { request, channel }; + self.pending_events.push_back( + NetworkBehaviourAction::GenerateEvent( + RequestResponseEvent::Message { peer, message })); + } + RequestResponseHandlerEvent::OutboundTimeout(request_id) => { + if let Some((peer, _conn)) = self.pending_responses.remove(&request_id) { + self.pending_events.push_back( + NetworkBehaviourAction::GenerateEvent( + RequestResponseEvent::OutboundFailure { + peer, + request_id, + error: OutboundFailure::Timeout, + })); + } + } + RequestResponseHandlerEvent::InboundTimeout => { + self.pending_events.push_back( + NetworkBehaviourAction::GenerateEvent( + RequestResponseEvent::InboundFailure { + peer, + error: InboundFailure::Timeout, + })); + } + RequestResponseHandlerEvent::OutboundUnsupportedProtocols(request_id) => { + self.pending_events.push_back( + NetworkBehaviourAction::GenerateEvent( + RequestResponseEvent::OutboundFailure { + peer, + request_id, + error: OutboundFailure::UnsupportedProtocols, + })); + } + RequestResponseHandlerEvent::InboundUnsupportedProtocols => { + 
self.pending_events.push_back( + NetworkBehaviourAction::GenerateEvent( + RequestResponseEvent::InboundFailure { + peer, + error: InboundFailure::UnsupportedProtocols, + })); + } + } + } + + fn poll(&mut self, _: &mut Context, _: &mut impl PollParameters) + -> Poll, + RequestResponseEvent + >> + { + if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(ev); + } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { + self.pending_events.shrink_to_fit(); + } + + Poll::Pending + } +} + +/// Internal threshold for when to shrink the capacity +/// of empty queues. If the capacity of an empty queue +/// exceeds this threshold, the associated memory is +/// released. +const EMPTY_QUEUE_SHRINK_THRESHOLD: usize = 100; + +/// Internal information tracked for an established connection. +struct Connection { + id: ConnectionId, + address: Option, +} + diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs new file mode 100644 index 00000000..107a37ed --- /dev/null +++ b/protocols/request-response/tests/ping.rs @@ -0,0 +1,195 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! Integration tests for the `RequestResponse` network behaviour. + +use async_trait::async_trait; +use libp2p_core::{ + Multiaddr, + PeerId, + identity, + muxing::StreamMuxerBox, + transport::{Transport, boxed::Boxed}, + upgrade::{self, read_one, write_one} +}; +use libp2p_noise::{NoiseConfig, X25519Spec, Keypair}; +use libp2p_request_response::*; +use libp2p_swarm::Swarm; +use libp2p_tcp::TcpConfig; +use futures::{prelude::*, channel::mpsc}; +use rand::{self, Rng}; +use std::{io, iter}; + +/// Exercises a simple ping protocol. +#[test] +fn ping_protocol() { + let num_pings: u8 = rand::thread_rng().gen_range(1, 100); + + let ping = Ping("ping".to_string().into_bytes()); + let pong = Pong("pong".to_string().into_bytes()); + + let protocols = iter::once((PingProtocol(), ProtocolSupport::Full)); + let cfg = RequestResponseConfig::default(); + + let (peer1_id, trans) = mk_transport(); + let ping_proto1 = RequestResponse::new(PingCodec(), protocols.clone(), cfg.clone()); + let mut swarm1 = Swarm::new(trans, ping_proto1, peer1_id.clone()); + + let (peer2_id, trans) = mk_transport(); + let ping_proto2 = RequestResponse::new(PingCodec(), protocols, cfg); + let mut swarm2 = Swarm::new(trans, ping_proto2, peer2_id.clone()); + + let (mut tx, mut rx) = mpsc::channel::(1); + + let addr = "/ip4/127.0.0.1/tcp/0".parse().unwrap(); + Swarm::listen_on(&mut swarm1, addr).unwrap(); + + let expected_ping = ping.clone(); + let expected_pong = pong.clone(); + + let peer1 = async move { + while let Some(_) = swarm1.next().now_or_never() {} + + let l = Swarm::listeners(&swarm1).next().unwrap(); + tx.send(l.clone()).await.unwrap(); + + loop { + match swarm1.next().await { + RequestResponseEvent::Message { + peer, + 
message: RequestResponseMessage::Request { request, channel } + } => { + assert_eq!(&request, &expected_ping); + assert_eq!(&peer, &peer2_id); + swarm1.send_response(channel, pong.clone()); + }, + e => panic!("Peer1: Unexpected event: {:?}", e) + } + } + }; + + let peer2 = async move { + let mut count = 0; + let addr = rx.next().await.unwrap(); + swarm2.add_address(&peer1_id, addr.clone()); + let mut req_id = swarm2.send_request(&peer1_id, ping.clone()); + + loop { + match swarm2.next().await { + RequestResponseEvent::Message { + peer, + message: RequestResponseMessage::Response { request_id, response } + } => { + count += 1; + assert_eq!(&response, &expected_pong); + assert_eq!(&peer, &peer1_id); + assert_eq!(req_id, request_id); + if count >= num_pings { + return + } else { + req_id = swarm2.send_request(&peer1_id, ping.clone()); + } + }, + e => panic!("Peer2: Unexpected event: {:?}", e) + } + } + }; + + async_std::task::spawn(Box::pin(peer1)); + let () = async_std::task::block_on(peer2); +} + +fn mk_transport() -> (PeerId, Boxed<(PeerId, StreamMuxerBox), io::Error>) { + let id_keys = identity::Keypair::generate_ed25519(); + let peer_id = id_keys.public().into_peer_id(); + let noise_keys = Keypair::::new().into_authentic(&id_keys).unwrap(); + let transport = TcpConfig::new() + .nodelay(true) + .upgrade(upgrade::Version::V1) + .authenticate(NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(libp2p_yamux::Config::default()) + .map(|(peer, muxer), _| (peer, StreamMuxerBox::new(muxer))) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) + .boxed(); + (peer_id, transport) +} + +// Simple Ping-Pong Protocol + +#[derive(Debug, Clone)] +struct PingProtocol(); +#[derive(Clone)] +struct PingCodec(); +#[derive(Debug, Clone, PartialEq, Eq)] +struct Ping(Vec); +#[derive(Debug, Clone, PartialEq, Eq)] +struct Pong(Vec); + +impl ProtocolName for PingProtocol { + fn protocol_name(&self) -> &[u8] { + "/ping/1".as_bytes() + } +} + +#[async_trait] +impl 
RequestResponseCodec for PingCodec { + type Protocol = PingProtocol; + type Request = Ping; + type Response = Pong; + + async fn read_request(&mut self, _: &PingProtocol, io: &mut T) + -> io::Result + where + T: AsyncRead + Unpin + Send + { + read_one(io, 1024) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .map_ok(Ping) + .await + } + + async fn read_response(&mut self, _: &PingProtocol, io: &mut T) + -> io::Result + where + T: AsyncRead + Unpin + Send + { + read_one(io, 1024) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .map_ok(Pong) + .await + } + + async fn write_request(&mut self, _: &PingProtocol, io: &mut T, Ping(data): Ping) + -> io::Result<()> + where + T: AsyncWrite + Unpin + Send + { + write_one(io, data).await + } + + async fn write_response(&mut self, _: &PingProtocol, io: &mut T, Pong(data): Pong) + -> io::Result<()> + where + T: AsyncWrite + Unpin + Send + { + write_one(io, data).await + } +} + diff --git a/protocols/secio/CHANGELOG.md b/protocols/secio/CHANGELOG.md new file mode 100644 index 00000000..7bb1e881 --- /dev/null +++ b/protocols/secio/CHANGELOG.md @@ -0,0 +1,3 @@ +# 0.19.2 [2020-06-22] + +Updated dependencies. 
diff --git a/protocols/secio/Cargo.toml b/protocols/secio/Cargo.toml index ced1078f..aaf31383 100644 --- a/protocols/secio/Cargo.toml +++ b/protocols/secio/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-secio" edition = "2018" description = "Secio encryption protocol for libp2p" -version = "0.19.0" +version = "0.19.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -16,10 +16,10 @@ ctr = "0.3" futures = "0.3.1" hmac = "0.7.0" lazy_static = "1.2.0" -libp2p-core = { version = "0.19.0", path = "../../core" } +libp2p-core = { version = "0.19.2", path = "../../core" } log = "0.4.6" prost = "0.6.1" -pin-project = "0.4.6" +pin-project = "0.4.17" quicksink = "0.1" rand = "0.7" rw-stream-sink = "0.2.0" @@ -46,10 +46,10 @@ secp256k1 = [] aes-all = ["aesni"] [dev-dependencies] -async-std = "~1.5.0" +async-std = "1.6.2" criterion = "0.3" -libp2p-mplex = { version = "0.19.0", path = "../../muxers/mplex" } -libp2p-tcp = { version = "0.19.0", path = "../../transports/tcp" } +libp2p-mplex = { path = "../../muxers/mplex" } +libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"] } [[bench]] name = "bench" diff --git a/protocols/secio/src/algo_support.rs b/protocols/secio/src/algo_support.rs index 21114cde..5e6d5e46 100644 --- a/protocols/secio/src/algo_support.rs +++ b/protocols/secio/src/algo_support.rs @@ -24,7 +24,7 @@ //! helps you with. 
use crate::error::SecioError; -#[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] +#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] use ring::digest; use std::cmp::Ordering; use crate::stream_cipher::Cipher; @@ -204,7 +204,7 @@ pub fn select_digest(r: Ordering, ours: &str, theirs: &str) -> Result for Digest { #[inline] fn into(self) -> &'static digest::Algorithm { diff --git a/protocols/secio/src/exchange.rs b/protocols/secio/src/exchange.rs index c1a9bad6..1ae4120b 100644 --- a/protocols/secio/src/exchange.rs +++ b/protocols/secio/src/exchange.rs @@ -24,10 +24,10 @@ use futures::prelude::*; use crate::SecioError; #[path = "exchange/impl_ring.rs"] -#[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] +#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] mod platform; #[path = "exchange/impl_webcrypto.rs"] -#[cfg(any(target_os = "emscripten", target_os = "unknown"))] +#[cfg(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown"))] mod platform; /// Possible key agreement algorithms. diff --git a/protocols/secio/src/handshake.rs b/protocols/secio/src/handshake.rs index faffccc9..e436a777 100644 --- a/protocols/secio/src/handshake.rs +++ b/protocols/secio/src/handshake.rs @@ -371,7 +371,7 @@ mod tests { use futures::{prelude::*, channel::oneshot}; #[test] - #[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] + #[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] fn handshake_with_self_succeeds_rsa() { let key1 = { let mut private = include_bytes!("../tests/test-rsa-private-key.pk8").to_vec(); diff --git a/src/lib.rs b/src/lib.rs index 7f24d409..eb68a331 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -85,7 +85,7 @@ //! Example ([`secio`] + [`yamux`] Protocol Upgrade): //! //! ```rust -//! 
# #[cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "secio", feature = "yamux"))] { +//! # #[cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), feature = "tcp-async-std", feature = "secio", feature = "yamux"))] { //! use libp2p::{Transport, core::upgrade, tcp::TcpConfig, secio::SecioConfig, identity::Keypair, yamux}; //! let tcp = TcpConfig::new(); //! let secio = SecioConfig::new(Keypair::generate_ed25519()); @@ -166,12 +166,12 @@ pub use multihash; pub use libp2p_core as core; #[cfg(feature = "deflate")] #[cfg_attr(docsrs, doc(cfg(feature = "deflate")))] -#[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] +#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] #[doc(inline)] pub use libp2p_deflate as deflate; #[cfg(feature = "dns")] #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] -#[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] +#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] #[doc(inline)] pub use libp2p_dns as dns; #[cfg(feature = "identify")] @@ -196,7 +196,7 @@ pub use libp2p_gossipsub as gossipsub; pub use libp2p_mplex as mplex; #[cfg(feature = "mdns")] #[cfg_attr(docsrs, doc(cfg(feature = "mdns")))] -#[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] +#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] #[doc(inline)] pub use libp2p_mdns as mdns; #[cfg(feature = "noise")] @@ -217,9 +217,9 @@ pub use libp2p_plaintext as plaintext; pub use libp2p_secio as secio; #[doc(inline)] pub use libp2p_swarm as swarm; -#[cfg(any(feature = "tcp-async-std", feature = "tcp-tokio-std"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "tcp-async-std", feature = "tcp-tokio-std"))))] -#[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] +#[cfg(any(feature = "tcp-async-std", feature = "tcp-tokio"))] +#[cfg_attr(docsrs, 
doc(cfg(any(feature = "tcp-async-std", feature = "tcp-tokio"))))] +#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] #[doc(inline)] pub use libp2p_tcp as tcp; #[cfg(feature = "uds")] @@ -232,7 +232,7 @@ pub use libp2p_uds as uds; pub use libp2p_wasm_ext as wasm_ext; #[cfg(feature = "websocket")] #[cfg_attr(docsrs, doc(cfg(feature = "websocket")))] -#[cfg(not(any(target_os = "emscripten", target_os = "unknown")))] +#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] #[doc(inline)] pub use libp2p_websocket as websocket; #[cfg(feature = "yamux")] @@ -266,8 +266,8 @@ pub use self::transport_ext::TransportExt; /// /// > **Note**: This `Transport` is not suitable for production usage, as its implementation /// > reserves the right to support additional protocols or remove deprecated protocols. -#[cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))] -#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))))] +#[cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-std", feature = "tcp-tokio"), feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))] +#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-std", feature = "tcp-tokio"), feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))))] pub fn build_development_transport(keypair: identity::Keypair) -> std::io::Result> + Send + Sync), Error = impl std::error::Error + Send, Listener = impl Send, Dial = impl Send, ListenerUpgrade = impl Send> + Clone> { @@ -280,13 +280,16 @@ pub fn 
build_development_transport(keypair: identity::Keypair) /// and mplex or yamux as the multiplexing layer. /// /// > **Note**: If you ever need to express the type of this `Transport`. -#[cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))] -#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))))] +#[cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-std", feature = "tcp-tokio"), feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))] +#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-std", feature = "tcp-tokio"), feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux"))))] pub fn build_tcp_ws_secio_mplex_yamux(keypair: identity::Keypair) -> std::io::Result> + Send + Sync), Error = impl std::error::Error + Send, Listener = impl Send, Dial = impl Send, ListenerUpgrade = impl Send> + Clone> { let transport = { + #[cfg(feature = "tcp-async-std")] let tcp = tcp::TcpConfig::new().nodelay(true); + #[cfg(feature = "tcp-tokio")] + let tcp = tcp::TokioTcpConfig::new().nodelay(true); let transport = dns::DnsConfig::new(tcp)?; let trans_clone = transport.clone(); transport.or_transport(websocket::WsConfig::new(trans_clone)) @@ -306,13 +309,16 @@ pub fn build_tcp_ws_secio_mplex_yamux(keypair: identity::Keypair) /// and mplex or yamux as the multiplexing layer. /// /// > **Note**: If you ever need to express the type of this `Transport`. 
-#[cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux", feature = "pnet"))] -#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "unknown")), feature = "tcp-async-std", feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux", feature = "pnet"))))] +#[cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-std", feature = "tcp-tokio"), feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux", feature = "pnet"))] +#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-std", feature = "tcp-tokio"), feature = "websocket", feature = "secio", feature = "mplex", feature = "yamux", feature = "pnet"))))] pub fn build_tcp_ws_pnet_secio_mplex_yamux(keypair: identity::Keypair, psk: PreSharedKey) -> std::io::Result> + Send + Sync), Error = impl std::error::Error + Send, Listener = impl Send, Dial = impl Send, ListenerUpgrade = impl Send> + Clone> { let transport = { + #[cfg(feature = "tcp-async-std")] let tcp = tcp::TcpConfig::new().nodelay(true); + #[cfg(feature = "tcp-tokio")] + let tcp = tcp::TokioTcpConfig::new().nodelay(true); let transport = dns::DnsConfig::new(tcp)?; let trans_clone = transport.clone(); transport.or_transport(websocket::WsConfig::new(trans_clone)) diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md new file mode 100644 index 00000000..f3059907 --- /dev/null +++ b/swarm/CHANGELOG.md @@ -0,0 +1,15 @@ +# 0.20.0 [????-??-??] + +- Add `ProtocolsHandler::inject_listen_upgrade_error`, the inbound +analogue of `ProtocolsHandler::inject_dial_upgrade_error`, with an +empty default implementation. No implementation is required to +retain existing behaviour. 
+ +- Add `ProtocolsHandler::inject_address_change` and +`NetworkBehaviour::inject_address_change` to notify of a change in +the address of an existing connection. + +# 0.19.1 [2020-06-18] + +- Bugfix: Fix MultiHandler panicking when empty + ([PR 1598](https://github.com/libp2p/rust-libp2p/pull/1598)). diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index 46785665..d02ffff7 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-swarm" edition = "2018" description = "The libp2p swarm" -version = "0.19.0" +version = "0.19.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,7 +11,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.19.0", path = "../core" } +libp2p-core = { version = "0.19.2", path = "../core" } log = "0.4" rand = "0.7" smallvec = "1.0" @@ -19,6 +19,6 @@ wasm-timer = "0.2" void = "1" [dev-dependencies] -libp2p-mplex = { version = "0.19.0", path = "../muxers/mplex" } +libp2p-mplex = { path = "../muxers/mplex" } quickcheck = "0.9.0" rand = "0.7.2" diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index 16e804f9..bce5fbe2 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -101,6 +101,15 @@ pub trait NetworkBehaviour: Send + 'static { fn inject_connection_closed(&mut self, _: &PeerId, _: &ConnectionId, _: &ConnectedPoint) {} + /// Informs the behaviour that the [`ConnectedPoint`] of an existing connection has changed. + fn inject_address_change( + &mut self, + _: &PeerId, + _: &ConnectionId, + _old: &ConnectedPoint, + _new: &ConnectedPoint + ) {} + /// Informs the behaviour about an event generated by the handler dedicated to the peer identified by `peer_id`. /// for the behaviour. 
/// diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index f848f64f..70d2d2f0 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -496,6 +496,11 @@ where TBehaviour: NetworkBehaviour, let connection = connection.id(); this.behaviour.inject_event(peer, connection, event); }, + Poll::Ready(NetworkEvent::AddressChange { connection, new_endpoint, old_endpoint }) => { + let peer = connection.peer_id(); + let connection = connection.id(); + this.behaviour.inject_address_change(&peer, &connection, &old_endpoint, &new_endpoint); + }, Poll::Ready(NetworkEvent::ConnectionEstablished { connection, num_established }) => { let peer_id = connection.peer_id().clone(); let endpoint = connection.endpoint().clone(); diff --git a/swarm/src/protocols_handler.rs b/swarm/src/protocols_handler.rs index 95722ed9..9721e9db 100644 --- a/swarm/src/protocols_handler.rs +++ b/swarm/src/protocols_handler.rs @@ -53,6 +53,7 @@ pub use crate::upgrade::{ use libp2p_core::{ ConnectedPoint, + Multiaddr, PeerId, upgrade::{self, UpgradeError}, }; @@ -140,7 +141,10 @@ pub trait ProtocolsHandler: Send + 'static { /// Injects an event coming from the outside in the handler. fn inject_event(&mut self, event: Self::InEvent); - /// Indicates to the handler that upgrading a substream to the given protocol has failed. + /// Notifies the handler of a change in the address of the remote. + fn inject_address_change(&mut self, _new_address: &Multiaddr) {} + + /// Indicates to the handler that upgrading an outbound substream to the given protocol has failed. fn inject_dial_upgrade_error( &mut self, info: Self::OutboundOpenInfo, @@ -149,6 +153,14 @@ pub trait ProtocolsHandler: Send + 'static { > ); + /// Indicates to the handler that upgrading an inbound substream to the given protocol has failed. + fn inject_listen_upgrade_error( + &mut self, + _: ProtocolsHandlerUpgrErr< + ::Error + > + ) {} + /// Returns until when the connection should be kept alive. 
/// /// This method is called by the `Swarm` after each invocation of @@ -236,7 +248,7 @@ pub struct SubstreamProtocol { } impl SubstreamProtocol { - /// Create a new `ListenProtocol` from the given upgrade. + /// Create a new `SubstreamProtocol` from the given upgrade. /// /// The default timeout for applying the given upgrade on a substream is /// 10 seconds. diff --git a/swarm/src/protocols_handler/multi.rs b/swarm/src/protocols_handler/multi.rs index 3ecaab9b..cfbc6a7b 100644 --- a/swarm/src/protocols_handler/multi.rs +++ b/swarm/src/protocols_handler/multi.rs @@ -157,6 +157,12 @@ where fn poll(&mut self, cx: &mut Context) -> Poll> { + // Calling `gen_range(0, 0)` (see below) would panic, so we have return early to avoid + // that situation. + if self.handlers.is_empty() { + return Poll::Pending; + } + // Not always polling handlers in the same order should give anyone the chance to make progress. let pos = rand::thread_rng().gen_range(0, self.handlers.len()); diff --git a/swarm/src/protocols_handler/node_handler.rs b/swarm/src/protocols_handler/node_handler.rs index 8b045061..a24ea2cc 100644 --- a/swarm/src/protocols_handler/node_handler.rs +++ b/swarm/src/protocols_handler/node_handler.rs @@ -29,6 +29,7 @@ use crate::protocols_handler::{ use futures::prelude::*; use libp2p_core::{ + Multiaddr, PeerId, ConnectionInfo, Connected, @@ -220,6 +221,10 @@ where self.handler.inject_event(event); } + fn inject_address_change(&mut self, new_address: &Multiaddr) { + self.handler.inject_address_change(new_address); + } + fn poll(&mut self, cx: &mut Context) -> Poll< Result, Self::Error> > { @@ -228,15 +233,26 @@ where for n in (0..self.negotiating_in.len()).rev() { let (mut in_progress, mut timeout) = self.negotiating_in.swap_remove(n); match Future::poll(Pin::new(&mut timeout), cx) { - Poll::Ready(_) => continue, + Poll::Ready(Ok(_)) => { + let err = ProtocolsHandlerUpgrErr::Timeout; + self.handler.inject_listen_upgrade_error(err); + continue + } + Poll::Ready(Err(_)) 
=> { + let err = ProtocolsHandlerUpgrErr::Timer; + self.handler.inject_listen_upgrade_error(err); + continue; + } Poll::Pending => {}, } match Future::poll(Pin::new(&mut in_progress), cx) { Poll::Ready(Ok(upgrade)) => self.handler.inject_fully_negotiated_inbound(upgrade), Poll::Pending => self.negotiating_in.push((in_progress, timeout)), - // TODO: return a diagnostic event? - Poll::Ready(Err(_err)) => {} + Poll::Ready(Err(err)) => { + let err = ProtocolsHandlerUpgrErr::Upgrade(err); + self.handler.inject_listen_upgrade_error(err); + } } } diff --git a/swarm/src/protocols_handler/one_shot.rs b/swarm/src/protocols_handler/one_shot.rs index 92d63088..4659fe0f 100644 --- a/swarm/src/protocols_handler/one_shot.rs +++ b/swarm/src/protocols_handler/one_shot.rs @@ -31,23 +31,20 @@ use smallvec::SmallVec; use std::{error, task::Context, task::Poll, time::Duration}; use wasm_timer::Instant; -/// Implementation of `ProtocolsHandler` that opens a new substream for each individual message. -/// -/// This struct is meant to be a helper for other implementations to use. +/// A `ProtocolsHandler` that opens a new substream for each request. // TODO: Debug -pub struct OneShotHandler +pub struct OneShotHandler where - TOutProto: OutboundUpgradeSend, + TOutbound: OutboundUpgradeSend, { /// The upgrade for inbound substreams. - listen_protocol: SubstreamProtocol, + listen_protocol: SubstreamProtocol, /// If `Some`, something bad happened and we should shut down the handler with an error. - pending_error: - Option::Error>>, + pending_error: Option::Error>>, /// Queue of events to produce in `poll()`. - events_out: SmallVec<[TOutEvent; 4]>, + events_out: SmallVec<[TEvent; 4]>, /// Queue of outbound substreams to open. - dial_queue: SmallVec<[TOutProto; 4]>, + dial_queue: SmallVec<[TOutbound; 4]>, /// Current number of concurrent outbound substreams being opened. dial_negotiated: u32, /// Maximum number of concurrent outbound substreams being opened. Value is never modified. 
@@ -58,15 +55,14 @@ where config: OneShotHandlerConfig, } -impl - OneShotHandler +impl + OneShotHandler where - TOutProto: OutboundUpgradeSend, + TOutbound: OutboundUpgradeSend, { /// Creates a `OneShotHandler`. - #[inline] pub fn new( - listen_protocol: SubstreamProtocol, + listen_protocol: SubstreamProtocol, config: OneShotHandlerConfig, ) -> Self { OneShotHandler { @@ -77,12 +73,11 @@ where dial_negotiated: 0, max_dial_negotiated: 8, keep_alive: KeepAlive::Yes, - config + config, } } /// Returns the number of pending requests. - #[inline] pub fn pending_requests(&self) -> u32 { self.dial_negotiated + self.dial_queue.len() as u32 } @@ -91,8 +86,7 @@ where /// /// > **Note**: If you modify the protocol, modifications will only applies to future inbound /// > substreams, not the ones already being negotiated. - #[inline] - pub fn listen_protocol_ref(&self) -> &SubstreamProtocol { + pub fn listen_protocol_ref(&self) -> &SubstreamProtocol { &self.listen_protocol } @@ -100,26 +94,23 @@ where /// /// > **Note**: If you modify the protocol, modifications will only applies to future inbound /// > substreams, not the ones already being negotiated. - #[inline] - pub fn listen_protocol_mut(&mut self) -> &mut SubstreamProtocol { + pub fn listen_protocol_mut(&mut self) -> &mut SubstreamProtocol { &mut self.listen_protocol } /// Opens an outbound substream with `upgrade`. 
- #[inline] - pub fn send_request(&mut self, upgrade: TOutProto) { + pub fn send_request(&mut self, upgrade: TOutbound) { self.keep_alive = KeepAlive::Yes; self.dial_queue.push(upgrade); } } -impl Default - for OneShotHandler +impl Default + for OneShotHandler where - TOutProto: OutboundUpgradeSend, - TInProto: InboundUpgradeSend + Default, + TOutbound: OutboundUpgradeSend, + TInbound: InboundUpgradeSend + Default, { - #[inline] fn default() -> Self { OneShotHandler::new( SubstreamProtocol::new(Default::default()), @@ -128,45 +119,42 @@ where } } -impl ProtocolsHandler - for OneShotHandler +impl ProtocolsHandler + for OneShotHandler where - TInProto: InboundUpgradeSend + Send + 'static, - TOutProto: OutboundUpgradeSend, - TInProto::Output: Into, - TOutProto::Output: Into, - TOutProto::Error: error::Error + Send + 'static, - SubstreamProtocol: Clone, - TOutEvent: Send + 'static, + TInbound: InboundUpgradeSend + Send + 'static, + TOutbound: OutboundUpgradeSend, + TInbound::Output: Into, + TOutbound::Output: Into, + TOutbound::Error: error::Error + Send + 'static, + SubstreamProtocol: Clone, + TEvent: Send + 'static, { - type InEvent = TOutProto; - type OutEvent = TOutEvent; + type InEvent = TOutbound; + type OutEvent = TEvent; type Error = ProtocolsHandlerUpgrErr< ::Error, >; - type InboundProtocol = TInProto; - type OutboundProtocol = TOutProto; + type InboundProtocol = TInbound; + type OutboundProtocol = TOutbound; type OutboundOpenInfo = (); - #[inline] fn listen_protocol(&self) -> SubstreamProtocol { self.listen_protocol.clone() } - #[inline] fn inject_fully_negotiated_inbound( &mut self, out: ::Output, ) { // If we're shutting down the connection for inactivity, reset the timeout. 
if !self.keep_alive.is_yes() { - self.keep_alive = KeepAlive::Until(Instant::now() + self.config.inactive_timeout); + self.keep_alive = KeepAlive::Until(Instant::now() + self.config.keep_alive_timeout); } self.events_out.push(out.into()); } - #[inline] fn inject_fully_negotiated_outbound( &mut self, out: ::Output, @@ -175,21 +163,19 @@ where self.dial_negotiated -= 1; if self.dial_negotiated == 0 && self.dial_queue.is_empty() { - self.keep_alive = KeepAlive::Until(Instant::now() + self.config.inactive_timeout); + self.keep_alive = KeepAlive::Until(Instant::now() + self.config.keep_alive_timeout); } self.events_out.push(out.into()); } - #[inline] fn inject_event(&mut self, event: Self::InEvent) { self.send_request(event); } - #[inline] fn inject_dial_upgrade_error( &mut self, - _: Self::OutboundOpenInfo, + _info: Self::OutboundOpenInfo, error: ProtocolsHandlerUpgrErr< ::Error, >, @@ -199,7 +185,6 @@ where } } - #[inline] fn connection_keep_alive(&self) -> KeepAlive { self.keep_alive } @@ -211,12 +196,12 @@ where ProtocolsHandlerEvent, > { if let Some(err) = self.pending_error.take() { - return Poll::Ready(ProtocolsHandlerEvent::Close(err)); + return Poll::Ready(ProtocolsHandlerEvent::Close(err)) } if !self.events_out.is_empty() { return Poll::Ready(ProtocolsHandlerEvent::Custom( - self.events_out.remove(0), + self.events_out.remove(0) )); } else { self.events_out.shrink_to_fit(); @@ -225,10 +210,11 @@ where if !self.dial_queue.is_empty() { if self.dial_negotiated < self.max_dial_negotiated { self.dial_negotiated += 1; + let upgrade = self.dial_queue.remove(0); return Poll::Ready( ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(self.dial_queue.remove(0)) - .with_timeout(self.config.substream_timeout), + protocol: SubstreamProtocol::new(upgrade) + .with_timeout(self.config.outbound_substream_timeout), info: (), }, ); @@ -244,17 +230,18 @@ where /// Configuration parameters for the `OneShotHandler` #[derive(Debug)] pub struct 
OneShotHandlerConfig { - /// After the given duration has elapsed, an inactive connection will shutdown. - pub inactive_timeout: Duration, - /// Timeout duration for each newly opened outbound substream. - pub substream_timeout: Duration, + /// Keep-alive timeout for idle connections. + pub keep_alive_timeout: Duration, + /// Timeout for outbound substream upgrades. + pub outbound_substream_timeout: Duration, } impl Default for OneShotHandlerConfig { fn default() -> Self { - let inactive_timeout = Duration::from_secs(10); - let substream_timeout = Duration::from_secs(10); - OneShotHandlerConfig { inactive_timeout, substream_timeout } + OneShotHandlerConfig { + keep_alive_timeout: Duration::from_secs(10), + outbound_substream_timeout: Duration::from_secs(10), + } } } diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index 82e54151..b9bd3763 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -29,8 +29,8 @@ //! implementation of the `Transport` trait. //! //! Whenever we want to dial an address through the `DnsConfig` and that address contains a -//! `/dns4/` or `/dns6/` component, a DNS resolve will be performed and the component will be -//! replaced with respectively an `/ip4/` or an `/ip6/` component. +//! `/dns/`, `/dns4/`, or `/dns6/` component, a DNS resolve will be performed and the component +//! will be replaced with `/ip4/` and/or `/ip6/` components. //! use futures::{prelude::*, channel::oneshot, future::BoxFuture}; @@ -45,8 +45,8 @@ use std::{error, fmt, io, net::ToSocketAddrs}; /// Represents the configuration for a DNS transport capability of libp2p. /// /// This struct implements the `Transport` trait and holds an underlying transport. Any call to -/// `dial` with a multiaddr that contains `/dns4/` or `/dns6/` will be first be resolved, then -/// passed to the underlying transport. 
+/// `dial` with a multiaddr that contains `/dns/`, `/dns4/`, or `/dns6/` will be first be resolved, +/// then passed to the underlying transport. /// /// Listening is unaffected. #[derive(Clone)] diff --git a/transports/tcp/CHANGELOG.md b/transports/tcp/CHANGELOG.md new file mode 100644 index 00000000..7bb1e881 --- /dev/null +++ b/transports/tcp/CHANGELOG.md @@ -0,0 +1,3 @@ +# 0.19.2 [2020-06-22] + +Updated dependencies. diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index f7b096b6..6c771d87 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-tcp" edition = "2018" description = "TCP/IP transport protocol for libp2p" -version = "0.19.0" +version = "0.19.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -10,12 +10,16 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-std = { version = "~1.5.0", optional = true } +async-std = { version = "1.6.2", optional = true } futures = "0.3.1" futures-timer = "3.0" get_if_addrs = "0.5.3" ipnet = "2.0.0" -libp2p-core = { version = "0.19.0", path = "../../core" } +libp2p-core = { version = "0.19.2", path = "../../core" } log = "0.4.1" socket2 = "0.3.12" tokio = { version = "0.2", default-features = false, features = ["tcp"], optional = true } + +[dev-dependencies] +libp2p-tcp = { path = ".", features = ["async-std"] } + diff --git a/transports/uds/CHANGELOG.md b/transports/uds/CHANGELOG.md new file mode 100644 index 00000000..7bb1e881 --- /dev/null +++ b/transports/uds/CHANGELOG.md @@ -0,0 +1,3 @@ +# 0.19.2 [2020-06-22] + +Updated dependencies. 
diff --git a/transports/uds/Cargo.toml b/transports/uds/Cargo.toml index ac298c0d..f72fe8b8 100644 --- a/transports/uds/Cargo.toml +++ b/transports/uds/Cargo.toml @@ -2,21 +2,21 @@ name = "libp2p-uds" edition = "2018" description = "Unix domain sockets transport for libp2p" -version = "0.19.0" +version = "0.19.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] -[target.'cfg(all(unix, not(any(target_os = "emscripten", target_os = "unknown"))))'.dependencies] -async-std = { version = "~1.5.0", optional = true } -libp2p-core = { version = "0.19.0", path = "../../core" } +[target.'cfg(all(unix, not(target_os = "emscripten")))'.dependencies] +async-std = { version = "1.6.2", optional = true } +libp2p-core = { version = "0.19.2", path = "../../core" } log = "0.4.1" futures = "0.3.1" tokio = { version = "0.2", default-features = false, features = ["uds"], optional = true } -[target.'cfg(all(unix, not(any(target_os = "emscripten", target_os = "unknown"))))'.dev-dependencies] +[target.'cfg(all(unix, not(target_os = "emscripten")))'.dev-dependencies] tempfile = "3.0" [features] diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index 44bad943..05efae63 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -31,8 +31,8 @@ //! The `UdsConfig` structs implements the `Transport` trait of the `core` library. See the //! documentation of `core` and of libp2p in general to learn how to use the `Transport` trait. 
-#![cfg(all(unix, not(any(target_os = "emscripten", target_os = "unknown"))))] -#![cfg_attr(docsrs, doc(cfg(all(unix, not(any(target_os = "emscripten", target_os = "unknown"))))))] +#![cfg(all(unix, not(target_os = "emscripten")))] +#![cfg_attr(docsrs, doc(cfg(all(unix, not(target_os = "emscripten")))))] use futures::{prelude::*, future::{BoxFuture, Ready}}; use futures::stream::BoxStream; diff --git a/transports/wasm-ext/src/websockets.js b/transports/wasm-ext/src/websockets.js index 7b96409c..babf10f9 100644 --- a/transports/wasm-ext/src/websockets.js +++ b/transports/wasm-ext/src/websockets.js @@ -32,7 +32,7 @@ export const websocket_transport = () => { /// Turns a string multiaddress into a WebSockets string URL. // TODO: support dns addresses as well const multiaddr_to_ws = (addr) => { - let parsed = addr.match(/^\/(ip4|ip6|dns4|dns6)\/(.*?)\/tcp\/(.*?)\/(ws|wss|x-parity-ws\/(.*)|x-parity-wss\/(.*))$/); + let parsed = addr.match(/^\/(ip4|ip6|dns4|dns6|dns)\/(.*?)\/tcp\/(.*?)\/(ws|wss|x-parity-ws\/(.*)|x-parity-wss\/(.*))$/); if (parsed != null) { let proto = 'wss'; if (parsed[4] == 'ws' || parsed[4] == 'x-parity-ws') { diff --git a/transports/websocket/CHANGELOG.md b/transports/websocket/CHANGELOG.md new file mode 100644 index 00000000..9af3da88 --- /dev/null +++ b/transports/websocket/CHANGELOG.md @@ -0,0 +1,4 @@ +# 0.20.0 [2020-06-22] + +- Updated `soketto` dependency which caused some smaller + API changes ([PR 1603](https://github.com/libp2p/rust-libp2p/pull/1603)). 
diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index 7dfe6c75..04905a07 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-websocket" edition = "2018" description = "WebSocket transport for libp2p" -version = "0.19.0" +version = "0.20.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,18 +11,17 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-tls = "0.7.0" -bytes = "0.5" either = "1.5.3" futures = "0.3.1" -libp2p-core = { version = "0.19.0", path = "../../core" } +libp2p-core = { version = "0.19.2", path = "../../core" } log = "0.4.8" quicksink = "0.1" rustls = "0.17.0" rw-stream-sink = "0.2.0" -soketto = { version = "0.3", features = ["deflate"] } +soketto = { version = "0.4.1", features = ["deflate"] } url = "2.1" webpki = "0.21" webpki-roots = "0.18" [dev-dependencies] -libp2p-tcp = { version = "0.19.0", path = "../tcp" } +libp2p-tcp = { path = "../tcp", features = ["async-std"] } diff --git a/transports/websocket/src/framed.rs b/transports/websocket/src/framed.rs index 182b81ab..4d2f9a7a 100644 --- a/transports/websocket/src/framed.rs +++ b/transports/websocket/src/framed.rs @@ -19,7 +19,6 @@ // DEALINGS IN THE SOFTWARE. use async_tls::{client, server}; -use bytes::BytesMut; use crate::{error::Error, tls}; use either::Either; use futures::{future::BoxFuture, prelude::*, ready, stream::BoxStream}; @@ -30,8 +29,8 @@ use libp2p_core::{ transport::{ListenerEvent, TransportError} }; use log::{debug, trace}; -use soketto::{connection, data, extension::deflate::Deflate, handshake}; -use std::{convert::TryInto, fmt, io, pin::Pin, task::Context, task::Poll}; +use soketto::{connection, extension::deflate::Deflate, handshake}; +use std::{convert::TryInto, fmt, io, mem, pin::Pin, task::Context, task::Poll}; use url::Url; /// Max. number of payload bytes of a single frame. 
@@ -406,36 +405,55 @@ fn location_to_multiaddr(location: &str) -> Result> { /// The websocket connection. pub struct Connection { - receiver: BoxStream<'static, Result>, + receiver: BoxStream<'static, Result>, sender: Pin + Send>>, _marker: std::marker::PhantomData } /// Data received over the websocket connection. #[derive(Debug, Clone)] -pub struct IncomingData(data::Incoming); +pub enum IncomingData { + /// Binary application data. + Binary(Vec), + /// UTF-8 encoded application data. + Text(Vec), + /// PONG control frame data. + Pong(Vec) +} impl IncomingData { + pub fn is_data(&self) -> bool { + self.is_binary() || self.is_text() + } + pub fn is_binary(&self) -> bool { - self.0.is_binary() + if let IncomingData::Binary(_) = self { true } else { false } } pub fn is_text(&self) -> bool { - self.0.is_text() - } - - pub fn is_data(&self) -> bool { - self.0.is_data() + if let IncomingData::Text(_) = self { true } else { false } } pub fn is_pong(&self) -> bool { - self.0.is_pong() + if let IncomingData::Pong(_) = self { true } else { false } + } + + pub fn into_bytes(self) -> Vec { + match self { + IncomingData::Binary(d) => d, + IncomingData::Text(d) => d, + IncomingData::Pong(d) => d + } } } impl AsRef<[u8]> for IncomingData { fn as_ref(&self) -> &[u8] { - self.0.as_ref() + match self { + IncomingData::Binary(d) => d, + IncomingData::Text(d) => d, + IncomingData::Pong(d) => d + } } } @@ -443,12 +461,12 @@ impl AsRef<[u8]> for IncomingData { #[derive(Debug, Clone)] pub enum OutgoingData { /// Send some bytes. - Binary(BytesMut), + Binary(Vec), /// Send a PING message. - Ping(BytesMut), + Ping(Vec), /// Send an unsolicited PONG message. /// (Incoming PINGs are answered automatically.) - Pong(BytesMut) + Pong(Vec) } impl fmt::Debug for Connection { @@ -469,13 +487,13 @@ where sender.send_binary_mut(x).await? 
} quicksink::Action::Send(OutgoingData::Ping(x)) => { - let data = x.as_ref().try_into().map_err(|_| { + let data = x[..].try_into().map_err(|_| { io::Error::new(io::ErrorKind::InvalidInput, "PING data must be < 126 bytes") })?; sender.send_ping(data).await? } quicksink::Action::Send(OutgoingData::Pong(x)) => { - let data = x.as_ref().try_into().map_err(|_| { + let data = x[..].try_into().map_err(|_| { io::Error::new(io::ErrorKind::InvalidInput, "PONG data must be < 126 bytes") })?; sender.send_pong(data).await? @@ -485,26 +503,41 @@ where } Ok(sender) }); + let stream = stream::unfold((Vec::new(), receiver), |(mut data, mut receiver)| async { + match receiver.receive(&mut data).await { + Ok(soketto::Incoming::Data(soketto::Data::Text(_))) => { + Some((Ok(IncomingData::Text(mem::take(&mut data))), (data, receiver))) + } + Ok(soketto::Incoming::Data(soketto::Data::Binary(_))) => { + Some((Ok(IncomingData::Binary(mem::take(&mut data))), (data, receiver))) + } + Ok(soketto::Incoming::Pong(pong)) => { + Some((Ok(IncomingData::Pong(Vec::from(pong))), (data, receiver))) + } + Err(connection::Error::Closed) => None, + Err(e) => Some((Err(e), (data, receiver))) + } + }); Connection { - receiver: connection::into_stream(receiver).boxed(), + receiver: stream.boxed(), sender: Box::pin(sink), _marker: std::marker::PhantomData } } /// Send binary application data to the remote. - pub fn send_data(&mut self, data: impl Into) -> sink::Send<'_, Self, OutgoingData> { - self.send(OutgoingData::Binary(data.into())) + pub fn send_data(&mut self, data: Vec) -> sink::Send<'_, Self, OutgoingData> { + self.send(OutgoingData::Binary(data)) } /// Send a PING to the remote. - pub fn send_ping(&mut self, data: impl Into) -> sink::Send<'_, Self, OutgoingData> { - self.send(OutgoingData::Ping(data.into())) + pub fn send_ping(&mut self, data: Vec) -> sink::Send<'_, Self, OutgoingData> { + self.send(OutgoingData::Ping(data)) } /// Send an unsolicited PONG to the remote. 
- pub fn send_pong(&mut self, data: impl Into) -> sink::Send<'_, Self, OutgoingData> { - self.send(OutgoingData::Pong(data.into())) + pub fn send_pong(&mut self, data: Vec) -> sink::Send<'_, Self, OutgoingData> { + self.send(OutgoingData::Pong(data)) } } @@ -517,7 +550,7 @@ where fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let item = ready!(self.receiver.poll_next_unpin(cx)); let item = item.map(|result| { - result.map(IncomingData).map_err(|e| io::Error::new(io::ErrorKind::Other, e)) + result.map_err(|e| io::Error::new(io::ErrorKind::Other, e)) }); Poll::Ready(item) } diff --git a/transports/websocket/src/lib.rs b/transports/websocket/src/lib.rs index b55dd258..5327496d 100644 --- a/transports/websocket/src/lib.rs +++ b/transports/websocket/src/lib.rs @@ -24,7 +24,6 @@ pub mod error; pub mod framed; pub mod tls; -use bytes::BytesMut; use error::Error; use framed::Connection; use futures::{future::BoxFuture, prelude::*, stream::BoxStream, ready}; @@ -142,13 +141,13 @@ impl Stream for BytesConnection where T: AsyncRead + AsyncWrite + Send + Unpin + 'static { - type Item = io::Result; + type Item = io::Result>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { loop { if let Some(item) = ready!(self.0.try_poll_next_unpin(cx)?) { if item.is_data() { - return Poll::Ready(Some(Ok(BytesMut::from(item.as_ref())))) + return Poll::Ready(Some(Ok(item.into_bytes()))) } } else { return Poll::Ready(None) @@ -157,7 +156,7 @@ where } } -impl Sink for BytesConnection +impl Sink> for BytesConnection where T: AsyncRead + AsyncWrite + Send + Unpin + 'static { @@ -167,7 +166,7 @@ where Pin::new(&mut self.0).poll_ready(cx) } - fn start_send(mut self: Pin<&mut Self>, item: BytesMut) -> io::Result<()> { + fn start_send(mut self: Pin<&mut Self>, item: Vec) -> io::Result<()> { Pin::new(&mut self.0).start_send(framed::OutgoingData::Binary(item)) }