diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 69f74d5c..af059cf1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,20 +13,20 @@ jobs: steps: - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.6.0 + uses: styfle/cancel-workflow-action@0.8.0 with: access_token: ${{ github.token }} - uses: actions/checkout@v2 - name: Cache CARGO_HOME - uses: actions/cache@v2 + uses: actions/cache@v2.1.4 with: path: ~/.cargo key: cargo-home-${{ hashFiles('Cargo.toml') }} - name: Cache cargo build - uses: actions/cache@v2 + uses: actions/cache@v2.1.4 with: path: target key: cargo-build-target-${{ hashFiles('Cargo.toml') }} @@ -47,7 +47,7 @@ jobs: steps: - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.6.0 + uses: styfle/cancel-workflow-action@0.8.0 with: access_token: ${{ github.token }} @@ -71,13 +71,13 @@ jobs: run: apt-get install -y cmake - name: Cache CARGO_HOME - uses: actions/cache@v2 + uses: actions/cache@v2.1.4 with: path: ~/.cargo key: cargo-home-${{ hashFiles('Cargo.toml') }} - name: Cache cargo build - uses: actions/cache@v2 + uses: actions/cache@v2.1.4 with: path: target key: wasm-cargo-build-target-${{ hashFiles('Cargo.toml') }} @@ -95,7 +95,7 @@ jobs: steps: - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.6.0 + uses: styfle/cancel-workflow-action@0.8.0 with: access_token: ${{ github.token }} @@ -109,7 +109,7 @@ jobs: steps: - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.6.0 + uses: styfle/cancel-workflow-action@0.8.0 with: access_token: ${{ github.token }} @@ -123,13 +123,13 @@ jobs: components: clippy - name: Cache CARGO_HOME - uses: actions/cache@v2 + uses: actions/cache@v2.1.4 with: path: ~/.cargo key: cargo-home-${{ hashFiles('Cargo.toml') }} - name: Cache cargo build - uses: actions/cache@v2 + uses: actions/cache@v2.1.4 with: path: target key: cargo-build-target-${{ hashFiles('Cargo.toml') }} @@ -138,14 +138,14 @@ jobs: uses: 
actions-rs/cargo@v1 with: command: clippy - args: -- -A clippy::mutable_key_type -A clippy::type_complexity + args: -- -A clippy::type_complexity -A clippy::pedantic -A clippy::style run-benchmarks: runs-on: ubuntu-latest steps: - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.6.0 + uses: styfle/cancel-workflow-action@0.8.0 with: access_token: ${{ github.token }} @@ -158,13 +158,13 @@ jobs: override: true - name: Cache CARGO_HOME - uses: actions/cache@v2 + uses: actions/cache@v2.1.4 with: path: ~/.cargo key: cargo-home-${{ hashFiles('Cargo.toml') }} - name: Cache cargo build - uses: actions/cache@v2 + uses: actions/cache@v2.1.4 with: path: target key: cargo-build-target-${{ hashFiles('Cargo.toml') }} @@ -183,20 +183,20 @@ jobs: steps: - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.6.0 + uses: styfle/cancel-workflow-action@0.8.0 with: access_token: ${{ github.token }} - uses: actions/checkout@v2 - name: Cache CARGO_HOME - uses: actions/cache@v2 + uses: actions/cache@v2.1.4 with: path: ~/.cargo key: cargo-home-${{ hashFiles('Cargo.toml') }} - name: Cache cargo build - uses: actions/cache@v2 + uses: actions/cache@v2.1.4 with: path: target key: cargo-build-target-${{ hashFiles('Cargo.toml') }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 616d072b..ab53d41f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,45 +1,84 @@ + +# Individual crates + +## Main APIs + - [`libp2p-core` CHANGELOG](core/CHANGELOG.md) -- [`libp2p-deflate` CHANGELOG](protocols/deflate/CHANGELOG.md) -- [`libp2p-dns` CHANGELOG](transports/dns/CHANGELOG.md) +- [`libp2p-swarm` CHANGELOG](swarm/CHANGELOG.md) +- [`libp2p-swarm-derive` CHANGELOG](swarm-derive/CHANGELOG.md) + +## Application Protocols + - [`libp2p-floodsub` CHANGELOG](protocols/floodsub/CHANGELOG.md) - [`libp2p-gossipsub` CHANGELOG](protocols/gossipsub/CHANGELOG.md) - [`libp2p-identify` CHANGELOG](protocols/identify/CHANGELOG.md) - [`libp2p-kad` CHANGELOG](protocols/kad/CHANGELOG.md) - 
[`libp2p-mdns` CHANGELOG](protocols/mdns/CHANGELOG.md) -- [`libp2p-mplex` CHANGELOG](muxers/mplex/CHANGELOG.md) -- [`libp2p-noise` CHANGELOG](protocols/noise/CHANGELOG.md) - [`libp2p-ping` CHANGELOG](protocols/ping/CHANGELOG.md) -- [`libp2p-plaintext` CHANGELOG](protocols/plaintext/CHANGELOG.md) -- [`libp2p-pnet` CHANGELOG](protocols/pnet/CHANGELOG.md) - [`libp2p-request-response` CHANGELOG](protocols/request-response/CHANGELOG.md) -- [`libp2p-secio` CHANGELOG](protocols/secio/CHANGELOG.md) -- [`libp2p-swarm` CHANGELOG](swarm/CHANGELOG.md) + +## Transport Protocols & Upgrades + +- [`libp2p-deflate` CHANGELOG](transports/deflate/CHANGELOG.md) +- [`libp2p-dns` CHANGELOG](transports/dns/CHANGELOG.md) +- [`libp2p-noise` CHANGELOG](transports/noise/CHANGELOG.md) +- [`libp2p-plaintext` CHANGELOG](transports/plaintext/CHANGELOG.md) +- [`libp2p-pnet` CHANGELOG](transports/pnet/CHANGELOG.md) - [`libp2p-tcp` CHANGELOG](transports/tcp/CHANGELOG.md) - [`libp2p-uds` CHANGELOG](transports/uds/CHANGELOG.md) - [`libp2p-wasm-ext` CHANGELOG](transports/wasm-ext/CHANGELOG.md) - [`libp2p-websocket` CHANGELOG](transports/websocket/CHANGELOG.md) + +## Multiplexers + +- [`libp2p-mplex` CHANGELOG](muxers/mplex/CHANGELOG.md) - [`libp2p-yamux` CHANGELOG](muxers/yamux/CHANGELOG.md) -- [`multistream-select` CHANGELOG](misc/multistream-select/CHANGELOG.md) + +## Utilities + - [`parity-multiaddr` CHANGELOG](misc/multiaddr/CHANGELOG.md) -- [`libp2p-core-derive` CHANGELOG](misc/core-derive/CHANGELOG.md) +- [`multistream-select` CHANGELOG](misc/multistream-select/CHANGELOG.md) -# Version 0.34.0 [unreleased] +# `libp2p` facade crate -- Update `libp2p-request-response`. +## Version 0.36.0 [unreleased] -# Version 0.33.0 [2020-12-17] +- Update libp2p crates. + +- Do not leak default features from libp2p crates. + [PR 1986](https://github.com/libp2p/rust-libp2p/pull/1986). + +## Version 0.35.1 [2021-02-17] + +- Update `libp2p-yamux` to latest patch version. 
+ +## Version 0.35.0 [2021-02-15] + +- Use `libp2p-swarm-derive`, the former `libp2p-core-derive`. + +- Update `libp2p-deflate`, `libp2p-gossipsub`, `libp2p-mdns`, `libp2p-request-response`, + `libp2p-swarm` and `libp2p-tcp`. + +## Version 0.34.0 [2021-01-12] - Update `libp2p-core` and all dependent crates. -# Version 0.32.2 [2020-12-10] +- The `tcp-async-std` feature is now `tcp-async-io`, still + enabled by default. + +## Version 0.33.0 [2020-12-17] + +- Update `libp2p-core` and all dependent crates. + +## Version 0.32.2 [2020-12-10] - Update `libp2p-websocket`. -# Version 0.32.1 [2020-12-09] +## Version 0.32.1 [2020-12-09] - Update minimum patch version of `libp2p-websocket`. -# Version 0.32.0 [2020-12-08] +## Version 0.32.0 [2020-12-08] - Update `libp2p-request-response`. @@ -47,57 +86,57 @@ - Update `libp2p-websocket` minimum patch version. -# Version 0.31.2 [2020-12-02] +## Version 0.31.2 [2020-12-02] - Bump minimum `libp2p-core` patch version. -# Version 0.31.1 [2020-11-26] +## Version 0.31.1 [2020-11-26] - Bump minimum `libp2p-tcp` patch version. -# Version 0.31.0 [2020-11-25] +## Version 0.31.0 [2020-11-25] - Update `multistream-select` and all dependent crates. -# Version 0.30.1 [2020-11-11] +## Version 0.30.1 [2020-11-11] - Update `libp2p-plaintext`. -# Version 0.30.0 [2020-11-09] +## Version 0.30.0 [2020-11-09] - Update `libp2p-mdns`, `libp2p-tcp` and `libp2p-uds` as well as `libp2p-core` and all its dependers. -# Version 0.29.1 [2020-10-20] +## Version 0.29.1 [2020-10-20] - Update `libp2p-core`. -# Version 0.29.0 [2020-10-16] +## Version 0.29.0 [2020-10-16] - Update `libp2p-core`, `libp2p-floodsub`, `libp2p-gossipsub`, `libp2p-mplex`, `libp2p-noise`, `libp2p-plaintext`, `libp2p-pnet`, `libp2p-request-response`, `libp2p-swarm`, `libp2p-tcp`, `libp2p-websocket` and `parity-multiaddr`. -# Version 0.28.1 [2020-09-10] +## Version 0.28.1 [2020-09-10] - Update to `libp2p-core` `0.22.1`. 
-# Version 0.28.0 [2020-09-09] +## Version 0.28.0 [2020-09-09] - Update `libp2p-yamux` to `0.25.0`. *Step 4 of 4 in a multi-release upgrade process.* See the `libp2p-yamux` CHANGELOG for details. -# Version 0.27.0 [2020-09-09] +## Version 0.27.0 [2020-09-09] - Update `libp2p-yamux` to `0.24.0`. *Step 3 of 4 in a multi-release upgrade process.* See the `libp2p-yamux` CHANGELOG for details. -# Version 0.26.0 [2020-09-09] +## Version 0.26.0 [2020-09-09] - Update `libp2p-yamux` to `0.23.0`. *Step 2 of 4 in a multi-release upgrade process.* See the `libp2p-yamux` CHANGELOG for details. -# Version 0.25.0 [2020-09-09] +## Version 0.25.0 [2020-09-09] - Remove the deprecated `libp2p-secio` dependency. To continue to use SECIO, add an explicit dependency on `libp2p-secio`. However, @@ -114,12 +153,12 @@ changelog for details about the `LegacyConfig`. [PR 1714]: https://github.com/libp2p/rust-libp2p/pull/1714 -# Version 0.24.0 [2020-08-18] +## Version 0.24.0 [2020-08-18] - Update `libp2p-core`, `libp2p-gossipsub`, `libp2p-kad`, `libp2p-mdns`, `libp2p-ping`, `libp2p-request-response`, `libp2p-swarm` and dependent crates. -# Version 0.23.0 (2020-08-03) +## Version 0.23.0 (2020-08-03) **NOTE**: For a smooth upgrade path from `0.21` to `> 0.22` on an existing deployment, this version must not be skipped @@ -130,7 +169,7 @@ changelog for details about the `LegacyConfig`. - Refactored bandwidth logging ([PR 1670](https://github.com/libp2p/rust-libp2p/pull/1670)). -# Version 0.22.0 (2020-07-17) +## Version 0.22.0 (2020-07-17) **NOTE**: For a smooth upgrade path from `0.21` to `> 0.22` on an existing deployment using `libp2p-noise`, this version @@ -138,11 +177,11 @@ must not be skipped! - Bump `libp2p-noise` dependency to `0.21`. -# Version 0.21.1 (2020-07-02) +## Version 0.21.1 (2020-07-02) - Bump `libp2p-websockets` lower bound. 
-# Version 0.21.0 (2020-07-01) +## Version 0.21.0 (2020-07-01) - Conditional compilation fixes for the `wasm32-wasi` target ([PR 1633](https://github.com/libp2p/rust-libp2p/pull/1633)). @@ -152,7 +191,7 @@ must not be skipped! - Updated libp2p dependencies. -# Version 0.19.1 (2020-05-25) +## Version 0.19.1 (2020-05-25) - Temporarily pin all `async-std` dependencies to `< 1.6`. [PR 1589](https://github.com/libp2p/rust-libp2p/pull/1589) @@ -160,7 +199,7 @@ must not be skipped! - `libp2p-core-derive`: Fully qualified std::result::Result in macro [PR 1587](https://github.com/libp2p/rust-libp2p/pull/1587) -# Version 0.19.0 (2020-05-18) +## Version 0.19.0 (2020-05-18) - `libp2p-core`, `libp2p-swarm`: Added support for multiple dialing attempts per peer, with a configurable limit. @@ -218,12 +257,12 @@ must not be skipped! be supported. IPv4 listener addresses are not affected by this change. [PR 1555](https://github.com/libp2p/rust-libp2p/pull/1555) -# Version 0.18.1 (2020-04-17) +## Version 0.18.1 (2020-04-17) - `libp2p-swarm`: Make sure inject_dial_failure is called in all situations. [PR 1549](https://github.com/libp2p/rust-libp2p/pull/1549) -# Version 0.18.0 (2020-04-09) +## Version 0.18.0 (2020-04-09) - `libp2p-core`: Treat connection limit errors as pending connection errors. [PR 1546](https://github.com/libp2p/rust-libp2p/pull/1546) @@ -240,7 +279,7 @@ must not be skipped! - `libp2p-wasm-ext`: Fix "parsed is null" errors being thrown. [PR 1535](https://github.com/libp2p/rust-libp2p/pull/1535) -# Version 0.17.0 (2020-04-02) +## Version 0.17.0 (2020-04-02) - `libp2p-core`: Finished "identity hashing" for peer IDs migration. [PR 1460](https://github.com/libp2p/rust-libp2p/pull/1460) @@ -287,18 +326,18 @@ must not be skipped! - `multihash`: Removed the crate in favour of the upstream crate. 
[PR 1472](https://github.com/libp2p/rust-libp2p/pull/1472) -# Version 0.16.2 (2020-02-28) +## Version 0.16.2 (2020-02-28) - Fixed yamux connections not properly closing and being stuck in the `CLOSE_WAIT` state. - Added a `websocket_transport()` function in `libp2p-wasm-ext`, behind a Cargo feature. - Fixed ambiguity in `IntoProtocolsHandler::select` vs `ProtocolsHandler::select` in the `NetworkBehaviour` custom derive. -# Version 0.16.1 (2020-02-18) +## Version 0.16.1 (2020-02-18) - Fixed wrong representation of `PeerId`s being used in `Kademlia::get_closest_peers`. - Implemented `FusedStream` for `Swarm`. -# Version 0.16.0 (2020-02-13) +## Version 0.16.0 (2020-02-13) - Removed the `Substream` associated type from the `ProtocolsHandler` trait. The type of the substream is now always `libp2p::swarm::NegotiatedSubstream`. - As a consequence of the previous change, most of the implementations of the `NetworkBehaviour` trait provided by libp2p (`Ping`, `Identify`, `Kademlia`, `Floodsub`, `Gossipsub`) have lost a generic parameter. @@ -314,7 +353,7 @@ must not be skipped! - All crates prefixed with `libp2p-` now use the same version number. - Added a new variant `ListenerEvent::Error` for listeners to report non-fatal errors. `libp2p-tcp` uses this variant to report errors that happen on remote sockets before they have been accepted and errors when trying to determine the local machine's IP address. -# Version 0.15.0 (2020-01-24) +## Version 0.15.0 (2020-01-24) - Added `libp2p-gossipsub`. - Added `SwarmBuilder::executor` to allow configuring which tasks executor to use. @@ -327,7 +366,7 @@ must not be skipped! - Fixed `libp2p-kad` keeping connections alive when it shouldn't. - Fixed `InboundUpgrade` not always properly implemented on `NoiseConfig`. -# Version 0.14.0-alpha.1 (2020-01-07) +## Version 0.14.0-alpha.1 (2020-01-07) - Upgraded the crate to stable futures. 
- Use varints instead of fixed sized (4 byte) integers to delimit plaintext 2.0 messages to align implementation with the specification. @@ -339,16 +378,16 @@ must not be skipped! - Revamped the API of `libp2p_websockets::framed`. - Added protocol string to `Error::UnknownProtocolString`. -# Version 0.13.2 (2020-01-02) +## Version 0.13.2 (2020-01-02) - Fixed the `libp2p-noise` handshake not flushing the underlying stream before waiting for a response. - Fixed semver issue with the `protobuf` crate. -# Version 0.13.1 (2019-11-13) +## Version 0.13.1 (2019-11-13) - Maintenance release to bump dependencies and deal with an accidental breaking change in multihash 0.1.4. -# Version 0.13.0 (2019-11-05) +## Version 0.13.0 (2019-11-05) - Reworked the transport upgrade API. See https://github.com/libp2p/rust-libp2p/pull/1240 for more information. - Added a parameter allowing to choose the protocol negotiation protocol when upgrading a connection or a substream. See https://github.com/libp2p/rust-libp2p/pull/1245 for more information. @@ -361,7 +400,7 @@ must not be skipped! - Added some `Debug` trait implementations. - Fixed potential arithmetic overflows in `libp2p-kad` and `multistream-select`. -# Version 0.12.0 (2019-08-15) +## Version 0.12.0 (2019-08-15) - In some situations, `multistream-select` will now assume that protocol negotiation immediately succeeds. If it turns out that it failed, an error is generated when reading or writing from/to the stream. - Replaced `listen_addr` with `local_addr` in events related to incoming connections. The address no longer has to match a previously-reported address. @@ -372,7 +411,7 @@ must not be skipped! - Added `Toggle::is_enabled()`. - Removed `IdentifyTransport`. -# Version 0.11.0 (2019-07-18) +## Version 0.11.0 (2019-07-18) - `libp2p-kad`: Completed the core functionality of the record storage API, thereby extending the `RecordStore` for provider records. 
All records expire by default and are subject to regular republication and caching as per the Kademlia spec(s). Expiration and publication intervals are configurable through the `KademliaConfig`. - `libp2p-kad`: The routing table now never stores peers without a known (listen) address. In particular, on receiving a new inbound connection, the Kademlia behaviour now emits `KademliaEvent::UnroutablePeer` to indicate that in order for the peer to be added to the routing table and hence considered a reachable node in the DHT, a listen address of the peer must be discovered and reported via `Kademlia::add_address`. This is usually achieved through the use of the `Identify` protocol on the same connection(s). @@ -383,7 +422,7 @@ must not be skipped! - Replaced unbounded channels with bounded ones at the boundary between the `Network` (formerly `RawSwarm`) and `NodeHandler`. The node handlers will now wait if the main task is busy, instead of continuing to push events to the channel. - Fixed the `address_translation` function ignoring `/dns` addresses. -# Version 0.10.0 (2019-06-25) +## Version 0.10.0 (2019-06-25) - `PollParameters` is now a trait instead of a struct. - The `Swarm` can now be customized with connection information. @@ -392,12 +431,12 @@ must not be skipped! - Improved the heuristics for determining external multiaddresses based on reports. - Various fixes to Kademlia iterative queries and the WebSockets transport. -# Version 0.9.1 (2019-06-05) +## Version 0.9.1 (2019-06-05) - `EitherOutput` now implements `Stream` and `Sink` if their variants also implement these traits. - `libp2p::websocket::error::Error` now implements `Sync`. -# Version 0.9.0 (2019-06-04) +## Version 0.9.0 (2019-06-04) - Major fixes and performance improvements to libp2p-kad. - Initial prototype for record storage in libp2p-kad. @@ -409,11 +448,11 @@ must not be skipped! - Added some utility functions in `core::identity::secp256k1`. 
- It is now possible to inject an artificial connection in the `RawSwarm`. -# Version 0.8.1 (2019-05-15) +## Version 0.8.1 (2019-05-15) - Fixed a vulnerability in ED25519 signatures verification in libp2p-core. -# Version 0.8.0 (2019-05-15) +## Version 0.8.0 (2019-05-15) - Crate now successfully runs from within the browser when compiled to WASM. - Modified the constructors of `NoiseConfig` to accept any type of public key. The Noise handshake has consequently been modified. @@ -429,11 +468,11 @@ must not be skipped! - Added `multiaddr::from_url`. - Added `OptionalTransport`. -# Version 0.7.1 (2019-05-15) +## Version 0.7.1 (2019-05-15) - Fixed a vulnerability in ED25519 signatures verification in libp2p-core. -# Version 0.7.0 (2019-04-23) +## Version 0.7.0 (2019-04-23) - Fixed the inactive connections shutdown mechanism not working. - `Transport::listen_on` must now return a `Stream` that produces `ListenEvent`s. This makes it possible to notify about listened addresses at a later point in time. @@ -449,7 +488,7 @@ must not be skipped! - Reworked the `PingEvent`. - Renamed `KeepAlive::Forever` to `Yes` and `KeepAlive::Now` to `No`. -# Version 0.6.0 (2019-03-29) +## Version 0.6.0 (2019-03-29) - Replaced `NetworkBehaviour::inject_dial_failure` with `inject_dial_failure` and `inject_addr_reach_failure`. The former is called when we have finished trying to dial a node @@ -462,7 +501,7 @@ must not be skipped! - Added `Swarm::external_addresses`. - Added a `core::swarm::toggle::Toggle` that allows having a disabled `NetworkBehaviour`. -# Version 0.5.0 (2019-03-13) +## Version 0.5.0 (2019-03-13) - Moved the `SecioKeypair` struct in `core/identity` and renamed it to `Keypair`. - mplex now supports half-closed substreams. @@ -479,15 +518,15 @@ must not be skipped! - Reworked some API of `core/nodes/node.rs` and `core/nodes/handled_node.rs`. - The core now works even outside of a tokio context. 
-# Version 0.4.2 (2019-02-27) +## Version 0.4.2 (2019-02-27) - Fixed periodic pinging not working. -# Version 0.4.1 (2019-02-20) +## Version 0.4.1 (2019-02-20) - Fixed wrong version of libp2p-noise. -# Version 0.4.0 (2019-02-20) +## Version 0.4.0 (2019-02-20) - The `multiaddr!` macro has been moved to the `multiaddr` crate and is now reexported under the name `build_multiaddr!`. - Modified the functions in `upgrade::transfer` to be more convenient to use. @@ -503,12 +542,12 @@ must not be skipped! - Added `IdentifyEvent::SendBack`, when we send back our information. - Rewrote the `MemoryTransport` to be similar to the `TcpConfig`. -# Version 0.3.1 (2019-02-02) +## Version 0.3.1 (2019-02-02) - Added `NetworkBehaviour::inject_replaced` that is called whenever we replace a connection with a different connection to the same peer. - Fixed various issues with Kademlia. -# Version 0.3.0 (2019-01-30) +## Version 0.3.0 (2019-01-30) - Removed the `topology` module and everything it contained, including the `Topology` trait. - Added `libp2p-noise` that supports Noise handshakes, as an alternative to `libp2p-secio`. @@ -527,15 +566,15 @@ must not be skipped! - Added `SecioKeypair::ed25519_raw_key()`. - Fix improper connection shutdown in `ProtocolsHandler`. -# Version 0.2.2 (2019-01-14) +## Version 0.2.2 (2019-01-14) - Fixed improper dependencies versions causing deriving `NetworkBehaviour` to generate an error. -# Version 0.2.1 (2019-01-14) +## Version 0.2.1 (2019-01-14) - Added the `IntoNodeHandler` and `IntoProtocolsHandler` traits, allowing node handlers and protocol handlers to know the `PeerId` of the node they are interacting with. -# Version 0.2 (2019-01-10) +## Version 0.2 (2019-01-10) - The `Transport` trait now has an `Error` associated type instead of always using `std::io::Error`. - Merged `PeriodicPing` and `PingListen` into one `Ping` behaviour. 
diff --git a/Cargo.toml b/Cargo.toml index 419af4dd..4d22a9e9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p" edition = "2018" description = "Peer-to-peer networking library" -version = "0.34.1" +version = "0.36.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -28,7 +28,7 @@ default = [ "pnet", "request-response", "secp256k1", - "tcp-async-std", + "tcp-async-io", "uds", "wasm-ext", "websocket", @@ -47,7 +47,7 @@ ping = ["libp2p-ping"] plaintext = ["libp2p-plaintext"] pnet = ["libp2p-pnet"] request-response = ["libp2p-request-response"] -tcp-async-std = ["libp2p-tcp", "libp2p-tcp/async-std"] +tcp-async-io = ["libp2p-tcp", "libp2p-tcp/async-io"] tcp-tokio = ["libp2p-tcp", "libp2p-tcp/tokio"] uds = ["libp2p-uds"] wasm-ext = ["libp2p-wasm-ext"] @@ -61,67 +61,66 @@ all-features = true [dependencies] atomic = "0.5.0" -bytes = "0.5" +bytes = "1" futures = "0.3.1" lazy_static = "1.2" -libp2p-core = { version = "0.26.1", path = "core", package = "fluence-fork-libp2p-core" } -libp2p-core-derive = { version = "0.21.1", path = "misc/core-derive", package = "fluence-fork-libp2p-core-derive" } -libp2p-floodsub = { version = "0.26.1", path = "protocols/floodsub", optional = true, package = "fluence-fork-libp2p-floodsub" } -libp2p-gossipsub = { version = "0.26.1", path = "./protocols/gossipsub", optional = true, package = "fluence-fork-libp2p-gossipsub" } -libp2p-identify = { version = "0.26.1", path = "protocols/identify", optional = true, package = "fluence-fork-libp2p-identify" } -libp2p-kad = { version = "0.27.0", path = "protocols/kad", optional = true, package = "fluence-fork-libp2p-kad" } -libp2p-mplex = { version = "0.26.1", path = "muxers/mplex", optional = true, package = "fluence-fork-libp2p-mplex" } -libp2p-noise = { version = "0.28.1", path = "protocols/noise", optional = true, package = "fluence-fork-libp2p-noise" } -libp2p-ping = { version = "0.26.1", path = 
"protocols/ping", optional = true, package = "fluence-fork-libp2p-ping" } -libp2p-plaintext = { version = "0.26.1", path = "protocols/plaintext", optional = true, package = "fluence-fork-libp2p-plaintext" } -libp2p-pnet = { version = "0.20.1", path = "protocols/pnet", optional = true, package = "fluence-fork-libp2p-pnet" } -libp2p-request-response = { version = "0.9.1", path = "protocols/request-response", optional = true, package = "fluence-fork-libp2p-request-response" } -libp2p-swarm = { version = "0.26.1", path = "swarm", package = "fluence-fork-libp2p-swarm" } -libp2p-uds = { version = "0.26.1", path = "transports/uds", optional = true, package = "fluence-fork-libp2p-uds" } -libp2p-wasm-ext = { version = "0.26.1", path = "transports/wasm-ext", optional = true, package = "fluence-fork-libp2p-wasm-ext" } -libp2p-yamux = { version = "0.29.1", path = "muxers/yamux", optional = true, package = "fluence-fork-libp2p-yamux" } -multiaddr = { package = "fluence-fork-parity-multiaddr", version = "0.10.1", path = "misc/multiaddr" } +libp2p-core = { version = "0.27.1", path = "core", default-features = false, package = "fluence-fork-libp2p-core" } +libp2p-floodsub = { version = "0.28.0", path = "protocols/floodsub", optional = true, package = "fluence-fork-libp2p-floodsub" } +libp2p-gossipsub = { version = "0.29.0", path = "./protocols/gossipsub", optional = true, package = "fluence-fork-libp2p-gossipsub" } +libp2p-identify = { version = "0.28.0", path = "protocols/identify", optional = true, package = "fluence-fork-libp2p-identify" } +libp2p-kad = { version = "0.29.0", path = "protocols/kad", optional = true, package = "fluence-fork-libp2p-kad" } +libp2p-mplex = { version = "0.27.1", path = "muxers/mplex", optional = true, package = "fluence-fork-libp2p-mplex" } +libp2p-noise = { version = "0.29.0", path = "transports/noise", optional = true, package = "fluence-fork-libp2p-noise" } +libp2p-ping = { version = "0.28.0", path = "protocols/ping", optional = true, package = 
"fluence-fork-libp2p-ping" } +libp2p-plaintext = { version = "0.27.1", path = "transports/plaintext", optional = true, package = "fluence-fork-libp2p-plaintext" } +libp2p-pnet = { version = "0.20.0", path = "transports/pnet", optional = true, package = "fluence-fork-libp2p-pnet" } +libp2p-request-response = { version = "0.10.0", path = "protocols/request-response", optional = true, package = "fluence-fork-libp2p-request-response" } +libp2p-swarm = { version = "0.28.0", path = "swarm", package = "fluence-fork-libp2p-swarm" } +libp2p-swarm-derive = { version = "0.22.0", path = "swarm-derive", package = "fluence-fork-libp2p-swarm-derive" } +libp2p-uds = { version = "0.27.0", path = "transports/uds", optional = true, package = "fluence-fork-libp2p-uds" } +libp2p-wasm-ext = { version = "0.27.0", path = "transports/wasm-ext", default-features = false, optional = true, package = "fluence-fork-libp2p-wasm-ext" } +libp2p-yamux = { version = "0.30.1", path = "muxers/yamux", optional = true, package = "fluence-fork-libp2p-yamux" } +multiaddr = { package = "fluence-fork-parity-multiaddr", version = "0.11.1", path = "misc/multiaddr" } parking_lot = "0.11.0" pin-project = "1.0.0" smallvec = "1.0" wasm-timer = "0.2.4" [target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies] -libp2p-deflate = { package = "fluence-fork-libp2p-deflate", version = "0.26.1", path = "protocols/deflate", optional = true } -libp2p-dns = { package = "fluence-fork-libp2p-dns", version = "0.26.1", path = "transports/dns", optional = true } -libp2p-mdns = { package = "fluence-fork-libp2p-mdns", version = "0.27.0", path = "protocols/mdns", optional = true } -libp2p-tcp = { package = "fluence-fork-libp2p-tcp", version = "0.26.1", path = "transports/tcp", optional = true } -libp2p-websocket = { package = "fluence-fork-libp2p-websocket", version = "0.27.0", path = "transports/websocket", optional = true } +libp2p-deflate = { version = "0.27.1", path = 
"transports/deflate", optional = true, package = "fluence-fork-libp2p-deflate" } +libp2p-dns = { version = "0.27.0", path = "transports/dns", optional = true, package = "fluence-fork-libp2p-dns" } +libp2p-mdns = { version = "0.29.0", path = "protocols/mdns", optional = true, package = "fluence-fork-libp2p-mdns" } +libp2p-tcp = { version = "0.27.1", path = "transports/tcp", default-features = false, optional = true, package = "fluence-fork-libp2p-tcp" } +libp2p-websocket = { version = "0.28.0", path = "transports/websocket", optional = true, package = "fluence-fork-libp2p-websocket" } [dev-dependencies] -async-std = "1.6.2" +async-std = { version = "1.6.2", features = ["attributes"] } env_logger = "0.8.1" -tokio = { version = "0.3", features = ["io-util", "io-std", "stream", "macros", "rt", "rt-multi-thread"] } -trust-graph = "0.2.0" +tokio = { version = "1.0.1", features = ["io-util", "io-std", "macros", "rt", "rt-multi-thread"] } +trust-graph = "0.2.5" [workspace] members = [ "core", - "misc/core-derive", "misc/multiaddr", "misc/multistream-select", "misc/peer-id-generator", "muxers/mplex", "muxers/yamux", - "protocols/deflate", "protocols/floodsub", "protocols/gossipsub", "protocols/identify", "protocols/kad", "protocols/mdns", - "protocols/noise", "protocols/ping", - "protocols/plaintext", - "protocols/pnet", "protocols/request-response", - "protocols/secio", "swarm", + "swarm-derive", + "transports/deflate", "transports/dns", + "transports/noise", + "transports/plaintext", + "transports/pnet", "transports/tcp", "transports/uds", "transports/websocket", diff --git a/README.md b/README.md index 3e84e060..fba4cb15 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,6 @@ - - [![dependency status](https://deps.rs/repo/github/libp2p/rust-libp2p/status.svg?style=flat-square)](https://deps.rs/repo/github/libp2p/rust-libp2p) This repository is the central place for Rust development of the [libp2p](https://libp2p.io) spec. @@ -23,6 +21,34 @@ Where to ask questions? 
- In the #libp2p IRC channel on freenode. - By opening an issue in this repository. + +## Repository Structure + +The main components of this repository are structured as follows: + + * `core/`: The implementation of `libp2p-core` with its `Network`, + `Transport` and `StreamMuxer` API on which almost all other crates depend. + + * `transports/`: Implementations of transport protocols (e.g. TCP) and protocol upgrades + (e.g. for authenticated encryption, compression, ...) based on the `libp2p-core` `Transport` + API . + + * `muxers/`: Implementations of the `StreamMuxer` interface of `libp2p-core`, + e.g. (sub)stream multiplexing protocols on top of (typically TCP) connections. + Multiplexing protocols are (mandatory) `Transport` upgrades. + + * `swarm/`: The implementation of `libp2p-swarm` building on `libp2p-core` + with the central interfaces `NetworkBehaviour` and `ProtocolsHandler` used + to implement application protocols (see `protocols/`). + + * `protocols/`: Implementations of application protocols based on the + `libp2p-swarm` APIs. + + * `misc/`: Utility libraries. + + * `examples/`: Worked examples of built-in application protocols (see `protocols/`) + with common `Transport` configurations. + ## Notable users (open a pull request if you want your project to be added here) @@ -31,6 +57,7 @@ Where to ask questions? - https://github.com/paritytech/substrate - https://github.com/sigp/lighthouse - https://github.com/golemfactory/golem-libp2p -- https://github.com/comit-network/comit-rs +- https://github.com/comit-network - https://github.com/rs-ipfs/rust-ipfs - https://github.com/marcopoloprotocol/marcopolo +- https://github.com/ChainSafe/forest diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 4c9db1f8..8e2bd1e5 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,3 +1,15 @@ +# 0.27.1 [2021-02-15] + +- Update dependencies. 
+ +# 0.27.0 [2021-01-12] + +- (Re)add `Transport::address_translation` to permit transport-specific + translations of observed addresses onto listening addresses. + [PR 1887](https://github.com/libp2p/rust-libp2p/pull/1887) + +- Update dependencies. + # 0.26.0 [2020-12-17] - Make `PeerId` be `Copy`, including small `PeerId` API changes. diff --git a/core/Cargo.toml b/core/Cargo.toml index b07cd041..e2b1ebcd 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-core" edition = "2018" description = "Core traits and structs of libp2p" -version = "0.26.1" +version = "0.27.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -23,18 +23,18 @@ futures-timer = "3" lazy_static = "1.2" libsecp256k1 = { version = "0.3.1", optional = true } log = "0.4" -multiaddr = { package = "fluence-fork-parity-multiaddr", version = "0.10.1", path = "../misc/multiaddr" } +multiaddr = { package = "fluence-fork-parity-multiaddr", version = "0.11", path = "../misc/multiaddr" } multihash = { version = "0.13", default-features = false, features = ["std", "multihash-impl", "identity", "sha2"] } -multistream-select = { version = "0.9.2", path = "../misc/multistream-select", package = "fluence-fork-multistream-select" } +multistream-select = { version = "0.10", path = "../misc/multistream-select", package = "fluence-fork-multistream-select" } parking_lot = "0.11.0" pin-project = "1.0.0" -prost = "0.6.1" +prost = "0.7" rand = "0.7" rw-stream-sink = "0.2.0" sha2 = "0.9.1" smallvec = "1.0" thiserror = "1.0" -unsigned-varint = "0.5" +unsigned-varint = "0.7" void = "1" zeroize = "1" serde = { version = "1.0.114", default-features = false } @@ -43,17 +43,17 @@ serde = { version = "1.0.114", default-features = false } ring = { version = "0.16.9", features = ["alloc", "std"], default-features = false } [dev-dependencies] -async-std = "1.6.2" +async-std = { version = "1.6.2", features = ["attributes"] } 
criterion = "0.3" libp2p-mplex = { path = "../muxers/mplex", package = "fluence-fork-libp2p-mplex" } -libp2p-noise = { path = "../protocols/noise", package = "fluence-fork-libp2p-noise" } -libp2p-tcp = { path = "../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" } +libp2p-noise = { path = "../transports/noise", package = "fluence-fork-libp2p-noise" } +libp2p-tcp = { path = "../transports/tcp", package = "fluence-fork-libp2p-tcp" } multihash = { version = "0.13", default-features = false, features = ["arb"] } quickcheck = "0.9.0" wasm-timer = "0.2" [build-dependencies] -prost-build = "0.6" +prost-build = "0.7" [features] default = ["secp256k1"] diff --git a/core/src/connection/listeners.rs b/core/src/connection/listeners.rs index 962c14a7..02982d87 100644 --- a/core/src/connection/listeners.rs +++ b/core/src/connection/listeners.rs @@ -428,6 +428,8 @@ mod tests { fn dial(self, _: Multiaddr) -> Result> { panic!() } + + fn address_translation(&self, _: &Multiaddr, _: &Multiaddr) -> Option { None } } async_std::task::block_on(async move { @@ -466,6 +468,8 @@ mod tests { fn dial(self, _: Multiaddr) -> Result> { panic!() } + + fn address_translation(&self, _: &Multiaddr, _: &Multiaddr) -> Option { None } } async_std::task::block_on(async move { diff --git a/core/src/connection/pool.rs b/core/src/connection/pool.rs index ccb89d6f..2bc7ca08 100644 --- a/core/src/connection/pool.rs +++ b/core/src/connection/pool.rs @@ -182,13 +182,13 @@ where }, PoolEvent::ConnectionEvent { ref connection, ref event } => { f.debug_struct("PoolEvent::ConnectionEvent") - .field("peer", connection.peer_id()) + .field("peer", &connection.peer_id()) .field("event", event) .finish() }, PoolEvent::AddressChange { ref connection, ref new_endpoint, ref old_endpoint } => { f.debug_struct("PoolEvent::AddressChange") - .field("peer", connection.peer_id()) + .field("peer", &connection.peer_id()) .field("new_endpoint", new_endpoint) .field("old_endpoint", old_endpoint) 
.finish() @@ -325,8 +325,8 @@ impl // "established" connection. let future = future.and_then({ let endpoint = endpoint.clone(); - let expected_peer = peer.clone(); - let local_id = self.local_id.clone(); + let expected_peer = peer; + let local_id = self.local_id; move |(peer_id, muxer)| { if let Some(peer) = expected_peer { if peer != peer_id { @@ -376,7 +376,7 @@ impl self.counters.check_max_established_per_peer(self.num_peer_established(&i.peer_id))?; let id = self.manager.add(c, i.clone()); self.counters.inc_established(&i.endpoint); - self.established.entry(i.peer_id.clone()).or_default().insert(id, i.endpoint); + self.established.entry(i.peer_id).or_default().insert(id, i.endpoint); Ok(id) } @@ -667,7 +667,7 @@ impl } // Add the connection to the pool. - let peer = entry.connected().peer_id.clone(); + let peer = entry.connected().peer_id; let conns = self.established.entry(peer).or_default(); let num_established = NonZeroU32::new(u32::try_from(conns.len() + 1).unwrap()) .expect("n + 1 is always non-zero; qed"); @@ -786,8 +786,8 @@ impl EstablishedConnection<'_, TInEvent> { } /// Returns the identity of the connected peer. - pub fn peer_id(&self) -> &PeerId { - &self.entry.connected().peer_id + pub fn peer_id(&self) -> PeerId { + self.entry.connected().peer_id } /// Returns the local connection ID. @@ -842,6 +842,7 @@ where I: Iterator { /// Obtains the next connection, if any. 
+ #[allow(clippy::should_implement_trait)] pub fn next(&mut self) -> Option> { while let Some(id) = self.ids.next() { diff --git a/core/src/either.rs b/core/src/either.rs index 48257fc6..4d991936 100644 --- a/core/src/either.rs +++ b/core/src/either.rs @@ -477,4 +477,11 @@ where }, } } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + match self { + EitherTransport::Left(a) => a.address_translation(server, observed), + EitherTransport::Right(b) => b.address_translation(server, observed), + } + } } diff --git a/core/src/network.rs b/core/src/network.rs index 5819ba26..2667ca41 100644 --- a/core/src/network.rs +++ b/core/src/network.rs @@ -30,7 +30,6 @@ use crate::{ Executor, Multiaddr, PeerId, - address_translation, connection::{ ConnectionId, ConnectionLimit, @@ -145,11 +144,10 @@ where local_peer_id: PeerId, config: NetworkConfig, ) -> Self { - let pool_local_id = local_peer_id.clone(); Network { local_peer_id, listeners: ListenersStream::new(transport), - pool: Pool::new(pool_local_id, config.manager_config, config.limits), + pool: Pool::new(local_peer_id, config.manager_config, config.limits), dialing: Default::default(), } } @@ -176,30 +174,27 @@ where self.listeners.listen_addrs() } - /// Call this function in order to know which address remotes should dial to - /// access your local node. + /// Maps the given `observed_addr`, representing an address of the local + /// node observed by a remote peer, onto the locally known listen addresses + /// to yield one or more addresses of the local node that may be publicly + /// reachable. /// - /// When receiving an observed address on a tcp connection that we initiated, the observed - /// address contains our tcp dial port, not our tcp listen port. We know which port we are - /// listening on, thereby we can replace the port within the observed address. 
- /// - /// When receiving an observed address on a tcp connection that we did **not** initiated, the - /// observed address should contain our listening port. In case it differs from our listening - /// port there might be a proxy along the path. - /// - /// # Arguments - /// - /// * `observed_addr` - should be an address a remote observes you as, which can be obtained for - /// example with the identify protocol. + /// I.e. this method incorporates the view of other peers into the listen + /// addresses seen by the local node to account for possible IP and port + /// mappings performed by intermediate network devices in an effort to + /// obtain addresses for the local peer that are also reachable for peers + /// other than the peer who reported the `observed_addr`. /// + /// The translation is transport-specific. See [`Transport::address_translation`]. pub fn address_translation<'a>(&'a self, observed_addr: &'a Multiaddr) -> impl Iterator + 'a where TMuxer: 'a, THandler: 'a, { + let transport = self.listeners.transport(); let mut addrs: Vec<_> = self.listen_addrs() - .filter_map(move |server| address_translation(server, observed_addr)) + .filter_map(move |server| transport.address_translation(server, observed_addr)) .collect(); // remove duplicates @@ -384,7 +379,7 @@ where let event = match self.pool.poll(cx) { Poll::Pending => return Poll::Pending, Poll::Ready(PoolEvent::ConnectionEstablished { connection, num_established }) => { - if let hash_map::Entry::Occupied(mut e) = self.dialing.entry(connection.peer_id().clone()) { + if let hash_map::Entry::Occupied(mut e) = self.dialing.entry(connection.peer_id()) { e.get_mut().retain(|s| s.current.0 != connection.id()); if e.get().is_empty() { e.remove(); @@ -530,7 +525,7 @@ where if let Some(pos) = attempts.iter().position(|s| s.current.0 == id) { let attempt = attempts.remove(pos); let last = attempts.is_empty(); - Some((peer.clone(), attempt, last)) + Some((*peer, attempt, last)) } else { None } @@ -549,7 +544,7 
@@ where if let Some(handler) = handler { let next_attempt = attempt.remaining.remove(0); let opts = DialingOpts { - peer: peer_id.clone(), + peer: peer_id, handler, address: next_attempt, remaining: attempt.remaining diff --git a/core/src/network/peer.rs b/core/src/network/peer.rs index 4e179e54..3e669302 100644 --- a/core/src/network/peer.rs +++ b/core/src/network/peer.rs @@ -223,7 +223,7 @@ where }; let id = network.dial_peer(DialingOpts { - peer: peer_id.clone(), + peer: peer_id, handler, address, remaining: remaining.into_iter().collect(), @@ -435,7 +435,7 @@ where pub fn attempt(&mut self, id: ConnectionId) -> Option> { - if let hash_map::Entry::Occupied(attempts) = self.network.dialing.entry(self.peer_id.clone()) { + if let hash_map::Entry::Occupied(attempts) = self.network.dialing.entry(self.peer_id) { if let Some(pos) = attempts.get().iter().position(|s| s.current.0 == id) { if let Some(inner) = self.network.pool.get_outgoing(id) { return Some(DialingAttempt { pos, inner, attempts }) @@ -662,7 +662,8 @@ impl<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr> } /// Obtains the next dialing connection, if any. - pub fn next<'b>(&'b mut self) -> Option> { + #[allow(clippy::should_implement_trait)] + pub fn next(&mut self) -> Option> { // If the number of elements reduced, the current `DialingAttempt` has been // aborted and iteration needs to continue from the previous position to // account for the removed element. 
@@ -676,7 +677,7 @@ impl<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr> return None } - if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(self.peer_id.clone()) { + if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(*self.peer_id) { let id = attempts.get()[self.pos].current.0; if let Some(inner) = self.pool.get_outgoing(id) { let conn = DialingAttempt { pos: self.pos, inner, attempts }; @@ -697,7 +698,7 @@ impl<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr> return None } - if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(self.peer_id.clone()) { + if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(*self.peer_id) { let id = attempts.get()[self.pos].current.0; if let Some(inner) = self.pool.get_outgoing(id) { return Some(DialingAttempt { pos: self.pos, inner, attempts }) diff --git a/core/src/transport.rs b/core/src/transport.rs index 50499ec1..f6e70c44 100644 --- a/core/src/transport.rs +++ b/core/src/transport.rs @@ -128,6 +128,11 @@ pub trait Transport { where Self: Sized; + /// Performs a transport-specific mapping of an address `observed` by + /// a remote onto a local `listen` address to yield an address for + /// the local node that may be reachable for other peers. + fn address_translation(&self, listen: &Multiaddr, observed: &Multiaddr) -> Option; + /// Boxes the transport, including custom transport errors. fn boxed(self) -> boxed::Boxed where diff --git a/core/src/transport/and_then.rs b/core/src/transport/and_then.rs index ba751328..22018729 100644 --- a/core/src/transport/and_then.rs +++ b/core/src/transport/and_then.rs @@ -69,6 +69,10 @@ where }; Ok(future) } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.transport.address_translation(server, observed) + } } /// Custom `Stream` to avoid boxing. 
diff --git a/core/src/transport/boxed.rs b/core/src/transport/boxed.rs index 7f2e721e..5322b517 100644 --- a/core/src/transport/boxed.rs +++ b/core/src/transport/boxed.rs @@ -51,6 +51,7 @@ type ListenerUpgrade = Pin> + Send>>; trait Abstract { fn listen_on(&self, addr: Multiaddr) -> Result, TransportError>; fn dial(&self, addr: Multiaddr) -> Result, TransportError>; + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option; } impl Abstract for T @@ -78,6 +79,10 @@ where .map_err(|e| e.map(box_err))?; Ok(Box::pin(fut) as Dial<_>) } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + Transport::address_translation(self, server, observed) + } } impl fmt::Debug for Boxed { @@ -108,6 +113,10 @@ impl Transport for Boxed { fn dial(self, addr: Multiaddr) -> Result> { self.inner.dial(addr) } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.inner.address_translation(server, observed) + } } fn box_err(e: E) -> io::Error { diff --git a/core/src/transport/choice.rs b/core/src/transport/choice.rs index c6593912..3488b068 100644 --- a/core/src/transport/choice.rs +++ b/core/src/transport/choice.rs @@ -74,4 +74,12 @@ where Err(TransportError::MultiaddrNotSupported(addr)) } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + if let Some(addr) = self.0.address_translation(server, observed) { + Some(addr) + } else { + self.1.address_translation(server, observed) + } + } } diff --git a/core/src/transport/dummy.rs b/core/src/transport/dummy.rs index 0f9ee672..5839a6a5 100644 --- a/core/src/transport/dummy.rs +++ b/core/src/transport/dummy.rs @@ -67,6 +67,10 @@ impl Transport for DummyTransport { fn dial(self, addr: Multiaddr) -> Result> { Err(TransportError::MultiaddrNotSupported(addr)) } + + fn address_translation(&self, _server: &Multiaddr, _observed: &Multiaddr) -> Option { + None + } } /// Implementation of `AsyncRead` and `AsyncWrite`. 
Not meant to be instanciated. diff --git a/core/src/transport/map.rs b/core/src/transport/map.rs index f9fb2cf7..0305af66 100644 --- a/core/src/transport/map.rs +++ b/core/src/transport/map.rs @@ -57,6 +57,10 @@ where let p = ConnectedPoint::Dialer { address: addr }; Ok(MapFuture { inner: future, args: Some((self.fun, p)) }) } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.transport.address_translation(server, observed) + } } /// Custom `Stream` implementation to avoid boxing. diff --git a/core/src/transport/map_err.rs b/core/src/transport/map_err.rs index 90e65eb2..c0be6485 100644 --- a/core/src/transport/map_err.rs +++ b/core/src/transport/map_err.rs @@ -64,6 +64,10 @@ where Err(err) => Err(err.map(map)), } } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.transport.address_translation(server, observed) + } } /// Listening stream for `MapErr`. diff --git a/core/src/transport/memory.rs b/core/src/transport/memory.rs index e7d30630..366abd4e 100644 --- a/core/src/transport/memory.rs +++ b/core/src/transport/memory.rs @@ -191,6 +191,10 @@ impl Transport for MemoryTransport { DialFuture::new(port).ok_or(TransportError::Other(MemoryTransportError::Unreachable)) } + + fn address_translation(&self, _server: &Multiaddr, _observed: &Multiaddr) -> Option { + None + } } /// Error that can be produced from the `MemoryTransport`. 
diff --git a/core/src/transport/optional.rs b/core/src/transport/optional.rs index 283b50d7..2b29773e 100644 --- a/core/src/transport/optional.rs +++ b/core/src/transport/optional.rs @@ -74,4 +74,12 @@ where Err(TransportError::MultiaddrNotSupported(addr)) } } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + if let Some(inner) = &self.0 { + inner.address_translation(server, observed) + } else { + None + } + } } diff --git a/core/src/transport/timeout.rs b/core/src/transport/timeout.rs index dc29af81..d55d007d 100644 --- a/core/src/transport/timeout.rs +++ b/core/src/transport/timeout.rs @@ -101,6 +101,10 @@ where timer: Delay::new(self.outgoing_timeout), }) } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.inner.address_translation(server, observed) + } } // TODO: can be removed and replaced with an `impl Stream` once impl Trait is fully stable diff --git a/core/src/transport/upgrade.rs b/core/src/transport/upgrade.rs index 4304314b..b2cb7b46 100644 --- a/core/src/transport/upgrade.rs +++ b/core/src/transport/upgrade.rs @@ -334,6 +334,10 @@ where fn listen_on(self, addr: Multiaddr) -> Result> { self.0.listen_on(addr) } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.0.address_translation(server, observed) + } } /// An inbound or outbound upgrade. @@ -383,6 +387,10 @@ where upgrade: self.upgrade }) } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.inner.address_translation(server, observed) + } } /// Errors produced by a transport upgrade. 
diff --git a/core/tests/network_dial_error.rs b/core/tests/network_dial_error.rs index 13e532d7..85b2186d 100644 --- a/core/tests/network_dial_error.rs +++ b/core/tests/network_dial_error.rs @@ -41,10 +41,12 @@ fn deny_incoming_connec() { swarm1.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); let address = async_std::task::block_on(future::poll_fn(|cx| { - if let Poll::Ready(NetworkEvent::NewListenerAddress { listen_addr, .. }) = swarm1.poll(cx) { - Poll::Ready(listen_addr) - } else { - panic!("Was expecting the listen address to be reported") + match swarm1.poll(cx) { + Poll::Ready(NetworkEvent::NewListenerAddress { listen_addr, .. }) => { + Poll::Ready(listen_addr) + } + Poll::Pending => Poll::Pending, + _ => panic!("Was expecting the listen address to be reported"), } })); @@ -67,7 +69,7 @@ fn deny_incoming_connec() { multiaddr, error: PendingConnectionError::Transport(_) }) => { - assert_eq!(peer_id, *swarm1.local_peer_id()); + assert_eq!(&peer_id, swarm1.local_peer_id()); assert_eq!(multiaddr, address); return Poll::Ready(Ok(())); }, @@ -95,15 +97,15 @@ fn dial_self() { let mut swarm = test_network(NetworkConfig::default()); swarm.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); - let (local_address, mut swarm) = async_std::task::block_on( - future::lazy(move |cx| { - if let Poll::Ready(NetworkEvent::NewListenerAddress { listen_addr, .. }) = swarm.poll(cx) { - Ok::<_, void::Void>((listen_addr, swarm)) - } else { - panic!("Was expecting the listen address to be reported") + let local_address = async_std::task::block_on(future::poll_fn(|cx| { + match swarm.poll(cx) { + Poll::Ready(NetworkEvent::NewListenerAddress { listen_addr, .. 
}) => { + Poll::Ready(listen_addr) } - })) - .unwrap(); + Poll::Pending => Poll::Pending, + _ => panic!("Was expecting the listen address to be reported"), + } + })); swarm.dial(&local_address, TestHandler()).unwrap(); diff --git a/examples/chat-tokio.rs b/examples/chat-tokio.rs index 15577ac6..87754497 100644 --- a/examples/chat-tokio.rs +++ b/examples/chat-tokio.rs @@ -36,7 +36,6 @@ //! --features="floodsub mplex noise tcp-tokio mdns-tokio" //! ``` -use futures::prelude::*; use libp2p::{ Multiaddr, NetworkBehaviour, @@ -121,7 +120,7 @@ async fn main() -> Result<(), Box> { // Create a Swarm to manage peers and events. let mut swarm = { - let mdns = Mdns::new().await?; + let mdns = Mdns::new(Default::default()).await?; let mut behaviour = MyBehaviour { floodsub: Floodsub::new(peer_id.clone()), mdns, @@ -154,10 +153,15 @@ async fn main() -> Result<(), Box> { loop { let to_publish = { tokio::select! { - line = stdin.try_next() => Some((floodsub_topic.clone(), line?.expect("Stdin closed"))), + line = stdin.next_line() => { + let line = line?.expect("stdin closed"); + Some((floodsub_topic.clone(), line)) + } event = swarm.next() => { - println!("New Event: {:?}", event); - None + // All events are handled by the `NetworkBehaviourEventProcess`es. + // I.e. the `swarm.next()` future drives the `Swarm` without ever + // terminating. 
+ panic!("Unexpected event: {:?}", event); } } }; diff --git a/examples/chat.rs b/examples/chat.rs index 67966e07..e7050da9 100644 --- a/examples/chat.rs +++ b/examples/chat.rs @@ -58,7 +58,7 @@ use libp2p::{ NetworkBehaviour, identity, floodsub::{self, Floodsub, FloodsubEvent}, - mdns::{Mdns, MdnsEvent}, + mdns::{Mdns, MdnsConfig, MdnsEvent}, swarm::NetworkBehaviourEventProcess }; use std::{error::Error, task::{Context, Poll}}; @@ -121,7 +121,7 @@ fn main() -> Result<(), Box> { // Create a Swarm to manage peers and events let mut swarm = { - let mdns = task::block_on(Mdns::new())?; + let mdns = task::block_on(Mdns::new(MdnsConfig::default()))?; let mut behaviour = MyBehaviour { floodsub: Floodsub::new(local_peer_id.clone()), mdns, diff --git a/examples/distributed-key-value-store.rs b/examples/distributed-key-value-store.rs index c2c76d97..6067e883 100644 --- a/examples/distributed-key-value-store.rs +++ b/examples/distributed-key-value-store.rs @@ -60,11 +60,11 @@ use libp2p::{ Swarm, build_development_transport, identity, - mdns::{Mdns, MdnsEvent}, + mdns::{Mdns, MdnsConfig, MdnsEvent}, swarm::NetworkBehaviourEventProcess }; use std::{error::Error, task::{Context, Poll}}; -use trust_graph::TrustGraph; +use trust_graph::{TrustGraph, InMemoryStorage}; fn main() -> Result<(), Box> { env_logger::init(); @@ -159,8 +159,12 @@ fn main() -> Result<(), Box> { libp2p::identity::Keypair::Ed25519(kp) => kp, _ => unreachable!("only ed25519 supported"), }; - let kademlia = Kademlia::new(local_key, local_peer_id.clone(), store, TrustGraph::new(vec![])); - let mdns = task::block_on(Mdns::new())?; + let trust = { + let storage = InMemoryStorage::new_in_memory(vec![]); + TrustGraph::new(storage) + }; + let kademlia = Kademlia::new(local_key, local_peer_id.clone(), store, trust); + let mdns = task::block_on(Mdns::new(MdnsConfig::default()))?; let behaviour = MyBehaviour { kademlia, mdns }; Swarm::new(transport, behaviour, local_peer_id) }; diff --git 
a/examples/mdns-passive-discovery.rs b/examples/mdns-passive-discovery.rs index a8f4323a..774fc9e6 100644 --- a/examples/mdns-passive-discovery.rs +++ b/examples/mdns-passive-discovery.rs @@ -18,43 +18,42 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use async_std::task; -use libp2p::mdns::service::{MdnsPacket, MdnsService}; +use libp2p::{identity, mdns::{Mdns, MdnsConfig, MdnsEvent}, PeerId, Swarm}; use std::error::Error; -fn main() -> Result<(), Box> { - // This example provides passive discovery of the libp2p nodes on the - // network that send mDNS queries and answers. - task::block_on(async move { - let mut service = MdnsService::new().await?; - loop { - let (srv, packet) = service.next().await; - match packet { - MdnsPacket::Query(query) => { - // We detected a libp2p mDNS query on the network. In a real application, you - // probably want to answer this query by doing `query.respond(...)`. - println!("Detected query from {:?}", query.remote_addr()); - } - MdnsPacket::Response(response) => { - // We detected a libp2p mDNS response on the network. Responses are for - // everyone and not just for the requester, which makes it possible to - // passively listen. - for peer in response.discovered_peers() { - println!("Discovered peer {:?}", peer.id()); - // These are the self-reported addresses of the peer we just discovered. - for addr in peer.addresses() { - println!(" Address = {:?}", addr); - } - } - } - MdnsPacket::ServiceDiscovery(query) => { - // The last possibility is a service detection query from DNS-SD. - // Just like `Query`, in a real application you probably want to call - // `query.respond`. - println!("Detected service query from {:?}", query.remote_addr()); +#[async_std::main] +async fn main() -> Result<(), Box> { + env_logger::init(); + + // Create a random PeerId. 
+ let id_keys = identity::Keypair::generate_ed25519(); + let peer_id = PeerId::from(id_keys.public()); + println!("Local peer id: {:?}", peer_id); + + // Create a transport. + let transport = libp2p::build_development_transport(id_keys)?; + + // Create an MDNS network behaviour. + let behaviour = Mdns::new(MdnsConfig::default()).await?; + + // Create a Swarm that establishes connections through the given transport. + // Note that the MDNS behaviour itself will not actually initiate any connections, + // as it only uses UDP. + let mut swarm = Swarm::new(transport, behaviour, peer_id); + Swarm::listen_on(&mut swarm, "/ip4/0.0.0.0/tcp/0".parse()?)?; + + loop { + match swarm.next().await { + MdnsEvent::Discovered(peers) => { + for (peer, addr) in peers { + println!("discovered {} {}", peer, addr); + } + } + MdnsEvent::Expired(expired) => { + for (peer, addr) in expired { + println!("expired {} {}", peer, addr); } } - service = srv } - }) + } } diff --git a/misc/multiaddr/CHANGELOG.md b/misc/multiaddr/CHANGELOG.md index 712d03e7..663f5336 100644 --- a/misc/multiaddr/CHANGELOG.md +++ b/misc/multiaddr/CHANGELOG.md @@ -1,3 +1,16 @@ +# 0.11.1 [2021-02-15] + +- Update dependencies + +# 0.11.0 [2021-01-12] + +- Update dependencies + +# 0.10.1 [2021-01-12] + +- Fix compilation with serde-1.0.119. + [PR 1912](https://github.com/libp2p/rust-libp2p/pull/1912) + # 0.10.0 [2020-11-25] - Upgrade multihash to `0.13`. 
diff --git a/misc/multiaddr/Cargo.toml b/misc/multiaddr/Cargo.toml index 204aa3d6..862e6b58 100644 --- a/misc/multiaddr/Cargo.toml +++ b/misc/multiaddr/Cargo.toml @@ -6,7 +6,7 @@ description = "Implementation of the multiaddr format" homepage = "https://github.com/libp2p/rust-libp2p" keywords = ["multiaddr", "ipfs"] license = "MIT" -version = "0.10.1" +version = "0.11.1" [features] default = ["url"] @@ -23,7 +23,7 @@ multihash = { version = "0.13", default-features = false, features = ["std", "mu percent-encoding = "2.1.0" serde = "1.0.70" static_assertions = "1.1" -unsigned-varint = "0.5" +unsigned-varint = "0.7" url = { version = "2.1.0", optional = true, default-features = false } [dev-dependencies] diff --git a/misc/multiaddr/src/onion_addr.rs b/misc/multiaddr/src/onion_addr.rs index e445117a..29261287 100644 --- a/misc/multiaddr/src/onion_addr.rs +++ b/misc/multiaddr/src/onion_addr.rs @@ -1,6 +1,4 @@ -use std::borrow::Cow; -use std::fmt::Debug; -use std::fmt; +use std::{borrow::Cow, fmt}; /// Represents an Onion v3 address #[derive(Clone)] @@ -43,7 +41,7 @@ impl<'a> From<(&'a [u8; 35], u16)> for Onion3Addr<'a> { } } -impl Debug for Onion3Addr<'_> { +impl fmt::Debug for Onion3Addr<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { f.debug_tuple("Onion3Addr") .field(&format!("{:02x?}", &self.0[..])) diff --git a/misc/multistream-select/CHANGELOG.md b/misc/multistream-select/CHANGELOG.md index d41a5620..aef09073 100644 --- a/misc/multistream-select/CHANGELOG.md +++ b/misc/multistream-select/CHANGELOG.md @@ -1,3 +1,17 @@ +# 0.10.2 [2021-03-01] + +- Re-enable "parallel negotiation" if the dialer has 3 or more + alternative protocols. + [PR 1934](https://github.com/libp2p/rust-libp2p/pull/1934) + +# 0.10.1 [2021-02-15] + +- Update dependencies. + +# 0.10.0 [2021-01-12] + +- Update dependencies. 
+ # 0.9.1 [2020-12-02] - Ensure uniform outcomes for failed negotiations with both diff --git a/misc/multistream-select/Cargo.toml b/misc/multistream-select/Cargo.toml index 822c8d46..f015b181 100644 --- a/misc/multistream-select/Cargo.toml +++ b/misc/multistream-select/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "fluence-fork-multistream-select" description = "Multistream-select negotiation protocol for libp2p" -version = "0.9.2" +version = "0.10.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,19 +13,19 @@ edition = "2018" name = "multistream_select" [dependencies] -bytes = "0.5" +bytes = "1" futures = "0.3" log = "0.4" pin-project = "1.0.0" smallvec = "1.0" -unsigned-varint = "0.5" +unsigned-varint = "0.7" [dev-dependencies] async-std = "1.6.2" env_logger = "0.8" libp2p-core = { path = "../../core", package = "fluence-fork-libp2p-core" } libp2p-mplex = { path = "../../muxers/mplex", package = "fluence-fork-libp2p-mplex" } -libp2p-plaintext = { path = "../../protocols/plaintext", package = "fluence-fork-libp2p-plaintext" } +libp2p-plaintext = { path = "../../transports/plaintext", package = "fluence-fork-libp2p-plaintext" } quickcheck = "0.9.0" rand = "0.7.2" rw-stream-sink = "0.2.1" diff --git a/misc/multistream-select/src/dialer_select.rs b/misc/multistream-select/src/dialer_select.rs index 733c4903..cd2de652 100644 --- a/misc/multistream-select/src/dialer_select.rs +++ b/misc/multistream-select/src/dialer_select.rs @@ -56,17 +56,12 @@ where I::Item: AsRef<[u8]> { let iter = protocols.into_iter(); - // NOTE: Temporarily disabled "parallel" negotiation in order to correct the - // "ls" responses towards interoperability and (new) spec compliance. - // See https://github.com/libp2p/rust-libp2p/issues/1795. - Either::Left(dialer_select_proto_serial(inner, iter, version)) - // We choose between the "serial" and "parallel" strategies based on the number of protocols. 
- // if iter.size_hint().1.map(|n| n <= 3).unwrap_or(false) { - // Either::Left(dialer_select_proto_serial(inner, iter, version)) - // } else { - // Either::Right(dialer_select_proto_parallel(inner, iter, version)) - // } + if iter.size_hint().1.map(|n| n <= 3).unwrap_or(false) { + Either::Left(dialer_select_proto_serial(inner, iter, version)) + } else { + Either::Right(dialer_select_proto_parallel(inner, iter, version)) + } } /// Future, returned by `dialer_select_proto`, which selects a protocol and dialer diff --git a/muxers/mplex/CHANGELOG.md b/muxers/mplex/CHANGELOG.md index 6a4dd135..28a993ed 100644 --- a/muxers/mplex/CHANGELOG.md +++ b/muxers/mplex/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.27.1 [2021-02-15] + +- Update dependencies. + +# 0.27.0 [2021-01-12] + +- Update dependencies. + # 0.26.0 [2020-12-17] - Update `libp2p-core`. diff --git a/muxers/mplex/Cargo.toml b/muxers/mplex/Cargo.toml index 50551b29..a8e2a4cb 100644 --- a/muxers/mplex/Cargo.toml +++ b/muxers/mplex/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-mplex" edition = "2018" description = "Mplex multiplexing protocol for libp2p" -version = "0.26.1" +version = "0.27.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,24 +13,24 @@ categories = ["network-programming", "asynchronous"] name = "libp2p_mplex" [dependencies] -bytes = "0.5" +bytes = "1" futures = "0.3.1" -futures_codec = "0.4.1" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } +asynchronous-codec = "0.6" +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } log = "0.4" nohash-hasher = "0.2" parking_lot = "0.11" rand = "0.7" smallvec = "1.4" -unsigned-varint = { version = "0.5", features = ["futures-codec"] } +unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } [dev-dependencies] async-std = "1.7.0" criterion = "0.3" env_logger = "0.8" futures = "0.3" -libp2p-tcp 
= { path = "../../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" } -libp2p-plaintext = { path = "../../protocols/plaintext", package = "fluence-fork-libp2p-plaintext" } +libp2p-tcp = { path = "../../transports/tcp", package = "fluence-fork-libp2p-tcp" } +libp2p-plaintext = { path = "../../transports/plaintext", package = "fluence-fork-libp2p-plaintext" } quickcheck = "0.9" rand = "0.7" diff --git a/muxers/mplex/src/codec.rs b/muxers/mplex/src/codec.rs index b91865ad..a1112223 100644 --- a/muxers/mplex/src/codec.rs +++ b/muxers/mplex/src/codec.rs @@ -19,7 +19,7 @@ // DEALINGS IN THE SOFTWARE. use bytes::{BufMut, Bytes, BytesMut}; -use futures_codec::{Decoder, Encoder}; +use asynchronous_codec::{Decoder, Encoder}; use libp2p_core::Endpoint; use std::{fmt, hash::{Hash, Hasher}, io, mem}; use unsigned_varint::{codec, encode}; diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs index a390f79c..dcc1c4a4 100644 --- a/muxers/mplex/src/io.rs +++ b/muxers/mplex/src/io.rs @@ -24,7 +24,7 @@ use crate::codec::{Codec, Frame, LocalStreamId, RemoteStreamId}; use log::{debug, trace}; use futures::{prelude::*, ready, stream::Fuse}; use futures::task::{AtomicWaker, ArcWake, waker_ref, WakerRef}; -use futures_codec::Framed; +use asynchronous_codec::Framed; use nohash_hasher::{IntMap, IntSet}; use parking_lot::Mutex; use smallvec::SmallVec; @@ -321,7 +321,7 @@ where // Remove the substream, scheduling pending frames as necessary. match self.substreams.remove(&id) { - None => return, + None => {}, Some(state) => { // If we fell below the substream limit, notify tasks that had // interest in opening an outbound substream earlier. @@ -442,7 +442,7 @@ where // Read the next frame. match ready!(self.poll_read_frame(cx, Some(id)))? 
{ Frame::Data { data, stream_id } if stream_id.into_local() == id => { - return Poll::Ready(Ok(Some(data.clone()))) + return Poll::Ready(Ok(Some(data))) }, Frame::Data { stream_id, data } => { // The data frame is for a different stream than the one @@ -595,18 +595,16 @@ where // this task again to have a chance at progress. trace!("{}: No task to read from blocked stream. Waking current task.", self.id); cx.waker().clone().wake(); + } else if let Some(id) = stream_id { + // We woke some other task, but are still interested in + // reading `Data` frames from the current stream when unblocked. + debug_assert!(blocked_id != &id, "Unexpected attempt at reading a new \ + frame from a substream with a full buffer."); + let _ = NotifierRead::register_read_stream(&self.notifier_read, cx.waker(), id); } else { - if let Some(id) = stream_id { - // We woke some other task, but are still interested in - // reading `Data` frames from the current stream when unblocked. - debug_assert!(blocked_id != &id, "Unexpected attempt at reading a new \ - frame from a substream with a full buffer."); - let _ = NotifierRead::register_read_stream(&self.notifier_read, cx.waker(), id); - } else { - // We woke some other task but are still interested in - // reading new `Open` frames when unblocked. - let _ = NotifierRead::register_next_stream(&self.notifier_read, cx.waker()); - } + // We woke some other task but are still interested in + // reading new `Open` frames when unblocked. 
+ let _ = NotifierRead::register_next_stream(&self.notifier_read, cx.waker()); } return Poll::Pending @@ -932,7 +930,7 @@ impl NotifierRead { impl ArcWake for NotifierRead { fn wake_by_ref(this: &Arc) { - let wakers = mem::replace(&mut *this.read_stream.lock(), Default::default()); + let wakers = mem::take(&mut *this.read_stream.lock()); for (_, waker) in wakers { waker.wake(); } @@ -963,7 +961,7 @@ impl NotifierWrite { impl ArcWake for NotifierWrite { fn wake_by_ref(this: &Arc) { - let wakers = mem::replace(&mut *this.pending.lock(), Default::default()); + let wakers = mem::take(&mut *this.pending.lock()); for waker in wakers { waker.wake(); } @@ -985,7 +983,7 @@ impl NotifierOpen { } fn wake_all(&mut self) { - let wakers = mem::replace(&mut self.pending, Default::default()); + let wakers = mem::take(&mut self.pending); for waker in wakers { waker.wake(); } @@ -1006,7 +1004,7 @@ mod tests { use async_std::task; use bytes::BytesMut; use futures::prelude::*; - use futures_codec::{Decoder, Encoder}; + use asynchronous_codec::{Decoder, Encoder}; use quickcheck::*; use rand::prelude::*; use std::collections::HashSet; diff --git a/muxers/mplex/src/lib.rs b/muxers/mplex/src/lib.rs index a0b22c8e..653c3310 100644 --- a/muxers/mplex/src/lib.rs +++ b/muxers/mplex/src/lib.rs @@ -106,7 +106,7 @@ where -> Poll> { let stream_id = ready!(self.io.lock().poll_open_stream(cx))?; - return Poll::Ready(Ok(Substream::new(stream_id))) + Poll::Ready(Ok(Substream::new(stream_id))) } fn destroy_outbound(&self, _substream: Self::OutboundSubstream) { diff --git a/muxers/yamux/CHANGELOG.md b/muxers/yamux/CHANGELOG.md index 16d2cf5a..1a3cfdd8 100644 --- a/muxers/yamux/CHANGELOG.md +++ b/muxers/yamux/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.30.1 [2021-02-17] + +- Update `yamux` to `0.8.1`. + +# 0.30.0 [2021-01-12] + +- Update dependencies. + # 0.29.0 [2020-12-17] - Update `libp2p-core`. 
diff --git a/muxers/yamux/Cargo.toml b/muxers/yamux/Cargo.toml index 64f4d0cb..1f9c2c04 100644 --- a/muxers/yamux/Cargo.toml +++ b/muxers/yamux/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-yamux" edition = "2018" description = "Yamux multiplexing protocol for libp2p" -version = "0.29.1" +version = "0.30.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,10 +14,10 @@ name = "libp2p_yamux" [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } parking_lot = "0.11" thiserror = "1.0" -yamux = "0.8.0" +yamux = "0.8.1" [package.metadata.workspaces] independent = true diff --git a/protocols/floodsub/CHANGELOG.md b/protocols/floodsub/CHANGELOG.md index e1e8f7fe..1e3db772 100644 --- a/protocols/floodsub/CHANGELOG.md +++ b/protocols/floodsub/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.28.0 [unreleased] + +- Update `libp2p-swarm`. + +# 0.27.0 [2021-01-12] + +- Update dependencies. + # 0.26.0 [2020-12-17] - Update `libp2p-swarm` and `libp2p-core`. 
diff --git a/protocols/floodsub/Cargo.toml b/protocols/floodsub/Cargo.toml index d9cc3053..a84d4db7 100644 --- a/protocols/floodsub/Cargo.toml +++ b/protocols/floodsub/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-floodsub" edition = "2018" description = "Floodsub protocol for libp2p" -version = "0.26.1" +version = "0.28.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -16,15 +16,15 @@ name = "libp2p_floodsub" cuckoofilter = "0.5.0" fnv = "1.0" futures = "0.3.1" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } -libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } +libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } log = "0.4" -prost = "0.6.1" +prost = "0.7" rand = "0.7" smallvec = "1.0" [build-dependencies] -prost-build = "0.6" +prost-build = "0.7" [package.metadata.workspaces] diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs index 2d5d3e12..cf43354a 100644 --- a/protocols/floodsub/src/layer.rs +++ b/protocols/floodsub/src/layer.rs @@ -34,7 +34,6 @@ use libp2p_swarm::{ DialPeerCondition, }; use log::warn; -use rand; use smallvec::SmallVec; use std::{collections::VecDeque, iter}; use std::collections::hash_map::{DefaultHasher, HashMap}; @@ -89,7 +88,7 @@ impl Floodsub { if self.connected_peers.contains_key(&peer_id) { for topic in self.subscribed_topics.iter().cloned() { self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), + peer_id, handler: NotifyHandler::Any, event: FloodsubRpc { messages: Vec::new(), @@ -102,7 +101,7 @@ impl Floodsub { } } - if self.target_peers.insert(peer_id.clone()) { + if self.target_peers.insert(peer_id) { self.events.push_back(NetworkBehaviourAction::DialPeer { peer_id, 
condition: DialPeerCondition::Disconnected }); @@ -125,7 +124,7 @@ impl Floodsub { for peer in self.connected_peers.keys() { self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer.clone(), + peer_id: *peer, handler: NotifyHandler::Any, event: FloodsubRpc { messages: Vec::new(), @@ -156,7 +155,7 @@ impl Floodsub { for peer in self.connected_peers.keys() { self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer.clone(), + peer_id: *peer, handler: NotifyHandler::Any, event: FloodsubRpc { messages: Vec::new(), @@ -196,7 +195,7 @@ impl Floodsub { fn publish_many_inner(&mut self, topic: impl IntoIterator>, data: impl Into>, check_self_subscriptions: bool) { let message = FloodsubMessage { - source: self.config.local_peer_id.clone(), + source: self.config.local_peer_id, data: data.into(), // If the sequence numbers are predictable, then an attacker could flood the network // with packets with the predetermined sequence numbers and absorb our legitimate @@ -231,7 +230,7 @@ impl Floodsub { } self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), + peer_id: *peer_id, handler: NotifyHandler::Any, event: FloodsubRpc { subscriptions: Vec::new(), @@ -259,7 +258,7 @@ impl NetworkBehaviour for Floodsub { if self.target_peers.contains(id) { for topic in self.subscribed_topics.iter().cloned() { self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: id.clone(), + peer_id: *id, handler: NotifyHandler::Any, event: FloodsubRpc { messages: Vec::new(), @@ -272,7 +271,7 @@ impl NetworkBehaviour for Floodsub { } } - self.connected_peers.insert(id.clone(), SmallVec::new()); + self.connected_peers.insert(*id, SmallVec::new()); } fn inject_disconnected(&mut self, id: &PeerId) { @@ -283,7 +282,7 @@ impl NetworkBehaviour for Floodsub { // try to reconnect. 
if self.target_peers.contains(id) { self.events.push_back(NetworkBehaviourAction::DialPeer { - peer_id: id.clone(), + peer_id: *id, condition: DialPeerCondition::Disconnected }); } @@ -312,7 +311,7 @@ impl NetworkBehaviour for Floodsub { remote_peer_topics.push(subscription.topic.clone()); } self.events.push_back(NetworkBehaviourAction::GenerateEvent(FloodsubEvent::Subscribed { - peer_id: propagation_source.clone(), + peer_id: propagation_source, topic: subscription.topic, })); } @@ -321,7 +320,7 @@ impl NetworkBehaviour for Floodsub { remote_peer_topics.remove(pos); } self.events.push_back(NetworkBehaviourAction::GenerateEvent(FloodsubEvent::Unsubscribed { - peer_id: propagation_source.clone(), + peer_id: propagation_source, topic: subscription.topic, })); } @@ -364,7 +363,7 @@ impl NetworkBehaviour for Floodsub { if let Some(pos) = rpcs_to_dispatch.iter().position(|(p, _)| p == peer_id) { rpcs_to_dispatch[pos].1.messages.push(message.clone()); } else { - rpcs_to_dispatch.push((peer_id.clone(), FloodsubRpc { + rpcs_to_dispatch.push((*peer_id, FloodsubRpc { subscriptions: Vec::new(), messages: vec![message.clone()], })); diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index 0090a149..3fc5e902 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,3 +1,21 @@ +# 0.29.0 [unreleased] + +- Update `libp2p-swarm`. + +# 0.28.0 [2021-02-15] + +- Prevent non-published messages being added to caches. + [PR 1930](https://github.com/libp2p/rust-libp2p/pull/1930) + +- Update dependencies. + +# 0.27.0 [2021-01-12] + +- Update dependencies. + +- Implement Gossipsub v1.1 specification. + [PR 1720](https://github.com/libp2p/rust-libp2p/pull/1720) + # 0.26.0 [2020-12-17] - Update `libp2p-swarm` and `libp2p-core`. 
diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 63add239..0dc2bc3a 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-gossipsub" edition = "2018" description = "Gossipsub protocol for libp2p" -version = "0.26.1" +version = "0.29.0" authors = ["Age Manning "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,37 +13,37 @@ categories = ["network-programming", "asynchronous"] name = "libp2p_gossipsub" [dependencies] -libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } -bytes = "0.5.6" +libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } +bytes = "1.0" byteorder = "1.3.4" fnv = "1.0.7" futures = "0.3.5" rand = "0.7.3" -futures_codec = "0.4.1" +asynchronous-codec = "0.6" wasm-timer = "0.2.4" -unsigned-varint = { version = "0.5.0", features = ["futures-codec"] } +unsigned-varint = { version = "0.7.0", features = ["asynchronous_codec"] } log = "0.4.11" sha2 = "0.9.1" base64 = "0.13.0" smallvec = "1.4.2" -prost = "0.6.1" +prost = "0.7" hex_fmt = "0.3.0" regex = "1.4.0" [dev-dependencies] async-std = "1.6.3" env_logger = "0.8.1" -libp2p-plaintext = { path = "../plaintext", package = "fluence-fork-libp2p-plaintext" } +libp2p-plaintext = { path = "../../transports/plaintext", package = "fluence-fork-libp2p-plaintext" } libp2p-yamux = { path = "../../muxers/yamux", package = "fluence-fork-libp2p-yamux" } libp2p-mplex = { path = "../../muxers/mplex", package = "fluence-fork-libp2p-mplex" } -libp2p-noise = { path = "../../protocols/noise", package = "fluence-fork-libp2p-noise" } +libp2p-noise = { path = "../../transports/noise", package = 
"fluence-fork-libp2p-noise" } quickcheck = "0.9.2" hex = "0.4.2" derive_builder = "0.9.0" [build-dependencies] -prost-build = "0.6.1" +prost-build = "0.7" [package.metadata.workspaces] independent = true diff --git a/protocols/gossipsub/src/backoff.rs b/protocols/gossipsub/src/backoff.rs index 2e6e7614..c10814d2 100644 --- a/protocols/gossipsub/src/backoff.rs +++ b/protocols/gossipsub/src/backoff.rs @@ -78,7 +78,7 @@ impl BackoffStorage { backoffs_by_heartbeat: &mut Vec>, heartbeat_interval, backoff_slack| { - let pair = (topic.clone(), peer.clone()); + let pair = (topic.clone(), *peer); let index = (heartbeat_index.0 + Self::heartbeats(&time, heartbeat_interval) + backoff_slack as usize) @@ -90,12 +90,12 @@ impl BackoffStorage { .backoffs .entry(topic.clone()) .or_insert_with(HashMap::new) - .entry(peer.clone()) + .entry(*peer) { Entry::Occupied(mut o) => { let (backoff, index) = o.get(); if backoff < &instant { - let pair = (topic.clone(), peer.clone()); + let pair = (topic.clone(), *peer); if let Some(s) = self.backoffs_by_heartbeat.get_mut(index.0) { s.remove(&pair); } diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index bae8ec22..5f0a16e9 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -574,7 +574,7 @@ where // calculate the message id from the un-transformed data let msg_id = self.config.message_id(&GossipsubMessage { - source: raw_message.source.clone(), + source: raw_message.source, data, // the uncompressed form sequence_number: raw_message.sequence_number, topic: raw_message.topic.clone(), @@ -591,15 +591,11 @@ where // check that the size doesn't exceed the max transmission size if event.encoded_len() > self.config.max_transmit_size() { - // NOTE: The size limit can be reached by excessive topics or an excessive message. - // This is an estimate that should be within 10% of the true encoded value. 
It is - // possible to have a message that exceeds the RPC limit and is not caught here. A - // warning log will be emitted in this case. return Err(PublishError::MessageTooLarge); } - // Add published message to the duplicate cache. - if !self.duplicate_cache.insert(msg_id.clone()) { + // Check the if the message has been published before + if self.duplicate_cache.contains(&msg_id) { // This message has already been seen. We don't re-publish messages that have already // been published on the network. warn!( @@ -609,24 +605,13 @@ where return Err(PublishError::Duplicate); } - // If the message isn't a duplicate add it to the memcache. - self.mcache.put(&msg_id, raw_message.clone()); - debug!("Publishing message: {:?}", msg_id); - // If the message is anonymous or has a random author add it to the published message ids - // cache. - if let PublishConfig::RandomAuthor | PublishConfig::Anonymous = self.publish_config { - if !self.config.allow_self_origin() { - self.published_message_ids.insert(msg_id.clone()); - } - } - let topic_hash = raw_message.topic.clone(); // If we are not flood publishing forward the message to mesh peers. let mesh_peers_sent = - !self.config.flood_publish() && self.forward_msg(&msg_id, raw_message, None)?; + !self.config.flood_publish() && self.forward_msg(&msg_id, raw_message.clone(), None)?; let mut recipient_peers = HashSet::new(); if let Some(set) = self.topic_peers.get(&topic_hash) { @@ -644,7 +629,7 @@ where // Explicit peers for peer in &self.explicit_peers { if set.contains(peer) { - recipient_peers.insert(peer.clone()); + recipient_peers.insert(*peer); } } @@ -655,7 +640,7 @@ where .score_below_threshold(peer, |ts| ts.publish_threshold) .0 { - recipient_peers.insert(peer.clone()); + recipient_peers.insert(*peer); } } @@ -665,7 +650,7 @@ where // If we have fanout peers add them to the map. 
if self.fanout.contains_key(&topic_hash) { for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { - recipient_peers.insert(peer.clone()); + recipient_peers.insert(*peer); } } else { // We have no fanout peers, select mesh_n of them and add them to the fanout @@ -688,7 +673,7 @@ where self.fanout.insert(topic_hash.clone(), new_peers.clone()); for peer in new_peers { debug!("Peer added to fanout: {:?}", peer); - recipient_peers.insert(peer.clone()); + recipient_peers.insert(peer); } } // We are publishing to fanout peers - update the time we published @@ -702,10 +687,23 @@ where return Err(PublishError::InsufficientPeers); } + // If the message isn't a duplicate and we have sent it to some peers add it to the + // duplicate cache and memcache. + self.duplicate_cache.insert(msg_id.clone()); + self.mcache.put(&msg_id, raw_message); + + // If the message is anonymous or has a random author add it to the published message ids + // cache. + if let PublishConfig::RandomAuthor | PublishConfig::Anonymous = self.publish_config { + if !self.config.allow_self_origin() { + self.published_message_ids.insert(msg_id.clone()); + } + } + // Send to peers we know are subscribed to the topic. for peer_id in recipient_peers.iter() { debug!("Sending message to peer: {:?}", peer_id); - self.send_message(peer_id.clone(), event.clone())?; + self.send_message(*peer_id, event.clone())?; } info!("Published message: {:?}", &msg_id); @@ -777,7 +775,7 @@ where pub fn add_explicit_peer(&mut self, peer_id: &PeerId) { debug!("Adding explicit peer {}", peer_id); - self.explicit_peers.insert(peer_id.clone()); + self.explicit_peers.insert(*peer_id); self.check_explicit_peer_connection(peer_id); } @@ -792,7 +790,7 @@ where /// Blacklists a peer. All messages from this peer will be rejected and any message that was /// created by this peer will be rejected. 
pub fn blacklist_peer(&mut self, peer_id: &PeerId) { - if self.blacklisted_peers.insert(peer_id.clone()) { + if self.blacklisted_peers.insert(*peer_id) { debug!("Peer has been blacklisted: {}", peer_id); } } @@ -944,7 +942,7 @@ where } Self::control_pool_add( &mut self.control_pool, - peer_id.clone(), + peer_id, GossipsubControlAction::Graft { topic_hash: topic_hash.clone(), }, @@ -1019,7 +1017,7 @@ where // Send a PRUNE control message info!("LEAVE: Sending PRUNE to peer: {:?}", peer); let control = self.make_prune(topic_hash, &peer, self.config.do_px()); - Self::control_pool_add(&mut self.control_pool, peer.clone(), control); + Self::control_pool_add(&mut self.control_pool, peer, control); } } debug!("Completed LEAVE for topic: {:?}", topic_hash); @@ -1031,7 +1029,7 @@ where // Connect to peer debug!("Connecting to explicit peer {:?}", peer_id); self.events.push_back(NetworkBehaviourAction::DialPeer { - peer_id: peer_id.clone(), + peer_id: *peer_id, condition: DialPeerCondition::Disconnected, }); } @@ -1078,7 +1076,7 @@ where // IHAVE flood protection let peer_have = self .count_received_ihave - .entry(peer_id.clone()) + .entry(*peer_id) .or_insert(0); *peer_have += 1; if *peer_have > self.config.max_ihave_messages() { @@ -1124,7 +1122,7 @@ where } if !iwant_ids.is_empty() { - let iasked = self.count_sent_iwant.entry(peer_id.clone()).or_insert(0); + let iasked = self.count_sent_iwant.entry(*peer_id).or_insert(0); let mut iask = iwant_ids.len(); if *iasked + iask > self.config.max_ihave_length() { iask = self.config.max_ihave_length().saturating_sub(*iasked); @@ -1149,7 +1147,7 @@ where let message_ids = iwant_ids_vec.into_iter().cloned().collect::>(); if let Some((_, _, _, gossip_promises)) = &mut self.peer_score { gossip_promises.add_promise( - peer_id.clone(), + *peer_id, &message_ids, Instant::now() + self.config.iwant_followup_time(), ); @@ -1161,7 +1159,7 @@ where Self::control_pool_add( &mut self.control_pool, - peer_id.clone(), + *peer_id, 
GossipsubControlAction::IWant { message_ids }, ); } @@ -1205,11 +1203,11 @@ where // Send the messages to the peer let message_list = cached_messages .into_iter() - .map(|entry| RawGossipsubMessage::from(entry.1)) + .map(|entry| entry.1) .collect(); if self .send_message( - peer_id.clone(), + *peer_id, GossipsubRpc { subscriptions: Vec::new(), messages: message_list, @@ -1313,7 +1311,7 @@ where "GRAFT: Mesh link added for peer: {:?} in topic: {:?}", peer_id, &topic_hash ); - peers.insert(peer_id.clone()); + peers.insert(*peer_id); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.graft(peer_id, topic_hash); @@ -1345,7 +1343,7 @@ where if self .send_message( - peer_id.clone(), + *peer_id, GossipsubRpc { subscriptions: Vec::new(), messages: Vec::new(), @@ -1455,7 +1453,7 @@ where // it, see https://github.com/libp2p/specs/pull/217 if let Some(peer_id) = p.peer_id { // mark as px peer - self.px_peers.insert(peer_id.clone()); + self.px_peers.insert(peer_id); // dial peer self.events.push_back(NetworkBehaviourAction::DialPeer { @@ -1609,7 +1607,7 @@ where if !self.duplicate_cache.insert(msg_id.clone()) { debug!( "Message already received, ignoring. 
Message: {}", - msg_id.clone() + msg_id ); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.duplicated_message(propagation_source, &msg_id, &message.topic); @@ -1636,7 +1634,7 @@ where debug!("Sending received message to user"); self.events.push_back(NetworkBehaviourAction::GenerateEvent( GossipsubEvent::Message { - propagation_source: propagation_source.clone(), + propagation_source: *propagation_source, message_id: msg_id.clone(), message, }, @@ -1742,7 +1740,7 @@ where match subscription.action { GossipsubSubscriptionAction::Subscribe => { - if peer_list.insert(propagation_source.clone()) { + if peer_list.insert(*propagation_source) { debug!( "SUBSCRIPTION: Adding gossip peer: {} to topic: {:?}", propagation_source.to_string(), @@ -1772,7 +1770,7 @@ where { if let Some(peers) = self.mesh.get_mut(&subscription.topic_hash) { if peers.len() < self.config.mesh_n_low() - && peers.insert(propagation_source.clone()) + && peers.insert(*propagation_source) { debug!( "SUBSCRIPTION: Adding peer {} to the mesh for topic {:?}", @@ -1798,7 +1796,7 @@ where // generates a subscription event to be polled application_event.push(NetworkBehaviourAction::GenerateEvent( GossipsubEvent::Subscribed { - peer_id: propagation_source.clone(), + peer_id: *propagation_source, topic: subscription.topic_hash.clone(), }, )); @@ -1814,11 +1812,11 @@ where // remove topic from the peer_topics mapping subscribed_topics.remove(&subscription.topic_hash); unsubscribed_peers - .push((propagation_source.clone(), subscription.topic_hash.clone())); + .push((*propagation_source, subscription.topic_hash.clone())); // generate an unsubscribe event to be polled application_event.push(NetworkBehaviourAction::GenerateEvent( GossipsubEvent::Unsubscribed { - peer_id: propagation_source.clone(), + peer_id: *propagation_source, topic: subscription.topic_hash.clone(), }, )); @@ -1836,7 +1834,7 @@ where if !grafts.is_empty() && self .send_message( - propagation_source.clone(), + 
*propagation_source, GossipsubRpc { subscriptions: Vec::new(), messages: Vec::new(), @@ -1901,7 +1899,7 @@ where let peer_score = &self.peer_score; let mut score = |p: &PeerId| match peer_score { Some((peer_score, ..)) => *scores - .entry(p.clone()) + .entry(*p) .or_insert_with(|| peer_score.score(p)), _ => 0.0, }; @@ -1928,9 +1926,9 @@ where topic_hash ); - let current_topic = to_prune.entry(p.clone()).or_insert_with(Vec::new); + let current_topic = to_prune.entry(*p).or_insert_with(Vec::new); current_topic.push(topic_hash.clone()); - no_px.insert(p.clone()); + no_px.insert(*p); true } else { false @@ -1965,7 +1963,7 @@ where }, ); for peer in &peer_list { - let current_topic = to_graft.entry(peer.clone()).or_insert_with(Vec::new); + let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); current_topic.push(topic_hash.clone()); } // update the mesh @@ -2048,7 +2046,7 @@ where }, ); for peer in &peer_list { - let current_topic = to_graft.entry(peer.clone()).or_insert_with(Vec::new); + let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); current_topic.push(topic_hash.clone()); } // update the mesh @@ -2104,8 +2102,7 @@ where }, ); for peer in &peer_list { - let current_topic = - to_graft.entry(peer.clone()).or_insert_with(Vec::new); + let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); current_topic.push(topic_hash.clone()); } // update the mesh @@ -2153,12 +2150,12 @@ where "HEARTBEAT: Peer removed from fanout for topic: {:?}", topic_hash ); - to_remove_peers.push(peer.clone()); + to_remove_peers.push(*peer); } } None => { // remove if the peer has disconnected - to_remove_peers.push(peer.clone()); + to_remove_peers.push(*peer); } } } @@ -2207,7 +2204,7 @@ where .iter() .map(|p| { ( - p.clone(), + *p, peer_score .as_ref() .expect("peer_score.is_some()") @@ -2297,7 +2294,7 @@ where // send an IHAVE message Self::control_pool_add( &mut self.control_pool, - peer.clone(), + peer, GossipsubControlAction::IHave { topic_hash: 
topic_hash.clone(), message_ids: peer_message_ids, @@ -2348,7 +2345,7 @@ where // send the control messages if self .send_message( - peer.clone(), + *peer, GossipsubRpc { subscriptions: Vec::new(), messages: Vec::new(), @@ -2376,7 +2373,7 @@ where .collect(); if self .send_message( - peer.clone(), + *peer, GossipsubRpc { subscriptions: Vec::new(), messages: Vec::new(), @@ -2416,7 +2413,7 @@ where if let Some(mesh_peers) = self.mesh.get(&topic) { for peer_id in mesh_peers { if Some(peer_id) != propagation_source && Some(peer_id) != message.source.as_ref() { - recipient_peers.insert(peer_id.clone()); + recipient_peers.insert(*peer_id); } } } @@ -2428,7 +2425,7 @@ where && Some(p) != message.source.as_ref() && topics.contains(&message.topic) { - recipient_peers.insert(p.clone()); + recipient_peers.insert(*p); } } } @@ -2438,7 +2435,7 @@ where let event = Arc::new( GossipsubRpc { subscriptions: Vec::new(), - messages: vec![RawGossipsubMessage::from(message.clone())], + messages: vec![message.clone()], control_msgs: Vec::new(), } .into_protobuf(), @@ -2446,7 +2443,7 @@ where for peer in recipient_peers.iter() { debug!("Sending message: {:?} to peer {:?}", msg_id, peer); - self.send_message(peer.clone(), event.clone())?; + self.send_message(*peer, event.clone())?; } debug!("Completed forwarding message"); Ok(true) @@ -2492,7 +2489,7 @@ where }; Ok(RawGossipsubMessage { - source: Some(author.clone()), + source: Some(*author), data, // To be interoperable with the go-implementation this is treated as a 64-bit // big-endian uint. @@ -2505,7 +2502,7 @@ where } PublishConfig::Author(peer_id) => { Ok(RawGossipsubMessage { - source: Some(peer_id.clone()), + source: Some(*peer_id), data, // To be interoperable with the go-implementation this is treated as a 64-bit // big-endian uint. 
@@ -2593,7 +2590,7 @@ where for message in messages { self.events .push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), + peer_id, event: message, handler: NotifyHandler::Any, }) @@ -2776,7 +2773,7 @@ where // send our subscriptions to the peer if self .send_message( - peer_id.clone(), + *peer_id, GossipsubRpc { messages: Vec::new(), subscriptions, @@ -2791,7 +2788,7 @@ where } // Insert an empty set of the topics of this peer until known. - self.peer_topics.insert(peer_id.clone(), Default::default()); + self.peer_topics.insert(*peer_id, Default::default()); // By default we assume a peer is only a floodsub peer. // @@ -2799,11 +2796,11 @@ where // update the type of peer that this is in order to determine which kind of routing should // occur. self.peer_protocols - .entry(peer_id.clone()) + .entry(*peer_id) .or_insert(PeerKind::Floodsub); if let Some((peer_score, ..)) = &mut self.peer_score { - peer_score.add_peer(peer_id.clone()); + peer_score.add_peer(*peer_id); } } @@ -2888,7 +2885,7 @@ where if !self.peer_topics.contains_key(peer_id) && !self.px_peers.contains(peer_id) { // The first connection is outbound and it is not a peer from peer exchange => mark // it as outbound peer - self.outbound_peers.insert(peer_id.clone()); + self.outbound_peers.insert(*peer_id); } } @@ -3251,7 +3248,7 @@ impl fmt::Debug for PublishConfig { mod local_test { use super::*; use crate::IdentTopic; - use futures_codec::Encoder; + use asynchronous_codec::Encoder; use quickcheck::*; use rand::Rng; diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index 5a9a5e00..3d9eccf6 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -393,7 +393,7 @@ impl Default for GossipsubConfigBuilder { let mut source_string = if let Some(peer_id) = message.source.as_ref() { peer_id.to_base58() } else { - PeerId::from_bytes(&vec![0, 1, 0]) + PeerId::from_bytes(&[0, 1, 0]) .expect("Valid peer id") .to_base58() }; 
diff --git a/protocols/gossipsub/src/gossip_promises.rs b/protocols/gossipsub/src/gossip_promises.rs index 4dfadbe5..49b1dde6 100644 --- a/protocols/gossipsub/src/gossip_promises.rs +++ b/protocols/gossipsub/src/gossip_promises.rs @@ -83,7 +83,7 @@ impl GossipPromises { self.promises.retain(|msg, peers| { peers.retain(|peer_id, expires| { if *expires < now { - let count = result.entry(peer_id.clone()).or_insert(0); + let count = result.entry(*peer_id).or_insert(0); *count += 1; debug!( "The peer {} broke the promise to deliver message {} in time!", diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index 8026143b..f5eb2789 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -24,7 +24,7 @@ use crate::protocol::{GossipsubCodec, ProtocolConfig}; use crate::types::{GossipsubRpc, PeerKind, RawGossipsubMessage}; use futures::prelude::*; use futures::StreamExt; -use futures_codec::Framed; +use asynchronous_codec::Framed; use libp2p_core::upgrade::{InboundUpgrade, NegotiationError, OutboundUpgrade, UpgradeError}; use libp2p_swarm::protocols_handler::{ KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol, diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index 73104710..cba668f6 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -19,7 +19,7 @@ // DEALINGS IN THE SOFTWARE. //! Gossipsub is a P2P pubsub (publish/subscription) routing layer designed to extend upon -//! flooodsub and meshsub routing protocols. +//! floodsub and meshsub routing protocols. //! //! # Overview //! 
diff --git a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs index 6e8aab91..d9b903ac 100644 --- a/protocols/gossipsub/src/mcache.rs +++ b/protocols/gossipsub/src/mcache.rs @@ -110,7 +110,7 @@ impl MessageCache { let count = iwant_counts .entry(message_id.clone()) .or_default() - .entry(peer.clone()) + .entry(*peer) .or_default(); *count += 1; *count diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index 42765837..4ff7470d 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -432,7 +432,7 @@ impl PeerScore { /// Adds a new ip to a peer, if the peer is not yet known creates a new peer_stats entry for it pub fn add_ip(&mut self, peer_id: &PeerId, ip: IpAddr) { trace!("Add ip for peer {}, ip: {}", peer_id, ip); - let peer_stats = self.peer_stats.entry(peer_id.clone()).or_default(); + let peer_stats = self.peer_stats.entry(*peer_id).or_default(); // Mark the peer as connected (currently the default is connected, but we don't want to // rely on the default). @@ -443,7 +443,7 @@ impl PeerScore { self.peer_ips .entry(ip) .or_insert_with(HashSet::new) - .insert(peer_id.clone()); + .insert(*peer_id); } /// Removes an ip from a peer @@ -474,7 +474,7 @@ impl PeerScore { pub fn remove_peer(&mut self, peer_id: &PeerId) { // we only retain non-positive scores of peers if self.score(peer_id) > 0f64 { - if let hash_map::Entry::Occupied(entry) = self.peer_stats.entry(peer_id.clone()) { + if let hash_map::Entry::Occupied(entry) = self.peer_stats.entry(*peer_id) { Self::remove_ips_for_peer(entry.get(), &mut self.peer_ips, peer_id); entry.remove(); } @@ -692,11 +692,11 @@ impl PeerScore { DeliveryStatus::Unknown => { // the message is being validated; track the peer delivery and wait for // the Deliver/Reject notification. 
- record.peers.insert(from.clone()); + record.peers.insert(*from); } DeliveryStatus::Valid(validated) => { // mark the peer delivery time to only count a duplicate delivery once. - record.peers.insert(from.clone()); + record.peers.insert(*from); self.mark_duplicate_message_delivery(from, topic_hash, Some(validated)); } DeliveryStatus::Invalid => { diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index d2505cb8..ac258410 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -32,7 +32,7 @@ use bytes::Bytes; use bytes::BytesMut; use futures::future; use futures::prelude::*; -use futures_codec::{Decoder, Encoder, Framed}; +use asynchronous_codec::{Decoder, Encoder, Framed}; use libp2p_core::{ identity::PublicKey, InboundUpgrade, OutboundUpgrade, PeerId, ProtocolName, UpgradeInfo, }; diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index 26ca3990..10d36731 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.28.0 [unreleased] + +- Update `libp2p-swarm`. + +# 0.27.0 [2021-01-12] + +- Update dependencies. + # 0.26.0 [2020-12-17] - Update `libp2p-swarm` and `libp2p-core`. 
diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index 0508cdfa..ba8f3c1e 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-identify" edition = "2018" description = "Nodes identifcation protocol for libp2p" -version = "0.26.1" +version = "0.28.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,22 +14,21 @@ name = "libp2p_identify" [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } -libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } +libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } log = "0.4.1" -prost = "0.6.1" +prost = "0.7" smallvec = "1.0" wasm-timer = "0.2" [dev-dependencies] async-std = "1.6.2" libp2p-mplex = { path = "../../muxers/mplex", package = "fluence-fork-libp2p-mplex" } -libp2p-noise = { path = "../../protocols/noise", package = "fluence-fork-libp2p-noise" } -libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" } +libp2p-noise = { path = "../../transports/noise", package = "fluence-fork-libp2p-noise" } +libp2p-tcp = { path = "../../transports/tcp", package = "fluence-fork-libp2p-tcp" } [build-dependencies] -prost-build = "0.6" - +prost-build = "0.7" [package.metadata.workspaces] independent = true diff --git a/protocols/identify/src/identify.rs b/protocols/identify/src/identify.rs index 667a3f22..81c114f1 100644 --- a/protocols/identify/src/identify.rs +++ b/protocols/identify/src/identify.rs @@ -117,7 +117,7 @@ impl NetworkBehaviour for Identify { ConnectedPoint::Listener { send_back_addr, .. 
} => send_back_addr.clone(), }; - self.observed_addresses.entry(peer_id.clone()).or_default().insert(*conn, addr); + self.observed_addresses.entry(*peer_id).or_default().insert(*conn, addr); } fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, _: &ConnectedPoint) { diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index 79f3bcdd..dc4a7a6e 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -35,13 +35,12 @@ use std::{fmt, io, iter, pin::Pin}; pub struct IdentifyProtocolConfig; #[derive(Debug, Clone)] +#[non_exhaustive] pub struct RemoteInfo { /// Information about the remote. pub info: IdentifyInfo, /// Address the remote sees for us. pub observed_addr: Multiaddr, - - _priv: () } /// The substream on which a reply is expected to be sent. @@ -80,7 +79,7 @@ where agent_version: Some(info.agent_version), protocol_version: Some(info.protocol_version), public_key: Some(pubkey_bytes), - listen_addrs: listen_addrs, + listen_addrs, observed_addr: Some(observed_addr.to_vec()), protocols: info.protocols }; @@ -158,8 +157,7 @@ where Ok(RemoteInfo { info, - observed_addr: observed_addr.clone(), - _priv: () + observed_addr, }) }) } diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index 333c96ae..a4a0b05f 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -1,3 +1,20 @@ +# 0.29.0 [unreleased] + +- Update `libp2p-swarm`. + +# 0.28.1 [2021-02-15] + +- Update dependencies. + +# 0.28.0 [2021-01-12] + +- Update dependencies. + +# 0.27.1 [2021-01-11] + +- Add From impls for `kbucket::Key`. + [PR 1909](https://github.com/libp2p/rust-libp2p/pull/1909). + # 0.27.0 [2020-12-17] - Update `libp2p-core` and `libp2p-swarm`. 
diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 29693c72..7a398009 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-kad" edition = "2018" description = "Kademlia protocol for libp2p" -version = "0.27.1" +version = "0.29.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,37 +14,38 @@ name = "libp2p_kad" [dependencies] arrayvec = "0.5.1" -bytes = "0.5" +bytes = "1" either = "1.5" fnv = "1.0" -futures_codec = "0.4" +asynchronous-codec = "0.6" futures = "0.3.1" log = "0.4" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } -libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } -prost = "0.6.1" +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } +libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } +prost = "0.7" rand = "0.7.2" sha2 = "0.9.1" smallvec = "1.0" wasm-timer = "0.2" -uint = "0.8" -unsigned-varint = { version = "0.5", features = ["futures-codec"] } +uint = "0.9" +unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } void = "1.0" bs58 = "0.3.0" derivative = "2.0.2" -trust-graph = "0.2.0" +trust-graph = "0.2.5" +fluence-identity = "0.2.4" prometheus = "0.9.0" [dev-dependencies] futures-timer = "3.0" -libp2p-noise = { path = "../noise", package = "fluence-fork-libp2p-noise" } +libp2p-noise = { path = "../../transports/noise", package = "fluence-fork-libp2p-noise" } libp2p-yamux = { path = "../../muxers/yamux", package = "fluence-fork-libp2p-yamux" } quickcheck = "0.9.0" env_logger = "0.7.1" [build-dependencies] -prost-build = "0.6" +prost-build = "0.7" [package.metadata.workspaces] diff --git a/protocols/kad/src/addresses.rs b/protocols/kad/src/addresses.rs index 2b218a95..9bab8ff7 100644 --- 
a/protocols/kad/src/addresses.rs +++ b/protocols/kad/src/addresses.rs @@ -34,6 +34,7 @@ pub enum Remove { KeepLast = 1 } +#[allow(clippy::len_without_is_empty)] impl Addresses { /// Creates a new list of addresses. pub fn new(addr: Multiaddr) -> Addresses { diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 7ee7b13c..24eb4f00 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -58,12 +58,14 @@ use std::task::{Context, Poll}; use std::vec; use wasm_timer::Instant; use libp2p_core::identity::ed25519::{Keypair, PublicKey}; -use trust_graph::{TrustGraph, Certificate}; +use trust_graph::{Certificate}; use derivative::Derivative; use crate::metrics::Metrics; pub use crate::query::QueryStats; +type TrustGraph = trust_graph::TrustGraph; + /// `Kademlia` is a `NetworkBehaviour` that implements the libp2p /// Kademlia protocol. pub struct Kademlia { @@ -363,7 +365,7 @@ where .record_replication_interval .or(config.record_publication_interval) .map(|interval| PutRecordJob::new( - id.clone(), + id, interval, config.record_publication_interval, config.record_ttl, @@ -398,7 +400,7 @@ where } /// Gets an iterator over immutable references to all running queries. - pub fn iter_queries<'a>(&'a self) -> impl Iterator> { + pub fn iter_queries(&self) -> impl Iterator> { self.queries.iter().filter_map(|query| if !query.is_finished() { Some(QueryRef { query }) @@ -408,7 +410,7 @@ where } /// Gets an iterator over mutable references to all running queries. - pub fn iter_queries_mut<'a>(&'a mut self) -> impl Iterator> { + pub fn iter_queries_mut(&mut self) -> impl Iterator> { self.queries.iter_mut().filter_map(|query| if !query.is_finished() { Some(QueryMut { query }) @@ -418,7 +420,7 @@ where } /// Gets an immutable reference to a running query, if it exists. 
- pub fn query<'a>(&'a self, id: &QueryId) -> Option> { + pub fn query(&self, id: &QueryId) -> Option> { self.queries.get(id).and_then(|query| if !query.is_finished() { Some(QueryRef { query }) @@ -461,7 +463,7 @@ where if entry.value().insert(address) { self.queued_events.push_back(NetworkBehaviourAction::GenerateEvent( KademliaEvent::RoutingUpdated { - peer: peer.clone(), + peer: *peer, addresses: entry.value().clone().into(), old_peer: None, } @@ -647,7 +649,7 @@ where /// with an explicit expiration will always expire at that instant and until then /// is subject to regular (re-)replication and (re-)publication. pub fn put_record(&mut self, mut record: Record, quorum: Quorum) -> Result { - record.publisher = Some(self.kbuckets.local_key().preimage().clone()); + record.publisher = Some(*self.kbuckets.local_key().preimage()); self.store.put(record.clone())?; self.metrics.store_put(); record.expires = record.expires.or_else(|| @@ -712,7 +714,7 @@ where pub fn bootstrap(&mut self) -> Result { let local_key = self.kbuckets.local_key().clone(); let info = QueryInfo::Bootstrap { - peer: local_key.preimage().clone(), + peer: *local_key.preimage(), remaining: None }; let peers = Self::closest_keys(&mut self.kbuckets, &local_key).collect::>(); @@ -756,7 +758,7 @@ where // TODO: calculate weight for self? 
let record = ProviderRecord::new( key.clone(), - self.kbuckets.local_key().preimage().clone(), + *self.kbuckets.local_key().preimage(), local_addrs); self.store.add_provider(record)?; let target = kbucket::Key::new(key.clone()); @@ -766,7 +768,7 @@ where bs58::encode(target.as_ref()).into_string(), // sha256 ); let provider_key = self.kbuckets.local_public_key(); - let certificates = self.trust.get_all_certs(&provider_key, &[]); + let certificates = self.get_certificates(&provider_key); let peers = Self::closest_keys(&mut self.kbuckets, &target); let context = AddProviderContext::Publish; let info = QueryInfo::AddProvider { @@ -843,20 +845,18 @@ where } } - let local_id = self.kbuckets.local_key().preimage().clone(); - let others_iter = peers.filter(|p| p.node_id != local_id); + let local_id = self.kbuckets.local_key().preimage(); + let others_iter = peers.filter(|p| &p.node_id != local_id); let trust = &self.trust; - if let Some(query) = self.queries.get_mut(query_id) { log::trace!("Request to {:?} in query {:?} succeeded.", source, query_id); for peer in others_iter.clone() { - log::trace!("Peer {:?} reported by {:?} in query {:?}.", - peer, source, query_id); - query.inner.contacts.insert(peer.node_id.clone(), peer.clone().into()); + log::trace!("Peer {:?} reported by {:?} in query {:?}.", peer, source, query_id); + query.inner.contacts.insert(peer.node_id, peer.clone().into()); } query.on_success(source, others_iter.map(|kp| WeightedPeer { peer_id: kp.node_id.clone().into(), - weight: trust.weight(&kp.public_key).unwrap_or_default() + weight: get_weight(trust, &kp.public_key), })) } } @@ -875,7 +875,7 @@ where .map(KadPeer::from) .collect(); peers.iter_mut().for_each(|mut peer| - peer.certificates = self.trust.get_all_certs(&peer.public_key, &[]) + peer.certificates = self.get_certificates(&peer.public_key) ); peers } @@ -908,14 +908,12 @@ where // The provider is either the local node and we fill in // the local addresses on demand, let self_key = 
kbuckets.local_public_key(); - let certificates = trust.get_all_certs(&self_key, &[]); - let multiaddrs = local_addrs.iter().cloned().collect::>(); Some(KadPeer { - public_key: self_key, node_id, - multiaddrs, connection_ty, - certificates + multiaddrs: local_addrs.iter().cloned().collect::>(), + certificates: get_certificates(&trust, &self_key), + public_key: self_key, }) } else { let key = kbucket::Key::from(node_id); @@ -928,16 +926,16 @@ where } else { p.addresses }; - let certificates = node_id.as_public_key().and_then(|provider_pk| - match provider_pk { - libp2p_core::identity::PublicKey::Ed25519(pk) => - Some(trust.get_all_certs(pk, &[])), + let certificates = { + match node_id.as_public_key() { + Some(libp2p_core::identity::PublicKey::Ed25519(pk)) => + get_certificates(&trust, &pk), key => { log::warn!("Provider {} has a non-Ed25519 public key: {:?}", node_id, key); - None + vec![] } } - ).unwrap_or_default(); + }; KadPeer { node_id, @@ -970,7 +968,7 @@ where /// Starts an iterative `ADD_PROVIDER` query for the given key. fn start_add_provider(&mut self, key: record::Key, context: AddProviderContext) { let provider_key = self.kbuckets.local_public_key(); - let certificates = self.trust.get_all_certs(&provider_key, &[]); + let certificates = self.get_certificates(&provider_key); let info = QueryInfo::AddProvider { context, key: key.clone(), @@ -1072,7 +1070,7 @@ where { let addresses = contact.addresses.clone(); let peer = entry.key().preimage().clone(); - let weight = trust.weight(contact.public_key.clone()).unwrap_or(0); + let weight = get_weight(&trust, &contact.public_key); debug!( "Calculated weight for {} pk {}: {}", entry.key().preimage(), @@ -1221,10 +1219,10 @@ where phase: AddProviderPhase::GetClosestPeers, .. 
} => { - let provider_id = params.local_peer_id().clone(); + let provider_id = *params.local_peer_id(); let external_addresses = params.external_addresses().map(|r| r.addr).collect(); let provider_key = self.kbuckets.local_public_key(); - let certificates = self.trust.get_all_certs(&provider_key, &[]); + let certificates = self.get_certificates(&provider_key); let inner = QueryInner::new(QueryInfo::AddProvider { context, key, @@ -1241,7 +1239,7 @@ where let peers = result.peers.into_iter().map(|peer_id| { let weight = contacts .get(&peer_id) - .and_then(|c| trust.weight(&c.public_key)) + .map(|c| get_weight(&trust, &c.public_key)) .unwrap_or_default(); WeightedPeer { peer_id: peer_id.into(), @@ -1299,7 +1297,8 @@ where let trust = &self.trust; let weight = result.inner.contacts.get(peer_id) - .and_then(|c| trust.weight(&c.public_key)).unwrap_or_default(); + .map(|c| get_weight(&trust, &c.public_key)) + .unwrap_or_default(); let peer = WeightedPeer { weight, peer_id: cache_key @@ -1342,9 +1341,10 @@ where let trust = &self.trust; let peers = result.peers.into_iter().map(|peer_id| { let weight = - contacts.get(&peer_id).and_then(|c| - trust.weight(&c.public_key) - ).unwrap_or_default(); + contacts + .get(&peer_id) + .map(|c| get_weight(&trust, &c.public_key)) + .unwrap_or_default(); WeightedPeer { peer_id: peer_id.into(), @@ -1738,6 +1738,26 @@ where log!("\n{}", buckets); } } + + fn get_certificates(&self, key: &PublicKey) -> Vec { + get_certificates(&self.trust, key) + } + + fn get_weight(&self, key: &PublicKey) -> u32 { + get_weight(&self.trust, key) + } +} + +fn get_certificates(trust: &TrustGraph, key: &PublicKey) -> Vec { + fluence_identity::PublicKey::from_libp2p(&key).map(|key| + trust.get_all_certs(&key, &[]).unwrap_or_default() + ).unwrap_or_default() +} + +fn get_weight(trust: &TrustGraph, key: &PublicKey) -> u32 { + fluence_identity::PublicKey::from_libp2p(&key).map(|key| + trust.weight(&key).unwrap_or_default().unwrap_or_default() + ).unwrap_or(0) } /// 
Exponentially decrease the given duration (base 2). @@ -1804,7 +1824,7 @@ where }); } - self.connected_peers.insert(peer.clone()); + self.connected_peers.insert(*peer); self.metrics.node_connected(); } @@ -1913,7 +1933,7 @@ where for query in self.queries.iter_mut() { query.on_failure(id); } - self.connection_updated(id.clone(), None, NodeStatus::Disconnected); + self.connection_updated(*id, None, NodeStatus::Disconnected); self.connected_peers.remove(id); } @@ -1932,7 +1952,7 @@ where // since the remote address on an inbound connection may be specific // to that connection (e.g. typically the TCP port numbers). let new_address = match endpoint { - ConnectedPoint::Dialer { address } => Some(address.clone()), + ConnectedPoint::Dialer { address } => Some(address), ConnectedPoint::Listener { .. } => None, }; @@ -2080,7 +2100,7 @@ where key, records, quorum, cache_at } = &mut query.inner.info { if let Some(record) = record { - records.push(PeerRecord{ peer: Some(source.clone()), record }); + records.push(PeerRecord{ peer: Some(source), record }); let quorum = quorum.get(); if records.len() >= quorum { @@ -2104,7 +2124,7 @@ where // closest node to the key that did *not* return the // value is tracked in order to cache the record on // that node if the query turns out to be successful. - let source_key = kbucket::Key::from(source.clone()); + let source_key = kbucket::Key::from(source); if let Some(cache_key) = cache_at { let key = kbucket::Key::new(key.clone()); if source_key.distance(&key) < cache_key.distance(&key) { @@ -2135,7 +2155,7 @@ where if let QueryInfo::PutRecord { phase: PutRecordPhase::PutRecord { success, .. }, quorum, .. 
} = &mut query.inner.info { - success.push(source.clone()); + success.push(source); let quorum = quorum.get(); if success.len() >= quorum { @@ -2270,7 +2290,7 @@ where peer_id, event, handler: NotifyHandler::Any }); } else if &peer_id != self.kbuckets.local_key().preimage() { - query.inner.pending_rpcs.push((peer_id.clone(), event)); + query.inner.pending_rpcs.push((peer_id, event)); self.queued_events.push_back(NetworkBehaviourAction::DialPeer { peer_id, condition: DialPeerCondition::Disconnected }); @@ -2801,7 +2821,7 @@ impl QueryInfo { key: key.clone(), provider: crate::protocol::KadPeer { public_key: provider_key.clone(), - node_id: provider_id.clone(), + node_id: *provider_id, multiaddrs: external_addresses.clone(), connection_ty: crate::protocol::KadConnectionType::Connected, certificates: certificates.clone(), diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index cd11a805..d5d525ed 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -48,6 +48,7 @@ use quickcheck::*; use rand::{Rng, random, thread_rng, rngs::StdRng, SeedableRng}; use std::{collections::{HashSet, HashMap}, time::Duration, num::NonZeroUsize, u64}; use libp2p_core::identity::ed25519; +use trust_graph::InMemoryStorage; type TestSwarm = Swarm>; @@ -67,8 +68,12 @@ fn build_node_with_config(cfg: KademliaConfig) -> (ed25519::Keypair, Multiaddr, .boxed(); let local_id = local_public_key.clone().into_peer_id(); + let trust = { + let pk = fluence_identity::PublicKey::from_libp2p(&ed25519_key.public()).unwrap(); + let storage = InMemoryStorage::new_in_memory(vec![(pk, 1)]); + TrustGraph::new(storage) + }; let store = MemoryStore::new(local_id.clone()); - let trust = TrustGraph::new(vec![(ed25519_key.public(), 1)]); let behaviour = Kademlia::with_config(ed25519_key.clone(), local_id.clone(), store, cfg.clone(), trust); let mut swarm = Swarm::new(transport, behaviour, local_id); @@ -172,6 +177,7 @@ fn bootstrap() { 
).into_iter() .map(|(_, _a, s)| s) .collect::>(); + let swarm_ids: Vec<_> = swarms.iter() .map(Swarm::local_peer_id) .cloned() @@ -477,7 +483,7 @@ fn put_record() { // Connect `single_swarm` to three bootnodes. for i in 0..3 { single_swarm.2.add_address( - Swarm::local_peer_id(&fully_connected_swarms[0].2), + &Swarm::local_peer_id(&fully_connected_swarms[0].2), fully_connected_swarms[i].1.clone(), fully_connected_swarms[i].0.public(), ); @@ -758,7 +764,7 @@ fn add_provider() { // Connect `single_swarm` to three bootnodes. for i in 0..3 { single_swarm.2.add_address( - Swarm::local_peer_id(&fully_connected_swarms[0].2), + &Swarm::local_peer_id(&fully_connected_swarms[0].2), fully_connected_swarms[i].1.clone(), fully_connected_swarms[i].0.public(), ); @@ -960,8 +966,8 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { trudy.2.store.put(record_trudy.clone()).unwrap(); // Make `trudy` and `bob` known to `alice`. - alice.2.add_address(Swarm::local_peer_id(&trudy.2), trudy.1.clone(), trudy.0.public()); - alice.2.add_address(Swarm::local_peer_id(&bob.2), bob.1.clone(), bob.0.public()); + alice.2.add_address(&Swarm::local_peer_id(&trudy.2), trudy.1.clone(), trudy.0.public()); + alice.2.add_address(&Swarm::local_peer_id(&bob.2), bob.1.clone(), bob.0.public()); // Drop the swarm addresses. 
let (mut alice, mut bob, mut trudy) = (alice.2, bob.2, trudy.2); @@ -1191,7 +1197,8 @@ fn make_swarms(total: usize, config: KademliaConfig) -> Vec<(Keypair, Multiaddr, #[cfg(test)] mod certificates { use super::*; - use trust_graph::{KeyPair, current_time}; + use trust_graph::current_time; + use fluence_identity::{KeyPair, PublicKey}; fn gen_root_cert(from: &KeyPair, to: PublicKey) -> Certificate { let cur_time = current_time(); @@ -1223,7 +1230,7 @@ mod certificates { } fn bs(pk: PublicKey) -> String { - bs58::encode(pk.encode()).into_string() + bs58::encode(pk.to_bytes()).into_string() } #[test] @@ -1239,15 +1246,19 @@ mod certificates { // Set same weights to all nodes, so they store each other's certificates let weights = swarms.iter().map(|(kp, _, _)| (kp.public(), 1)).collect::>(); for swarm in swarms.iter_mut() { - swarm.2.trust.add_root_weights(weights.clone()); + for (pk, weight) in weights.iter() { + let pk = fluence_identity::PublicKey::from_libp2p(&pk).unwrap(); + swarm.2.trust.add_root_weight(pk, *weight); + } } let mut swarms = swarms.into_iter(); let (first_kp, _, first) = swarms.next().unwrap(); // issue certs from each swarm to the first swarm, so all swarms trust the first one let mut swarms = swarms.map(|(kp, _, mut swarm)| { + let pk = fluence_identity::PublicKey::from_libp2p(&first_kp.public()).unwrap(); // root cert, its chain is [self-signed: swarm -> swarm, swarm -> first] - let root = gen_root_cert(&kp.clone().into(), first_kp.public()); + let root = gen_root_cert(&kp.clone().into(), pk); swarm.trust.add(&root, current_time()).unwrap(); SwarmWithKeypair { swarm, kp } }); @@ -1258,16 +1269,25 @@ mod certificates { // issue cert from the first swarm to the second (will be later disseminated via kademlia) // chain: 0 -> 1 - let cert_0_1 = gen_root_cert(&swarm0.kp.clone().into(), swarm1.kp.public()); + let cert_0_1 = { + let pk = fluence_identity::PublicKey::from_libp2p(&swarm1.kp.public()).unwrap(); + gen_root_cert(&swarm0.kp.clone().into(), 
pk) + }; swarm0.swarm.trust.add(&cert_0_1, current_time()).unwrap(); - let cert_0_1_check = swarm0.swarm.trust.get_all_certs(&swarm1.kp.public(), &[]); + let cert_0_1_check = { + let pk = fluence_identity::PublicKey::from_libp2p(&swarm1.kp.public()).unwrap(); + swarm0.swarm.trust.get_all_certs(pk, &[]).unwrap() + }; assert_eq!(cert_0_1_check.len(), 1); let cert_0_1_check = cert_0_1_check.into_iter().nth(0).unwrap(); assert_eq!(cert_0_1, cert_0_1_check); // check that this certificate (with root prepended) can be added to trust graph of any other node // chain: (2 -> 0) - let mut cert_2_0_1 = gen_root_cert(&swarm2.kp.clone().into(), swarm0.kp.public()); + let mut cert_2_0_1 = { + let pk = fluence_identity::PublicKey::from_libp2p(&swarm0.kp.public()).unwrap(); + gen_root_cert(&swarm2.kp.clone().into(), pk) + }; // chain: (2 -> 0) ++ (0 -> 1) cert_2_0_1.chain.extend_from_slice(&cert_0_1.chain[1..]); swarm2.swarm.trust.add(cert_2_0_1, current_time()).unwrap(); @@ -1305,13 +1325,26 @@ mod certificates { // check that certificates for `swarm[1].kp` were disseminated for swarm in swarms.iter().skip(2) { - let disseminated = swarm.swarm.trust.get_all_certs(kp_1.clone(), &[]); + let disseminated = { + let pk = fluence_identity::PublicKey::from_libp2p(&kp_1).unwrap(); + swarm.swarm.trust.get_all_certs(&pk, &[]).unwrap() + }; // take only certificate converging to current `swarm` public key - let disseminated = disseminated.into_iter().find(|c| &c.chain[0].issued_for == &swarm.kp.public()).unwrap(); + let disseminated = { + let pk = fluence_identity::PublicKey::from_libp2p(&swarm.kp.public()).unwrap(); + disseminated.into_iter().find(|c| &c.chain[0].issued_for == &pk).unwrap() + }; // swarm -> swarm0 -> swarm1 assert_eq!(disseminated.chain.len(), 3); let pubkeys = disseminated.chain.iter().map(|c| &c.issued_for).collect::>(); - assert_eq!(pubkeys, vec![&swarm.kp.public(), &swarms[0].kp.public(), &swarms[1].kp.public()]); + assert_eq!( + pubkeys, + vec![ + 
&fluence_identity::PublicKey::from_libp2p(&swarm.kp.public()).unwrap(), + &fluence_identity::PublicKey::from_libp2p(&swarms[0].kp.public()).unwrap(), + &fluence_identity::PublicKey::from_libp2p(&swarms[1].kp.public()).unwrap(), + ] + ); // last trust in the certificate must be equal to previously generated (0 -> 1) trust let last = disseminated.chain.last().unwrap(); diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index 961d27a3..d5d02464 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -531,7 +531,7 @@ where } KademliaHandlerIn::FindNodeReq { key, user_data } => { let msg = KadRequestMsg::FindNode { key }; - self.substreams.push(SubstreamState::OutPendingOpen(msg, Some(user_data.clone()))); + self.substreams.push(SubstreamState::OutPendingOpen(msg, Some(user_data))); } KademliaHandlerIn::FindNodeRes { closer_peers, @@ -550,7 +550,7 @@ where }; let msg = KadResponseMsg::FindNode { - closer_peers: closer_peers.clone(), + closer_peers, }; self.substreams .push(SubstreamState::InPendingSend(conn_id, substream, msg)); @@ -559,7 +559,7 @@ where KademliaHandlerIn::GetProvidersReq { key, user_data } => { let msg = KadRequestMsg::GetProviders { key }; self.substreams - .push(SubstreamState::OutPendingOpen(msg, Some(user_data.clone()))); + .push(SubstreamState::OutPendingOpen(msg, Some(user_data))); } KademliaHandlerIn::GetProvidersRes { closer_peers, @@ -582,8 +582,8 @@ where }; let msg = KadResponseMsg::GetProviders { - closer_peers: closer_peers.clone(), - provider_peers: provider_peers.clone(), + closer_peers, + provider_peers, }; self.substreams .push(SubstreamState::InPendingSend(conn_id, substream, msg)); @@ -622,7 +622,7 @@ where let msg = KadResponseMsg::GetValue { record, - closer_peers: closer_peers.clone(), + closer_peers, }; self.substreams .push(SubstreamState::InPendingSend(conn_id, substream, msg)); diff --git a/protocols/kad/src/jobs.rs b/protocols/kad/src/jobs.rs index e1b49262..8737f9ad 100644 --- 
a/protocols/kad/src/jobs.rs +++ b/protocols/kad/src/jobs.rs @@ -224,15 +224,11 @@ impl PutRecordJob { } if let PeriodicJobState::Running(records) = &mut self.inner.state { - loop { - if let Some(r) = records.next() { - if r.is_expired(now) { - store.remove(&r.key) - } else { - return Poll::Ready(r) - } + for r in records { + if r.is_expired(now) { + store.remove(&r.key) } else { - break + return Poll::Ready(r) } } @@ -301,15 +297,11 @@ impl AddProviderJob { } if let PeriodicJobState::Running(keys) = &mut self.inner.state { - loop { - if let Some(r) = keys.next() { - if r.is_expired(now) { - store.remove_provider(&r.key, &r.provider) - } else { - return Poll::Ready(r) - } + for r in keys { + if r.is_expired(now) { + store.remove_provider(&r.key, &r.provider) } else { - break + return Poll::Ready(r) } } diff --git a/protocols/kad/src/kbucket.rs b/protocols/kad/src/kbucket.rs index b3399944..3158cc48 100644 --- a/protocols/kad/src/kbucket.rs +++ b/protocols/kad/src/kbucket.rs @@ -68,6 +68,8 @@ mod bucket; mod entry; +#[allow(clippy::ptr_offset_with_cast)] +#[allow(clippy::assign_op_pattern)] mod key; mod sub_bucket; mod swamp; diff --git a/protocols/kad/src/kbucket/key.rs b/protocols/kad/src/kbucket/key.rs index 27adb1cc..c4ee9e82 100644 --- a/protocols/kad/src/kbucket/key.rs +++ b/protocols/kad/src/kbucket/key.rs @@ -25,6 +25,7 @@ use sha2::{Digest, Sha256}; use std::borrow::Borrow; use std::hash::{Hash, Hasher}; use uint::*; +use crate::record; construct_uint! { /// 256-bit unsigned integer. @@ -54,7 +55,7 @@ impl Key { /// [`Key::into_preimage`]. pub fn new(preimage: T) -> Key where - T: Borrow<[u8]>, + T: Borrow<[u8]> { let bytes = KeyBytes::new(preimage.borrow()); Key { preimage, bytes } @@ -73,7 +74,7 @@ impl Key { /// Computes the distance of the keys according to the XOR metric. 
pub fn distance(&self, other: &U) -> Distance where - U: AsRef, + U: AsRef { self.bytes.distance(other) } @@ -114,6 +115,18 @@ impl From for Key { } } +impl From> for Key> { + fn from(b: Vec) -> Self { + Key::new(b) + } +} + +impl From for Key { + fn from(k: record::Key) -> Self { + Key::new(k) + } +} + impl AsRef for Key { fn as_ref(&self) -> &KeyBytes { &self.bytes @@ -143,7 +156,7 @@ impl KeyBytes { /// value through a random oracle. pub fn new(value: T) -> Self where - T: Borrow<[u8]>, + T: Borrow<[u8]> { KeyBytes(Sha256::digest(value.borrow())) } @@ -151,7 +164,7 @@ impl KeyBytes { /// Computes the distance of the keys according to the XOR metric. pub fn distance(&self, other: &U) -> Distance where - U: AsRef, + U: AsRef { let a = U256::from(self.0.as_slice()); let b = U256::from(other.as_ref().0.as_slice()); diff --git a/protocols/kad/src/protocol.rs b/protocols/kad/src/protocol.rs index 3e5c5498..034848ab 100644 --- a/protocols/kad/src/protocol.rs +++ b/protocols/kad/src/protocol.rs @@ -31,7 +31,7 @@ use codec::UviBytes; use crate::dht_proto as proto; use crate::record::{self, Record}; use futures::prelude::*; -use futures_codec::Framed; +use asynchronous_codec::Framed; use libp2p_core::{Multiaddr, PeerId}; use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use prost::Message; @@ -133,13 +133,16 @@ impl TryFrom for KadPeer { for cert in peer.certificates.into_iter() { let mut chain = Vec::with_capacity(cert.chain.len()); for trust in cert.chain.into_iter() { - let issued_for = PublicKey::decode(trust.issued_for.as_slice()) + let issued_for = fluence_identity::PublicKey::from_bytes(trust.issued_for.as_slice()) .map_err(|e| invalid_data(format!("invalid issued_for: {}", e).as_str()) )?; let expires_at: Duration = Duration::from_secs(trust.expires_at_secs); let issued_at: Duration = Duration::from_secs(trust.issued_at_secs); - let signature: Vec = trust.signature; + let signature = fluence_identity::Signature::from_bytes(&trust.signature) 
+ .map_err(|e| + invalid_data(format!("invalid signature: {}", e).as_str()) + )?; let trust = Trust::new(issued_for, expires_at, issued_at, signature); chain.push(trust); @@ -163,9 +166,9 @@ impl Into for KadPeer { proto::Certificate { chain: cert.chain.into_iter().map(|trust| { proto::Trust { - issued_for: trust.issued_for.encode().to_vec(), + issued_for: trust.issued_for.to_bytes().to_vec(), expires_at_secs: trust.expires_at.as_secs(), - signature: trust.signature, + signature: trust.signature.to_bytes().to_vec(), issued_at_secs: trust.issued_at.as_secs(), } }).collect(), diff --git a/protocols/kad/src/query.rs b/protocols/kad/src/query.rs index 4d1fcd9f..bcfb198b 100644 --- a/protocols/kad/src/query.rs +++ b/protocols/kad/src/query.rs @@ -238,9 +238,9 @@ impl QueryPool { } if self.queries.is_empty() { - return QueryPoolState::Idle + QueryPoolState::Idle } else { - return QueryPoolState::Waiting(None) + QueryPoolState::Waiting(None) } } } diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs index 6a21282e..8dc7cb29 100644 --- a/protocols/kad/src/query/peers/closest.rs +++ b/protocols/kad/src/query/peers/closest.rs @@ -154,7 +154,7 @@ impl ClosestPeersIter { return false } - let key = Key::from(peer.clone()); + let key = Key::from(*peer); let distance = key.distance(&self.target); // Mark the peer as succeeded. 
@@ -233,7 +233,7 @@ impl ClosestPeersIter { return false } - let key = Key::from(peer.clone()); + let key = Key::from(*peer); let distance = key.distance(&self.target); match self.closest_peers.entry(distance) { diff --git a/protocols/kad/src/query/peers/fixed.rs b/protocols/kad/src/query/peers/fixed.rs index ba4de596..b816ea9c 100644 --- a/protocols/kad/src/query/peers/fixed.rs +++ b/protocols/kad/src/query/peers/fixed.rs @@ -131,7 +131,7 @@ impl FixedPeersIter { pub fn next(&mut self) -> PeersIterState<'_> { match &mut self.state { - State::Finished => return PeersIterState::Finished, + State::Finished => PeersIterState::Finished, State::Waiting { num_waiting } => { if *num_waiting >= self.parallelism.get() { return PeersIterState::WaitingAtCapacity @@ -144,7 +144,7 @@ impl FixedPeersIter { } else { return PeersIterState::Waiting(None) } - Some(p) => match self.peers.entry(p.clone()) { + Some(p) => match self.peers.entry(p) { Entry::Occupied(_) => {} // skip duplicates Entry::Vacant(e) => { *num_waiting += 1; diff --git a/protocols/kad/src/record/store/memory.rs b/protocols/kad/src/record/store/memory.rs index c0bb219a..7d8b3b18 100644 --- a/protocols/kad/src/record/store/memory.rs +++ b/protocols/kad/src/record/store/memory.rs @@ -205,7 +205,7 @@ impl<'a> RecordStore<'a> for MemoryStore { let p = providers.remove(i); self.provided.remove(&p); } - if providers.len() == 0 { + if providers.is_empty() { e.remove(); } } diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index d9621683..529e1337 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,3 +1,28 @@ +# 0.29.0 [unreleased] + +- Introduce `MdnsConfig` with configurable TTL of discovered peer + records and configurable multicast query interval. The default + query interval is increased from 20 seconds to 5 minutes, to + significantly reduce bandwidth usage. 
To ensure timely peer + discovery in the majority of cases, a multicast query is + initiated whenever a change on a network interface is detected, + which includes MDNS initialisation at node startup. If necessary + the MDNS query interval can be reduced via the `MdnsConfig`. + The `MdnsService` has been removed from the public API, making + it compulsory that all uses occur through the `Mdns` `NetworkBehaviour`. + An `MdnsConfig` must now be given to `Mdns::new()`. + [PR 1977](https://github.com/libp2p/rust-libp2p/pull/1977). + +- Update `libp2p-swarm`. + +# 0.28.1 [2021-02-15] + +- Update dependencies. + +# 0.28.0 [2021-01-12] + +- Update dependencies. + # 0.27.0 [2020-12-17] - Update `libp2p-swarm` and `libp2p-core`. diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index 7535ee02..1069958a 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "fluence-fork-libp2p-mdns" edition = "2018" -version = "0.27.1" +version = "0.29.0" description = "Implementation of the libp2p mDNS discovery method" authors = ["Parity Technologies "] license = "MIT" @@ -13,24 +13,24 @@ categories = ["network-programming", "asynchronous"] name = "libp2p_mdns" [dependencies] -async-io = "1.3.0" -data-encoding = "2.3.1" +async-io = "1.3.1" +data-encoding = "2.3.2" dns-parser = "0.8.0" -futures = "0.3.8" -if-watch = "0.1.6" +futures = "0.3.13" +if-watch = "0.2.0" lazy_static = "1.4.0" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } -libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } -log = "0.4.11" -rand = "0.7.3" -smallvec = "1.5.0" -socket2 = { version = "0.3.17", features = ["reuseport"] } +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } +libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } +log = "0.4.14" +rand = "0.8.3" +smallvec = 
"1.6.1" +socket2 = { version = "0.3.19", features = ["reuseport"] } void = "1.0.2" [dev-dependencies] -async-std = "1.7.0" +async-std = "1.9.0" if-addrs = "0.6.5" -tokio = { version = "0.3.4", default-features = false, features = ["rt", "rt-multi-thread"] } +tokio = { version = "1.2.0", default-features = false, features = ["rt", "rt-multi-thread"] } [package.metadata.workspaces] independent = true diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index 6d3dcde6..19040f37 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -18,33 +18,80 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::service::{MdnsPacket, MdnsService, build_query_response, build_service_discovery_response}; -use async_io::Timer; +use crate::dns::{build_query, build_query_response, build_service_discovery_response}; +use crate::query::MdnsPacket; +use async_io::{Async, Timer}; use futures::prelude::*; +use if_watch::{IfEvent, IfWatcher}; +use lazy_static::lazy_static; use libp2p_core::{ - Multiaddr, - PeerId, - address_translation, - connection::ConnectionId, - multiaddr::Protocol + address_translation, connection::ConnectionId, multiaddr::Protocol, Multiaddr, PeerId, }; use libp2p_swarm::{ - NetworkBehaviour, - NetworkBehaviourAction, - PollParameters, - ProtocolsHandler, - protocols_handler::DummyProtocolsHandler + protocols_handler::DummyProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, + PollParameters, ProtocolsHandler, }; use smallvec::SmallVec; -use std::{cmp, fmt, io, iter, mem, pin::Pin, time::{Duration, Instant}, task::Context, task::Poll}; +use socket2::{Domain, Socket, Type}; +use std::{ + cmp, + collections::VecDeque, + fmt, io, iter, + net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, + pin::Pin, + task::Context, + task::Poll, + time::{Duration, Instant}, +}; -const MDNS_RESPONSE_TTL: std::time::Duration = Duration::from_secs(5 * 60); +lazy_static! 
{ + static ref IPV4_MDNS_MULTICAST_ADDRESS: SocketAddr = + SocketAddr::from((Ipv4Addr::new(224, 0, 0, 251), 5353)); +} + +pub struct MdnsConfig { + /// TTL to use for mdns records. + pub ttl: Duration, + /// Interval at which to poll the network for new peers. This isn't + /// necessary during normal operation but avoids the case that an + /// initial packet was lost and not discovering any peers until a new + /// peer joins the network. Receiving an mdns packet resets the timer + /// preventing unnecessary traffic. + pub query_interval: Duration, +} + +impl Default for MdnsConfig { + fn default() -> Self { + Self { + ttl: Duration::from_secs(6 * 60), + query_interval: Duration::from_secs(5 * 60), + } + } +} /// A `NetworkBehaviour` for mDNS. Automatically discovers peers on the local network and adds /// them to the topology. +#[derive(Debug)] pub struct Mdns { - /// The inner service. - service: MdnsBusyWrapper, + /// Main socket for listening. + recv_socket: Async, + + /// Query socket for making queries. + send_socket: Async, + + /// Iface watcher. + if_watch: IfWatcher, + + /// Buffer used for receiving data from the main socket. + /// RFC6762 discourages packets larger than the interface MTU, but allows sizes of up to 9000 + /// bytes, if it can be ensured that all participating devices can handle such large packets. + /// For computers with several interfaces and IP addresses responses can easily reach sizes in + /// the range of 3000 bytes, so 4096 seems sensible for now. For more information see + /// [rfc6762](https://tools.ietf.org/html/rfc6762#page-46). + recv_buffer: [u8; 4096], + + /// Buffers pending to send on the main socket. + send_buffer: VecDeque>, /// List of nodes that we have discovered, the address, and when their TTL expires. /// @@ -56,45 +103,55 @@ pub struct Mdns { /// /// `None` if `discovered_nodes` is empty. 
closest_expiration: Option, -} -/// `MdnsService::next` takes ownership of `self`, returning a future that resolves with both itself -/// and a `MdnsPacket` (similar to the old Tokio socket send style). The two states are thus `Free` -/// with an `MdnsService` or `Busy` with a future returning the original `MdnsService` and an -/// `MdnsPacket`. -enum MdnsBusyWrapper { - Free(MdnsService), - Busy(Pin + Send>>), - Poisoned, -} + /// Queued events. + events: VecDeque, -impl fmt::Debug for MdnsBusyWrapper { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Free(service) => { - fmt.debug_struct("MdnsBusyWrapper::Free") - .field("service", service) - .finish() - }, - Self::Busy(_) => { - fmt.debug_struct("MdnsBusyWrapper::Busy") - .finish() - } - Self::Poisoned => { - fmt.debug_struct("MdnsBusyWrapper::Poisoned") - .finish() - } - } - } + /// Discovery interval. + query_interval: Duration, + + /// Record ttl. + ttl: Duration, + + /// Discovery timer. + timeout: Timer, } impl Mdns { /// Builds a new `Mdns` behaviour. - pub async fn new() -> io::Result { + pub async fn new(config: MdnsConfig) -> io::Result { + let recv_socket = { + let socket = Socket::new( + Domain::ipv4(), + Type::dgram(), + Some(socket2::Protocol::udp()), + )?; + socket.set_reuse_address(true)?; + #[cfg(unix)] + socket.set_reuse_port(true)?; + socket.bind(&SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 5353).into())?; + let socket = socket.into_udp_socket(); + socket.set_multicast_loop_v4(true)?; + socket.set_multicast_ttl_v4(255)?; + Async::new(socket)? + }; + let send_socket = { + let socket = UdpSocket::bind((Ipv4Addr::UNSPECIFIED, 0))?; + Async::new(socket)? 
+ }; + let if_watch = if_watch::IfWatcher::new().await?; Ok(Self { - service: MdnsBusyWrapper::Free(MdnsService::new().await?), + recv_socket, + send_socket, + if_watch, + recv_buffer: [0; 4096], + send_buffer: Default::default(), discovered_nodes: SmallVec::new(), closest_expiration: None, + events: Default::default(), + query_interval: config.query_interval, + ttl: config.ttl, + timeout: Timer::interval(config.query_interval), }) } @@ -107,6 +164,77 @@ impl Mdns { pub fn discovered_nodes(&self) -> impl ExactSizeIterator { self.discovered_nodes.iter().map(|(p, _, _)| p) } + + fn inject_mdns_packet(&mut self, packet: MdnsPacket, params: &impl PollParameters) { + self.timeout.set_interval(self.query_interval); + match packet { + MdnsPacket::Query(query) => { + for packet in build_query_response( + query.query_id(), + *params.local_peer_id(), + params.listened_addresses(), + self.ttl, + ) { + self.send_buffer.push_back(packet); + } + } + MdnsPacket::Response(response) => { + // We replace the IP address with the address we observe the + // remote as and the address they listen on. 
+ let obs_ip = Protocol::from(response.remote_addr().ip()); + let obs_port = Protocol::Udp(response.remote_addr().port()); + let observed: Multiaddr = iter::once(obs_ip).chain(iter::once(obs_port)).collect(); + + let mut discovered: SmallVec<[_; 4]> = SmallVec::new(); + for peer in response.discovered_peers() { + if peer.id() == params.local_peer_id() { + continue; + } + + let new_expiration = Instant::now() + peer.ttl(); + + let mut addrs: Vec = Vec::new(); + for addr in peer.addresses() { + if let Some(new_addr) = address_translation(&addr, &observed) { + addrs.push(new_addr.clone()) + } + addrs.push(addr.clone()) + } + + for addr in addrs { + if let Some((_, _, cur_expires)) = self + .discovered_nodes + .iter_mut() + .find(|(p, a, _)| p == peer.id() && *a == addr) + { + *cur_expires = cmp::max(*cur_expires, new_expiration); + } else { + self.discovered_nodes + .push((*peer.id(), addr.clone(), new_expiration)); + } + discovered.push((*peer.id(), addr)); + } + } + + self.closest_expiration = self + .discovered_nodes + .iter() + .fold(None, |exp, &(_, _, elem_exp)| { + Some(exp.map(|exp| cmp::min(exp, elem_exp)).unwrap_or(elem_exp)) + }) + .map(Timer::at); + + self.events + .push_back(MdnsEvent::Discovered(DiscoveredAddrsIter { + inner: discovered.into_iter(), + })); + } + MdnsPacket::ServiceDiscovery(disc) => { + let resp = build_service_discovery_response(disc.query_id(), self.ttl); + self.send_buffer.push_back(resp); + } + } + } } impl NetworkBehaviour for Mdns { @@ -149,138 +277,102 @@ impl NetworkBehaviour for Mdns { Self::OutEvent, >, > { - // Remove expired peers. 
- if let Some(ref mut closest_expiration) = self.closest_expiration { - match Pin::new(closest_expiration).poll(cx) { - Poll::Ready(now) => { - let mut expired = SmallVec::<[(PeerId, Multiaddr); 4]>::new(); - while let Some(pos) = self.discovered_nodes.iter().position(|(_, _, exp)| *exp < now) { - let (peer_id, addr, _) = self.discovered_nodes.remove(pos); - expired.push((peer_id, addr)); + while let Poll::Ready(event) = Pin::new(&mut self.if_watch).poll(cx) { + let multicast = From::from([224, 0, 0, 251]); + let socket = self.recv_socket.get_ref(); + match event { + Ok(IfEvent::Up(inet)) => { + if inet.addr().is_loopback() { + continue; } - - if !expired.is_empty() { - let event = MdnsEvent::Expired(ExpiredAddrsIter { - inner: expired.into_iter(), - }); - - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); + if let IpAddr::V4(addr) = inet.addr() { + log::trace!("joining multicast on iface {}", addr); + if let Err(err) = socket.join_multicast_v4(&multicast, &addr) { + log::error!("join multicast failed: {}", err); + } else { + self.send_buffer.push_back(build_query()); + } } - }, - Poll::Pending => (), + } + Ok(IfEvent::Down(inet)) => { + if inet.addr().is_loopback() { + continue; + } + if let IpAddr::V4(addr) = inet.addr() { + log::trace!("leaving multicast on iface {}", addr); + if let Err(err) = socket.leave_multicast_v4(&multicast, &addr) { + log::error!("leave multicast failed: {}", err); + } + } + } + Err(err) => log::error!("if watch returned an error: {}", err), } } - - // Polling the mDNS service, and obtain the list of nodes discovered this round. 
- let discovered = loop { - let service = mem::replace(&mut self.service, MdnsBusyWrapper::Poisoned); - - let packet = match service { - MdnsBusyWrapper::Free(service) => { - self.service = MdnsBusyWrapper::Busy(Box::pin(service.next())); - continue; - }, - MdnsBusyWrapper::Busy(mut fut) => { - match fut.as_mut().poll(cx) { - Poll::Ready((service, packet)) => { - self.service = MdnsBusyWrapper::Free(service); - packet - }, - Poll::Pending => { - self.service = MdnsBusyWrapper::Busy(fut); - return Poll::Pending; - } + // Poll receive socket. + while self.recv_socket.poll_readable(cx).is_ready() { + match self + .recv_socket + .recv_from(&mut self.recv_buffer) + .now_or_never() + { + Some(Ok((len, from))) => { + if let Some(packet) = MdnsPacket::new_from_bytes(&self.recv_buffer[..len], from) + { + self.inject_mdns_packet(packet, params); } - }, - MdnsBusyWrapper::Poisoned => panic!("Mdns poisoned"), - }; - - match packet { - MdnsPacket::Query(query) => { - // MaybeBusyMdnsService should always be Free. - if let MdnsBusyWrapper::Free(ref mut service) = self.service { - for packet in build_query_response( - query.query_id(), - params.local_peer_id().clone(), - params.listened_addresses().into_iter(), - MDNS_RESPONSE_TTL, - ) { - service.enqueue_response(packet) - } - } else { debug_assert!(false); } - }, - MdnsPacket::Response(response) => { - // We replace the IP address with the address we observe the - // remote as and the address they listen on. 
- let obs_ip = Protocol::from(response.remote_addr().ip()); - let obs_port = Protocol::Udp(response.remote_addr().port()); - let observed: Multiaddr = iter::once(obs_ip) - .chain(iter::once(obs_port)) - .collect(); - - let mut discovered: SmallVec<[_; 4]> = SmallVec::new(); - for peer in response.discovered_peers() { - if peer.id() == params.local_peer_id() { - continue; - } - - let new_expiration = Instant::now() + peer.ttl(); - - let mut addrs: Vec = Vec::new(); - for addr in peer.addresses() { - if let Some(new_addr) = address_translation(&addr, &observed) { - addrs.push(new_addr.clone()) - } - addrs.push(addr.clone()) - } - - for addr in addrs { - if let Some((_, _, cur_expires)) = self.discovered_nodes.iter_mut() - .find(|(p, a, _)| p == peer.id() && *a == addr) - { - *cur_expires = cmp::max(*cur_expires, new_expiration); - } else { - self.discovered_nodes.push((peer.id().clone(), addr.clone(), new_expiration)); - } - - discovered.push((peer.id().clone(), addr)); - } - } - - break discovered; - }, - MdnsPacket::ServiceDiscovery(disc) => { - // MaybeBusyMdnsService should always be Free. - if let MdnsBusyWrapper::Free(ref mut service) = self.service { - let resp = build_service_discovery_response( - disc.query_id(), - MDNS_RESPONSE_TTL, - ); - service.enqueue_response(resp); - } else { debug_assert!(false); } - }, + } + Some(Err(err)) => log::error!("Failed reading datagram: {}", err), + _ => {} } - }; + } + if Pin::new(&mut self.timeout).poll_next(cx).is_ready() { + self.send_buffer.push_back(build_query()); + } + // Send responses. + if !self.send_buffer.is_empty() { + while self.send_socket.poll_writable(cx).is_ready() { + if let Some(packet) = self.send_buffer.pop_front() { + match self + .send_socket + .send_to(&packet, *IPV4_MDNS_MULTICAST_ADDRESS) + .now_or_never() + { + Some(Ok(_)) => {} + Some(Err(err)) => log::error!("{}", err), + None => self.send_buffer.push_front(packet), + } + } else { + break; + } + } + } + // Emit discovered event. 
+ if let Some(event) = self.events.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); + } + // Emit expired event. + if let Some(ref mut closest_expiration) = self.closest_expiration { + if let Poll::Ready(now) = Pin::new(closest_expiration).poll(cx) { + let mut expired = SmallVec::<[(PeerId, Multiaddr); 4]>::new(); + while let Some(pos) = self + .discovered_nodes + .iter() + .position(|(_, _, exp)| *exp < now) + { + let (peer_id, addr, _) = self.discovered_nodes.remove(pos); + expired.push((peer_id, addr)); + } - // Getting this far implies that we discovered new nodes. As the final step, we need to - // refresh `closest_expiration`. - self.closest_expiration = self.discovered_nodes.iter() - .fold(None, |exp, &(_, _, elem_exp)| { - Some(exp.map(|exp| cmp::min(exp, elem_exp)).unwrap_or(elem_exp)) - }) - .map(Timer::at); + if !expired.is_empty() { + let event = MdnsEvent::Expired(ExpiredAddrsIter { + inner: expired.into_iter(), + }); - Poll::Ready(NetworkBehaviourAction::GenerateEvent(MdnsEvent::Discovered(DiscoveredAddrsIter { - inner: discovered.into_iter(), - }))) - } -} - -impl fmt::Debug for Mdns { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Mdns") - .field("service", &self.service) - .finish() + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); + } + } + } + Poll::Pending } } @@ -299,7 +391,7 @@ pub enum MdnsEvent { /// Iterator that produces the list of addresses that have been discovered. 
pub struct DiscoveredAddrsIter { - inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]> + inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]>, } impl Iterator for DiscoveredAddrsIter { @@ -316,19 +408,17 @@ impl Iterator for DiscoveredAddrsIter { } } -impl ExactSizeIterator for DiscoveredAddrsIter { -} +impl ExactSizeIterator for DiscoveredAddrsIter {} impl fmt::Debug for DiscoveredAddrsIter { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("DiscoveredAddrsIter") - .finish() + fmt.debug_struct("DiscoveredAddrsIter").finish() } } /// Iterator that produces the list of addresses that have expired. pub struct ExpiredAddrsIter { - inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]> + inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]>, } impl Iterator for ExpiredAddrsIter { @@ -345,12 +435,10 @@ impl Iterator for ExpiredAddrsIter { } } -impl ExactSizeIterator for ExpiredAddrsIter { -} +impl ExactSizeIterator for ExpiredAddrsIter {} impl fmt::Debug for ExpiredAddrsIter { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("ExpiredAddrsIter") - .finish() + fmt.debug_struct("ExpiredAddrsIter").finish() } } diff --git a/protocols/mdns/src/dns.rs b/protocols/mdns/src/dns.rs index 4627d7d5..e0645fea 100644 --- a/protocols/mdns/src/dns.rs +++ b/protocols/mdns/src/dns.rs @@ -114,7 +114,7 @@ pub fn build_query_response( let ttl = duration_to_secs(ttl); // Add a limit to 2^16-1 addresses, as the protocol limits to this number. - let mut addresses = addresses.take(65535); + let addresses = addresses.take(65535); let peer_id_bytes = encode_peer_id(&peer_id); debug_assert!(peer_id_bytes.len() <= 0xffff); @@ -127,7 +127,7 @@ pub fn build_query_response( // Encode the addresses as TXT records, and multiple TXT records into a // response packet. 
- while let Some(addr) = addresses.next() { + for addr in addresses { let txt_to_send = format!("dnsaddr={}/p2p/{}", addr.to_string(), peer_id.to_base58()); let mut txt_record = Vec::with_capacity(txt_to_send.len()); match append_txt_record(&mut txt_record, &peer_id_bytes, ttl, &txt_to_send) { @@ -203,7 +203,7 @@ pub fn build_service_discovery_response(id: u16, ttl: Duration) -> MdnsPacket { } /// Constructs an MDNS query response packet for an address lookup. -fn query_response_packet(id: u16, peer_id: &Vec, records: &Vec>, ttl: u32) -> MdnsPacket { +fn query_response_packet(id: u16, peer_id: &[u8], records: &[Vec], ttl: u32) -> MdnsPacket { let mut out = Vec::with_capacity(records.len() * MAX_TXT_RECORD_SIZE); append_u16(&mut out, id); @@ -264,7 +264,9 @@ fn append_u16(out: &mut Vec, value: u16) { /// be compatible with RFC 1035. fn segment_peer_id(peer_id: String) -> String { // Guard for the most common case - if peer_id.len() <= MAX_LABEL_LENGTH { return peer_id } + if peer_id.len() <= MAX_LABEL_LENGTH { + return peer_id; + } // This will only perform one allocation except in extreme circumstances. let mut out = String::with_capacity(peer_id.len() + 8); @@ -347,7 +349,7 @@ fn append_character_string(out: &mut Vec, ascii_str: &str) -> Result<(), Mdn } /// Appends a TXT record to `out`. 
-fn append_txt_record<'a>( +fn append_txt_record( out: &mut Vec, name: &[u8], ttl_secs: u32, @@ -391,8 +393,10 @@ impl fmt::Display for MdnsResponseError { MdnsResponseError::TxtRecordTooLong => { write!(f, "TXT record invalid because it is too long") } - MdnsResponseError::NonAsciiMultiaddr => - write!(f, "A multiaddr contains non-ASCII characters when serialized"), + MdnsResponseError::NonAsciiMultiaddr => write!( + f, + "A multiaddr contains non-ASCII characters when serialized" + ), } } } @@ -414,7 +418,9 @@ mod tests { #[test] fn build_query_response_correct() { - let my_peer_id = identity::Keypair::generate_ed25519().public().into_peer_id(); + let my_peer_id = identity::Keypair::generate_ed25519() + .public() + .into_peer_id(); let addr1 = "/ip4/1.2.3.4/tcp/5000".parse().unwrap(); let addr2 = "/ip6/::1/udp/10000".parse().unwrap(); let packets = build_query_response( @@ -446,7 +452,10 @@ mod tests { assert_eq!(segment_peer_id(str_63.clone()), str_63); assert_eq!(segment_peer_id(str_64), [&str_63, "x"].join(".")); - assert_eq!(segment_peer_id(str_126), [&str_63, str_63.as_str()].join(".")); + assert_eq!( + segment_peer_id(str_126), + [&str_63, str_63.as_str()].join(".") + ); assert_eq!(segment_peer_id(str_127), [&str_63, &str_63, "x"].join(".")); } diff --git a/protocols/mdns/src/lib.rs b/protocols/mdns/src/lib.rs index 1d3ffa03..23806138 100644 --- a/protocols/mdns/src/lib.rs +++ b/protocols/mdns/src/lib.rs @@ -35,12 +35,8 @@ const SERVICE_NAME: &[u8] = b"_p2p._udp.local"; /// The meta query for looking up the `SERVICE_NAME`. 
const META_QUERY_SERVICE: &[u8] = b"_services._dns-sd._udp.local"; -pub use crate::{ - behaviour::{Mdns, MdnsEvent}, - service::MdnsService, -}; +pub use crate::behaviour::{Mdns, MdnsConfig, MdnsEvent}; mod behaviour; mod dns; - -pub mod service; +mod query; diff --git a/protocols/mdns/src/query.rs b/protocols/mdns/src/query.rs new file mode 100644 index 00000000..c605298a --- /dev/null +++ b/protocols/mdns/src/query.rs @@ -0,0 +1,305 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use crate::{dns, META_QUERY_SERVICE, SERVICE_NAME}; +use dns_parser::{Packet, RData}; +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + PeerId, +}; +use std::{convert::TryFrom, fmt, net::SocketAddr, str, time::Duration}; + +/// A valid mDNS packet received by the service. +#[derive(Debug)] +pub enum MdnsPacket { + /// A query made by a remote. 
+ Query(MdnsQuery), + /// A response sent by a remote in response to one of our queries. + Response(MdnsResponse), + /// A request for service discovery. + ServiceDiscovery(MdnsServiceDiscovery), +} + +impl MdnsPacket { + pub fn new_from_bytes(buf: &[u8], from: SocketAddr) -> Option { + match Packet::parse(buf) { + Ok(packet) => { + if packet.header.query { + if packet + .questions + .iter() + .any(|q| q.qname.to_string().as_bytes() == SERVICE_NAME) + { + let query = MdnsPacket::Query(MdnsQuery { + from, + query_id: packet.header.id, + }); + Some(query) + } else if packet + .questions + .iter() + .any(|q| q.qname.to_string().as_bytes() == META_QUERY_SERVICE) + { + // TODO: what if multiple questions, one with SERVICE_NAME and one with META_QUERY_SERVICE? + let discovery = MdnsPacket::ServiceDiscovery(MdnsServiceDiscovery { + from, + query_id: packet.header.id, + }); + Some(discovery) + } else { + None + } + } else { + let resp = MdnsPacket::Response(MdnsResponse::new(packet, from)); + Some(resp) + } + } + Err(err) => { + log::debug!("Parsing mdns packet failed: {:?}", err); + None + } + } + } +} + +/// A received mDNS query. +pub struct MdnsQuery { + /// Sender of the address. + from: SocketAddr, + /// Id of the received DNS query. We need to pass this ID back in the results. + query_id: u16, +} + +impl MdnsQuery { + /// Source address of the packet. + pub fn remote_addr(&self) -> &SocketAddr { + &self.from + } + + /// Query id of the packet. + pub fn query_id(&self) -> u16 { + self.query_id + } +} + +impl fmt::Debug for MdnsQuery { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MdnsQuery") + .field("from", self.remote_addr()) + .field("query_id", &self.query_id) + .finish() + } +} + +/// A received mDNS service discovery query. +pub struct MdnsServiceDiscovery { + /// Sender of the address. + from: SocketAddr, + /// Id of the received DNS query. We need to pass this ID back in the results. 
+ query_id: u16, +} + +impl MdnsServiceDiscovery { + /// Source address of the packet. + pub fn remote_addr(&self) -> &SocketAddr { + &self.from + } + + /// Query id of the packet. + pub fn query_id(&self) -> u16 { + self.query_id + } +} + +impl fmt::Debug for MdnsServiceDiscovery { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MdnsServiceDiscovery") + .field("from", self.remote_addr()) + .field("query_id", &self.query_id) + .finish() + } +} + +/// A received mDNS response. +pub struct MdnsResponse { + peers: Vec, + from: SocketAddr, +} + +impl MdnsResponse { + /// Creates a new `MdnsResponse` based on the provided `Packet`. + pub fn new(packet: Packet<'_>, from: SocketAddr) -> MdnsResponse { + let peers = packet + .answers + .iter() + .filter_map(|record| { + if record.name.to_string().as_bytes() != SERVICE_NAME { + return None; + } + + let record_value = match record.data { + RData::PTR(record) => record.0.to_string(), + _ => return None, + }; + + let mut peer_name = match record_value.rsplitn(4, |c| c == '.').last() { + Some(n) => n.to_owned(), + None => return None, + }; + + // if we have a segmented name, remove the '.' + peer_name.retain(|c| c != '.'); + + let peer_id = match data_encoding::BASE32_DNSCURVE.decode(peer_name.as_bytes()) { + Ok(bytes) => match PeerId::from_bytes(&bytes) { + Ok(id) => id, + Err(_) => return None, + }, + Err(_) => return None, + }; + + Some(MdnsPeer::new(&packet, record_value, peer_id, record.ttl)) + }) + .collect(); + + MdnsResponse { peers, from } + } + + /// Returns the list of peers that have been reported in this packet. + /// + /// > **Note**: Keep in mind that this will also contain the responses we sent ourselves. + pub fn discovered_peers(&self) -> impl Iterator { + self.peers.iter() + } + + /// Source address of the packet. 
+ #[inline] + pub fn remote_addr(&self) -> &SocketAddr { + &self.from + } +} + +impl fmt::Debug for MdnsResponse { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MdnsResponse") + .field("from", self.remote_addr()) + .finish() + } +} + +/// A peer discovered by the service. +pub struct MdnsPeer { + addrs: Vec, + /// Id of the peer. + peer_id: PeerId, + /// TTL of the record in seconds. + ttl: u32, +} + +impl MdnsPeer { + /// Creates a new `MdnsPeer` based on the provided `Packet`. + pub fn new( + packet: &Packet<'_>, + record_value: String, + my_peer_id: PeerId, + ttl: u32, + ) -> MdnsPeer { + let addrs = packet + .additional + .iter() + .filter_map(|add_record| { + if add_record.name.to_string() != record_value { + return None; + } + + if let RData::TXT(ref txt) = add_record.data { + Some(txt) + } else { + None + } + }) + .flat_map(|txt| txt.iter()) + .filter_map(|txt| { + // TODO: wrong, txt can be multiple character strings + let addr = match dns::decode_character_string(txt) { + Ok(a) => a, + Err(_) => return None, + }; + if !addr.starts_with(b"dnsaddr=") { + return None; + } + let addr = match str::from_utf8(&addr[8..]) { + Ok(a) => a, + Err(_) => return None, + }; + let mut addr = match addr.parse::() { + Ok(a) => a, + Err(_) => return None, + }; + match addr.pop() { + Some(Protocol::P2p(peer_id)) => { + if let Ok(peer_id) = PeerId::try_from(peer_id) { + if peer_id != my_peer_id { + return None; + } + } else { + return None; + } + } + _ => return None, + }; + Some(addr) + }) + .collect(); + + MdnsPeer { + addrs, + peer_id: my_peer_id, + ttl, + } + } + + /// Returns the id of the peer. + #[inline] + pub fn id(&self) -> &PeerId { + &self.peer_id + } + + /// Returns the requested time-to-live for the record. + #[inline] + pub fn ttl(&self) -> Duration { + Duration::from_secs(u64::from(self.ttl)) + } + + /// Returns the list of addresses the peer says it is listening on. + /// + /// Filters out invalid addresses. 
+ pub fn addresses(&self) -> &Vec { + &self.addrs + } +} + +impl fmt::Debug for MdnsPeer { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MdnsPeer") + .field("peer_id", &self.peer_id) + .finish() + } +} diff --git a/protocols/mdns/src/service.rs b/protocols/mdns/src/service.rs deleted file mode 100644 index 84c535d8..00000000 --- a/protocols/mdns/src/service.rs +++ /dev/null @@ -1,709 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use crate::{SERVICE_NAME, META_QUERY_SERVICE, dns}; -use async_io::{Async, Timer}; -use dns_parser::{Packet, RData}; -use futures::{prelude::*, select}; -use if_watch::{IfEvent, IfWatcher}; -use lazy_static::lazy_static; -use libp2p_core::{multiaddr::{Multiaddr, Protocol}, PeerId}; -use log::warn; -use socket2::{Socket, Domain, Type}; -use std::{convert::TryFrom, fmt, io, net::{IpAddr, Ipv4Addr, UdpSocket, SocketAddr}, str, time::{Duration, Instant}}; - -pub use dns::{build_query_response, build_service_discovery_response}; - -lazy_static! { - static ref IPV4_MDNS_MULTICAST_ADDRESS: SocketAddr = SocketAddr::from(( - Ipv4Addr::new(224, 0, 0, 251), - 5353, - )); -} - -/// A running service that discovers libp2p peers and responds to other libp2p peers' queries on -/// the local network. -/// -/// # Usage -/// -/// In order to use mDNS to discover peers on the local network, use the `MdnsService`. This is -/// done by creating a `MdnsService` then polling it in the same way as you would poll a stream. -/// -/// Polling the `MdnsService` can produce either an `MdnsQuery`, corresponding to an mDNS query -/// received by another node on the local network, or an `MdnsResponse` corresponding to a response -/// to a query previously emitted locally. The `MdnsService` will automatically produce queries, -/// which means that you will receive responses automatically. -/// -/// When you receive an `MdnsQuery`, use the `respond` method to send back an answer to the node -/// that emitted the query. -/// -/// When you receive an `MdnsResponse`, use the provided methods to query the information received -/// in the response. 
-/// -/// # Example -/// -/// ```rust -/// # use futures::prelude::*; -/// # use futures::executor::block_on; -/// # use libp2p_core::{identity, Multiaddr, PeerId}; -/// # use libp2p_mdns::service::{MdnsPacket, build_query_response, build_service_discovery_response}; -/// # use std::{io, time::Duration, task::Poll}; -/// # fn main() { -/// # let my_peer_id = PeerId::from(identity::Keypair::generate_ed25519().public()); -/// # let my_listened_addrs: Vec = vec![]; -/// # async { -/// # let mut service = libp2p_mdns::service::MdnsService::new().await.unwrap(); -/// let _future_to_poll = async { -/// let (mut service, packet) = service.next().await; -/// -/// match packet { -/// MdnsPacket::Query(query) => { -/// println!("Query from {:?}", query.remote_addr()); -/// let packets = build_query_response( -/// query.query_id(), -/// my_peer_id.clone(), -/// vec![].into_iter(), -/// Duration::from_secs(120), -/// ); -/// for packet in packets { -/// service.enqueue_response(packet); -/// } -/// } -/// MdnsPacket::Response(response) => { -/// for peer in response.discovered_peers() { -/// println!("Discovered peer {:?}", peer.id()); -/// for addr in peer.addresses() { -/// println!("Address = {:?}", addr); -/// } -/// } -/// } -/// MdnsPacket::ServiceDiscovery(disc) => { -/// let resp = build_service_discovery_response( -/// disc.query_id(), -/// Duration::from_secs(120), -/// ); -/// service.enqueue_response(resp); -/// } -/// } -/// }; -/// # }; -/// # } -pub struct MdnsService { - /// Main socket for listening. - socket: Async, - - /// Socket for sending queries on the network. - query_socket: Async, - - /// Interval for sending queries. - query_interval: Timer, - /// Whether we send queries on the network at all. - /// Note that we still need to have an interval for querying, as we need to wake up the socket - /// regularly to recover from errors. Otherwise we could simply use an `Option`. - silent: bool, - /// Buffer used for receiving data from the main socket. 
- /// RFC6762 discourages packets larger than the interface MTU, but allows sizes of up to 9000 - /// bytes, if it can be ensured that all participating devices can handle such large packets. - /// For computers with several interfaces and IP addresses responses can easily reach sizes in - /// the range of 3000 bytes, so 4096 seems sensible for now. For more information see - /// [rfc6762](https://tools.ietf.org/html/rfc6762#page-46). - recv_buffer: [u8; 4096], - /// Buffers pending to send on the main socket. - send_buffers: Vec>, - /// Buffers pending to send on the query socket. - query_send_buffers: Vec>, - /// Iface watch. - if_watch: IfWatcher, -} - -impl MdnsService { - /// Starts a new mDNS service. - pub async fn new() -> io::Result { - Self::new_inner(false).await - } - - /// Same as `new`, but we don't automatically send queries on the network. - pub async fn silent() -> io::Result { - Self::new_inner(true).await - } - - /// Starts a new mDNS service. - async fn new_inner(silent: bool) -> io::Result { - let socket = { - let socket = Socket::new(Domain::ipv4(), Type::dgram(), Some(socket2::Protocol::udp()))?; - socket.set_reuse_address(true)?; - #[cfg(unix)] - socket.set_reuse_port(true)?; - socket.bind(&SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 5353).into())?; - let socket = socket.into_udp_socket(); - socket.set_multicast_loop_v4(true)?; - socket.set_multicast_ttl_v4(255)?; - Async::new(socket)? - }; - - // Given that we pass an IP address to bind, which does not need to be resolved, we can - // use std::net::UdpSocket::bind, instead of its async counterpart from async-std. - let query_socket = { - let socket = std::net::UdpSocket::bind((Ipv4Addr::UNSPECIFIED, 0))?; - Async::new(socket)? 
- }; - - - let if_watch = if_watch::IfWatcher::new().await?; - - Ok(Self { - socket, - query_socket, - query_interval: Timer::interval_at(Instant::now(), Duration::from_secs(20)), - silent, - recv_buffer: [0; 4096], - send_buffers: Vec::new(), - query_send_buffers: Vec::new(), - if_watch, - }) - } - - pub fn enqueue_response(&mut self, rsp: Vec) { - self.send_buffers.push(rsp); - } - - /// Returns a future resolving to itself and the next received `MdnsPacket`. - // - // **Note**: Why does `next` take ownership of itself? - // - // `MdnsService::next` needs to be called from within `NetworkBehaviour` - // implementations. Given that traits cannot have async methods the - // respective `NetworkBehaviour` implementation needs to somehow keep the - // Future returned by `MdnsService::next` across classic `poll` - // invocations. The instance method `next` can either take a reference or - // ownership of itself: - // - // 1. Taking a reference - If `MdnsService::poll` takes a reference to - // `&self` the respective `NetworkBehaviour` implementation would need to - // keep both the Future as well as its `MdnsService` instance across poll - // invocations. Given that in this case the Future would have a reference - // to `MdnsService`, the `NetworkBehaviour` implementation struct would - // need to be self-referential which is not possible without unsafe code in - // Rust. - // - // 2. Taking ownership - Instead `MdnsService::next` takes ownership of - // self and returns it alongside an `MdnsPacket` once the actual future - // resolves, not forcing self-referential structures on the caller. - pub async fn next(mut self) -> (Self, MdnsPacket) { - loop { - // Flush the send buffer of the main socket. 
- while !self.send_buffers.is_empty() { - let to_send = self.send_buffers.remove(0); - - match self.socket.send_to(&to_send, *IPV4_MDNS_MULTICAST_ADDRESS).await { - Ok(bytes_written) => { - debug_assert_eq!(bytes_written, to_send.len()); - } - Err(_) => { - // Errors are non-fatal because they can happen for example if we lose - // connection to the network. - self.send_buffers.clear(); - break; - } - } - } - - // Flush the query send buffer. - while !self.query_send_buffers.is_empty() { - let to_send = self.query_send_buffers.remove(0); - - match self.query_socket.send_to(&to_send, *IPV4_MDNS_MULTICAST_ADDRESS).await { - Ok(bytes_written) => { - debug_assert_eq!(bytes_written, to_send.len()); - } - Err(_) => { - // Errors are non-fatal because they can happen for example if we lose - // connection to the network. - self.query_send_buffers.clear(); - break; - } - } - } - - select! { - res = self.socket.recv_from(&mut self.recv_buffer).fuse() => match res { - Ok((len, from)) => { - match MdnsPacket::new_from_bytes(&self.recv_buffer[..len], from) { - Some(packet) => return (self, packet), - None => {}, - } - }, - Err(_) => { - // Errors are non-fatal and can happen if we get disconnected from the network. - // The query interval will wake up the task at some point so that we can try again. - }, - }, - _ = self.query_interval.next().fuse() => { - // Ensure underlying task is woken up on the next interval tick. 
- while let Some(_) = self.query_interval.next().now_or_never() {}; - - if !self.silent { - let query = dns::build_query(); - self.query_send_buffers.push(query.to_vec()); - } - }, - event = self.if_watch.next().fuse() => { - let multicast = From::from([224, 0, 0, 251]); - let socket = self.socket.get_ref(); - match event { - Ok(IfEvent::Up(inet)) => { - if inet.addr().is_loopback() { - continue; - } - if let IpAddr::V4(addr) = inet.addr() { - log::trace!("joining multicast on iface {}", addr); - if let Err(err) = socket.join_multicast_v4(&multicast, &addr) { - log::error!("join multicast failed: {}", err); - } - } - } - Ok(IfEvent::Down(inet)) => { - if inet.addr().is_loopback() { - continue; - } - if let IpAddr::V4(addr) = inet.addr() { - log::trace!("leaving multicast on iface {}", addr); - if let Err(err) = socket.leave_multicast_v4(&multicast, &addr) { - log::error!("leave multicast failed: {}", err); - } - } - } - Err(err) => log::error!("if watch returned an error: {}", err), - } - } - }; - } - } -} - -impl fmt::Debug for MdnsService { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("$service_name") - .field("silent", &self.silent) - .finish() - } -} - -/// A valid mDNS packet received by the service. -#[derive(Debug)] -pub enum MdnsPacket { - /// A query made by a remote. - Query(MdnsQuery), - /// A response sent by a remote in response to one of our queries. - Response(MdnsResponse), - /// A request for service discovery. 
- ServiceDiscovery(MdnsServiceDiscovery), -} - -impl MdnsPacket { - fn new_from_bytes(buf: &[u8], from: SocketAddr) -> Option { - match Packet::parse(buf) { - Ok(packet) => { - if packet.header.query { - if packet - .questions - .iter() - .any(|q| q.qname.to_string().as_bytes() == SERVICE_NAME) - { - let query = MdnsPacket::Query(MdnsQuery { - from, - query_id: packet.header.id, - }); - return Some(query); - } else if packet - .questions - .iter() - .any(|q| q.qname.to_string().as_bytes() == META_QUERY_SERVICE) - { - // TODO: what if multiple questions, one with SERVICE_NAME and one with META_QUERY_SERVICE? - let discovery = MdnsPacket::ServiceDiscovery( - MdnsServiceDiscovery { - from, - query_id: packet.header.id, - }, - ); - return Some(discovery); - } else { - return None; - } - } else { - let resp = MdnsPacket::Response(MdnsResponse::new ( - packet, - from, - )); - return Some(resp); - } - } - Err(err) => { - warn!("Parsing mdns packet failed: {:?}", err); - return None; - } - } - } -} - -/// A received mDNS query. -pub struct MdnsQuery { - /// Sender of the address. - from: SocketAddr, - /// Id of the received DNS query. We need to pass this ID back in the results. - query_id: u16, -} - -impl MdnsQuery { - /// Source address of the packet. - pub fn remote_addr(&self) -> &SocketAddr { - &self.from - } - - /// Query id of the packet. - pub fn query_id(&self) -> u16 { - self.query_id - } -} - -impl fmt::Debug for MdnsQuery { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MdnsQuery") - .field("from", self.remote_addr()) - .field("query_id", &self.query_id) - .finish() - } -} - -/// A received mDNS service discovery query. -pub struct MdnsServiceDiscovery { - /// Sender of the address. - from: SocketAddr, - /// Id of the received DNS query. We need to pass this ID back in the results. - query_id: u16, -} - -impl MdnsServiceDiscovery { - /// Source address of the packet. 
- pub fn remote_addr(&self) -> &SocketAddr { - &self.from - } - - /// Query id of the packet. - pub fn query_id(&self) -> u16 { - self.query_id - } -} - -impl fmt::Debug for MdnsServiceDiscovery { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MdnsServiceDiscovery") - .field("from", self.remote_addr()) - .field("query_id", &self.query_id) - .finish() - } -} - -/// A received mDNS response. -pub struct MdnsResponse { - peers: Vec, - from: SocketAddr, -} - -impl MdnsResponse { - /// Creates a new `MdnsResponse` based on the provided `Packet`. - fn new(packet: Packet<'_>, from: SocketAddr) -> MdnsResponse { - let peers = packet.answers.iter().filter_map(|record| { - if record.name.to_string().as_bytes() != SERVICE_NAME { - return None; - } - - let record_value = match record.data { - RData::PTR(record) => record.0.to_string(), - _ => return None, - }; - - let mut peer_name = match record_value.rsplitn(4, |c| c == '.').last() { - Some(n) => n.to_owned(), - None => return None, - }; - - // if we have a segmented name, remove the '.' - peer_name.retain(|c| c != '.'); - - let peer_id = match data_encoding::BASE32_DNSCURVE.decode(peer_name.as_bytes()) { - Ok(bytes) => match PeerId::from_bytes(&bytes) { - Ok(id) => id, - Err(_) => return None, - }, - Err(_) => return None, - }; - - Some(MdnsPeer::new ( - &packet, - record_value, - peer_id, - record.ttl, - )) - }).collect(); - - MdnsResponse { - peers, - from, - } - } - - /// Returns the list of peers that have been reported in this packet. - /// - /// > **Note**: Keep in mind that this will also contain the responses we sent ourselves. - pub fn discovered_peers(&self) -> impl Iterator { - self.peers.iter() - } - - /// Source address of the packet. 
- #[inline] - pub fn remote_addr(&self) -> &SocketAddr { - &self.from - } -} - -impl fmt::Debug for MdnsResponse { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MdnsResponse") - .field("from", self.remote_addr()) - .finish() - } -} - -/// A peer discovered by the service. -pub struct MdnsPeer { - addrs: Vec, - /// Id of the peer. - peer_id: PeerId, - /// TTL of the record in seconds. - ttl: u32, -} - -impl MdnsPeer { - /// Creates a new `MdnsPeer` based on the provided `Packet`. - pub fn new(packet: &Packet<'_>, record_value: String, my_peer_id: PeerId, ttl: u32) -> MdnsPeer { - let addrs = packet - .additional - .iter() - .filter_map(|add_record| { - if add_record.name.to_string() != record_value { - return None; - } - - if let RData::TXT(ref txt) = add_record.data { - Some(txt) - } else { - None - } - }) - .flat_map(|txt| txt.iter()) - .filter_map(|txt| { - // TODO: wrong, txt can be multiple character strings - let addr = match dns::decode_character_string(txt) { - Ok(a) => a, - Err(_) => return None, - }; - if !addr.starts_with(b"dnsaddr=") { - return None; - } - let addr = match str::from_utf8(&addr[8..]) { - Ok(a) => a, - Err(_) => return None, - }; - let mut addr = match addr.parse::() { - Ok(a) => a, - Err(_) => return None, - }; - match addr.pop() { - Some(Protocol::P2p(peer_id)) => { - if let Ok(peer_id) = PeerId::try_from(peer_id) { - if peer_id != my_peer_id { - return None; - } - } else { - return None; - } - }, - _ => return None, - }; - Some(addr) - }).collect(); - - MdnsPeer { - addrs, - peer_id: my_peer_id, - ttl, - } - } - - /// Returns the id of the peer. - #[inline] - pub fn id(&self) -> &PeerId { - &self.peer_id - } - - /// Returns the requested time-to-live for the record. - #[inline] - pub fn ttl(&self) -> Duration { - Duration::from_secs(u64::from(self.ttl)) - } - - /// Returns the list of addresses the peer says it is listening on. - /// - /// Filters out invalid addresses. 
- pub fn addresses(&self) -> &Vec { - &self.addrs - } -} - -impl fmt::Debug for MdnsPeer { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MdnsPeer") - .field("peer_id", &self.peer_id) - .finish() - } -} - -#[cfg(test)] -mod tests { - macro_rules! testgen { - ($runtime_name:ident, $service_name:ty, $block_on_fn:tt) => { - mod $runtime_name { - use libp2p_core::{PeerId, multihash::{Code, MultihashDigest}}; - use std::time::Duration; - use crate::service::MdnsPacket; - - fn discover(peer_id: PeerId) { - let fut = async { - let mut service = <$service_name>::new().await.unwrap(); - - loop { - let next = service.next().await; - service = next.0; - - match next.1 { - MdnsPacket::Query(query) => { - let resp = crate::dns::build_query_response( - query.query_id(), - peer_id.clone(), - vec![].into_iter(), - Duration::from_secs(120), - ); - for r in resp { - service.enqueue_response(r); - } - } - MdnsPacket::Response(response) => { - for peer in response.discovered_peers() { - if peer.id() == &peer_id { - return; - } - } - } - MdnsPacket::ServiceDiscovery(_) => panic!( - "did not expect a service discovery packet", - ) - } - } - }; - - $block_on_fn(Box::pin(fut)); - } - - // As of today the underlying UDP socket is not stubbed out. Thus tests run in parallel to - // this unit tests inter fear with it. Test needs to be run in sequence to ensure test - // properties. - #[test] - fn respect_query_interval() { - let own_ips: Vec = if_addrs::get_if_addrs().unwrap() - .into_iter() - .map(|i| i.addr.ip()) - .collect(); - - let fut = async { - let mut service = <$service_name>::new().await.unwrap(); - - let mut sent_queries = vec![]; - - loop { - let next = service.next().await; - service = next.0; - - match next.1 { - MdnsPacket::Query(query) => { - // Ignore queries from other nodes. 
- let source_ip = query.remote_addr().ip(); - if !own_ips.contains(&source_ip) { - continue; - } - - sent_queries.push(query); - - if sent_queries.len() > 1 { - return; - } - } - // Ignore response packets. We don't stub out the UDP socket, thus this is - // either random noise from the network, or noise from other unit tests - // running in parallel. - MdnsPacket::Response(_) => {}, - MdnsPacket::ServiceDiscovery(_) => { - panic!("Did not expect a service discovery packet."); - }, - } - } - }; - - $block_on_fn(Box::pin(fut)); - } - - #[test] - fn discover_normal_peer_id() { - discover(PeerId::random()) - } - - #[test] - fn discover_long_peer_id() { - let max_value = String::from_utf8(vec![b'f'; 42]).unwrap(); - let hash = Code::Identity.digest(max_value.as_ref()); - discover(PeerId::from_multihash(hash).unwrap()) - } - } - } - } - - testgen!( - async_std, - crate::service::MdnsService, - (|fut| async_std::task::block_on::<_, ()>(fut)) - ); - - testgen!( - tokio, - crate::service::MdnsService, - (|fut| tokio::runtime::Runtime::new().unwrap().block_on::>(fut)) - ); -} diff --git a/protocols/ping/CHANGELOG.md b/protocols/ping/CHANGELOG.md index 8c260347..752768c7 100644 --- a/protocols/ping/CHANGELOG.md +++ b/protocols/ping/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.28.0 [unreleased] + +- Update `libp2p-swarm`. + +# 0.27.0 [2021-01-12] + +- Update dependencies. + # 0.26.0 [2020-12-17] - Update `libp2p-swarm` and `libp2p-core`. 
diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index 07f92c1d..7c612612 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-ping" edition = "2018" description = "Ping protocol for libp2p" -version = "0.26.1" +version = "0.28.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,8 +14,8 @@ name = "libp2p_ping" [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } -libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } +libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } log = "0.4.1" rand = "0.7.2" void = "1.0" @@ -23,8 +23,8 @@ wasm-timer = "0.2" [dev-dependencies] async-std = "1.6.2" -libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" } -libp2p-noise = { path = "../../protocols/noise", package = "fluence-fork-libp2p-noise" } +libp2p-tcp = { path = "../../transports/tcp", package = "fluence-fork-libp2p-tcp" } +libp2p-noise = { path = "../../transports/noise", package = "fluence-fork-libp2p-noise" } libp2p-yamux = { path = "../../muxers/yamux", package = "fluence-fork-libp2p-yamux" } libp2p-mplex = { path = "../../muxers/mplex", package = "fluence-fork-libp2p-mplex" } quickcheck = "0.9.0" diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index 52056be4..556f27ed 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -62,21 +62,16 @@ fn ping_pong() { let mut count2 = count.get(); let peer1 = async move { - while let Some(_) = swarm1.next().now_or_never() {} - - for l in Swarm::listeners(&swarm1) { - tx.send(l.clone()).await.unwrap(); - } - loop { - match 
swarm1.next().await { - PingEvent { peer, result: Ok(PingSuccess::Ping { rtt }) } => { + match swarm1.next_event().await { + SwarmEvent::NewListenAddr(listener) => tx.send(listener).await.unwrap(), + SwarmEvent::Behaviour(PingEvent { peer, result: Ok(PingSuccess::Ping { rtt }) }) => { count1 -= 1; if count1 == 0 { return (pid1.clone(), peer, rtt) } }, - PingEvent { result: Err(e), .. } => panic!("Ping failure: {:?}", e), + SwarmEvent::Behaviour(PingEvent { result: Err(e), .. }) => panic!("Ping failure: {:?}", e), _ => {} } } @@ -132,16 +127,11 @@ fn max_failures() { Swarm::listen_on(&mut swarm1, addr).unwrap(); let peer1 = async move { - while let Some(_) = swarm1.next().now_or_never() {} - - for l in Swarm::listeners(&swarm1) { - tx.send(l.clone()).await.unwrap(); - } - let mut count1: u8 = 0; loop { match swarm1.next_event().await { + SwarmEvent::NewListenAddr(listener) => tx.send(listener).await.unwrap(), SwarmEvent::Behaviour(PingEvent { result: Ok(PingSuccess::Ping { .. }), .. }) => { diff --git a/protocols/request-response/CHANGELOG.md b/protocols/request-response/CHANGELOG.md index 07bccb1d..0ba7568f 100644 --- a/protocols/request-response/CHANGELOG.md +++ b/protocols/request-response/CHANGELOG.md @@ -1,4 +1,17 @@ -# 0.9.0 [unreleased] +# 0.10.0 [unreleased] + +- Update `libp2p-swarm`. + +# 0.9.1 [2021-02-15] + +- Make `is_pending_outbound` return true on pending connection. + [PR 1928](https://github.com/libp2p/rust-libp2p/pull/1928). + +- Update dependencies. + +# 0.9.0 [2021-01-12] + +- Update dependencies. - Re-export `throttled`-specific response channel. [PR 1902](https://github.com/libp2p/rust-libp2p/pull/1902). @@ -59,4 +72,3 @@ https://github.com/libp2p/rust-libp2p/pull/1606). # 0.1.0 - Initial release. 
- diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index fcca21e5..584ac565 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-request-response" edition = "2018" description = "Generic Request/Response Protocols" -version = "0.9.1" +version = "0.10.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,22 +14,22 @@ name = "libp2p_request_response" [dependencies] async-trait = "0.1" -bytes = "0.5.6" +bytes = "1" futures = "0.3.1" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } -libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } +libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" } log = "0.4.11" lru = "0.6" minicbor = { version = "0.7", features = ["std", "derive"] } rand = "0.7" smallvec = "1.4" -unsigned-varint = { version = "0.5", features = ["std", "futures"] } +unsigned-varint = { version = "0.7", features = ["std", "futures"] } wasm-timer = "0.2" [dev-dependencies] async-std = "1.6.2" -libp2p-noise = { path = "../noise", package = "fluence-fork-libp2p-noise" } -libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" } +libp2p-noise = { path = "../../transports/noise", package = "fluence-fork-libp2p-noise" } +libp2p-tcp = { path = "../../transports/tcp", package = "fluence-fork-libp2p-tcp" } libp2p-yamux = { path = "../../muxers/yamux", package = "fluence-fork-libp2p-yamux" } rand = "0.7" diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index a9806c3c..d92409eb 100644 --- a/protocols/request-response/src/lib.rs +++ 
b/protocols/request-response/src/lib.rs @@ -230,6 +230,12 @@ impl ResponseChannel { } /// The ID of an inbound or outbound request. +/// +/// Note: [`RequestId`]'s uniqueness is only guaranteed between two +/// inbound and likewise between two outbound requests. There is no +/// uniqueness guarantee in a set of both inbound and outbound +/// [`RequestId`]s nor in a set of inbound or outbound requests +/// originating from different [`RequestResponse`] behaviours. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct RequestId(u64); @@ -371,10 +377,10 @@ where if let Some(request) = self.try_send_request(peer, request) { self.pending_events.push_back(NetworkBehaviourAction::DialPeer { - peer_id: peer.clone(), + peer_id: *peer, condition: DialPeerCondition::Disconnected, }); - self.pending_outbound_requests.entry(peer.clone()).or_default().push(request); + self.pending_outbound_requests.entry(*peer).or_default().push(request); } request_id @@ -403,7 +409,7 @@ where /// /// Addresses added in this way are only removed by `remove_address`. pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) { - self.addresses.entry(peer.clone()).or_default().push(address); + self.addresses.entry(*peer).or_default().push(address); } /// Removes an address of a peer previously added via `add_address`. @@ -431,9 +437,16 @@ where /// [`PeerId`] initiated by [`RequestResponse::send_request`] is still /// pending, i.e. waiting for a response. pub fn is_pending_outbound(&self, peer: &PeerId, request_id: &RequestId) -> bool { - self.connected.get(peer) + // Check if request is already sent on established connection. + let est_conn = self.connected.get(peer) .map(|cs| cs.iter().any(|c| c.pending_inbound_responses.contains(request_id))) - .unwrap_or(false) + .unwrap_or(false); + // Check if request is still pending to be sent. 
+ let pen_conn = self.pending_outbound_requests.get(peer) + .map(|rps| rps.iter().any(|rp| {rp.request_id == *request_id})) + .unwrap_or(false); + + est_conn || pen_conn } /// Checks whether an inbound request from the peer with the provided @@ -466,7 +479,7 @@ where let conn = &mut connections[ix]; conn.pending_inbound_responses.insert(request.request_id); self.pending_events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer.clone(), + peer_id: *peer, handler: NotifyHandler::One(conn.id), event: request }); @@ -563,7 +576,7 @@ where ConnectedPoint::Dialer { address } => Some(address.clone()), ConnectedPoint::Listener { .. } => None }; - self.connected.entry(peer.clone()) + self.connected.entry(*peer) .or_default() .push(Connection::new(*conn, address)); } @@ -584,7 +597,7 @@ where for request_id in connection.pending_outbound_responses { self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent( RequestResponseEvent::InboundFailure { - peer: peer_id.clone(), + peer: *peer_id, request_id, error: InboundFailure::ConnectionClosed } @@ -595,7 +608,7 @@ where for request_id in connection.pending_inbound_responses { self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent( RequestResponseEvent::OutboundFailure { - peer: peer_id.clone(), + peer: *peer_id, request_id, error: OutboundFailure::ConnectionClosed } @@ -618,7 +631,7 @@ where for request in pending { self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent( RequestResponseEvent::OutboundFailure { - peer: peer.clone(), + peer: *peer, request_id: request.request_id, error: OutboundFailure::DialFailure } @@ -647,10 +660,10 @@ where RequestResponseEvent::Message { peer, message })); } RequestResponseHandlerEvent::Request { request_id, request, sender } => { - let channel = ResponseChannel { request_id, peer: peer.clone(), sender }; + let channel = ResponseChannel { request_id, peer, sender }; let message = RequestResponseMessage::Request { request_id, request, channel 
}; self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent( - RequestResponseEvent::Message { peer: peer.clone(), message } + RequestResponseEvent::Message { peer, message } )); match self.get_connection_mut(&peer, connection) { @@ -662,7 +675,7 @@ where None => { self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent( RequestResponseEvent::InboundFailure { - peer: peer.clone(), + peer, request_id, error: InboundFailure::ConnectionClosed } diff --git a/protocols/request-response/src/throttled.rs b/protocols/request-response/src/throttled.rs index 3a2f54a5..95edd106 100644 --- a/protocols/request-response/src/throttled.rs +++ b/protocols/request-response/src/throttled.rs @@ -251,7 +251,7 @@ where } else if let Some(info) = self.offline_peer_info.get_mut(p) { info.recv_budget.limit.set(limit) } - self.limit_overrides.insert(p.clone(), Limit::new(limit)); + self.limit_overrides.insert(*p, Limit::new(limit)); } /// Remove any limit overrides for the given peer. @@ -286,7 +286,7 @@ where let mut info = PeerInfo::new(limit); info.send_budget.remaining -= 1; let remaining = info.send_budget.remaining; - self.offline_peer_info.put(p.clone(), info); + self.offline_peer_info.put(*p, info); remaining }; @@ -428,13 +428,13 @@ where if !self.peer_info.contains_key(p) { if let Some(info) = self.offline_peer_info.pop(p) { let recv_budget = info.recv_budget.remaining; - self.peer_info.insert(p.clone(), info); + self.peer_info.insert(*p, info); if recv_budget > 1 { self.send_credit(p, recv_budget - 1); } } else { let limit = self.limit_overrides.get(p).copied().unwrap_or(self.default_limit); - self.peer_info.insert(p.clone(), PeerInfo::new(limit)); + self.peer_info.insert(*p, PeerInfo::new(limit)); } } } @@ -442,7 +442,7 @@ where fn inject_disconnected(&mut self, p: &PeerId) { log::trace!("{:08x}: disconnected from {}", self.id, p); if let Some(info) = self.peer_info.remove(p) { - self.offline_peer_info.put(p.clone(), info.into_disconnected()); + 
self.offline_peer_info.put(*p, info.into_disconnected()); } self.behaviour.inject_disconnected(p) } @@ -528,7 +528,7 @@ where if info.send_budget.grant < Some(id) { if info.send_budget.remaining == 0 && credit > 0 { log::trace!("{:08x}: sending to peer {} can resume", self.id, peer); - self.events.push_back(Event::ResumeSending(peer.clone())) + self.events.push_back(Event::ResumeSending(peer)) } info.send_budget.remaining += credit; info.send_budget.grant = Some(id); @@ -549,7 +549,7 @@ where }; if info.recv_budget.remaining == 0 { log::debug!("{:08x}: peer {} exceeds its budget", self.id, peer); - self.events.push_back(Event::TooManyInboundRequests(peer.clone())); + self.events.push_back(Event::TooManyInboundRequests(peer)); continue } info.recv_budget.remaining -= 1; diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index 8f029756..9433f67f 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -31,13 +31,41 @@ use libp2p_core::{ }; use libp2p_noise::{NoiseConfig, X25519Spec, Keypair}; use libp2p_request_response::*; -use libp2p_swarm::Swarm; +use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_tcp::TcpConfig; use futures::{prelude::*, channel::mpsc, executor::LocalPool, task::SpawnExt}; use rand::{self, Rng}; use std::{io, iter}; use std::{collections::HashSet, num::NonZeroU16}; +#[test] +fn is_response_outbound() { + let ping = Ping("ping".to_string().into_bytes()); + let offline_peer = PeerId::random(); + + let protocols = iter::once((PingProtocol(), ProtocolSupport::Full)); + let cfg = RequestResponseConfig::default(); + + let (peer1_id, trans) = mk_transport(); + let ping_proto1 = RequestResponse::new(PingCodec(), protocols.clone(), cfg.clone()); + let mut swarm1 = Swarm::new(trans, ping_proto1, peer1_id.clone()); + + let request_id1 = swarm1.send_request(&offline_peer, ping.clone()); + + match futures::executor::block_on(swarm1.next()) { + 
RequestResponseEvent::OutboundFailure{peer, request_id: req_id, error: _error} => { + assert_eq!(&offline_peer, &peer); + assert_eq!(req_id, request_id1); + }, + e => panic!("Peer: Unexpected event: {:?}", e), + } + + let request_id2 = swarm1.send_request(&offline_peer, ping.clone()); + + assert!(!swarm1.is_pending_outbound(&offline_peer, &request_id1)); + assert!(swarm1.is_pending_outbound(&offline_peer, &request_id2)); +} + /// Exercises a simple ping protocol. #[test] fn ping_protocol() { @@ -64,27 +92,24 @@ fn ping_protocol() { let expected_pong = pong.clone(); let peer1 = async move { - while let Some(_) = swarm1.next().now_or_never() {} - - let l = Swarm::listeners(&swarm1).next().unwrap(); - tx.send(l.clone()).await.unwrap(); - loop { - match swarm1.next().await { - RequestResponseEvent::Message { + match swarm1.next_event().await { + SwarmEvent::NewListenAddr(addr) => tx.send(addr).await.unwrap(), + SwarmEvent::Behaviour(RequestResponseEvent::Message { peer, message: RequestResponseMessage::Request { request, channel, .. } - } => { + }) => { assert_eq!(&request, &expected_ping); assert_eq!(&peer, &peer2_id); swarm1.send_response(channel, pong.clone()).unwrap(); }, - RequestResponseEvent::ResponseSent { + SwarmEvent::Behaviour(RequestResponseEvent::ResponseSent { peer, .. 
- } => { + }) => { assert_eq!(&peer, &peer2_id); } - e => panic!("Peer1: Unexpected event: {:?}", e) + SwarmEvent::Behaviour(e) => panic!("Peer1: Unexpected event: {:?}", e), + _ => {} } } }; @@ -96,6 +121,7 @@ fn ping_protocol() { let addr = rx.next().await.unwrap(); swarm2.add_address(&peer1_id, addr.clone()); let mut req_id = swarm2.send_request(&peer1_id, ping.clone()); + assert!(swarm2.is_pending_outbound(&peer1_id, &req_id)); loop { match swarm2.next().await { @@ -205,26 +231,24 @@ fn ping_protocol_throttled() { swarm2.set_receive_limit(NonZeroU16::new(limit2).unwrap()); let peer1 = async move { - while let Some(_) = swarm1.next().now_or_never() {} - - let l = Swarm::listeners(&swarm1).next().unwrap(); - tx.send(l.clone()).await.unwrap(); for i in 1 .. { - match swarm1.next().await { - throttled::Event::Event(RequestResponseEvent::Message { + match swarm1.next_event().await { + SwarmEvent::NewListenAddr(addr) => tx.send(addr).await.unwrap(), + SwarmEvent::Behaviour(throttled::Event::Event(RequestResponseEvent::Message { peer, message: RequestResponseMessage::Request { request, channel, .. }, - }) => { + })) => { assert_eq!(&request, &expected_ping); assert_eq!(&peer, &peer2_id); swarm1.send_response(channel, pong.clone()).unwrap(); }, - throttled::Event::Event(RequestResponseEvent::ResponseSent { + SwarmEvent::Behaviour(throttled::Event::Event(RequestResponseEvent::ResponseSent { peer, .. - }) => { + })) => { assert_eq!(&peer, &peer2_id); } - e => panic!("Peer1: Unexpected event: {:?}", e) + SwarmEvent::Behaviour(e) => panic!("Peer1: Unexpected event: {:?}", e), + _ => {} } if i % 31 == 0 { let lim = rand::thread_rng().gen_range(1, 17); diff --git a/protocols/secio/CHANGELOG.md b/protocols/secio/CHANGELOG.md deleted file mode 100644 index 047dc7af..00000000 --- a/protocols/secio/CHANGELOG.md +++ /dev/null @@ -1,36 +0,0 @@ -# 0.26.0 [2020-12-17] - -- Update `libp2p-core`. - -# 0.25.0 [2020-11-25] - -- Update `libp2p-core`. 
- -# 0.24.0 [2020-11-09] - -- Update dependencies. - -# 0.23.0 [2020-10-16] - -- Update dependencies. - -# 0.22.0 [2020-09-09] - -- As of this release, SECIO is deprecated. Please use `libp2p-noise` instead. - For some more context, [see here](https://blog.ipfs.io/2020-08-07-deprecating-secio/). - -- Bump `libp2p-core` dependency. - -# 0.21.0 [2020-08-18] - -- Bump `libp2p-core` dependency. - -# 0.20.0 [2020-07-01] - -- Updated dependencies. -- Conditional compilation fixes for the `wasm32-wasi` target - ([PR 1633](https://github.com/libp2p/rust-libp2p/pull/1633)). - -# 0.19.2 [2020-06-22] - -- Updated dependencies. diff --git a/protocols/secio/Cargo.toml b/protocols/secio/Cargo.toml deleted file mode 100644 index 57a1f9c4..00000000 --- a/protocols/secio/Cargo.toml +++ /dev/null @@ -1,61 +0,0 @@ -[package] -name = "fluence-fork-libp2p-secio" -edition = "2018" -description = "Secio encryption protocol for libp2p" -version = "0.26.1" -authors = ["Parity Technologies "] -license = "MIT" -repository = "https://github.com/libp2p/rust-libp2p" -keywords = ["peer-to-peer", "libp2p", "networking"] -categories = ["network-programming", "asynchronous"] - -[lib] -name = "libp2p_secio" - -[badges] -maintenance = { status = "deprecated" } - -[dependencies] -aes-ctr = "0.3" -aesni = { version = "0.6", features = ["nocheck"], optional = true } -ctr = "0.3" -futures = "0.3.1" -hmac = "0.9.0" -lazy_static = "1.2.0" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } -log = "0.4.6" -prost = "0.6.1" -pin-project = "1.0.0" -quicksink = "0.1" -rand = "0.7" -rw-stream-sink = "0.2.0" -sha2 = "0.9.1" -static_assertions = "1" -twofish = "0.2.0" - -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] -ring = { version = "0.16.9", features = ["alloc"], default-features = false } - -[target.'cfg(target_arch = "wasm32")'.dependencies] -js-sys = "0.3.10" -parity-send-wrapper = "0.1" -wasm-bindgen = "0.2.33" -wasm-bindgen-futures = "0.4.5" -web-sys 
= { version = "0.3.10", features = ["Crypto", "CryptoKey", "SubtleCrypto", "Window"] } - -[build-dependencies] -prost-build = "0.6" - -[features] -default = ["secp256k1"] -secp256k1 = [] -aes-all = ["aesni"] - -[dev-dependencies] -async-std = "1.6.2" -criterion = "0.3" -libp2p-mplex = { path = "../../muxers/mplex", package = "fluence-fork-libp2p-mplex" } -libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" } - -[package.metadata.workspaces] -independent = true diff --git a/protocols/secio/build.rs b/protocols/secio/build.rs deleted file mode 100644 index 1b0feff6..00000000 --- a/protocols/secio/build.rs +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -fn main() { - prost_build::compile_protos(&["src/structs.proto"], &["src"]).unwrap(); -} - diff --git a/protocols/secio/src/algo_support.rs b/protocols/secio/src/algo_support.rs deleted file mode 100644 index 5e6d5e46..00000000 --- a/protocols/secio/src/algo_support.rs +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! This module contains some utilities for algorithm support exchange. -//! -//! One important part of the SECIO handshake is negotiating algorithms. This is what this module -//! helps you with. 
- -use crate::error::SecioError; -#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] -use ring::digest; -use std::cmp::Ordering; -use crate::stream_cipher::Cipher; -use crate::KeyAgreement; - -const ECDH_P256: &str = "P-256"; -const ECDH_P384: &str = "P-384"; - -const AES_128: &str = "AES-128"; -const AES_256: &str = "AES-256"; -const TWOFISH_CTR: &str = "TwofishCTR"; -const NULL: &str = "NULL"; - -const SHA_256: &str = "SHA256"; -const SHA_512: &str = "SHA512"; - -pub(crate) const DEFAULT_AGREEMENTS_PROPOSITION: &str = "P-256,P-384"; -pub(crate) const DEFAULT_CIPHERS_PROPOSITION: &str = "AES-128,AES-256,TwofishCTR"; -pub(crate) const DEFAULT_DIGESTS_PROPOSITION: &str = "SHA256,SHA512"; - -/// Return a proposition string from the given sequence of `KeyAgreement` values. -pub fn key_agreements_proposition<'a, I>(xchgs: I) -> String -where - I: IntoIterator -{ - let mut s = String::new(); - for x in xchgs { - match x { - KeyAgreement::EcdhP256 => { - s.push_str(ECDH_P256); - s.push(',') - } - KeyAgreement::EcdhP384 => { - s.push_str(ECDH_P384); - s.push(',') - } - } - } - s.pop(); // remove trailing comma if any - s -} - -/// Given two key agreement proposition strings try to figure out a match. -/// -/// The `Ordering` parameter determines which argument is preferred. If `Less` or `Equal` we -/// try for each of `theirs` every one of `ours`, for `Greater` it's the other way around. -pub fn select_agreement(r: Ordering, ours: &str, theirs: &str) -> Result { - let (a, b) = match r { - Ordering::Less | Ordering::Equal => (theirs, ours), - Ordering::Greater => (ours, theirs) - }; - for x in a.split(',') { - if b.split(',').any(|y| x == y) { - match x { - ECDH_P256 => return Ok(KeyAgreement::EcdhP256), - ECDH_P384 => return Ok(KeyAgreement::EcdhP384), - _ => continue - } - } - } - Err(SecioError::NoSupportIntersection) -} - - -/// Return a proposition string from the given sequence of `Cipher` values. 
-pub fn ciphers_proposition<'a, I>(ciphers: I) -> String -where - I: IntoIterator -{ - let mut s = String::new(); - for c in ciphers { - match c { - Cipher::Aes128 => { - s.push_str(AES_128); - s.push(',') - } - Cipher::Aes256 => { - s.push_str(AES_256); - s.push(',') - } - Cipher::TwofishCtr => { - s.push_str(TWOFISH_CTR); - s.push(',') - } - Cipher::Null => { - s.push_str(NULL); - s.push(',') - } - } - } - s.pop(); // remove trailing comma if any - s -} - -/// Given two cipher proposition strings try to figure out a match. -/// -/// The `Ordering` parameter determines which argument is preferred. If `Less` or `Equal` we -/// try for each of `theirs` every one of `ours`, for `Greater` it's the other way around. -pub fn select_cipher(r: Ordering, ours: &str, theirs: &str) -> Result { - let (a, b) = match r { - Ordering::Less | Ordering::Equal => (theirs, ours), - Ordering::Greater => (ours, theirs) - }; - for x in a.split(',') { - if b.split(',').any(|y| x == y) { - match x { - AES_128 => return Ok(Cipher::Aes128), - AES_256 => return Ok(Cipher::Aes256), - TWOFISH_CTR => return Ok(Cipher::TwofishCtr), - NULL => return Ok(Cipher::Null), - _ => continue - } - } - } - Err(SecioError::NoSupportIntersection) -} - - -/// Possible digest algorithms. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum Digest { - Sha256, - Sha512 -} - -impl Digest { - /// Returns the size in bytes of a digest of this kind. - #[inline] - pub fn num_bytes(&self) -> usize { - match *self { - Digest::Sha256 => 256 / 8, - Digest::Sha512 => 512 / 8, - } - } -} - -/// Return a proposition string from the given sequence of `Digest` values. 
-pub fn digests_proposition<'a, I>(digests: I) -> String -where - I: IntoIterator -{ - let mut s = String::new(); - for d in digests { - match d { - Digest::Sha256 => { - s.push_str(SHA_256); - s.push(',') - } - Digest::Sha512 => { - s.push_str(SHA_512); - s.push(',') - } - } - } - s.pop(); // remove trailing comma if any - s -} - -/// Given two digest proposition strings try to figure out a match. -/// -/// The `Ordering` parameter determines which argument is preferred. If `Less` or `Equal` we -/// try for each of `theirs` every one of `ours`, for `Greater` it's the other way around. -pub fn select_digest(r: Ordering, ours: &str, theirs: &str) -> Result { - let (a, b) = match r { - Ordering::Less | Ordering::Equal => (theirs, ours), - Ordering::Greater => (ours, theirs) - }; - for x in a.split(',') { - if b.split(',').any(|y| x == y) { - match x { - SHA_256 => return Ok(Digest::Sha256), - SHA_512 => return Ok(Digest::Sha512), - _ => continue - } - } - } - Err(SecioError::NoSupportIntersection) -} - -#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] -impl Into<&'static digest::Algorithm> for Digest { - #[inline] - fn into(self) -> &'static digest::Algorithm { - match self { - Digest::Sha256 => &digest::SHA256, - Digest::Sha512 => &digest::SHA512, - } - } -} - -#[cfg(test)] -mod tests { - #[test] - fn cipher_non_null() { - // This test serves as a safe-guard against accidentally pushing to master a commit that - // sets this constant to `NULL`. - assert!(!super::DEFAULT_CIPHERS_PROPOSITION.contains("NULL")); - } -} diff --git a/protocols/secio/src/codec.rs b/protocols/secio/src/codec.rs deleted file mode 100644 index 6a3bfb19..00000000 --- a/protocols/secio/src/codec.rs +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Individual messages encoding and decoding. Use this after the algorithms have been -//! successfully negotiated. - -mod decode; -mod encode; -mod len_prefix; - -use aes_ctr::stream_cipher; -use crate::algo_support::Digest; -use decode::DecoderMiddleware; -use encode::EncoderMiddleware; -use futures::prelude::*; -use hmac::{self, Mac, NewMac}; -use sha2::{Sha256, Sha512}; - -pub use len_prefix::LenPrefixCodec; - -/// Type returned by `full_codec`. -pub type FullCodec = DecoderMiddleware>>; - -pub type StreamCipher = Box; - -#[derive(Debug, Clone)] -pub enum Hmac { - Sha256(hmac::Hmac), - Sha512(hmac::Hmac), -} - -impl Hmac { - /// Returns the size of the hash in bytes. - #[inline] - pub fn num_bytes(&self) -> usize { - match *self { - Hmac::Sha256(_) => 32, - Hmac::Sha512(_) => 64, - } - } - - /// Builds a `Hmac` from an algorithm and key. 
- pub fn from_key(algorithm: Digest, key: &[u8]) -> Self { - // TODO: it would be nice to tweak the hmac crate to add an equivalent to new_varkey that - // never errors - match algorithm { - Digest::Sha256 => Hmac::Sha256(hmac::Hmac::new_varkey(key) - .expect("Hmac::new_varkey accepts any key length")), - Digest::Sha512 => Hmac::Sha512(hmac::Hmac::new_varkey(key) - .expect("Hmac::new_varkey accepts any key length")), - } - } - - /// Signs the data. - // TODO: better return type? - pub fn sign(&self, crypted_data: &[u8]) -> Vec { - match *self { - Hmac::Sha256(ref hmac) => { - let mut hmac = hmac.clone(); - hmac.update(crypted_data); - hmac.finalize().into_bytes().to_vec() - }, - Hmac::Sha512(ref hmac) => { - let mut hmac = hmac.clone(); - hmac.update(crypted_data); - hmac.finalize().into_bytes().to_vec() - }, - } - } - - /// Verifies that the data matches the expected hash. - // TODO: better error? - pub fn verify(&self, crypted_data: &[u8], expected_hash: &[u8]) -> Result<(), ()> { - match *self { - Hmac::Sha256(ref hmac) => { - let mut hmac = hmac.clone(); - hmac.update(crypted_data); - hmac.verify(expected_hash).map_err(|_| ()) - }, - Hmac::Sha512(ref hmac) => { - let mut hmac = hmac.clone(); - hmac.update(crypted_data); - hmac.verify(expected_hash).map_err(|_| ()) - }, - } - } -} - -/// Takes control of `socket`. Returns an object that implements `future::Sink` and -/// `future::Stream`. The `Stream` and `Sink` produce and accept `Vec` objects. -/// -/// The conversion between the stream/sink items and the socket is done with the given cipher and -/// hash algorithm (which are generally decided during the handshake). 
-pub fn full_codec( - socket: LenPrefixCodec, - cipher_encoding: StreamCipher, - encoding_hmac: Hmac, - cipher_decoder: StreamCipher, - decoding_hmac: Hmac, - remote_nonce: Vec -) -> FullCodec -where - S: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - let encoder = EncoderMiddleware::new(socket, cipher_encoding, encoding_hmac); - DecoderMiddleware::new(encoder, cipher_decoder, decoding_hmac, remote_nonce) -} - - -#[cfg(test)] -mod tests { - use super::{full_codec, DecoderMiddleware, EncoderMiddleware, Hmac, LenPrefixCodec}; - use crate::algo_support::Digest; - use crate::stream_cipher::{ctr, Cipher}; - use crate::error::SecioError; - use async_std::net::{TcpListener, TcpStream}; - use futures::{prelude::*, channel::mpsc, channel::oneshot}; - - const NULL_IV : [u8; 16] = [0; 16]; - - #[test] - fn raw_encode_then_decode() { - let (data_tx, data_rx) = mpsc::channel::>(256); - - let cipher_key: [u8; 32] = rand::random(); - let hmac_key: [u8; 32] = rand::random(); - - let mut encoder = EncoderMiddleware::new( - data_tx, - ctr(Cipher::Aes256, &cipher_key, &NULL_IV[..]), - Hmac::from_key(Digest::Sha256, &hmac_key), - ); - - let mut decoder = DecoderMiddleware::new( - data_rx.map(|v| Ok::<_, SecioError>(v)), - ctr(Cipher::Aes256, &cipher_key, &NULL_IV[..]), - Hmac::from_key(Digest::Sha256, &hmac_key), - Vec::new() - ); - - let data = b"hello world"; - async_std::task::block_on(async move { - encoder.send(data.to_vec()).await.unwrap(); - let rx = decoder.next().await.unwrap().unwrap(); - assert_eq!(rx, data); - }); - } - - fn full_codec_encode_then_decode(cipher: Cipher) { - let cipher_key: [u8; 32] = rand::random(); - let cipher_key_clone = cipher_key.clone(); - let key_size = cipher.key_size(); - let hmac_key: [u8; 16] = rand::random(); - let hmac_key_clone = hmac_key.clone(); - let data = b"hello world"; - let data_clone = data.clone(); - let nonce = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - let (l_a_tx, l_a_rx) = oneshot::channel(); - - let nonce2 = nonce.clone(); - 
let server = async { - let listener = TcpListener::bind(&"127.0.0.1:0").await.unwrap(); - let listener_addr = listener.local_addr().unwrap(); - l_a_tx.send(listener_addr).unwrap(); - - let (connec, _) = listener.accept().await.unwrap(); - let codec = full_codec( - LenPrefixCodec::new(connec, 1024), - ctr(cipher, &cipher_key[..key_size], &NULL_IV[..]), - Hmac::from_key(Digest::Sha256, &hmac_key), - ctr(cipher, &cipher_key[..key_size], &NULL_IV[..]), - Hmac::from_key(Digest::Sha256, &hmac_key), - nonce2.clone() - ); - - let outcome = codec.map(|v| v.unwrap()).concat().await; - assert_eq!(outcome, data_clone); - }; - - let client = async { - let listener_addr = l_a_rx.await.unwrap(); - let stream = TcpStream::connect(&listener_addr).await.unwrap(); - let mut codec = full_codec( - LenPrefixCodec::new(stream, 1024), - ctr(cipher, &cipher_key_clone[..key_size], &NULL_IV[..]), - Hmac::from_key(Digest::Sha256, &hmac_key_clone), - ctr(cipher, &cipher_key_clone[..key_size], &NULL_IV[..]), - Hmac::from_key(Digest::Sha256, &hmac_key_clone), - Vec::new() - ); - codec.send(nonce.into()).await.unwrap(); - codec.send(data.to_vec().into()).await.unwrap(); - }; - - async_std::task::block_on(future::join(client, server)); - } - - #[test] - fn full_codec_encode_then_decode_aes128() { - full_codec_encode_then_decode(Cipher::Aes128); - } - - #[test] - fn full_codec_encode_then_decode_aes256() { - full_codec_encode_then_decode(Cipher::Aes256); - } - - #[test] - fn full_codec_encode_then_decode_twofish() { - full_codec_encode_then_decode(Cipher::TwofishCtr); - } - - #[test] - fn full_codec_encode_then_decode_null() { - full_codec_encode_then_decode(Cipher::Null); - } -} diff --git a/protocols/secio/src/codec/decode.rs b/protocols/secio/src/codec/decode.rs deleted file mode 100644 index 8ea0cd4e..00000000 --- a/protocols/secio/src/codec/decode.rs +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Individual messages decoding. - -use super::{Hmac, StreamCipher}; - -use crate::error::SecioError; -use futures::prelude::*; -use log::debug; -use std::{cmp::min, pin::Pin, task::Context, task::Poll}; - -/// Wraps around a `Stream>`. The buffers produced by the underlying stream -/// are decoded using the cipher and hmac. -/// -/// This struct implements `Stream`, whose stream item are frames of data without the length -/// prefix. The mechanism for removing the length prefix and splitting the incoming data into -/// frames isn't handled by this module. -/// -/// Also implements `Sink` for convenience. -#[pin_project::pin_project] -pub struct DecoderMiddleware { - cipher_state: StreamCipher, - hmac: Hmac, - #[pin] - raw_stream: S, - nonce: Vec -} - -impl DecoderMiddleware { - /// Create a new decoder for the given stream, using the provided cipher and HMAC. 
- /// - /// The `nonce` parameter denotes a sequence of bytes which are expected to be found at the - /// beginning of the stream and are checked for equality. - pub fn new(raw_stream: S, cipher: StreamCipher, hmac: Hmac, nonce: Vec) -> DecoderMiddleware { - DecoderMiddleware { - cipher_state: cipher, - hmac, - raw_stream, - nonce - } - } -} - -impl Stream for DecoderMiddleware -where - S: TryStream>, - S::Error: Into, -{ - type Item = Result, SecioError>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - - let frame = match TryStream::try_poll_next(this.raw_stream, cx) { - Poll::Ready(Some(Ok(t))) => t, - Poll::Ready(None) => return Poll::Ready(None), - Poll::Pending => return Poll::Pending, - Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err.into()))), - }; - - if frame.len() < this.hmac.num_bytes() { - debug!("frame too short when decoding secio frame"); - return Poll::Ready(Some(Err(SecioError::FrameTooShort))); - } - let content_length = frame.len() - this.hmac.num_bytes(); - { - let (crypted_data, expected_hash) = frame.split_at(content_length); - debug_assert_eq!(expected_hash.len(), this.hmac.num_bytes()); - - if this.hmac.verify(crypted_data, expected_hash).is_err() { - debug!("hmac mismatch when decoding secio frame"); - return Poll::Ready(Some(Err(SecioError::HmacNotMatching))); - } - } - - let mut data_buf = frame; - data_buf.truncate(content_length); - this.cipher_state.decrypt(&mut data_buf); - - if !this.nonce.is_empty() { - let n = min(data_buf.len(), this.nonce.len()); - if data_buf[.. n] != this.nonce[.. n] { - return Poll::Ready(Some(Err(SecioError::NonceVerificationFailed))) - } - this.nonce.drain(.. n); - data_buf.drain(.. 
n); - } - - Poll::Ready(Some(Ok(data_buf))) - } -} - -impl Sink for DecoderMiddleware -where - S: Sink, -{ - type Error = S::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - Sink::poll_ready(this.raw_stream, cx) - } - - fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { - let this = self.project(); - Sink::start_send(this.raw_stream, item) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - Sink::poll_flush(this.raw_stream, cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - Sink::poll_close(this.raw_stream, cx) - } -} diff --git a/protocols/secio/src/codec/encode.rs b/protocols/secio/src/codec/encode.rs deleted file mode 100644 index c4bbf5ac..00000000 --- a/protocols/secio/src/codec/encode.rs +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Individual messages encoding. - -use super::{Hmac, StreamCipher}; -use futures::prelude::*; -use std::{pin::Pin, task::Context, task::Poll}; - -/// Wraps around a `Sink`. Encodes the buffers passed to it and passes it to the underlying sink. -/// -/// This struct implements `Sink`. It expects individual frames of data, and outputs individual -/// frames as well, most notably without the length prefix. The mechanism for adding the length -/// prefix is not covered by this module. -/// -/// Also implements `Stream` for convenience. -#[pin_project::pin_project] -pub struct EncoderMiddleware { - cipher_state: StreamCipher, - hmac: Hmac, - #[pin] - raw_sink: S, -} - -impl EncoderMiddleware { - pub fn new(raw: S, cipher: StreamCipher, hmac: Hmac) -> EncoderMiddleware { - EncoderMiddleware { - cipher_state: cipher, - hmac, - raw_sink: raw, - } - } -} - -impl Sink> for EncoderMiddleware -where - S: Sink>, -{ - type Error = S::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - Sink::poll_ready(this.raw_sink, cx) - } - - fn start_send(self: Pin<&mut Self>, mut data_buf: Vec) -> Result<(), Self::Error> { - let this = self.project(); - // TODO if SinkError gets refactor to SecioError, then use try_apply_keystream - this.cipher_state.encrypt(&mut data_buf[..]); - let signature = this.hmac.sign(&data_buf[..]); - data_buf.extend_from_slice(signature.as_ref()); - Sink::start_send(this.raw_sink, data_buf) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - Sink::poll_flush(this.raw_sink, cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = 
self.project(); - Sink::poll_close(this.raw_sink, cx) - } -} - -impl Stream for EncoderMiddleware -where - S: Stream, -{ - type Item = S::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.project(); - Stream::poll_next(this.raw_sink, cx) - } -} diff --git a/protocols/secio/src/codec/len_prefix.rs b/protocols/secio/src/codec/len_prefix.rs deleted file mode 100644 index 18882026..00000000 --- a/protocols/secio/src/codec/len_prefix.rs +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use futures::{prelude::*, stream::BoxStream}; -use quicksink::Action; -use std::{fmt, io, pin::Pin, task::{Context, Poll}}; - -/// `Stream` & `Sink` that reads and writes a length prefix in front of the actual data. 
-pub struct LenPrefixCodec { - stream: BoxStream<'static, io::Result>>, - sink: Pin, Error = io::Error> + Send>>, - _mark: std::marker::PhantomData -} - -impl fmt::Debug for LenPrefixCodec { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("LenPrefixCodec") - } -} - -static_assertions::const_assert! { - std::mem::size_of::() <= std::mem::size_of::() -} - -impl LenPrefixCodec -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - pub fn new(socket: T, max_len: usize) -> Self { - let (r, w) = socket.split(); - - let stream = futures::stream::unfold(r, move |mut r| async move { - let mut len = [0; 4]; - if let Err(e) = r.read_exact(&mut len).await { - if e.kind() == io::ErrorKind::UnexpectedEof { - return None - } - return Some((Err(e), r)) - } - let n = u32::from_be_bytes(len) as usize; - if n > max_len { - let msg = format!("data length {} exceeds allowed maximum {}", n, max_len); - return Some((Err(io::Error::new(io::ErrorKind::PermissionDenied, msg)), r)) - } - let mut v = vec![0; n]; - if let Err(e) = r.read_exact(&mut v).await { - return Some((Err(e), r)) - } - Some((Ok(v), r)) - }); - - let sink = quicksink::make_sink(w, move |mut w, action: Action>| async move { - match action { - Action::Send(data) => { - if data.len() > max_len { - log::error!("data length {} exceeds allowed maximum {}", data.len(), max_len) - } - w.write_all(&(data.len() as u32).to_be_bytes()).await?; - w.write_all(&data).await? - } - Action::Flush => w.flush().await?, - Action::Close => w.close().await? 
- } - Ok(w) - }); - - LenPrefixCodec { - stream: stream.boxed(), - sink: Box::pin(sink), - _mark: std::marker::PhantomData - } - } -} - -impl Stream for LenPrefixCodec -where - T: AsyncRead + AsyncWrite + Send + 'static -{ - type Item = io::Result>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.stream.poll_next_unpin(cx) - } -} - -impl Sink> for LenPrefixCodec -where - T: AsyncRead + AsyncWrite + Send + 'static -{ - type Error = io::Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.sink).poll_ready(cx) - } - - fn start_send(mut self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { - Pin::new(&mut self.sink).start_send(item) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.sink).poll_flush(cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.sink).poll_close(cx) - } -} - -impl Unpin for LenPrefixCodec { -} diff --git a/protocols/secio/src/error.rs b/protocols/secio/src/error.rs deleted file mode 100644 index 16b0dcdf..00000000 --- a/protocols/secio/src/error.rs +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Defines the `SecioError` enum that groups all possible errors in SECIO. - -use aes_ctr::stream_cipher::LoopError; -use std::error; -use std::fmt; -use std::io::Error as IoError; - -/// Error at the SECIO layer communication. -#[derive(Debug)] -pub enum SecioError { - /// I/O error. - IoError(IoError), - - /// Protocol buffer error. - ProtobufError(prost::DecodeError), - - /// Failed to parse one of the handshake protobuf messages. - HandshakeParsingFailure, - - /// There is no protocol supported by both the local and remote hosts. - NoSupportIntersection, - - /// Failed to generate nonce. - NonceGenerationFailed, - - /// Failed to generate ephemeral key. - EphemeralKeyGenerationFailed, - - /// Failed to sign a message with our local private key. - SigningFailure, - - /// The signature of the exchange packet doesn't verify the remote public key. - SignatureVerificationFailed, - - /// Failed to generate the secret shared key from the ephemeral key. - SecretGenerationFailed, - - /// The final check of the handshake failed. - NonceVerificationFailed, - - /// Error with block cipher. - CipherError(LoopError), - - /// The received frame was of invalid length. - FrameTooShort, - - /// The hashes of the message didn't match. - HmacNotMatching, - - /// We received an invalid proposition from remote. 
- InvalidProposition(&'static str), - - #[doc(hidden)] - __Nonexhaustive -} - -impl error::Error for SecioError { - fn cause(&self) -> Option<&dyn error::Error> { - match *self { - SecioError::IoError(ref err) => Some(err), - SecioError::ProtobufError(ref err) => Some(err), - // TODO: The type doesn't implement `Error` - /*SecioError::CipherError(ref err) => { - Some(err) - },*/ - _ => None, - } - } -} - -impl fmt::Display for SecioError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - match self { - SecioError::IoError(e) => - write!(f, "I/O error: {}", e), - SecioError::ProtobufError(e) => - write!(f, "Protobuf error: {}", e), - SecioError::HandshakeParsingFailure => - f.write_str("Failed to parse one of the handshake protobuf messages"), - SecioError::NoSupportIntersection => - f.write_str("There is no protocol supported by both the local and remote hosts"), - SecioError::NonceGenerationFailed => - f.write_str("Failed to generate nonce"), - SecioError::EphemeralKeyGenerationFailed => - f.write_str("Failed to generate ephemeral key"), - SecioError::SigningFailure => - f.write_str("Failed to sign a message with our local private key"), - SecioError::SignatureVerificationFailed => - f.write_str("The signature of the exchange packet doesn't verify the remote public key"), - SecioError::SecretGenerationFailed => - f.write_str("Failed to generate the secret shared key from the ephemeral key"), - SecioError::NonceVerificationFailed => - f.write_str("The final check of the handshake failed"), - SecioError::CipherError(e) => - write!(f, "Error while decoding/encoding data: {:?}", e), - SecioError::FrameTooShort => - f.write_str("The received frame was of invalid length"), - SecioError::HmacNotMatching => - f.write_str("The hashes of the message didn't match"), - SecioError::InvalidProposition(msg) => - write!(f, "invalid proposition: {}", msg), - SecioError::__Nonexhaustive => - f.write_str("__Nonexhaustive") - } - } -} - -impl From for 
SecioError { - fn from(err: LoopError) -> SecioError { - SecioError::CipherError(err) - } -} - -impl From for SecioError { - fn from(err: IoError) -> SecioError { - SecioError::IoError(err) - } -} - -impl From for SecioError { - fn from(err: prost::DecodeError) -> SecioError { - SecioError::ProtobufError(err) - } -} diff --git a/protocols/secio/src/exchange.rs b/protocols/secio/src/exchange.rs deleted file mode 100644 index 1ae4120b..00000000 --- a/protocols/secio/src/exchange.rs +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! This module handles the key agreement process. Typically ECDH. 
- -use futures::prelude::*; -use crate::SecioError; - -#[path = "exchange/impl_ring.rs"] -#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] -mod platform; -#[path = "exchange/impl_webcrypto.rs"] -#[cfg(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown"))] -mod platform; - -/// Possible key agreement algorithms. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum KeyAgreement { - EcdhP256, - EcdhP384 -} - -/// Opaque private key type. -pub struct AgreementPrivateKey(platform::AgreementPrivateKey); - -/// Generates a new key pair as part of the exchange. -/// -/// Returns the opaque private key and the corresponding public key. -#[inline] -pub fn generate_agreement(algorithm: KeyAgreement) -> impl Future), SecioError>> { - platform::generate_agreement(algorithm).map_ok(|(pr, pu)| (AgreementPrivateKey(pr), pu)) -} - -/// Finish the agreement. On success, returns the shared key that both remote agreed upon. -#[inline] -pub fn agree(algorithm: KeyAgreement, my_private_key: AgreementPrivateKey, other_public_key: &[u8], out_size: usize) - -> impl Future, SecioError>> -{ - platform::agree(algorithm, my_private_key.0, other_public_key, out_size) -} - diff --git a/protocols/secio/src/exchange/impl_ring.rs b/protocols/secio/src/exchange/impl_ring.rs deleted file mode 100644 index b7f42be7..00000000 --- a/protocols/secio/src/exchange/impl_ring.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Implementation of the key agreement process using the `ring` library. - -use crate::{KeyAgreement, SecioError}; -use futures::{future, prelude::*}; -use log::debug; -use ring::agreement as ring_agreement; -use ring::rand as ring_rand; - -impl Into<&'static ring_agreement::Algorithm> for KeyAgreement { - #[inline] - fn into(self) -> &'static ring_agreement::Algorithm { - match self { - KeyAgreement::EcdhP256 => &ring_agreement::ECDH_P256, - KeyAgreement::EcdhP384 => &ring_agreement::ECDH_P384, - } - } -} - -/// Opaque private key type. -pub type AgreementPrivateKey = ring_agreement::EphemeralPrivateKey; - -/// Generates a new key pair as part of the exchange. -/// -/// Returns the opaque private key and the corresponding public key. 
-pub fn generate_agreement(algorithm: KeyAgreement) -> impl Future), SecioError>> { - let rng = ring_rand::SystemRandom::new(); - - match ring_agreement::EphemeralPrivateKey::generate(algorithm.into(), &rng) { - Ok(tmp_priv_key) => { - let r = tmp_priv_key.compute_public_key() - .map_err(|_| SecioError::EphemeralKeyGenerationFailed) - .map(move |tmp_pub_key| (tmp_priv_key, tmp_pub_key.as_ref().to_vec())); - future::ready(r) - }, - Err(_) => { - debug!("failed to generate ECDH key"); - future::ready(Err(SecioError::EphemeralKeyGenerationFailed)) - }, - } -} - -/// Finish the agreement. On success, returns the shared key that both remote agreed upon. -pub fn agree(algorithm: KeyAgreement, my_private_key: AgreementPrivateKey, other_public_key: &[u8], _out_size: usize) - -> impl Future, SecioError>> -{ - let ret = ring_agreement::agree_ephemeral(my_private_key, - &ring_agreement::UnparsedPublicKey::new(algorithm.into(), other_public_key), - SecioError::SecretGenerationFailed, - |key_material| Ok(key_material.to_vec())); - future::ready(ret) -} diff --git a/protocols/secio/src/exchange/impl_webcrypto.rs b/protocols/secio/src/exchange/impl_webcrypto.rs deleted file mode 100644 index a7a363ca..00000000 --- a/protocols/secio/src/exchange/impl_webcrypto.rs +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Implementation of the key agreement process using the WebCrypto API. - -use crate::{KeyAgreement, SecioError}; -use futures::prelude::*; -use parity_send_wrapper::SendWrapper; -use std::{io, pin::Pin, task::Context, task::Poll}; -use wasm_bindgen::prelude::*; - -/// Opaque private key type. Contains the private key and the `SubtleCrypto` object. -pub type AgreementPrivateKey = SendSyncHack<(JsValue, web_sys::SubtleCrypto)>; - -/// We use a `SendWrapper` from the `send_wrapper` crate around our JS data type. JavaScript data -/// types are not `Send`/`Sync`, but since WASM is single-threaded we know that we're only ever -/// going to access them from the same thread. -pub struct SendSyncHack(SendWrapper); - -impl Future for SendSyncHack -where T: Future + Unpin { - type Output = T::Output; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - self.0.poll_unpin(cx) - } -} - -/// Generates a new key pair as part of the exchange. -/// -/// Returns the opaque private key and the corresponding public key. -pub fn generate_agreement(algorithm: KeyAgreement) - -> impl Future), SecioError>> -{ - let future = async move { - // First step is to create the `SubtleCrypto` object. - let crypto = build_crypto_future().await?; - - // We then generate the ephemeral key. 
- let key_pair = { - let obj = build_curve_obj(algorithm); - - let usages = js_sys::Array::new(); - usages.push(&JsValue::from_str("deriveKey")); - usages.push(&JsValue::from_str("deriveBits")); - - let promise = crypto.generate_key_with_object(&obj, true, usages.as_ref())?; - wasm_bindgen_futures::JsFuture::from(promise).await? - }; - - // WebCrypto has generated a key-pair. Let's split this key pair into a private key and a - // public key. - let (private, public) = { - let private = js_sys::Reflect::get(&key_pair, &JsValue::from_str("privateKey")); - let public = js_sys::Reflect::get(&key_pair, &JsValue::from_str("publicKey")); - match (private, public) { - (Ok(pr), Ok(pu)) => (pr, pu), - (Err(err), _) => return Err(err), - (_, Err(err)) => return Err(err), - } - }; - - // Then we turn the public key into an `ArrayBuffer`. - let public = { - let promise = crypto.export_key("raw", &public.into())?; - wasm_bindgen_futures::JsFuture::from(promise).await? - }; - - // And finally we convert this `ArrayBuffer` into a `Vec`. - let public = js_sys::Uint8Array::new(&public); - let mut public_buf = vec![0; public.length() as usize]; - public.copy_to(&mut public_buf); - Ok((SendSyncHack(SendWrapper::new((private, crypto))), public_buf)) - }; - - let future = future - .map_err(|err| { - SecioError::IoError(io::Error::new(io::ErrorKind::Other, format!("{:?}", err))) - }); - SendSyncHack(SendWrapper::new(Box::pin(future))) -} - -/// Finish the agreement. On success, returns the shared key that both remote agreed upon. -pub fn agree(algorithm: KeyAgreement, key: AgreementPrivateKey, other_public_key: &[u8], out_size: usize) - -> impl Future, SecioError>> -{ - let other_public_key = { - // This unsafe is here because the lifetime of `other_public_key` must not outlive the - // `tmp_view`. This is guaranteed by the fact that we clone this array right below. 
- // See also https://github.com/rustwasm/wasm-bindgen/issues/1303 - let tmp_view = unsafe { js_sys::Uint8Array::view(other_public_key) }; - js_sys::Uint8Array::new(tmp_view.as_ref()) - }; - - let future = async move { - let (private_key, crypto) = key.0.take(); - - // We start by importing the remote's public key into the WebCrypto world. - let public_key = { - // Note: contrary to what one might think, we shouldn't add the "deriveBits" usage. - let promise = crypto - .import_key_with_object( - "raw", &js_sys::Object::from(other_public_key.buffer()), - &build_curve_obj(algorithm), false, &js_sys::Array::new() - )?; - wasm_bindgen_futures::JsFuture::from(promise).await? - }; - - // We then derive the final private key. - let bytes = { - let derive_params = build_curve_obj(algorithm); - let _ = js_sys::Reflect::set(derive_params.as_ref(), &JsValue::from_str("public"), &public_key); - let promise = crypto - .derive_bits_with_object( - &derive_params, - &web_sys::CryptoKey::from(private_key), - 8 * out_size as u32 - )?; - wasm_bindgen_futures::JsFuture::from(promise).await? - }; - - let bytes = js_sys::Uint8Array::new(&bytes); - let mut buf = vec![0; bytes.length() as usize]; - bytes.copy_to(&mut buf); - Ok(buf) - }; - - let future = future - .map_err(|err: JsValue| { - SecioError::IoError(io::Error::new(io::ErrorKind::Other, format!("{:?}", err))) - }); - SendSyncHack(SendWrapper::new(Box::pin(future))) -} - -/// Builds a future that returns the `SubtleCrypto` object. -async fn build_crypto_future() -> Result { - web_sys::window() - .ok_or_else(|| JsValue::from_str("Window object not available")) - .and_then(|window| window.crypto()) - .map(|crypto| crypto.subtle()) -} - -/// Builds a `EcKeyGenParams` object. 
-/// See https://developer.mozilla.org/en-US/docs/Web/API/EcKeyGenParams -fn build_curve_obj(algorithm: KeyAgreement) -> js_sys::Object { - let obj = js_sys::Object::new(); - let _ = js_sys::Reflect::set(obj.as_ref(), &JsValue::from_str("name"), &JsValue::from_str("ECDH")); - let _ = js_sys::Reflect::set(obj.as_ref(), &JsValue::from_str("namedCurve"), &JsValue::from_str(match algorithm { - KeyAgreement::EcdhP256 => "P-256", - KeyAgreement::EcdhP384 => "P-384", - })); - obj -} diff --git a/protocols/secio/src/handshake.rs b/protocols/secio/src/handshake.rs deleted file mode 100644 index d1ff63d0..00000000 --- a/protocols/secio/src/handshake.rs +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use crate::SecioConfig; -use crate::algo_support; -use crate::codec::{full_codec, FullCodec, Hmac, LenPrefixCodec}; -use crate::error::SecioError; -use crate::exchange; -use crate::stream_cipher::ctr; -use crate::structs_proto::{Exchange, Propose}; -use futures::prelude::*; -use libp2p_core::PublicKey; -use log::{debug, trace}; -use prost::Message; -use rand::{self, RngCore}; -use sha2::{Digest as ShaDigestTrait, Sha256}; -use std::{cmp::{self, Ordering}, io}; - - -/// Performs a handshake on the given socket. -/// -/// This function expects that the remote is identified with `remote_public_key`, and the remote -/// will expect that we are identified with `local_key`. Any mismatch somewhere will produce a -/// `SecioError`. -/// -/// On success, returns an object that implements the `Sink` and `Stream` trait whose items are -/// buffers of data, plus the public key of the remote, plus the ephemeral public key used during -/// negotiation. -pub async fn handshake(socket: S, config: SecioConfig) - -> Result<(FullCodec, PublicKey, Vec), SecioError> -where - S: AsyncRead + AsyncWrite + Send + Unpin + 'static -{ - let mut socket = LenPrefixCodec::new(socket, config.max_frame_len); - - let local_nonce = { - let mut local_nonce = [0; 16]; - rand::thread_rng() - .try_fill_bytes(&mut local_nonce) - .map_err(|_| SecioError::NonceGenerationFailed)?; - local_nonce - }; - - let local_public_key_encoded = config.key.public().into_protobuf_encoding(); - - // Send our proposition with our nonce, public key and supported protocols. 
- let local_proposition = Propose { - rand: Some(local_nonce.to_vec()), - pubkey: Some(local_public_key_encoded.clone()), - exchanges: if let Some(ref p) = config.agreements_prop { - trace!("agreements proposition: {}", p); - Some(p.clone()) - } else { - trace!("agreements proposition: {}", algo_support::DEFAULT_AGREEMENTS_PROPOSITION); - Some(algo_support::DEFAULT_AGREEMENTS_PROPOSITION.into()) - }, - ciphers: if let Some(ref p) = config.ciphers_prop { - trace!("ciphers proposition: {}", p); - Some(p.clone()) - } else { - trace!("ciphers proposition: {}", algo_support::DEFAULT_CIPHERS_PROPOSITION); - Some(algo_support::DEFAULT_CIPHERS_PROPOSITION.into()) - }, - hashes: if let Some(ref p) = config.digests_prop { - trace!("digests proposition: {}", p); - Some(p.clone()) - } else { - Some(algo_support::DEFAULT_DIGESTS_PROPOSITION.into()) - } - }; - - let local_proposition_bytes = { - let mut buf = Vec::with_capacity(local_proposition.encoded_len()); - local_proposition.encode(&mut buf).expect("Vec provides capacity as needed"); - buf - }; - trace!("starting handshake; local nonce = {:?}", local_nonce); - - trace!("sending proposition to remote"); - socket.send(local_proposition_bytes.clone()).await?; - - // Receive the remote's proposition. 
- let remote_proposition_bytes = match socket.next().await { - Some(b) => b?, - None => { - debug!("unexpected eof while waiting for remote's proposition"); - return Err(SecioError::IoError(io::ErrorKind::UnexpectedEof.into())) - }, - }; - - let remote_proposition = match Propose::decode(&remote_proposition_bytes[..]) { - Ok(prop) => prop, - Err(_) => { - debug!("failed to parse remote's proposition protobuf message"); - return Err(SecioError::HandshakeParsingFailure); - } - }; - - let remote_public_key_encoded = remote_proposition.pubkey.unwrap_or_default(); - let remote_nonce = remote_proposition.rand.unwrap_or_default(); - - let remote_public_key = match PublicKey::from_protobuf_encoding(&remote_public_key_encoded) { - Ok(p) => p, - Err(_) => { - debug!("failed to parse remote's proposition's pubkey protobuf"); - return Err(SecioError::HandshakeParsingFailure); - }, - }; - trace!("received proposition from remote; pubkey = {:?}; nonce = {:?}", - remote_public_key, remote_nonce); - - // In order to determine which protocols to use, we compute two hashes and choose - // based on which hash is larger. 
- let hashes_ordering = { - let oh1 = { - let mut ctx = Sha256::new(); - ctx.update(&remote_public_key_encoded); - ctx.update(&local_nonce); - ctx.finalize() - }; - - let oh2 = { - let mut ctx = Sha256::new(); - ctx.update(&local_public_key_encoded); - ctx.update(&remote_nonce); - ctx.finalize() - }; - - oh1.cmp(&oh2) - }; - - let chosen_exchange = { - let ours = config.agreements_prop.as_ref() - .map(|s| s.as_ref()) - .unwrap_or(algo_support::DEFAULT_AGREEMENTS_PROPOSITION); - let theirs = &remote_proposition.exchanges.unwrap_or_default(); - match algo_support::select_agreement(hashes_ordering, ours, theirs) { - Ok(a) => a, - Err(err) => { - debug!("failed to select an exchange protocol"); - return Err(err); - } - } - }; - - let chosen_cipher = { - let ours = config.ciphers_prop.as_ref() - .map(|s| s.as_ref()) - .unwrap_or(algo_support::DEFAULT_CIPHERS_PROPOSITION); - let theirs = &remote_proposition.ciphers.unwrap_or_default(); - match algo_support::select_cipher(hashes_ordering, ours, theirs) { - Ok(a) => { - debug!("selected cipher: {:?}", a); - a - } - Err(err) => { - debug!("failed to select a cipher protocol"); - return Err(err); - } - } - }; - - let chosen_hash = { - let ours = config.digests_prop.as_ref() - .map(|s| s.as_ref()) - .unwrap_or(algo_support::DEFAULT_DIGESTS_PROPOSITION); - let theirs = &remote_proposition.hashes.unwrap_or_default(); - match algo_support::select_digest(hashes_ordering, ours, theirs) { - Ok(a) => { - debug!("selected hash: {:?}", a); - a - } - Err(err) => { - debug!("failed to select a hash protocol"); - return Err(err); - } - } - }; - - // Generate an ephemeral key for the negotiation. - let (tmp_priv_key, tmp_pub_key) = exchange::generate_agreement(chosen_exchange).await?; - - // Send the ephemeral pub key to the remote in an `Exchange` struct. The `Exchange` also - // contains a signature of the two propositions encoded with our static public key. 
- let local_exchange = { - let mut data_to_sign = local_proposition_bytes.clone(); - data_to_sign.extend_from_slice(&remote_proposition_bytes); - data_to_sign.extend_from_slice(&tmp_pub_key); - - Exchange { - epubkey: Some(tmp_pub_key.clone()), - signature: match config.key.sign(&data_to_sign) { - Ok(sig) => Some(sig), - Err(_) => return Err(SecioError::SigningFailure) - } - } - }; - let local_exch = { - let mut buf = Vec::with_capacity(local_exchange.encoded_len()); - local_exchange.encode(&mut buf).expect("Vec provides capacity as needed"); - buf - }; - - // Send our local `Exchange`. - trace!("sending exchange to remote"); - socket.send(local_exch).await?; - - // Receive the remote's `Exchange`. - let remote_exch = { - let raw = match socket.next().await { - Some(r) => r?, - None => { - debug!("unexpected eof while waiting for remote's exchange"); - return Err(SecioError::IoError(io::ErrorKind::UnexpectedEof.into())) - }, - }; - - match Exchange::decode(&raw[..]) { - Ok(e) => { - trace!("received and decoded the remote's exchange"); - e - }, - Err(err) => { - debug!("failed to parse remote's exchange protobuf; {:?}", err); - return Err(SecioError::HandshakeParsingFailure); - } - } - }; - - // Check the validity of the remote's `Exchange`. This verifies that the remote was really - // the sender of its proposition, and that it is the owner of both its global and ephemeral - // keys. 
- { - let mut data_to_verify = remote_proposition_bytes.clone(); - data_to_verify.extend_from_slice(&local_proposition_bytes); - data_to_verify.extend_from_slice(remote_exch.epubkey.as_deref().unwrap_or_default()); - - if !remote_public_key.verify(&data_to_verify, &remote_exch.signature.unwrap_or_default()) { - return Err(SecioError::SignatureVerificationFailed) - } - - trace!("successfully verified the remote's signature"); - } - - // Generate a key from the local ephemeral private key and the remote ephemeral public key, - // derive from it a cipher key, an iv, and a hmac key, and build the encoder/decoder. - let key_material = exchange::agree( - chosen_exchange, - tmp_priv_key, - &remote_exch.epubkey.unwrap_or_default(), - chosen_hash.num_bytes() - ).await?; - - // Generate a key from the local ephemeral private key and the remote ephemeral public key, - // derive from it a cipher key, an iv, and a hmac key, and build the encoder/decoder. - let mut codec = { - let cipher_key_size = chosen_cipher.key_size(); - let iv_size = chosen_cipher.iv_size(); - - let key = Hmac::from_key(chosen_hash, &key_material); - let mut longer_key = vec![0u8; 2 * (iv_size + cipher_key_size + 20)]; - stretch_key(key, &mut longer_key); - - let (local_infos, remote_infos) = { - let (first_half, second_half) = longer_key.split_at(longer_key.len() / 2); - match hashes_ordering { - Ordering::Equal => { - let msg = "equal digest of public key and nonce for local and remote"; - return Err(SecioError::InvalidProposition(msg)) - } - Ordering::Less => (second_half, first_half), - Ordering::Greater => (first_half, second_half), - } - }; - - let (encoding_cipher, encoding_hmac) = { - let (iv, rest) = local_infos.split_at(iv_size); - let (cipher_key, mac_key) = rest.split_at(cipher_key_size); - let hmac = Hmac::from_key(chosen_hash, mac_key); - let cipher = ctr(chosen_cipher, cipher_key, iv); - (cipher, hmac) - }; - - let (decoding_cipher, decoding_hmac) = { - let (iv, rest) = 
remote_infos.split_at(iv_size); - let (cipher_key, mac_key) = rest.split_at(cipher_key_size); - let hmac = Hmac::from_key(chosen_hash, mac_key); - let cipher = ctr(chosen_cipher, cipher_key, iv); - (cipher, hmac) - }; - - full_codec( - socket, - encoding_cipher, - encoding_hmac, - decoding_cipher, - decoding_hmac, - local_nonce.to_vec() - ) - }; - - // We send back their nonce to check if the connection works. - trace!("checking encryption by sending back remote's nonce"); - codec.send(remote_nonce).await?; - - Ok((codec, remote_public_key, tmp_pub_key)) -} - -/// Custom algorithm translated from reference implementations. Needs to be the same algorithm -/// amongst all implementations. -fn stretch_key(hmac: Hmac, result: &mut [u8]) { - match hmac { - Hmac::Sha256(hmac) => stretch_key_inner(hmac, result), - Hmac::Sha512(hmac) => stretch_key_inner(hmac, result), - } -} - -fn stretch_key_inner(hmac: ::hmac::Hmac, result: &mut [u8]) -where D: ::hmac::digest::Update + ::hmac::digest::BlockInput + - ::hmac::digest::FixedOutput + ::hmac::digest::Reset + Default + Clone, - ::hmac::Hmac: Clone + ::hmac::crypto_mac::Mac -{ - use ::hmac::Mac; - const SEED: &[u8] = b"key expansion"; - - let mut init_ctxt = hmac.clone(); - init_ctxt.update(SEED); - let mut a = init_ctxt.finalize().into_bytes(); - - let mut j = 0; - while j < result.len() { - let mut context = hmac.clone(); - context.update(a.as_ref()); - context.update(SEED); - let b = context.finalize().into_bytes(); - - let todo = cmp::min(b.as_ref().len(), result.len() - j); - - result[j..j + todo].copy_from_slice(&b.as_ref()[..todo]); - - j += todo; - - let mut context = hmac.clone(); - context.update(a.as_ref()); - a = context.finalize().into_bytes(); - } -} - -#[cfg(test)] -mod tests { - use super::{handshake, stretch_key}; - use crate::{algo_support::Digest, codec::Hmac, SecioConfig}; - use libp2p_core::identity; - use futures::{prelude::*, channel::oneshot}; - - #[test] - #[cfg(not(any(target_os = "emscripten", 
target_os = "wasi", target_os = "unknown")))] - fn handshake_with_self_succeeds_rsa() { - let key1 = { - let mut private = include_bytes!("../tests/test-rsa-private-key.pk8").to_vec(); - identity::Keypair::rsa_from_pkcs8(&mut private).unwrap() - }; - - let key2 = { - let mut private = include_bytes!("../tests/test-rsa-private-key-2.pk8").to_vec(); - identity::Keypair::rsa_from_pkcs8(&mut private).unwrap() - }; - - handshake_with_self_succeeds(SecioConfig::new(key1), SecioConfig::new(key2)); - } - - #[test] - fn handshake_with_self_succeeds_ed25519() { - let key1 = identity::Keypair::generate_ed25519(); - let key2 = identity::Keypair::generate_ed25519(); - handshake_with_self_succeeds(SecioConfig::new(key1), SecioConfig::new(key2)); - } - - #[test] - #[cfg(feature = "secp256k1")] - fn handshake_with_self_succeeds_secp256k1() { - let key1 = { - let mut key = include_bytes!("../tests/test-secp256k1-private-key.der").to_vec(); - identity::Keypair::secp256k1_from_der(&mut key).unwrap() - }; - - let key2 = { - let mut key = include_bytes!("../tests/test-secp256k1-private-key-2.der").to_vec(); - identity::Keypair::secp256k1_from_der(&mut key).unwrap() - }; - - handshake_with_self_succeeds(SecioConfig::new(key1), SecioConfig::new(key2)); - } - - fn handshake_with_self_succeeds(key1: SecioConfig, key2: SecioConfig) { - let (l_a_tx, l_a_rx) = oneshot::channel(); - - async_std::task::spawn(async move { - let listener = async_std::net::TcpListener::bind(&"127.0.0.1:0").await.unwrap(); - l_a_tx.send(listener.local_addr().unwrap()).unwrap(); - let connec = listener.accept().await.unwrap().0; - let mut codec = handshake(connec, key1).await.unwrap().0; - while let Some(packet) = codec.next().await { - let packet = packet.unwrap(); - if !packet.is_empty() { - codec.send(packet.into()).await.unwrap(); - } - } - }); - - async_std::task::block_on(async move { - let listen_addr = l_a_rx.await.unwrap(); - let connec = async_std::net::TcpStream::connect(&listen_addr).await.unwrap(); - 
let mut codec = handshake(connec, key2).await.unwrap().0; - codec.send(b"hello".to_vec().into()).await.unwrap(); - let mut packets_stream = codec.filter(|p| future::ready(!p.as_ref().unwrap().is_empty())); - let packet = packets_stream.next().await.unwrap(); - assert_eq!(packet.unwrap(), b"hello"); - }); - } - - #[test] - fn stretch() { - let mut output = [0u8; 32]; - - let key1 = Hmac::from_key(Digest::Sha256, &[]); - stretch_key(key1, &mut output); - assert_eq!( - &output, - &[ - 103, 144, 60, 199, 85, 145, 239, 71, 79, 198, 85, 164, 32, 53, 143, 205, 50, 48, - 153, 10, 37, 32, 85, 1, 226, 61, 193, 1, 154, 120, 207, 80, - ] - ); - - let key2 = Hmac::from_key( - Digest::Sha256, - &[ - 157, 166, 80, 144, 77, 193, 198, 6, 23, 220, 87, 220, 191, 72, 168, 197, 54, 33, - 219, 225, 84, 156, 165, 37, 149, 224, 244, 32, 170, 79, 125, 35, 171, 26, 178, 176, - 92, 168, 22, 27, 205, 44, 229, 61, 152, 21, 222, 81, 241, 81, 116, 236, 74, 166, - 89, 145, 5, 162, 108, 230, 55, 54, 9, 17, - ], - ); - stretch_key(key2, &mut output); - assert_eq!( - &output, - &[ - 39, 151, 182, 63, 180, 175, 224, 139, 42, 131, 130, 116, 55, 146, 62, 31, 157, 95, - 217, 15, 73, 81, 10, 83, 243, 141, 64, 227, 103, 144, 99, 121, - ] - ); - - let key3 = Hmac::from_key( - Digest::Sha256, - &[ - 98, 219, 94, 104, 97, 70, 139, 13, 185, 110, 56, 36, 66, 3, 80, 224, 32, 205, 102, - 170, 59, 32, 140, 245, 86, 102, 231, 68, 85, 249, 227, 243, 57, 53, 171, 36, 62, - 225, 178, 74, 89, 142, 151, 94, 183, 231, 208, 166, 244, 130, 130, 209, 248, 65, - 19, 48, 127, 127, 55, 82, 117, 154, 124, 108, - ], - ); - stretch_key(key3, &mut output); - assert_eq!( - &output, - &[ - 28, 39, 158, 206, 164, 16, 211, 194, 99, 43, 208, 36, 24, 141, 90, 93, 157, 236, - 238, 111, 170, 0, 60, 11, 49, 174, 177, 121, 30, 12, 182, 25, - ] - ); - } -} diff --git a/protocols/secio/src/lib.rs b/protocols/secio/src/lib.rs deleted file mode 100644 index 7332c725..00000000 --- a/protocols/secio/src/lib.rs +++ /dev/null @@ -1,302 +0,0 @@ -// 
Copyright 2017 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! The `secio` protocol is a middleware that will encrypt and decrypt communications going -//! through a socket (or anything that implements `AsyncRead + AsyncWrite`). -//! -//! # Usage -//! -//! The `SecioConfig` implements [`InboundUpgrade`] and [`OutboundUpgrade`] and thus -//! serves as a connection upgrade for authentication of a transport. -//! See [`authenticate`](libp2p_core::transport::upgrade::Builder::authenticate). -//! -//! ```no_run -//! # fn main() { -//! use futures::prelude::*; -//! use libp2p_secio::{SecioConfig, SecioOutput}; -//! use libp2p_core::{PeerId, Multiaddr, identity, upgrade}; -//! use libp2p_core::transport::Transport; -//! use libp2p_mplex::MplexConfig; -//! use libp2p_tcp::TcpConfig; -//! -//! // Create a local peer identity. -//! let local_keys = identity::Keypair::generate_ed25519(); -//! -//! 
// Create a `Transport`. -//! let transport = TcpConfig::new() -//! .upgrade(upgrade::Version::V1) -//! .authenticate(SecioConfig::new(local_keys.clone())) -//! .multiplex(MplexConfig::default()); -//! -//! // The transport can be used with a `Network` from `libp2p-core`, or a -//! // `Swarm` from from `libp2p-swarm`. See the documentation of these -//! // crates for mode details. -//! -//! // let network = Network::new(transport, local_keys.public().into_peer_id()); -//! // let swarm = Swarm::new(transport, behaviour, local_keys.public().into_peer_id()); -//! # } -//! ``` -//! - -pub use self::error::SecioError; - -use futures::stream::MapErr as StreamMapErr; -use futures::prelude::*; -use libp2p_core::{PeerId, PublicKey, identity, upgrade::{UpgradeInfo, InboundUpgrade, OutboundUpgrade}}; -use log::debug; -use rw_stream_sink::RwStreamSink; -use std::{io, iter, pin::Pin, task::Context, task::Poll}; - -mod algo_support; -mod codec; -mod error; -mod exchange; -mod handshake; -mod structs_proto { - include!(concat!(env!("OUT_DIR"), "/spipe.pb.rs")); -} -mod stream_cipher; - -pub use crate::algo_support::Digest; -pub use crate::exchange::KeyAgreement; -pub use crate::stream_cipher::Cipher; - -/// Implementation of the `ConnectionUpgrade` trait of `libp2p_core`. Automatically applies -/// secio on any connection. -#[derive(Clone)] -pub struct SecioConfig { - /// Private and public keys of the local node. - pub(crate) key: identity::Keypair, - pub(crate) agreements_prop: Option, - pub(crate) ciphers_prop: Option, - pub(crate) digests_prop: Option, - pub(crate) max_frame_len: usize -} - -impl SecioConfig { - /// Create a new `SecioConfig` with the given keypair. - pub fn new(kp: identity::Keypair) -> Self { - SecioConfig { - key: kp, - agreements_prop: None, - ciphers_prop: None, - digests_prop: None, - max_frame_len: 8 * 1024 * 1024 - } - } - - /// Override the default set of supported key agreement algorithms. 
- pub fn key_agreements<'a, I>(mut self, xs: I) -> Self - where - I: IntoIterator - { - self.agreements_prop = Some(algo_support::key_agreements_proposition(xs)); - self - } - - /// Override the default set of supported ciphers. - pub fn ciphers<'a, I>(mut self, xs: I) -> Self - where - I: IntoIterator - { - self.ciphers_prop = Some(algo_support::ciphers_proposition(xs)); - self - } - - /// Override the default set of supported digest algorithms. - pub fn digests<'a, I>(mut self, xs: I) -> Self - where - I: IntoIterator - { - self.digests_prop = Some(algo_support::digests_proposition(xs)); - self - } - - /// Override the default max. frame length of 8MiB. - pub fn max_frame_len(mut self, n: usize) -> Self { - self.max_frame_len = n; - self - } - - fn handshake(self, socket: T) -> impl Future), SecioError>> - where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static - { - debug!("Starting secio upgrade"); - SecioMiddleware::handshake(socket, self) - .map_ok(|(stream_sink, pubkey, ephemeral)| { - let mapped = stream_sink.map_err(map_err as fn(_) -> _); - let peer = pubkey.clone().into_peer_id(); - let io = SecioOutput { - stream: RwStreamSink::new(mapped), - remote_key: pubkey, - ephemeral_public_key: ephemeral - }; - (peer, io) - }) - } -} - -/// Output of the secio protocol. -pub struct SecioOutput -where - S: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - /// The encrypted stream. - pub stream: RwStreamSink, fn(SecioError) -> io::Error>>, - /// The public key of the remote. - pub remote_key: PublicKey, - /// Ephemeral public key used during the negotiation. 
- pub ephemeral_public_key: Vec, -} - -impl UpgradeInfo for SecioConfig { - type Info = &'static [u8]; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(b"/secio/1.0.0") - } -} - -impl InboundUpgrade for SecioConfig -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = (PeerId, SecioOutput); - type Error = SecioError; - type Future = Pin> + Send>>; - - fn upgrade_inbound(self, socket: T, _: Self::Info) -> Self::Future { - Box::pin(self.handshake(socket)) - } -} - -impl OutboundUpgrade for SecioConfig -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = (PeerId, SecioOutput); - type Error = SecioError; - type Future = Pin> + Send>>; - - fn upgrade_outbound(self, socket: T, _: Self::Info) -> Self::Future { - Box::pin(self.handshake(socket)) - } -} - -impl AsyncRead for SecioOutput -where - S: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8]) - -> Poll> - { - AsyncRead::poll_read(Pin::new(&mut self.stream), cx, buf) - } -} - -impl AsyncWrite for SecioOutput -where - S: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) - -> Poll> - { - AsyncWrite::poll_write(Pin::new(&mut self.stream), cx, buf) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) - -> Poll> - { - AsyncWrite::poll_flush(Pin::new(&mut self.stream), cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) - -> Poll> - { - AsyncWrite::poll_close(Pin::new(&mut self.stream), cx) - } -} - -fn map_err(err: SecioError) -> io::Error { - debug!("error during secio handshake {:?}", err); - io::Error::new(io::ErrorKind::InvalidData, err) -} - -/// Wraps around an object that implements `AsyncRead` and `AsyncWrite`. -/// -/// Implements `Sink` and `Stream` whose items are frames of data. 
Each frame is encoded -/// individually, so you are encouraged to group data in few frames if possible. -pub struct SecioMiddleware { - inner: codec::FullCodec, -} - -impl SecioMiddleware -where - S: AsyncRead + AsyncWrite + Send + Unpin + 'static, -{ - /// Attempts to perform a handshake on the given socket. - /// - /// On success, produces a `SecioMiddleware` that can then be used to encode/decode - /// communications, plus the public key of the remote, plus the ephemeral public key. - pub fn handshake(socket: S, config: SecioConfig) - -> impl Future, PublicKey, Vec), SecioError>> - { - handshake::handshake(socket, config).map_ok(|(inner, pubkey, ephemeral)| { - let inner = SecioMiddleware { inner }; - (inner, pubkey, ephemeral) - }) - } -} - -impl Sink> for SecioMiddleware -where - S: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Error = io::Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Sink::poll_ready(Pin::new(&mut self.inner), cx) - } - - fn start_send(mut self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { - Sink::start_send(Pin::new(&mut self.inner), item) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Sink::poll_flush(Pin::new(&mut self.inner), cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Sink::poll_close(Pin::new(&mut self.inner), cx) - } -} - -impl Stream for SecioMiddleware -where - S: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Item = Result, SecioError>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Stream::poll_next(Pin::new(&mut self.inner), cx) - } -} diff --git a/protocols/secio/src/stream_cipher.rs b/protocols/secio/src/stream_cipher.rs deleted file mode 100644 index ab15de1a..00000000 --- a/protocols/secio/src/stream_cipher.rs +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use super::codec::StreamCipher; -use aes_ctr::stream_cipher::generic_array::GenericArray; -use aes_ctr::stream_cipher::{NewStreamCipher, LoopError, SyncStreamCipher}; -use aes_ctr::{Aes128Ctr, Aes256Ctr}; -use ctr::Ctr128; -use twofish::Twofish; - -/// Possible encryption ciphers. -#[derive(Clone, Copy, Debug)] -pub enum Cipher { - Aes128, - Aes256, - TwofishCtr, - Null, -} - -impl Cipher { - /// Returns the size of in bytes of the key expected by the cipher. - pub fn key_size(&self) -> usize { - match *self { - Cipher::Aes128 => 16, - Cipher::Aes256 => 32, - Cipher::TwofishCtr => 32, - Cipher::Null => 0, - } - } - - /// Returns the size of in bytes of the IV expected by the cipher. 
- #[inline] - pub fn iv_size(&self) -> usize { - match self { - Cipher::Aes128 | Cipher::Aes256 | Cipher::TwofishCtr => 16, - Cipher::Null => 0 - } - } -} - -/// A no-op cipher which does not encrypt or decrypt at all. -/// Obviously only useful for debugging purposes. -#[derive(Clone, Copy, Debug)] -pub struct NullCipher; - -impl SyncStreamCipher for NullCipher { - fn try_apply_keystream(&mut self, _data: &mut [u8]) -> Result<(), LoopError> { - Ok(()) - } -} - -/// Returns your stream cipher depending on `Cipher`. -#[cfg(not(all(feature = "aes-all", any(target_arch = "x86_64", target_arch = "x86"))))] -pub fn ctr(key_size: Cipher, key: &[u8], iv: &[u8]) -> StreamCipher { - ctr_int(key_size, key, iv) -} - -/// Returns your stream cipher depending on `Cipher`. -#[cfg(all(feature = "aes-all", any(target_arch = "x86_64", target_arch = "x86")))] -pub fn ctr(key_size: Cipher, key: &[u8], iv: &[u8]) -> StreamCipher { - if *aes_alt::AES_NI { - aes_alt::ctr_alt(key_size, key, iv) - } else { - ctr_int(key_size, key, iv) - } -} - - -#[cfg(all(feature = "aes-all", any(target_arch = "x86_64", target_arch = "x86")))] -mod aes_alt { - use crate::codec::StreamCipher; - use ctr::Ctr128; - use aesni::{Aes128, Aes256}; - use ctr::stream_cipher::NewStreamCipher; - use ctr::stream_cipher::generic_array::GenericArray; - use lazy_static::lazy_static; - use twofish::Twofish; - use super::{Cipher, NullCipher}; - - lazy_static! { - pub static ref AES_NI: bool = is_x86_feature_detected!("aes") - && is_x86_feature_detected!("sse2") - && is_x86_feature_detected!("sse3"); - - } - - /// AES-128 in CTR mode - pub type Aes128Ctr = Ctr128; - /// AES-256 in CTR mode - pub type Aes256Ctr = Ctr128; - /// Returns alternate stream cipher if target functionalities does not allow standard one. 
- /// Eg : aes without sse - pub fn ctr_alt(key_size: Cipher, key: &[u8], iv: &[u8]) -> StreamCipher { - match key_size { - Cipher::Aes128 => Box::new(Aes128Ctr::new( - GenericArray::from_slice(key), - GenericArray::from_slice(iv), - )), - Cipher::Aes256 => Box::new(Aes256Ctr::new( - GenericArray::from_slice(key), - GenericArray::from_slice(iv), - )), - Cipher::TwofishCtr => Box::new(Ctr128::::new( - GenericArray::from_slice(key), - GenericArray::from_slice(iv), - )), - Cipher::Null => Box::new(NullCipher), - } - } - -} - -#[inline] -fn ctr_int(key_size: Cipher, key: &[u8], iv: &[u8]) -> StreamCipher { - match key_size { - Cipher::Aes128 => Box::new(Aes128Ctr::new( - GenericArray::from_slice(key), - GenericArray::from_slice(iv), - )), - Cipher::Aes256 => Box::new(Aes256Ctr::new( - GenericArray::from_slice(key), - GenericArray::from_slice(iv), - )), - Cipher::TwofishCtr => Box::new(Ctr128::::new( - GenericArray::from_slice(key), - GenericArray::from_slice(iv), - )), - Cipher::Null => Box::new(NullCipher), - } -} - -#[cfg(all( - feature = "aes-all", - any(target_arch = "x86_64", target_arch = "x86"), -))] -#[cfg(test)] -mod tests { - use super::{Cipher, ctr}; - - #[test] - fn assert_non_native_run() { - // this test is for asserting aes unsuported opcode does not break on old cpu - let key = [0;16]; - let iv = [0;16]; - - let mut aes = ctr(Cipher::Aes128, &key, &iv); - let mut content = [0;16]; - aes.encrypt(&mut content); - - } -} - -// aesni compile check for aes-all (aes-all import aesni through aes_ctr only if those checks pass) -#[cfg(all( - feature = "aes-all", - any(target_arch = "x86_64", target_arch = "x86"), - any(target_feature = "aes", target_feature = "ssse3"), -))] -compile_error!( - "aes-all must be compile without aes and sse3 flags : currently \ - is_x86_feature_detected macro will not detect feature correctly otherwhise. \ - RUSTFLAGS=\"-C target-feature=+aes,+ssse3\" enviromental variable. 
\ - For x86 target arch additionally enable sse2 target feature." -); diff --git a/protocols/secio/src/structs.proto b/protocols/secio/src/structs.proto deleted file mode 100644 index a35de4c8..00000000 --- a/protocols/secio/src/structs.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto2"; - -package spipe.pb; - -message Propose { - optional bytes rand = 1; - optional bytes pubkey = 2; - optional string exchanges = 3; - optional string ciphers = 4; - optional string hashes = 5; -} - -message Exchange { - optional bytes epubkey = 1; - optional bytes signature = 2; -} \ No newline at end of file diff --git a/protocols/secio/tests/test-rsa-private-key-2.pk8 b/protocols/secio/tests/test-rsa-private-key-2.pk8 deleted file mode 100644 index 85ff4154..00000000 Binary files a/protocols/secio/tests/test-rsa-private-key-2.pk8 and /dev/null differ diff --git a/protocols/secio/tests/test-rsa-private-key.pk8 b/protocols/secio/tests/test-rsa-private-key.pk8 deleted file mode 100644 index 452b7af1..00000000 Binary files a/protocols/secio/tests/test-rsa-private-key.pk8 and /dev/null differ diff --git a/protocols/secio/tests/test-secp256k1-private-key-2.der b/protocols/secio/tests/test-secp256k1-private-key-2.der deleted file mode 100644 index 16be9616..00000000 Binary files a/protocols/secio/tests/test-secp256k1-private-key-2.der and /dev/null differ diff --git a/protocols/secio/tests/test-secp256k1-private-key.der b/protocols/secio/tests/test-secp256k1-private-key.der deleted file mode 100644 index 90bb3973..00000000 Binary files a/protocols/secio/tests/test-secp256k1-private-key.der and /dev/null differ diff --git a/src/bandwidth.rs b/src/bandwidth.rs index 705164b5..87b66653 100644 --- a/src/bandwidth.rs +++ b/src/bandwidth.rs @@ -74,6 +74,10 @@ where .dial(addr) .map(move |fut| BandwidthFuture { inner: fut, sinks }) } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.inner.address_translation(server, observed) + } } /// Wraps 
around a `Stream` that produces connections. Wraps each connection around a bandwidth diff --git a/src/lib.rs b/src/lib.rs index 514c352d..eb965931 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -85,7 +85,7 @@ //! Example ([`noise`] + [`yamux`] Protocol Upgrade): //! //! ```rust -//! # #[cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), feature = "tcp-async-std", feature = "noise", feature = "yamux"))] { +//! # #[cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), feature = "noise", feature = "yamux"))] { //! use libp2p::{Transport, core::upgrade, tcp::TcpConfig, noise, identity::Keypair, yamux}; //! let tcp = TcpConfig::new(); //! let id_keys = Keypair::generate_ed25519(); @@ -215,8 +215,8 @@ pub use libp2p_ping as ping; pub use libp2p_plaintext as plaintext; #[doc(inline)] pub use libp2p_swarm as swarm; -#[cfg(any(feature = "tcp-async-std", feature = "tcp-tokio"))] -#[cfg_attr(docsrs, doc(cfg(any(feature = "tcp-async-std", feature = "tcp-tokio"))))] +#[cfg(any(feature = "tcp-async-io", feature = "tcp-tokio"))] +#[cfg_attr(docsrs, doc(cfg(any(feature = "tcp-async-io", feature = "tcp-tokio"))))] #[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))] #[doc(inline)] pub use libp2p_tcp as tcp; @@ -258,7 +258,7 @@ pub use self::core::{ transport::TransportError, upgrade::{InboundUpgrade, InboundUpgradeExt, OutboundUpgrade, OutboundUpgradeExt} }; -pub use libp2p_core_derive::NetworkBehaviour; +pub use libp2p_swarm_derive::NetworkBehaviour; pub use self::multiaddr::{Multiaddr, multiaddr as build_multiaddr}; pub use self::simple::SimpleProtocol; pub use self::swarm::Swarm; @@ -268,8 +268,8 @@ pub use self::transport_ext::TransportExt; /// /// > **Note**: This `Transport` is not suitable for production usage, as its implementation /// > reserves the right to support additional protocols or remove deprecated protocols. 
-#[cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-std", feature = "tcp-tokio"), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux"))] -#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-std", feature = "tcp-tokio"), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux"))))] +#[cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-io", feature = "tcp-tokio"), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux"))] +#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-io", feature = "tcp-tokio"), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux"))))] pub fn build_development_transport(keypair: identity::Keypair) -> std::io::Result> { @@ -280,13 +280,13 @@ pub fn build_development_transport(keypair: identity::Keypair) /// /// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, /// and mplex or yamux as the multiplexing layer. 
-#[cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-std", feature = "tcp-tokio"), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux"))] -#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-std", feature = "tcp-tokio"), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux"))))] +#[cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-io", feature = "tcp-tokio"), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux"))] +#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-io", feature = "tcp-tokio"), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux"))))] pub fn build_tcp_ws_noise_mplex_yamux(keypair: identity::Keypair) -> std::io::Result> { let transport = { - #[cfg(feature = "tcp-async-std")] + #[cfg(feature = "tcp-async-io")] let tcp = tcp::TcpConfig::new().nodelay(true); #[cfg(feature = "tcp-tokio")] let tcp = tcp::TokioTcpConfig::new().nodelay(true); @@ -311,13 +311,13 @@ pub fn build_tcp_ws_noise_mplex_yamux(keypair: identity::Keypair) /// /// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, /// and mplex or yamux as the multiplexing layer. 
-#[cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-std", feature = "tcp-tokio"), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux", feature = "pnet"))] -#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-std", feature = "tcp-tokio"), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux", feature = "pnet"))))] +#[cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-io", feature = "tcp-tokio"), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux", feature = "pnet"))] +#[cfg_attr(docsrs, doc(cfg(all(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")), any(feature = "tcp-async-io", feature = "tcp-tokio"), feature = "websocket", feature = "noise", feature = "mplex", feature = "yamux", feature = "pnet"))))] pub fn build_tcp_ws_pnet_noise_mplex_yamux(keypair: identity::Keypair, psk: PreSharedKey) -> std::io::Result> { let transport = { - #[cfg(feature = "tcp-async-std")] + #[cfg(feature = "tcp-async-io")] let tcp = tcp::TcpConfig::new().nodelay(true); #[cfg(feature = "tcp-tokio")] let tcp = tcp::TokioTcpConfig::new().nodelay(true); diff --git a/src/transport_ext.rs b/src/transport_ext.rs index e2e37fe5..de77007b 100644 --- a/src/transport_ext.rs +++ b/src/transport_ext.rs @@ -37,8 +37,6 @@ pub trait TransportExt: Transport { { BandwidthLogging::new(self) } - - // TODO: add methods to easily upgrade for secio/mplex/yamux } impl TransportExt for TTransport where TTransport: Transport {} diff --git a/misc/core-derive/CHANGELOG.md b/swarm-derive/CHANGELOG.md similarity index 87% rename from misc/core-derive/CHANGELOG.md rename to swarm-derive/CHANGELOG.md index a41b0946..36cd7a4a 100644 --- a/misc/core-derive/CHANGELOG.md +++ b/swarm-derive/CHANGELOG.md @@ -1,3 +1,7 @@ 
+# 0.22.0 [2021-02-15] + +- Rename the crate to `libp2p-swarm-derive`. + # 0.21.0 [2020-11-25] - Update for compatibility with `libp2p-swarm-0.25`. diff --git a/misc/core-derive/Cargo.toml b/swarm-derive/Cargo.toml similarity index 78% rename from misc/core-derive/Cargo.toml rename to swarm-derive/Cargo.toml index cdaca5a0..d56217c2 100644 --- a/misc/core-derive/Cargo.toml +++ b/swarm-derive/Cargo.toml @@ -1,8 +1,8 @@ [package] -name = "fluence-fork-libp2p-core-derive" +name = "fluence-fork-libp2p-swarm-derive" edition = "2018" description = "Procedural macros of libp2p-core" -version = "0.21.1" +version = "0.22.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -10,7 +10,7 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [lib] -name = "libp2p_core_derive" +name = "libp2p_swarm_derive" proc-macro = true [dependencies] @@ -18,7 +18,7 @@ syn = { version = "1.0.8", default-features = false, features = ["clone-impls", quote = "1.0" [dev-dependencies] -libp2p = { path = "../..", package = "fluence-fork-libp2p" } +libp2p = { path = "../", package = "fluence-fork-libp2p" } [package.metadata.workspaces] independent = true diff --git a/misc/core-derive/src/lib.rs b/swarm-derive/src/lib.rs similarity index 99% rename from misc/core-derive/src/lib.rs rename to swarm-derive/src/lib.rs index 87a2a963..2e69c207 100644 --- a/misc/core-derive/src/lib.rs +++ b/swarm-derive/src/lib.rs @@ -20,8 +20,6 @@ #![recursion_limit = "256"] - - use quote::quote; use proc_macro::TokenStream; use syn::{parse_macro_input, DeriveInput, Data, DataStruct, Ident}; diff --git a/misc/core-derive/tests/test.rs b/swarm-derive/tests/test.rs similarity index 98% rename from misc/core-derive/tests/test.rs rename to swarm-derive/tests/test.rs index 1dfcd847..9a4c86d4 100644 --- a/misc/core-derive/tests/test.rs +++ b/swarm-derive/tests/test.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION 
WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use libp2p_core_derive::*; +use libp2p_swarm_derive::*; /// Small utility to check that a type implements `NetworkBehaviour`. #[allow(dead_code)] @@ -291,15 +291,15 @@ fn event_process_false() { identify: libp2p::identify::Identify, } - #[allow(dead_code)] + #[allow(dead_code, unreachable_code)] fn bar() { require_net_behaviour::(); - let mut swarm: libp2p::Swarm = unimplemented!(); + let mut _swarm: libp2p::Swarm = unimplemented!(); // check that the event is bubbled up all the way to swarm let _ = async { - match swarm.next().await { + match _swarm.next().await { BehaviourOutEvent::Ping(_) => {}, BehaviourOutEvent::Identify(_) => {}, } diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index 1abcaf0e..f6efa8a6 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -1,3 +1,28 @@ +# 0.28.0 [unreleased] + +- Remove the option for a substream-specific multistream select protocol override. + The override at this granularity is no longer deemed useful, in particular because + it can usually not be configured for existing protocols like `libp2p-kad` and others. + There is a `Swarm`-scoped configuration for this version available since + [1858](https://github.com/libp2p/rust-libp2p/pull/1858). + +# 0.27.2 [2021-02-04] + +- Have `ToggleProtoHandler` ignore listen upgrade errors when disabled. + [PR 1945](https://github.com/libp2p/rust-libp2p/pull/1945/files). + +# 0.27.1 [2021-01-27] + +- Make `OneShotHandler`s `max_dial_negotiate` limit configurable. + [PR 1936](https://github.com/libp2p/rust-libp2p/pull/1936). + +- Fix handling of DialPeerCondition::Always. + [PR 1937](https://github.com/libp2p/rust-libp2p/pull/1937). + +# 0.27.0 [2021-01-12] + +- Update dependencies. + # 0.26.0 [2020-12-17] - Update `libp2p-core`. 
diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index e2126cb4..cd0f74fe 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-swarm" edition = "2018" description = "The libp2p swarm" -version = "0.26.1" +version = "0.28.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -15,7 +15,7 @@ name = "libp2p_swarm" [dependencies] either = "1.6.0" futures = "0.3.1" -libp2p-core = { version = "0.26.1", path = "../core", package = "fluence-fork-libp2p-core" } +libp2p-core = { version = "0.27.1", path = "../core", package = "fluence-fork-libp2p-core" } log = "0.4" rand = "0.7" smallvec = "1.0" @@ -24,7 +24,7 @@ void = "1" [dev-dependencies] libp2p-mplex = { path = "../muxers/mplex", package = "fluence-fork-libp2p-mplex" } -libp2p-noise = { path = "../protocols/noise", package = "fluence-fork-libp2p-noise" } +libp2p-noise = { path = "../transports/noise", package = "fluence-fork-libp2p-noise" } quickcheck = "0.9.0" rand = "0.7.2" diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index 30e6fb38..5a8da7be 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -82,21 +82,21 @@ pub trait NetworkBehaviour: Send + 'static { /// address should be the most likely to be reachable. fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec; - /// Indicates the behaviour that we connected to the node with the given peer id. + /// Indicate to the behaviour that we connected to the node with the given peer id. /// /// This node now has a handler (as spawned by `new_handler`) running in the background. /// - /// This method is only called when the connection to the peer is - /// established, preceded by `inject_connection_established`. + /// This method is only called when the first connection to the peer is established, preceded by + /// [`inject_connection_established`](NetworkBehaviour::inject_connection_established). 
fn inject_connected(&mut self, peer_id: &PeerId); - /// Indicates the behaviour that we disconnected from the node with the given peer id. + /// Indicates to the behaviour that we disconnected from the node with the given peer id. /// /// There is no handler running anymore for this node. Any event that has been sent to it may /// or may not have been processed by the handler. /// - /// This method is only called when the last established connection to the peer - /// is closed, preceded by `inject_connection_closed`. + /// This method is only called when the last established connection to the peer is closed, + /// preceded by [`inject_connection_closed`](NetworkBehaviour::inject_connection_closed). fn inject_disconnected(&mut self, peer_id: &PeerId); /// Informs the behaviour about a newly established connection to a peer. diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 77dd4605..c61f12e1 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -383,7 +383,7 @@ where TBehaviour: NetworkBehaviour, let handler = me.behaviour.new_handler() .into_node_handler_builder() .with_substream_upgrade_protocol_override(me.substream_upgrade_protocol_override); - me.network.peer(peer_id.clone()) + me.network.peer(*peer_id) .dial(first, addrs, handler) .map(|_| ()) .map_err(DialError::ConnectionLimit) @@ -408,7 +408,7 @@ where TBehaviour: NetworkBehaviour, /// Returns the peer ID of the swarm passed as parameter. pub fn local_peer_id(me: &Self) -> &PeerId { - &me.network.local_peer_id() + me.network.local_peer_id() } /// Returns an iterator for [`AddressRecord`]s of external addresses @@ -451,7 +451,7 @@ where TBehaviour: NetworkBehaviour, /// Any incoming connection and any dialing attempt will immediately be rejected. /// This function has no effect if the peer is already banned. 
pub fn ban_peer_id(me: &mut Self, peer_id: PeerId) { - if me.banned_peers.insert(peer_id.clone()) { + if me.banned_peers.insert(peer_id) { if let Some(peer) = me.network.peer(peer_id).into_connected() { peer.disconnect(); } @@ -504,7 +504,7 @@ where TBehaviour: NetworkBehaviour, match this.network.poll(cx) { Poll::Pending => network_not_ready = true, Poll::Ready(NetworkEvent::ConnectionEvent { connection, event }) => { - let peer = connection.peer_id().clone(); + let peer = connection.peer_id(); let connection = connection.id(); this.behaviour.inject_event(peer, connection, event); }, @@ -514,10 +514,10 @@ where TBehaviour: NetworkBehaviour, this.behaviour.inject_address_change(&peer, &connection, &old_endpoint, &new_endpoint); }, Poll::Ready(NetworkEvent::ConnectionEstablished { connection, num_established }) => { - let peer_id = connection.peer_id().clone(); + let peer_id = connection.peer_id(); let endpoint = connection.endpoint().clone(); if this.banned_peers.contains(&peer_id) { - this.network.peer(peer_id.clone()) + this.network.peer(peer_id) .into_connected() .expect("the Network just notified us that we were connected; QED") .disconnect(); @@ -645,7 +645,7 @@ where TBehaviour: NetworkBehaviour, // before polling the behaviour again. If the targeted peer // meanwhie disconnected, the event is discarded. 
if let Some((peer_id, handler, event)) = this.pending_event.take() { - if let Some(mut peer) = this.network.peer(peer_id.clone()).into_connected() { + if let Some(mut peer) = this.network.peer(peer_id).into_connected() { match handler { PendingNotifyHandler::One(conn_id) => if let Some(mut conn) = peer.connection(conn_id) { @@ -691,11 +691,9 @@ where TBehaviour: NetworkBehaviour, this.behaviour.inject_dial_failure(&peer_id); } else { let condition_matched = match condition { - DialPeerCondition::Disconnected - if this.network.is_disconnected(&peer_id) => true, - DialPeerCondition::NotDialing - if !this.network.is_dialing(&peer_id) => true, - _ => false + DialPeerCondition::Disconnected => this.network.is_disconnected(&peer_id), + DialPeerCondition::NotDialing => !this.network.is_dialing(&peer_id), + DialPeerCondition::Always => true, }; if condition_matched { if ExpandedSwarm::dial(this, &peer_id).is_ok() { @@ -708,7 +706,7 @@ where TBehaviour: NetworkBehaviour, log::trace!("Condition for new dialing attempt to {:?} not met: {:?}", peer_id, condition); let self_listening = &this.listened_addrs; - if let Some(mut peer) = this.network.peer(peer_id.clone()).into_dialing() { + if let Some(mut peer) = this.network.peer(peer_id).into_dialing() { let addrs = this.behaviour.addresses_of_peer(peer.id()); let mut attempt = peer.some_attempt(); for a in addrs { @@ -721,7 +719,7 @@ where TBehaviour: NetworkBehaviour, } }, Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => { - if let Some(mut peer) = this.network.peer(peer_id.clone()).into_connected() { + if let Some(mut peer) = this.network.peer(peer_id).into_connected() { match handler { NotifyHandler::One(connection) => { if let Some(mut conn) = peer.connection(connection) { @@ -745,7 +743,7 @@ where TBehaviour: NetworkBehaviour, }, Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => { for addr in this.network.address_translation(&address) { - if 
this.external_addrs.iter().all(|a| &a.addr != &addr) { + if this.external_addrs.iter().all(|a| a.addr != addr) { this.behaviour.inject_new_external_addr(&addr); } this.external_addrs.add(addr, score); @@ -900,7 +898,7 @@ impl<'a> PollParameters for SwarmPollParameters<'a> { } fn local_peer_id(&self) -> &PeerId { - self.local_peer_id + &self.local_peer_id } } @@ -927,7 +925,7 @@ where TBehaviour: NetworkBehaviour, ) -> Self { SwarmBuilder { local_peer_id, - transport: transport, + transport, behaviour, network_config: Default::default(), substream_upgrade_protocol_override: None, diff --git a/swarm/src/protocols_handler.rs b/swarm/src/protocols_handler.rs index 3c4eeea6..42c61101 100644 --- a/swarm/src/protocols_handler.rs +++ b/swarm/src/protocols_handler.rs @@ -55,7 +55,7 @@ use libp2p_core::{ ConnectedPoint, Multiaddr, PeerId, - upgrade::{self, UpgradeError}, + upgrade::UpgradeError, }; use std::{cmp::Ordering, error, fmt, task::Context, task::Poll, time::Duration}; use wasm_timer::Instant; @@ -242,7 +242,6 @@ pub trait ProtocolsHandler: Send + 'static { pub struct SubstreamProtocol { upgrade: TUpgrade, info: TInfo, - upgrade_protocol: upgrade::Version, timeout: Duration, } @@ -255,18 +254,10 @@ impl SubstreamProtocol { SubstreamProtocol { upgrade, info, - upgrade_protocol: upgrade::Version::V1, timeout: Duration::from_secs(10), } } - /// Sets the multistream-select protocol (version) to use for negotiating - /// protocols upgrades on outbound substreams. - pub fn with_upgrade_protocol(mut self, version: upgrade::Version) -> Self { - self.upgrade_protocol = version; - self - } - /// Maps a function over the protocol upgrade. 
pub fn map_upgrade(self, f: F) -> SubstreamProtocol where @@ -275,7 +266,6 @@ impl SubstreamProtocol { SubstreamProtocol { upgrade: f(self.upgrade), info: self.info, - upgrade_protocol: self.upgrade_protocol, timeout: self.timeout, } } @@ -288,7 +278,6 @@ impl SubstreamProtocol { SubstreamProtocol { upgrade: self.upgrade, info: f(self.info), - upgrade_protocol: self.upgrade_protocol, timeout: self.timeout, } } @@ -315,8 +304,8 @@ impl SubstreamProtocol { } /// Converts the substream protocol configuration into the contained upgrade. - pub fn into_upgrade(self) -> (upgrade::Version, TUpgrade, TInfo) { - (self.upgrade_protocol, self.upgrade, self.info) + pub fn into_upgrade(self) -> (TUpgrade, TInfo) { + (self.upgrade, self.info) } } @@ -512,7 +501,7 @@ where T: ProtocolsHandler } fn inbound_protocol(&self) -> ::InboundProtocol { - self.listen_protocol().into_upgrade().1 + self.listen_protocol().into_upgrade().0 } } diff --git a/swarm/src/protocols_handler/multi.rs b/swarm/src/protocols_handler/multi.rs index 4bc2e04a..f23de96c 100644 --- a/swarm/src/protocols_handler/multi.rs +++ b/swarm/src/protocols_handler/multi.rs @@ -37,7 +37,7 @@ use crate::upgrade::{ }; use futures::{future::BoxFuture, prelude::*}; use libp2p_core::{ConnectedPoint, Multiaddr, PeerId}; -use libp2p_core::upgrade::{self, ProtocolName, UpgradeError, NegotiationError, ProtocolError}; +use libp2p_core::upgrade::{ProtocolName, UpgradeError, NegotiationError, ProtocolError}; use rand::Rng; use std::{ cmp, @@ -76,15 +76,12 @@ where /// Create and populate a `MultiHandler` from the given handler iterator. /// /// It is an error for any two protocols handlers to share the same protocol name. - /// - /// > **Note**: All handlers should use the same [`upgrade::Version`] for - /// > the inbound and outbound [`SubstreamProtocol`]s. 
pub fn try_from_iter(iter: I) -> Result where I: IntoIterator { let m = MultiHandler { handlers: HashMap::from_iter(iter) }; - uniq_proto_names(m.handlers.values().map(|h| h.listen_protocol().into_upgrade().1))?; + uniq_proto_names(m.handlers.values().map(|h| h.listen_protocol().into_upgrade().0))?; Ok(m) } } @@ -105,34 +102,22 @@ where type OutboundOpenInfo = (K, ::OutboundOpenInfo); fn listen_protocol(&self) -> SubstreamProtocol { - let (upgrade, info, timeout, version) = self.handlers.iter() - .map(|(k, h)| { - let p = h.listen_protocol(); - let t = *p.timeout(); - let (v, u, i) = p.into_upgrade(); - (k.clone(), (v, u, i, t)) + let (upgrade, info, timeout) = self.handlers.iter() + .map(|(key, handler)| { + let proto = handler.listen_protocol(); + let timeout = *proto.timeout(); + let (upgrade, info) = proto.into_upgrade(); + (key.clone(), (upgrade, info, timeout)) }) - .fold((Upgrade::new(), Info::new(), Duration::from_secs(0), None), - |(mut upg, mut inf, mut timeout, mut version), (k, (v, u, i, t))| { + .fold((Upgrade::new(), Info::new(), Duration::from_secs(0)), + |(mut upg, mut inf, mut timeout), (k, (u, i, t))| { upg.upgrades.push((k.clone(), u)); inf.infos.push((k, i)); timeout = cmp::max(timeout, t); - version = version.map_or(Some(v), |vv| - if v != vv { - // Different upgrade (i.e. protocol negotiation) protocol - // versions are usually incompatible and not negotiated - // themselves, so a protocol upgrade may fail. - log::warn!("Differing upgrade versions. Defaulting to V1."); - Some(upgrade::Version::V1) - } else { - Some(v) - }); - (upg, inf, timeout, version) + (upg, inf, timeout) } ); - SubstreamProtocol::new(upgrade, info) - .with_timeout(timeout) - .with_upgrade_protocol(version.unwrap_or(upgrade::Version::V1)) + SubstreamProtocol::new(upgrade, info).with_timeout(timeout) } fn inject_fully_negotiated_outbound ( @@ -315,9 +300,6 @@ where /// Create and populate an `IntoMultiHandler` from the given iterator. 
/// /// It is an error for any two protocols handlers to share the same protocol name. - /// - /// > **Note**: All handlers should use the same [`upgrade::Version`] for - /// > the inbound and outbound [`SubstreamProtocol`]s. pub fn try_from_iter(iter: I) -> Result where I: IntoIterator diff --git a/swarm/src/protocols_handler/node_handler.rs b/swarm/src/protocols_handler/node_handler.rs index ce4b6b31..72730117 100644 --- a/swarm/src/protocols_handler/node_handler.rs +++ b/swarm/src/protocols_handler/node_handler.rs @@ -116,7 +116,7 @@ where >>, /// For each outbound substream request, how to upgrade it. The first element of the tuple /// is the unique identifier (see `unique_dial_upgrade_id`). - queued_dial_upgrades: Vec<(u64, (upgrade::Version, SendWrapper))>, + queued_dial_upgrades: Vec<(u64, SendWrapper)>, /// Unique identifier assigned to each queued dial upgrade. unique_dial_upgrade_id: u64, /// The currently planned connection & handler shutdown. @@ -245,8 +245,8 @@ where match endpoint { SubstreamEndpoint::Listener => { let protocol = self.handler.listen_protocol(); - let timeout = protocol.timeout().clone(); - let (_, upgrade, user_data) = protocol.into_upgrade(); + let timeout = *protocol.timeout(); + let (upgrade, user_data) = protocol.into_upgrade(); let upgrade = upgrade::apply_inbound(substream, SendWrapper(upgrade)); let timeout = Delay::new(timeout); self.negotiating_in.push(SubstreamUpgrade { @@ -268,7 +268,8 @@ where } }; - let (_, (mut version, upgrade)) = self.queued_dial_upgrades.remove(pos); + let (_, upgrade) = self.queued_dial_upgrades.remove(pos); + let mut version = upgrade::Version::default(); if let Some(v) = self.substream_upgrade_protocol_override { if v != version { log::debug!("Substream upgrade protocol override: {:?} -> {:?}", version, v); @@ -334,10 +335,10 @@ where } Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => { let id = self.unique_dial_upgrade_id; - let timeout = protocol.timeout().clone(); + 
let timeout = *protocol.timeout(); self.unique_dial_upgrade_id += 1; - let (version, upgrade, info) = protocol.into_upgrade(); - self.queued_dial_upgrades.push((id, (version, SendWrapper(upgrade)))); + let (upgrade, info) = protocol.into_upgrade(); + self.queued_dial_upgrades.push((id, SendWrapper(upgrade))); return Poll::Ready(Ok( ConnectionHandlerEvent::OutboundSubstreamRequest((id, info, timeout)), )); diff --git a/swarm/src/protocols_handler/one_shot.rs b/swarm/src/protocols_handler/one_shot.rs index c83d8227..3baf779a 100644 --- a/swarm/src/protocols_handler/one_shot.rs +++ b/swarm/src/protocols_handler/one_shot.rs @@ -47,8 +47,6 @@ where dial_queue: SmallVec<[TOutbound; 4]>, /// Current number of concurrent outbound substreams being opened. dial_negotiated: u32, - /// Maximum number of concurrent outbound substreams being opened. Value is never modified. - max_dial_negotiated: u32, /// Value to return from `connection_keep_alive`. keep_alive: KeepAlive, /// The configuration container for the handler @@ -71,7 +69,6 @@ where events_out: SmallVec::new(), dial_queue: SmallVec::new(), dial_negotiated: 0, - max_dial_negotiated: 8, keep_alive: KeepAlive::Yes, config, } @@ -204,7 +201,7 @@ where } if !self.dial_queue.is_empty() { - if self.dial_negotiated < self.max_dial_negotiated { + if self.dial_negotiated < self.config.max_dial_negotiated { self.dial_negotiated += 1; let upgrade = self.dial_queue.remove(0); return Poll::Ready( @@ -233,6 +230,8 @@ pub struct OneShotHandlerConfig { pub keep_alive_timeout: Duration, /// Timeout for outbound substream upgrades. pub outbound_substream_timeout: Duration, + /// Maximum number of concurrent outbound substreams being opened. 
+ pub max_dial_negotiated: u32, } impl Default for OneShotHandlerConfig { @@ -240,6 +239,7 @@ impl Default for OneShotHandlerConfig { OneShotHandlerConfig { keep_alive_timeout: Duration::from_secs(10), outbound_substream_timeout: Duration::from_secs(10), + max_dial_negotiated: 8, } } } diff --git a/swarm/src/protocols_handler/select.rs b/swarm/src/protocols_handler/select.rs index 42bc310e..d8005eef 100644 --- a/swarm/src/protocols_handler/select.rs +++ b/swarm/src/protocols_handler/select.rs @@ -110,9 +110,9 @@ where fn listen_protocol(&self) -> SubstreamProtocol { let proto1 = self.proto1.listen_protocol(); let proto2 = self.proto2.listen_protocol(); - let timeout = std::cmp::max(proto1.timeout(), proto2.timeout()).clone(); - let (_, u1, i1) = proto1.into_upgrade(); - let (_, u2, i2) = proto2.into_upgrade(); + let timeout = *std::cmp::max(proto1.timeout(), proto2.timeout()); + let (u1, i1) = proto1.into_upgrade(); + let (u2, i2) = proto2.into_upgrade(); let choice = SelectUpgrade::new(SendWrapper(u1), SendWrapper(u2)); SubstreamProtocol::new(choice, (i1, i2)).with_timeout(timeout) } diff --git a/swarm/src/toggle.rs b/swarm/src/toggle.rs index bb901126..ea2b63b6 100644 --- a/swarm/src/toggle.rs +++ b/swarm/src/toggle.rs @@ -241,7 +241,7 @@ where .expect("Can't receive an inbound substream if disabled; QED") .inject_fully_negotiated_inbound(out, info) } else { - panic!("Unpexpected Either::Right in enabled `inject_fully_negotiated_inbound`.") + panic!("Unexpected Either::Right in enabled `inject_fully_negotiated_inbound`.") } } @@ -271,6 +271,21 @@ where } fn inject_listen_upgrade_error(&mut self, info: Self::InboundOpenInfo, err: ProtocolsHandlerUpgrErr<::Error>) { + let (inner, info) = match (self.inner.as_mut(), info) { + (Some(inner), Either::Left(info)) => (inner, info), + // Ignore listen upgrade errors in disabled state. 
+ (None, Either::Right(())) => return, + (Some(_), Either::Right(())) => panic!( + "Unexpected `Either::Right` inbound info through \ + `inject_listen_upgrade_error` in enabled state.", + ), + (None, Either::Left(_)) => panic!( + "Unexpected `Either::Left` inbound info through \ + `inject_listen_upgrade_error` in disabled state.", + ), + + }; + let err = match err { ProtocolsHandlerUpgrErr::Timeout => ProtocolsHandlerUpgrErr::Timeout, ProtocolsHandlerUpgrErr::Timer => ProtocolsHandlerUpgrErr::Timer, @@ -280,13 +295,8 @@ where EitherError::B(v) => void::unreachable(v) })) }; - if let Either::Left(info) = info { - self.inner.as_mut() - .expect("Can't receive an inbound substream if disabled; QED") - .inject_listen_upgrade_error(info, err) - } else { - panic!("Unexpected Either::Right on enabled `inject_listen_upgrade_error`.") - } + + inner.inject_listen_upgrade_error(info, err) } fn connection_keep_alive(&self) -> KeepAlive { @@ -307,3 +317,32 @@ where } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::protocols_handler::DummyProtocolsHandler; + + /// A disabled [`ToggleProtoHandler`] can receive listen upgrade errors in + /// the following two cases: + /// + /// 1. Protocol negotiation on an incoming stream failed with no protocol + /// being agreed on. + /// + /// 2. When combining [`ProtocolsHandler`] implementations a single + /// [`ProtocolsHandler`] might be notified of an inbound upgrade error + /// unrelated to its own upgrade logic. For example when nesting a + /// [`ToggleProtoHandler`] in a + /// [`ProtocolsHandlerSelect`](crate::protocols_handler::ProtocolsHandlerSelect) + /// the former might receive an inbound upgrade error even when disabled. + /// + /// [`ToggleProtoHandler`] should ignore the error in both of these cases. 
+ #[test] + fn ignore_listen_upgrade_error_when_disabled() { + let mut handler = ToggleProtoHandler:: { + inner: None, + }; + + handler.inject_listen_upgrade_error(Either::Right(()), ProtocolsHandlerUpgrErr::Timeout); + } +} diff --git a/protocols/deflate/CHANGELOG.md b/transports/deflate/CHANGELOG.md similarity index 70% rename from protocols/deflate/CHANGELOG.md rename to transports/deflate/CHANGELOG.md index 0f27277e..26b92b28 100644 --- a/protocols/deflate/CHANGELOG.md +++ b/transports/deflate/CHANGELOG.md @@ -1,3 +1,12 @@ +# 0.27.1 [2021-01-27] + +- Ensure read buffers are initialised. + [PR 1933](https://github.com/libp2p/rust-libp2p/pull/1933). + +# 0.27.0 [2021-01-12] + +- Update dependencies. + # 0.26.0 [2020-12-17] - Update `libp2p-core`. diff --git a/protocols/deflate/Cargo.toml b/transports/deflate/Cargo.toml similarity index 75% rename from protocols/deflate/Cargo.toml rename to transports/deflate/Cargo.toml index 786cf4c1..dccc2717 100644 --- a/protocols/deflate/Cargo.toml +++ b/transports/deflate/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-deflate" edition = "2018" description = "Deflate encryption protocol for libp2p" -version = "0.26.1" +version = "0.27.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,12 +14,12 @@ name = "libp2p_deflate" [dependencies] futures = "0.3.1" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } flate2 = "1.0" [dev-dependencies] async-std = "1.6.2" -libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" } +libp2p-tcp = { path = "../../transports/tcp", package = "fluence-fork-libp2p-tcp" } quickcheck = "0.9" rand = "0.7" diff --git a/protocols/deflate/src/lib.rs b/transports/deflate/src/lib.rs similarity index 98% rename from protocols/deflate/src/lib.rs 
rename to transports/deflate/src/lib.rs index 6ec576d8..d93e6ed2 100644 --- a/protocols/deflate/src/lib.rs +++ b/transports/deflate/src/lib.rs @@ -133,10 +133,7 @@ impl AsyncRead for DeflateOutput loop { // Read from `self.inner` into `self.read_interm` if necessary. if this.read_interm.is_empty() && !this.inner_read_eof { - unsafe { - this.read_interm.reserve(256); - this.read_interm.set_len(this.read_interm.capacity()); - } + this.read_interm.resize(this.read_interm.capacity() + 256, 0); match AsyncRead::poll_read(Pin::new(&mut this.inner), cx, &mut this.read_interm) { Poll::Ready(Ok(0)) => { diff --git a/protocols/deflate/tests/test.rs b/transports/deflate/tests/test.rs similarity index 100% rename from protocols/deflate/tests/test.rs rename to transports/deflate/tests/test.rs diff --git a/transports/dns/CHANGELOG.md b/transports/dns/CHANGELOG.md index 93e28055..dc327318 100644 --- a/transports/dns/CHANGELOG.md +++ b/transports/dns/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.27.0 [2021-01-12] + +- Update dependencies. + # 0.26.0 [2020-12-17] - Update `libp2p-core`. 
diff --git a/transports/dns/Cargo.toml b/transports/dns/Cargo.toml index 632f1aec..90b133d4 100644 --- a/transports/dns/Cargo.toml +++ b/transports/dns/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-dns" edition = "2018" description = "DNS transport implementation for libp2p" -version = "0.26.1" +version = "0.27.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"] name = "libp2p_dns" [dependencies] -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } log = "0.4.1" futures = "0.3.1" diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index b9bd3763..beba6778 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -202,6 +202,10 @@ where Ok(future.boxed().right_future()) } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.inner.address_translation(server, observed) + } } /// Error that can be generated by the DNS layer. @@ -289,6 +293,10 @@ mod tests { }; Ok(Box::pin(future::ready(Ok(())))) } + + fn address_translation(&self, _: &Multiaddr, _: &Multiaddr) -> Option { + None + } } futures::executor::block_on(async move { diff --git a/protocols/noise/CHANGELOG.md b/transports/noise/CHANGELOG.md similarity index 97% rename from protocols/noise/CHANGELOG.md rename to transports/noise/CHANGELOG.md index 7fd47cec..d1d0a6ba 100644 --- a/protocols/noise/CHANGELOG.md +++ b/transports/noise/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.29.0 [2021-01-12] + +- Update dependencies. + # 0.28.0 [2020-12-17] - Update `libp2p-core`. 
diff --git a/protocols/noise/Cargo.toml b/transports/noise/Cargo.toml similarity index 79% rename from protocols/noise/Cargo.toml rename to transports/noise/Cargo.toml index bba8a474..039f33ba 100644 --- a/protocols/noise/Cargo.toml +++ b/transports/noise/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "fluence-fork-libp2p-noise" description = "Cryptographic handshake protocol using the noise framework." -version = "0.28.1" +version = "0.29.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,13 +11,13 @@ edition = "2018" name = "libp2p_noise" [dependencies] -bytes = "0.5" +bytes = "1" curve25519-dalek = "3.0.0" futures = "0.3.1" lazy_static = "1.2" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } log = "0.4" -prost = "0.6.1" +prost = "0.7" rand = "0.7.2" sha2 = "0.9.1" static_assertions = "1" @@ -31,14 +31,14 @@ snow = { version = "0.7.1", features = ["ring-resolver"], default-features = fal snow = { version = "0.7.1", features = ["default-resolver"], default-features = false } [dev-dependencies] +async-io = "1.2.0" env_logger = "0.8.1" -libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" } +libp2p-tcp = { path = "../../transports/tcp", package = "fluence-fork-libp2p-tcp" } quickcheck = "0.9.0" sodiumoxide = "0.2.5" [build-dependencies] -prost-build = "0.6" - +prost-build = "0.7" [package.metadata.workspaces] independent = true diff --git a/protocols/noise/build.rs b/transports/noise/build.rs similarity index 100% rename from protocols/noise/build.rs rename to transports/noise/build.rs diff --git a/protocols/noise/src/error.rs b/transports/noise/src/error.rs similarity index 95% rename from protocols/noise/src/error.rs rename to transports/noise/src/error.rs index 0fd00076..8b836d5e 100644 --- 
a/protocols/noise/src/error.rs +++ b/transports/noise/src/error.rs @@ -24,6 +24,7 @@ use std::{error::Error, fmt, io}; /// libp2p_noise error type. #[derive(Debug)] +#[non_exhaustive] pub enum NoiseError { /// An I/O error has been encountered. Io(io::Error), @@ -38,8 +39,6 @@ pub enum NoiseError { InvalidPayload(prost::DecodeError), /// A signature was required and could not be created. SigningError(identity::error::SigningError), - #[doc(hidden)] - __Nonexhaustive } impl fmt::Display for NoiseError { @@ -51,7 +50,6 @@ impl fmt::Display for NoiseError { NoiseError::InvalidPayload(e) => write!(f, "{}", e), NoiseError::AuthenticationFailed => f.write_str("Authentication failed"), NoiseError::SigningError(e) => write!(f, "{}", e), - NoiseError::__Nonexhaustive => f.write_str("__Nonexhaustive") } } } @@ -65,7 +63,6 @@ impl Error for NoiseError { NoiseError::AuthenticationFailed => None, NoiseError::InvalidPayload(e) => Some(e), NoiseError::SigningError(e) => Some(e), - NoiseError::__Nonexhaustive => None } } } diff --git a/protocols/noise/src/io.rs b/transports/noise/src/io.rs similarity index 99% rename from protocols/noise/src/io.rs rename to transports/noise/src/io.rs index 992988e9..c7bd110c 100644 --- a/protocols/noise/src/io.rs +++ b/transports/noise/src/io.rs @@ -115,7 +115,7 @@ impl AsyncWrite for NoiseOutput { this.send_offset += n; trace!("write: buffered {} bytes", this.send_offset); - return Poll::Ready(Ok(n)) + Poll::Ready(Ok(n)) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { diff --git a/protocols/noise/src/io/framed.rs b/transports/noise/src/io/framed.rs similarity index 98% rename from protocols/noise/src/io/framed.rs rename to transports/noise/src/io/framed.rs index 156703de..000300bd 100644 --- a/protocols/noise/src/io/framed.rs +++ b/transports/noise/src/io/framed.rs @@ -27,7 +27,6 @@ use crate::io::NoiseOutput; use futures::ready; use futures::prelude::*; use log::{debug, trace}; -use snow; use std::{fmt, io, pin::Pin, 
task::{Context, Poll}}; /// Max. size of a noise message. @@ -261,9 +260,9 @@ where WriteState::Ready => { return Poll::Ready(Ok(())); } - WriteState::WriteLen { len, mut buf, mut off } => { + WriteState::WriteLen { len, buf, mut off } => { trace!("write: frame len ({}, {:?}, {}/2)", len, buf, off); - match write_frame_len(&mut this.io, cx, &mut buf, &mut off) { + match write_frame_len(&mut this.io, cx, &buf, &mut off) { Poll::Ready(Ok(true)) => (), Poll::Ready(Ok(false)) => { trace!("write: eof"); @@ -324,12 +323,12 @@ where buf: u16::to_be_bytes(n as u16), off: 0 }; - return Ok(()) + Ok(()) } Err(e) => { log::error!("encryption error: {:?}", e); this.write_state = WriteState::EncErr; - return Err(io::ErrorKind::InvalidData.into()) + Err(io::ErrorKind::InvalidData.into()) } } } diff --git a/protocols/noise/src/io/handshake.rs b/transports/noise/src/io/handshake.rs similarity index 100% rename from protocols/noise/src/io/handshake.rs rename to transports/noise/src/io/handshake.rs diff --git a/protocols/noise/src/io/handshake/payload.proto b/transports/noise/src/io/handshake/payload.proto similarity index 100% rename from protocols/noise/src/io/handshake/payload.proto rename to transports/noise/src/io/handshake/payload.proto diff --git a/protocols/noise/src/lib.rs b/transports/noise/src/lib.rs similarity index 100% rename from protocols/noise/src/lib.rs rename to transports/noise/src/lib.rs diff --git a/protocols/noise/src/protocol.rs b/transports/noise/src/protocol.rs similarity index 100% rename from protocols/noise/src/protocol.rs rename to transports/noise/src/protocol.rs diff --git a/protocols/noise/src/protocol/x25519.rs b/transports/noise/src/protocol/x25519.rs similarity index 97% rename from protocols/noise/src/protocol/x25519.rs rename to transports/noise/src/protocol/x25519.rs index 80bba174..389c5b94 100644 --- a/protocols/noise/src/protocol/x25519.rs +++ b/transports/noise/src/protocol/x25519.rs @@ -158,8 +158,7 @@ impl Keypair { /// Returns `None` if 
the given identity keypair cannot be used as an X25519 keypair. /// /// > **Note**: If the identity keypair is already used in the context - /// > of other cryptographic protocols outside of Noise, e.g. for - /// > signing in the `secio` protocol, it should be preferred to + /// > of other cryptographic protocols outside of Noise, it should be preferred to /// > create a new static X25519 keypair for use in the Noise protocol. /// > /// > See also: @@ -205,9 +204,8 @@ impl SecretKey { /// Construct a X25519 secret key from a Ed25519 secret key. /// /// > **Note**: If the Ed25519 secret key is already used in the context - /// > of other cryptographic protocols outside of Noise, e.g. for - /// > signing in the `secio` protocol, it should be preferred to - /// > create a new keypair for use in the Noise protocol. + /// > of other cryptographic protocols outside of Noise, it should be preferred + /// > to create a new keypair for use in the Noise protocol. /// > /// > See also: /// > diff --git a/protocols/noise/src/protocol/x25519_spec.rs b/transports/noise/src/protocol/x25519_spec.rs similarity index 100% rename from protocols/noise/src/protocol/x25519_spec.rs rename to transports/noise/src/protocol/x25519_spec.rs diff --git a/protocols/noise/tests/smoke.rs b/transports/noise/tests/smoke.rs similarity index 98% rename from protocols/noise/tests/smoke.rs rename to transports/noise/tests/smoke.rs index 744d447a..4a4c81b5 100644 --- a/protocols/noise/tests/smoke.rs +++ b/transports/noise/tests/smoke.rs @@ -18,15 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use async_io::Async; use futures::{future::{self, Either}, prelude::*}; use libp2p_core::identity; use libp2p_core::upgrade::{self, Negotiated, apply_inbound, apply_outbound}; use libp2p_core::transport::{Transport, ListenerEvent}; use libp2p_noise::{Keypair, X25519, X25519Spec, NoiseConfig, RemoteIdentity, NoiseError, NoiseOutput}; -use libp2p_tcp::{TcpConfig, TcpTransStream}; +use libp2p_tcp::TcpConfig; use log::info; use quickcheck::QuickCheck; -use std::{convert::TryInto, io}; +use std::{convert::TryInto, io, net::TcpStream}; #[allow(dead_code)] fn core_upgrade_compat() { @@ -175,7 +176,7 @@ fn ik_xx() { QuickCheck::new().max_tests(30).quickcheck(prop as fn(Vec) -> bool) } -type Output = (RemoteIdentity, NoiseOutput>); +type Output = (RemoteIdentity, NoiseOutput>>); fn run(server_transport: T, client_transport: U, messages: I) where diff --git a/protocols/plaintext/CHANGELOG.md b/transports/plaintext/CHANGELOG.md similarity index 90% rename from protocols/plaintext/CHANGELOG.md rename to transports/plaintext/CHANGELOG.md index a24261f2..bb8ba040 100644 --- a/protocols/plaintext/CHANGELOG.md +++ b/transports/plaintext/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.27.1 [2021-02-15] + +- Update dependencies. + +# 0.27.0 [2021-01-12] + +- Update dependencies. + # 0.26.0 [2020-12-17] - Update `libp2p-core`. 
diff --git a/protocols/plaintext/Cargo.toml b/transports/plaintext/Cargo.toml similarity index 73% rename from protocols/plaintext/Cargo.toml rename to transports/plaintext/Cargo.toml index 44ec6e97..c800e5e6 100644 --- a/protocols/plaintext/Cargo.toml +++ b/transports/plaintext/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-plaintext" edition = "2018" description = "Plaintext encryption dummy protocol for libp2p" -version = "0.26.1" +version = "0.27.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,13 +13,13 @@ categories = ["network-programming", "asynchronous"] name = "libp2p_plaintext" [dependencies] -bytes = "0.5" +bytes = "1" futures = "0.3.1" -futures_codec = "0.4.0" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } +asynchronous-codec = "0.6" +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } log = "0.4.8" -prost = "0.6.1" -unsigned-varint = { version = "0.5.1", features = ["futures-codec"] } +prost = "0.7" +unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } void = "1.0.2" [dev-dependencies] @@ -28,7 +28,7 @@ quickcheck = "0.9.0" rand = "0.7" [build-dependencies] -prost-build = "0.6" +prost-build = "0.7" [package.metadata.workspaces] diff --git a/protocols/plaintext/build.rs b/transports/plaintext/build.rs similarity index 100% rename from protocols/plaintext/build.rs rename to transports/plaintext/build.rs diff --git a/protocols/plaintext/src/error.rs b/transports/plaintext/src/error.rs similarity index 100% rename from protocols/plaintext/src/error.rs rename to transports/plaintext/src/error.rs diff --git a/protocols/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs similarity index 92% rename from protocols/plaintext/src/handshake.rs rename to transports/plaintext/src/handshake.rs index dda29af4..078a5109 100644 --- a/protocols/plaintext/src/handshake.rs 
+++ b/transports/plaintext/src/handshake.rs @@ -24,7 +24,7 @@ use crate::structs_proto::Exchange; use bytes::{Bytes, BytesMut}; use futures::prelude::*; -use futures_codec::Framed; +use asynchronous_codec::{Framed, FramedParts}; use libp2p_core::{PublicKey, PeerId}; use log::{debug, trace}; use prost::Message; @@ -134,12 +134,9 @@ where } }; - // The `Framed` wrapper may have buffered additional data that - // was already received but is no longer part of the plaintext - // handshake. We need to capture that data before dropping - // the `Framed` wrapper via `Framed::into_inner()`. - let read_buffer = framed_socket.read_buffer().clone().freeze(); - trace!("received exchange from remote; pubkey = {:?}", context.state.public_key); - Ok((framed_socket.into_inner(), context.state, read_buffer)) + + let FramedParts { io, read_buffer, write_buffer, .. } = framed_socket.into_parts(); + assert!(write_buffer.is_empty()); + Ok((io, context.state, read_buffer.freeze())) } diff --git a/protocols/plaintext/src/lib.rs b/transports/plaintext/src/lib.rs similarity index 100% rename from protocols/plaintext/src/lib.rs rename to transports/plaintext/src/lib.rs diff --git a/protocols/plaintext/src/structs.proto b/transports/plaintext/src/structs.proto similarity index 100% rename from protocols/plaintext/src/structs.proto rename to transports/plaintext/src/structs.proto diff --git a/protocols/plaintext/tests/smoke.rs b/transports/plaintext/tests/smoke.rs similarity index 100% rename from protocols/plaintext/tests/smoke.rs rename to transports/plaintext/tests/smoke.rs diff --git a/protocols/pnet/CHANGELOG.md b/transports/pnet/CHANGELOG.md similarity index 100% rename from protocols/pnet/CHANGELOG.md rename to transports/pnet/CHANGELOG.md diff --git a/protocols/pnet/Cargo.toml b/transports/pnet/Cargo.toml similarity index 100% rename from protocols/pnet/Cargo.toml rename to transports/pnet/Cargo.toml diff --git a/protocols/pnet/src/crypt_writer.rs b/transports/pnet/src/crypt_writer.rs 
similarity index 100% rename from protocols/pnet/src/crypt_writer.rs rename to transports/pnet/src/crypt_writer.rs diff --git a/protocols/pnet/src/lib.rs b/transports/pnet/src/lib.rs similarity index 100% rename from protocols/pnet/src/lib.rs rename to transports/pnet/src/lib.rs diff --git a/transports/tcp/CHANGELOG.md b/transports/tcp/CHANGELOG.md index afc17d1f..b7d98c14 100644 --- a/transports/tcp/CHANGELOG.md +++ b/transports/tcp/CHANGELOG.md @@ -1,3 +1,22 @@ +# 0.27.2 [unreleased] + +- Update to `if-watch-0.2`. + +# 0.27.1 [2021-02-15] + +- Update dependencies. + +# 0.27.0 [2021-01-12] + +- Add support for port reuse and (re)add transport-specific + address translation. Thereby use only `async-io` instead of + `async-std`, renaming the feature accordingly. `async-io` + is a default feature, with an additional `tokio` feature + as before. + [PR 1887](https://github.com/libp2p/rust-libp2p/pull/1887) + +- Update dependencies. + # 0.26.0 [2020-12-17] - Update `async-io`. diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index 326a0441..1da88b07 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-tcp" edition = "2018" description = "TCP/IP transport protocol for libp2p" -version = "0.26.1" +version = "0.27.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,19 +13,27 @@ categories = ["network-programming", "asynchronous"] name = "libp2p_tcp" [dependencies] -async-std = { version = "1.6.5", optional = true } -futures = "0.3.1" +async-io-crate = { package = "async-io", version = "1.2.0", optional = true } +futures = "0.3.8" futures-timer = "3.0" -if-addrs = "0.6.4" +if-watch = { version = "0.2.0", optional = true } +if-addrs = { version = "0.6.4", optional = true } ipnet = "2.0.0" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } -log = "0.4.1" -socket2 = { version = "0.3.12" } 
-tokio = { version = "0.3", default-features = false, features = ["net"], optional = true } +libc = "0.2.80" +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } +log = "0.4.11" +socket2 = { version = "0.3.17", features = ["reuseport"] } +tokio-crate = { package = "tokio", version = "1.0.1", default-features = false, features = ["net"], optional = true } + +[features] +default = ["async-io"] +tokio = ["tokio-crate", "if-addrs"] +async-io = ["async-io-crate", "if-watch"] [dev-dependencies] -libp2p-tcp = { path = ".", features = ["async-std"], package = "fluence-fork-libp2p-tcp" } - +async-std = { version = "1.6.5", features = ["attributes"] } +tokio-crate = { package = "tokio", version = "1.0.1", default-features = false, features = ["net", "rt"] } +env_logger = "0.8.2" [package.metadata.workspaces] independent = true diff --git a/transports/tcp/src/lib.rs b/transports/tcp/src/lib.rs index b08062fe..be045bff 100644 --- a/transports/tcp/src/lib.rs +++ b/transports/tcp/src/lib.rs @@ -22,362 +22,634 @@ //! //! # Usage //! -//! This crate provides two structs, `TcpConfig` and `TokioTcpConfig`, depending on which -//! features are enabled. -//! -//! Both the `TcpConfig` and `TokioTcpConfig` structs implement the `Transport` trait of the -//! `core` library. See the documentation of `core` and of libp2p in general to learn how to -//! use the `Transport` trait. +//! This crate provides a `TcpConfig` and `TokioTcpConfig`, depending on +//! the enabled features, which implement the `Transport` trait for use as a +//! transport with `libp2p-core` or `libp2p-swarm`. -use futures::{future::{self, Ready}, prelude::*}; +mod provider; + +#[cfg(feature = "async-io")] +pub use provider::async_io; + +/// The type of a [`GenTcpConfig`] using the `async-io` implementation. 
+#[cfg(feature = "async-io")] +pub type TcpConfig = GenTcpConfig; + +#[cfg(feature = "tokio")] +pub use provider::tokio; + +/// The type of a [`GenTcpConfig`] using the `tokio` implementation. +#[cfg(feature = "tokio")] +pub type TokioTcpConfig = GenTcpConfig; + +use futures::{ + future::{self, BoxFuture, Ready}, + prelude::*, + ready, +}; use futures_timer::Delay; -use if_addrs::{IfAddr, get_if_addrs}; -use ipnet::{IpNet, Ipv4Net, Ipv6Net}; use libp2p_core::{ - Transport, - multiaddr::{Protocol, Multiaddr}, - transport::{ListenerEvent, TransportError} + address_translation, + multiaddr::{Multiaddr, Protocol}, + transport::{ListenerEvent, Transport, TransportError}, }; -use log::{debug, trace}; -use socket2::{Socket, Domain, Type}; +use socket2::{Domain, Socket, Type}; use std::{ - collections::VecDeque, - convert::TryFrom, + collections::HashSet, io, - iter::{self, FromIterator}, - net::{IpAddr, SocketAddr}, + net::{SocketAddr, IpAddr, TcpListener}, pin::Pin, + sync::{Arc, RwLock}, task::{Context, Poll}, - time::Duration + time::Duration, }; -macro_rules! codegen { - ($feature_name:expr, $tcp_config:ident, $tcp_trans_stream:ident, $tcp_listen_stream:ident, $apply_config:ident, $tcp_stream:ty, $tcp_listener:ty) => { +use provider::{Provider, IfEvent}; -/// Represents the configuration for a TCP/IP transport capability for libp2p. +/// The configuration for a TCP/IP transport capability for libp2p. /// -/// The TCP sockets created by libp2p will need to be progressed by running the futures and streams -/// obtained by libp2p through the tokio reactor. -#[cfg_attr(docsrs, doc(cfg(feature = $feature_name)))] -#[derive(Debug, Clone, Default)] -pub struct $tcp_config { - /// How long a listener should sleep after receiving an error, before trying again. - sleep_on_error: Duration, +/// A [`GenTcpConfig`] implements the [`Transport`] interface and thus +/// is consumed on [`Transport::listen_on`] and [`Transport::dial`]. 
+/// However, the config can be cheaply cloned to perform multiple such +/// operations with the same config. +#[derive(Clone, Debug)] +pub struct GenTcpConfig { + /// The type of the I/O provider. + _impl: std::marker::PhantomData, /// TTL to set for opened sockets, or `None` to keep default. ttl: Option, /// `TCP_NODELAY` to set for opened sockets, or `None` to keep default. nodelay: Option, + /// Size of the listen backlog for listen sockets. + backlog: u32, + /// The configuration of port reuse when dialing. + port_reuse: PortReuse, } -impl $tcp_config { - /// Creates a new configuration object for TCP/IP. - pub fn new() -> $tcp_config { - $tcp_config { - sleep_on_error: Duration::from_millis(100), - ttl: None, - nodelay: None, +type Port = u16; + +/// The configuration for port reuse of listening sockets. +#[derive(Debug, Clone)] +enum PortReuse { + /// Port reuse is disabled, i.e. ephemeral local ports are + /// used for outgoing TCP connections. + Disabled, + /// Port reuse when dialing is enabled, i.e. the local + /// address and port that a new socket for an outgoing + /// connection is bound to are chosen from an existing + /// listening socket, if available. + Enabled { + /// The addresses and ports of the listening sockets + /// registered as eligible for port reuse when dialing. + listen_addrs: Arc>> + }, +} + +impl PortReuse { + /// Registers a socket address for port reuse. + /// + /// Has no effect if port reuse is disabled. + fn register(&mut self, ip: IpAddr, port: Port) { + if let PortReuse::Enabled { listen_addrs } = self { + log::trace!("Registering for port reuse: {}:{}", ip, port); + listen_addrs + .write() + .expect("`register()` and `unregister()` never panic while holding the lock") + .insert((ip, port)); } } - /// Sets the TTL to set for opened sockets. + /// Unregisters a socket address for port reuse. + /// + /// Has no effect if port reuse is disabled. 
+ fn unregister(&mut self, ip: IpAddr, port: Port) { + if let PortReuse::Enabled { listen_addrs } = self { + log::trace!("Unregistering for port reuse: {}:{}", ip, port); + listen_addrs + .write() + .expect("`register()` and `unregister()` never panic while holding the lock") + .remove(&(ip, port)); + } + } + + /// Selects a listening socket address suitable for use + /// as the local socket address when dialing. + /// + /// If multiple listening sockets are registered for port + /// reuse, one is chosen whose IP protocol version and + /// loopback status is the same as that of `remote_ip`. + /// + /// Returns `None` if port reuse is disabled or no suitable + /// listening socket address is found. + fn local_dial_addr(&self, remote_ip: &IpAddr) -> Option { + if let PortReuse::Enabled { listen_addrs } = self { + for (ip, port) in listen_addrs + .read() + .expect("`register()` and `unregister()` never panic while holding the lock") + .iter() + { + if ip.is_ipv4() == remote_ip.is_ipv4() + && ip.is_loopback() == remote_ip.is_loopback() + { + return Some(SocketAddr::new(*ip, *port)) + } + } + } + + None + } +} + +impl GenTcpConfig +where + T: Provider + Send, +{ + /// Creates a new configuration for a TCP/IP transport: + /// + /// * Nagle's algorithm, i.e. `TCP_NODELAY`, is _enabled_. + /// See [`GenTcpConfig::nodelay`]. + /// * Reuse of listening ports is _disabled_. + /// See [`GenTcpConfig::port_reuse`]. + /// * No custom `IP_TTL` is set. The default of the OS TCP stack applies. + /// See [`GenTcpConfig::ttl`]. + /// * The size of the listen backlog for new listening sockets is `1024`. + /// See [`GenTcpConfig::listen_backlog`]. + pub fn new() -> Self { + Self { + ttl: None, + nodelay: None, + backlog: 1024, + port_reuse: PortReuse::Disabled, + _impl: std::marker::PhantomData, + } + } + + /// Configures the `IP_TTL` option for new sockets. pub fn ttl(mut self, value: u32) -> Self { self.ttl = Some(value); self } - /// Sets the `TCP_NODELAY` to set for opened sockets. 
+ /// Configures the `TCP_NODELAY` option for new sockets. pub fn nodelay(mut self, value: bool) -> Self { self.nodelay = Some(value); self } -} -impl Transport for $tcp_config { - type Output = $tcp_trans_stream; - type Error = io::Error; - type Listener = Pin, Self::Error>> + Send>>; - type ListenerUpgrade = Ready>; - type Dial = Pin> + Send>>; + /// Configures the listen backlog for new listen sockets. + pub fn listen_backlog(mut self, backlog: u32) -> Self { + self.backlog = backlog; + self + } - fn listen_on(self, addr: Multiaddr) -> Result> { - let socket_addr = - if let Ok(sa) = multiaddr_to_socketaddr(&addr) { - sa - } else { - return Err(TransportError::MultiaddrNotSupported(addr)) - }; - - async fn do_listen(cfg: $tcp_config, socket_addr: SocketAddr) - -> Result>, io::Error>, io::Error>>, io::Error> - { - let socket = if socket_addr.is_ipv4() { - Socket::new(Domain::ipv4(), Type::stream(), Some(socket2::Protocol::tcp()))? - } else { - let s = Socket::new(Domain::ipv6(), Type::stream(), Some(socket2::Protocol::tcp()))?; - s.set_only_v6(true)?; - s - }; - if cfg!(target_family = "unix") { - socket.set_reuse_address(true)?; + /// Configures port reuse for local sockets, which implies + /// reuse of listening ports for outgoing connections to + /// enhance NAT traversal capabilities. + /// + /// Please refer to e.g. [RFC 4787](https://tools.ietf.org/html/rfc4787) + /// section 4 and 5 for some of the NAT terminology used here. + /// + /// There are two main use-cases for port reuse among local + /// sockets: + /// + /// 1. Creating multiple listening sockets for the same address + /// and port to allow accepting connections on multiple threads + /// without having to synchronise access to a single listen socket. + /// + /// 2. Creating outgoing connections whose local socket is bound to + /// the same address and port as a listening socket. 
In the rare + /// case of simple NATs with both endpoint-independent mapping and + /// endpoint-independent filtering, this can on its own already + /// permit NAT traversal by other nodes sharing the observed + /// external address of the local node. For the common case of + /// NATs with address-dependent or address and port-dependent + /// filtering, port reuse for outgoing connections can facilitate + /// further TCP hole punching techniques for NATs that perform + /// endpoint-independent mapping. Port reuse cannot facilitate + /// NAT traversal in the presence of "symmetric" NATs that employ + /// both address/port-dependent mapping and filtering, unless + /// there is some means of port prediction. + /// + /// Both use-cases are enabled when port reuse is enabled, with port reuse + /// for outgoing connections (`2.` above) always being implied. + /// + /// > **Note**: Due to the identification of a TCP socket by a 4-tuple + /// > of source IP address, source port, destination IP address and + /// > destination port, with port reuse enabled there can be only + /// > a single outgoing connection to a particular address and port + /// > of a peer per local listening socket address. + /// + /// If enabled, the returned `GenTcpConfig` and all of its `Clone`s + /// keep track of the listen socket addresses as they are reported + /// by polling [`TcpListenStream`]s obtained from [`GenTcpConfig::listen_on()`]. + /// + /// In contrast, two `GenTcpConfig`s constructed separately via [`GenTcpConfig::new()`] + /// maintain these addresses independently. 
It is thus possible to listen on + /// multiple addresses, enabling port reuse for each, knowing exactly which + /// listen address is reused when dialing with a specific `GenTcpConfig`, as in + /// the following example: + /// + /// ```no_run + /// # use libp2p_core::transport::ListenerEvent; + /// # use libp2p_core::{Multiaddr, Transport}; + /// # use futures::stream::StreamExt; + /// #[cfg(feature = "async-io")] + /// #[async_std::main] + /// async fn main() -> std::io::Result<()> { + /// use libp2p_tcp::TcpConfig; + /// + /// let listen_addr1: Multiaddr = "/ip4/127.0.0.1/tcp/9001".parse().unwrap(); + /// let listen_addr2: Multiaddr = "/ip4/127.0.0.1/tcp/9002".parse().unwrap(); + /// + /// let tcp1 = TcpConfig::new().port_reuse(true); + /// let mut listener1 = tcp1.clone().listen_on(listen_addr1.clone()).expect("listener"); + /// match listener1.next().await.expect("event")? { + /// ListenerEvent::NewAddress(listen_addr) => { + /// println!("Listening on {:?}", listen_addr); + /// let mut stream = tcp1.dial(listen_addr2.clone()).unwrap().await?; + /// // `stream` has `listen_addr1` as its local socket address. + /// } + /// _ => {} + /// } + /// + /// let tcp2 = TcpConfig::new().port_reuse(true); + /// let mut listener2 = tcp2.clone().listen_on(listen_addr2).expect("listener"); + /// match listener2.next().await.expect("event")? { + /// ListenerEvent::NewAddress(listen_addr) => { + /// println!("Listening on {:?}", listen_addr); + /// let mut socket = tcp2.dial(listen_addr1).unwrap().await?; + /// // `stream` has `listen_addr2` as its local socket address. + /// } + /// _ => {} + /// } + /// Ok(()) + /// } + /// ``` + /// + /// If a single `GenTcpConfig` is used and cloned for the creation of multiple + /// listening sockets or a wildcard listen socket address is used to listen + /// on any interface, there can be multiple such addresses registered for + /// port reuse. 
In this case, one is chosen whose IP protocol version and + /// loopback status is the same as that of the remote address. Consequently, for + /// maximum control of the local listening addresses and ports that are used + /// for outgoing connections, a new `GenTcpConfig` should be created for each + /// listening socket, avoiding the use of wildcard addresses which bind a + /// socket to all network interfaces. + /// + /// When this option is enabled on a unix system, the socket + /// option `SO_REUSEPORT` is set, if available, to permit + /// reuse of listening ports for multiple sockets. + pub fn port_reuse(mut self, port_reuse: bool) -> Self { + self.port_reuse = if port_reuse { + PortReuse::Enabled { + listen_addrs: Arc::new(RwLock::new(HashSet::new())) } - socket.bind(&socket_addr.into())?; - socket.listen(1024)?; // we may want to make this configurable + } else { + PortReuse::Disabled + }; - // Note: Tokio's TcpListener::from_std, which the TcpListener's TryFrom implementation - // uses, does not set the socket into non-blocking mode. - #[cfg(feature = "tokio")] - socket.set_nonblocking(true); + self + } - let listener = <$tcp_listener>::try_from(socket.into_tcp_listener()) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + fn create_socket(&self, socket_addr: &SocketAddr) -> io::Result { + let domain = if socket_addr.is_ipv4() { + Domain::ipv4() + } else { + Domain::ipv6() + }; + let socket = Socket::new(domain, Type::stream(), Some(socket2::Protocol::tcp()))?; + if socket_addr.is_ipv6() { + socket.set_only_v6(true)?; + } + if let Some(ttl) = self.ttl { + socket.set_ttl(ttl)?; + } + if let Some(nodelay) = self.nodelay { + socket.set_nodelay(nodelay)?; + } + socket.set_reuse_address(true)?; + #[cfg(unix)] + if let PortReuse::Enabled { .. 
} = &self.port_reuse { + socket.set_reuse_port(true)?; + } + Ok(socket) + } - let local_addr = listener.local_addr()?; - let port = local_addr.port(); + fn do_listen(self, socket_addr: SocketAddr) -> io::Result> { + let socket = self.create_socket(&socket_addr)?; + socket.bind(&socket_addr.into())?; + socket.listen(self.backlog as _)?; + socket.set_nonblocking(true)?; + TcpListenStream::::new(socket.into_tcp_listener(), self.port_reuse) + } - // Determine all our listen addresses which is either a single local IP address - // or (if a wildcard IP address was used) the addresses of all our interfaces, - // as reported by `get_if_addrs`. - let addrs = - if socket_addr.ip().is_unspecified() { - let addrs = host_addresses(port)?; - debug!("Listening on {:?}", addrs.iter().map(|(_, _, ma)| ma).collect::>()); - Addresses::Many(addrs) - } else { - let ma = ip_to_multiaddr(local_addr.ip(), port); - debug!("Listening on {:?}", ma); - Addresses::One(ma) - }; + async fn do_dial(self, socket_addr: SocketAddr) -> Result { + let socket = self.create_socket(&socket_addr)?; - // Generate `NewAddress` events for each new `Multiaddr`. 
- let pending = match addrs { - Addresses::One(ref ma) => { - let event = ListenerEvent::NewAddress(ma.clone()); - let mut list = VecDeque::new(); - list.push_back(Ok(event)); - list - } - Addresses::Many(ref aa) => { - aa.iter() - .map(|(_, _, ma)| ma) - .cloned() - .map(ListenerEvent::NewAddress) - .map(Result::Ok) - .collect::>() - } - }; - - let listen_stream = $tcp_listen_stream { - stream: listener, - pause: None, - pause_duration: cfg.sleep_on_error, - port, - addrs, - pending, - config: cfg - }; - - Ok(stream::unfold(listen_stream, |s| s.next().map(Some))) + if let Some(addr) = self.port_reuse.local_dial_addr(&socket_addr.ip()) { + log::trace!("Binding dial socket to listen socket {}", addr); + socket.bind(&addr.into())?; } - Ok(Box::pin(do_listen(self, socket_addr).try_flatten_stream())) + socket.set_nonblocking(true)?; + + match socket.connect(&socket_addr.into()) { + Ok(()) => {} + Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) => {} + Err(err) if err.kind() == io::ErrorKind::WouldBlock => {} + Err(err) => return Err(err), + }; + + let stream = T::new_stream(socket.into_tcp_stream()).await?; + Ok(stream) + } +} + +impl Transport for GenTcpConfig +where + T: Provider + Send + 'static, + T::Listener: Unpin, + T::IfWatcher: Unpin, + T::Stream: Unpin, +{ + type Output = T::Stream; + type Error = io::Error; + type Dial = Pin> + Send>>; + type Listener = TcpListenStream; + type ListenerUpgrade = Ready>; + + fn listen_on(self, addr: Multiaddr) -> Result> { + let socket_addr = if let Ok(sa) = multiaddr_to_socketaddr(&addr) { + sa + } else { + return Err(TransportError::MultiaddrNotSupported(addr)); + }; + log::debug!("listening on {}", socket_addr); + self.do_listen(socket_addr) + .map_err(TransportError::Other) } fn dial(self, addr: Multiaddr) -> Result> { - let socket_addr = - if let Ok(socket_addr) = multiaddr_to_socketaddr(&addr) { - if socket_addr.port() == 0 || socket_addr.ip().is_unspecified() { - debug!("Instantly refusing dialing {}, as it is 
invalid", addr); - return Err(TransportError::Other(io::ErrorKind::ConnectionRefused.into())) - } - socket_addr - } else { - return Err(TransportError::MultiaddrNotSupported(addr)) - }; - - debug!("Dialing {}", addr); - - async fn do_dial(cfg: $tcp_config, socket_addr: SocketAddr) -> Result<$tcp_trans_stream, io::Error> { - let stream = <$tcp_stream>::connect(&socket_addr).await?; - $apply_config(&cfg, &stream)?; - Ok($tcp_trans_stream { inner: stream }) - } - - Ok(Box::pin(do_dial(self, socket_addr))) - } -} - -/// Stream that listens on an TCP/IP address. -#[cfg_attr(docsrs, doc(cfg(feature = $feature_name)))] -pub struct $tcp_listen_stream { - /// The incoming connections. - stream: $tcp_listener, - /// The current pause if any. - pause: Option, - /// How long to pause after an error. - pause_duration: Duration, - /// The port which we use as our listen port in listener event addresses. - port: u16, - /// The set of known addresses. - addrs: Addresses, - /// Temporary buffer of listener events. - pending: Buffer<$tcp_trans_stream>, - /// Original configuration. - config: $tcp_config -} - -impl $tcp_listen_stream { - /// Takes ownership of the listener, and returns the next incoming event and the listener. - async fn next(mut self) -> (Result>, io::Error>, io::Error>, Self) { - loop { - if let Some(event) = self.pending.pop_front() { - return (event, self); + let socket_addr = if let Ok(socket_addr) = multiaddr_to_socketaddr(&addr) { + if socket_addr.port() == 0 || socket_addr.ip().is_unspecified() { + return Err(TransportError::MultiaddrNotSupported(addr)); } - - if let Some(pause) = self.pause.take() { - let _ = pause.await; - } - - // TODO: do we get the peer_addr at the same time? 
- let (sock, _) = match self.stream.accept().await { - Ok(s) => s, - Err(e) => { - debug!("error accepting incoming connection: {}", e); - self.pause = Some(Delay::new(self.pause_duration)); - return (Ok(ListenerEvent::Error(e)), self); - } - }; - - let sock_addr = match sock.peer_addr() { - Ok(addr) => addr, - Err(err) => { - debug!("Failed to get peer address: {:?}", err); - continue - } - }; - - let local_addr = match sock.local_addr() { - Ok(sock_addr) => { - if let Addresses::Many(ref mut addrs) = self.addrs { - if let Err(err) = check_for_interface_changes(&sock_addr, self.port, addrs, &mut self.pending) { - return (Ok(ListenerEvent::Error(err)), self); - } - } - ip_to_multiaddr(sock_addr.ip(), sock_addr.port()) - } - Err(err) => { - debug!("Failed to get local address of incoming socket: {:?}", err); - continue - } - }; - - let remote_addr = ip_to_multiaddr(sock_addr.ip(), sock_addr.port()); - - match $apply_config(&self.config, &sock) { - Ok(()) => { - trace!("Incoming connection from {} at {}", remote_addr, local_addr); - self.pending.push_back(Ok(ListenerEvent::Upgrade { - upgrade: future::ok($tcp_trans_stream { inner: sock }), - local_addr, - remote_addr - })) - } - Err(err) => { - debug!("Error upgrading incoming connection from {}: {:?}", remote_addr, err); - self.pending.push_back(Ok(ListenerEvent::Upgrade { - upgrade: future::err(err), - local_addr, - remote_addr - })) - } - } - } - } -} - -/// Wraps around a `TcpStream` and adds logging for important events. 
-#[cfg_attr(docsrs, doc(cfg(feature = $feature_name)))] -#[derive(Debug)] -pub struct $tcp_trans_stream { - inner: $tcp_stream, -} - -impl Drop for $tcp_trans_stream { - fn drop(&mut self) { - if let Ok(addr) = self.inner.peer_addr() { - debug!("Dropped TCP connection to {:?}", addr); + socket_addr } else { - debug!("Dropped TCP connection to undeterminate peer"); + return Err(TransportError::MultiaddrNotSupported(addr)); + }; + log::debug!("dialing {}", socket_addr); + Ok(Box::pin(self.do_dial(socket_addr))) + } + + /// When port reuse is disabled and hence ephemeral local ports are + /// used for outgoing connections, the returned address is the + /// `observed` address with the port replaced by the port of the + /// `listen` address. + /// + /// If port reuse is enabled, `Some(observed)` is returned, as there + /// is a chance that the `observed` address _and_ port are reachable + /// for other peers if there is a NAT in the way that does endpoint- + /// independent filtering. Furthermore, even if that is not the case + /// and TCP hole punching techniques must be used for NAT traversal, + /// the `observed` address is still the one that a remote should connect + /// to for the purpose of the hole punching procedure, as it represents + /// the mapped IP and port of the NAT device in front of the local + /// node. + /// + /// `None` is returned if one of the given addresses is not a TCP/IP + /// address. + fn address_translation(&self, listen: &Multiaddr, observed: &Multiaddr) -> Option { + match &self.port_reuse { + PortReuse::Disabled => address_translation(listen, observed), + PortReuse::Enabled { .. } => Some(observed.clone()), } } } -/// Applies the socket configuration parameters to a socket. 
-fn $apply_config(config: &$tcp_config, socket: &$tcp_stream) -> Result<(), io::Error> { - if let Some(ttl) = config.ttl { - socket.set_ttl(ttl)?; - } +type TcpListenerEvent = ListenerEvent>, io::Error>; - if let Some(nodelay) = config.nodelay { - socket.set_nodelay(nodelay)?; - } - - Ok(()) +enum IfWatch { + Pending(BoxFuture<'static, io::Result>), + Ready(TIfWatcher), } -}; -} - -#[cfg(feature = "async-std")] -codegen!("async-std", TcpConfig, TcpTransStream, TcpListenStream, apply_config_async_std, async_std::net::TcpStream, async_std::net::TcpListener); - -#[cfg(feature = "tokio")] -codegen!("tokio", TokioTcpConfig, TokioTcpTransStream, TokioTcpListenStream, apply_config_tokio, tokio::net::TcpStream, tokio::net::TcpListener); - -#[cfg(feature = "async-std")] -impl AsyncRead for TcpTransStream { - fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll> { - AsyncRead::poll_read(Pin::new(&mut self.inner), cx, buf) +/// The listening addresses of a [`TcpListenStream`]. +enum InAddr { + /// The stream accepts connections on a single interface. + One { + addr: IpAddr, + out: Option + }, + /// The stream accepts connections on all interfaces. + Any { + addrs: HashSet, + if_watch: IfWatch, } } -#[cfg(feature = "async-std")] -impl AsyncWrite for TcpTransStream { - fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { - AsyncWrite::poll_write(Pin::new(&mut self.inner), cx, buf) +/// A stream of incoming connections on one or more interfaces. +pub struct TcpListenStream +where + T: Provider +{ + /// The socket address that the listening socket is bound to, + /// which may be a "wildcard address" like `INADDR_ANY` or `IN6ADDR_ANY` + /// when listening on all interfaces for IPv4 respectively IPv6 connections. + listen_addr: SocketAddr, + /// The async listening socket for incoming connections. 
+ listener: T::Listener, + /// The IP addresses of network interfaces on which the listening socket + /// is accepting connections. + /// + /// If the listen socket listens on all interfaces, these may change over + /// time as interfaces become available or unavailable. + in_addr: InAddr, + /// The port reuse configuration for outgoing connections. + /// + /// If enabled, all IP addresses on which this listening stream + /// is accepting connections (`in_addr`) are registered for reuse + /// as local addresses for the sockets of outgoing connections. They are + /// unregistered when the stream encounters an error or is dropped. + port_reuse: PortReuse, + /// How long to sleep after a (non-fatal) error while trying + /// to accept a new connection. + sleep_on_error: Duration, + /// The current pause, if any. + pause: Option, +} + +impl TcpListenStream +where + T: Provider +{ + /// Constructs a `TcpListenStream` for incoming connections around + /// the given `TcpListener`. + fn new(listener: TcpListener, port_reuse: PortReuse) -> io::Result { + let listen_addr = listener.local_addr()?; + + let in_addr = if match &listen_addr { + SocketAddr::V4(a) => a.ip().is_unspecified(), + SocketAddr::V6(a) => a.ip().is_unspecified(), + } { + // The `addrs` are populated via `if_watch` when the + // `TcpListenStream` is polled. 
+ InAddr::Any { + addrs: HashSet::new(), + if_watch: IfWatch::Pending(T::if_watcher()), + } + } else { + InAddr::One { + out: Some(ip_to_multiaddr(listen_addr.ip(), listen_addr.port())), + addr: listen_addr.ip(), + } + }; + + let listener = T::new_listener(listener)?; + + Ok(TcpListenStream { + port_reuse, + listener, + listen_addr, + in_addr, + pause: None, + sleep_on_error: Duration::from_millis(100), + }) } - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - AsyncWrite::poll_flush(Pin::new(&mut self.inner), cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - AsyncWrite::poll_close(Pin::new(&mut self.inner), cx) + /// Disables port reuse for any listen address of this stream. + /// + /// This is done when the `TcpListenStream` encounters a fatal + /// error (for the stream) or is dropped. + /// + /// Has no effect if port reuse is disabled. + fn disable_port_reuse(&mut self) { + match &self.in_addr { + InAddr::One { addr, .. } => { + self.port_reuse.unregister(*addr, self.listen_addr.port()); + }, + InAddr::Any { addrs, .. } => { + for addr in addrs { + self.port_reuse.unregister(*addr, self.listen_addr.port()); + } + } + } } } -#[cfg(feature = "tokio")] -impl AsyncRead for TokioTcpTransStream { - fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8]) -> Poll> { - // Adapted from - // https://github.com/tokio-rs/tokio/blob/6d99e1c7dec4c6a37c4c7bf2801bc82cc210351d/tokio-util/src/compat.rs#L126. 
- let mut read_buf = tokio::io::ReadBuf::new(buf); - futures::ready!(tokio::io::AsyncRead::poll_read(Pin::new(&mut self.inner), cx, &mut read_buf))?; - Poll::Ready(Ok(read_buf.filled().len())) +impl Drop for TcpListenStream +where + T: Provider +{ + fn drop(&mut self) { + self.disable_port_reuse(); } } -#[cfg(feature = "tokio")] -impl AsyncWrite for TokioTcpTransStream { - fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { - tokio::io::AsyncWrite::poll_write(Pin::new(&mut self.inner), cx, buf) - } +impl Stream for TcpListenStream +where + T: Provider, + T::Listener: Unpin, + T::Stream: Unpin, + T::IfWatcher: Unpin, +{ + type Item = Result, io::Error>; - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - tokio::io::AsyncWrite::poll_flush(Pin::new(&mut self.inner), cx) - } + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let me = Pin::into_inner(self); - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - tokio::io::AsyncWrite::poll_shutdown(Pin::new(&mut self.inner), cx) + loop { + match &mut me.in_addr { + InAddr::Any { if_watch, addrs } => match if_watch { + // If we listen on all interfaces, wait for `if-watch` to be ready. + IfWatch::Pending(f) => match ready!(Pin::new(f).poll(cx)) { + Ok(w) => { + *if_watch = IfWatch::Ready(w); + continue + } + Err(err) => { + log::debug! { + "Failed to begin observing interfaces: {:?}. Scheduling retry.", + err + }; + *if_watch = IfWatch::Pending(T::if_watcher()); + me.pause = Some(Delay::new(me.sleep_on_error)); + return Poll::Ready(Some(Ok(ListenerEvent::Error(err)))); + } + }, + // Consume all events for up/down interface changes. 
+ IfWatch::Ready(watch) => while let Poll::Ready(ev) = T::poll_interfaces(watch, cx) { + match ev { + Ok(IfEvent::Up(inet)) => { + let ip = inet.addr(); + if me.listen_addr.is_ipv4() == ip.is_ipv4() && addrs.insert(ip) { + let ma = ip_to_multiaddr(ip, me.listen_addr.port()); + log::debug!("New listen address: {}", ma); + me.port_reuse.register(ip, me.listen_addr.port()); + return Poll::Ready(Some(Ok(ListenerEvent::NewAddress(ma)))); + } + } + Ok(IfEvent::Down(inet)) => { + let ip = inet.addr(); + if me.listen_addr.is_ipv4() == ip.is_ipv4() && addrs.remove(&ip) { + let ma = ip_to_multiaddr(ip, me.listen_addr.port()); + log::debug!("Expired listen address: {}", ma); + me.port_reuse.unregister(ip, me.listen_addr.port()); + return Poll::Ready(Some(Ok(ListenerEvent::AddressExpired(ma)))); + } + } + Err(err) => { + log::debug! { + "Failure polling interfaces: {:?}. Scheduling retry.", + err + }; + me.pause = Some(Delay::new(me.sleep_on_error)); + return Poll::Ready(Some(Ok(ListenerEvent::Error(err)))); + } + } + }, + }, + // If the listener is bound to a single interface, make sure the + // address is registered for port reuse and reported once. + InAddr::One { addr, out } => if let Some(multiaddr) = out.take() { + me.port_reuse.register(*addr, me.listen_addr.port()); + return Poll::Ready(Some(Ok(ListenerEvent::NewAddress(multiaddr)))) + } + } + + if let Some(mut pause) = me.pause.take() { + match Pin::new(&mut pause).poll(cx) { + Poll::Ready(_) => {} + Poll::Pending => { + me.pause = Some(pause); + return Poll::Pending; + } + } + } + + // Take the pending connection from the backlog. + let incoming = match T::poll_accept(&mut me.listener, cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Ok(incoming)) => incoming, + Poll::Ready(Err(e)) => { + // These errors are non-fatal for the listener stream. 
+ log::error!("error accepting incoming connection: {}", e); + me.pause = Some(Delay::new(me.sleep_on_error)); + return Poll::Ready(Some(Ok(ListenerEvent::Error(e)))); + } + }; + + let local_addr = ip_to_multiaddr(incoming.local_addr.ip(), incoming.local_addr.port()); + let remote_addr = ip_to_multiaddr(incoming.remote_addr.ip(), incoming.remote_addr.port()); + + log::debug!("Incoming connection from {} at {}", remote_addr, local_addr); + + return Poll::Ready(Some(Ok(ListenerEvent::Upgrade { + upgrade: future::ok(incoming.stream), + local_addr, + remote_addr, + }))); + } } } @@ -400,161 +672,19 @@ fn multiaddr_to_socketaddr(addr: &Multiaddr) -> Result { // Create a [`Multiaddr`] from the given IP address and port number. fn ip_to_multiaddr(ip: IpAddr, port: u16) -> Multiaddr { - let proto = match ip { - IpAddr::V4(ip) => Protocol::Ip4(ip), - IpAddr::V6(ip) => Protocol::Ip6(ip) - }; - let it = iter::once(proto).chain(iter::once(Protocol::Tcp(port))); - Multiaddr::from_iter(it) -} - -// Collect all local host addresses and use the provided port number as listen port. -fn host_addresses(port: u16) -> io::Result> { - let mut addrs = Vec::new(); - for iface in get_if_addrs()? { - let ip = iface.ip(); - let ma = ip_to_multiaddr(ip, port); - let ipn = match iface.addr { - IfAddr::V4(ip4) => { - let prefix_len = (!u32::from_be_bytes(ip4.netmask.octets())).leading_zeros(); - let ipnet = Ipv4Net::new(ip4.ip, prefix_len as u8) - .expect("prefix_len is the number of bits in a u32, so can not exceed 32"); - IpNet::V4(ipnet) - } - IfAddr::V6(ip6) => { - let prefix_len = (!u128::from_be_bytes(ip6.netmask.octets())).leading_zeros(); - let ipnet = Ipv6Net::new(ip6.ip, prefix_len as u8) - .expect("prefix_len is the number of bits in a u128, so can not exceed 128"); - IpNet::V6(ipnet) - } - }; - addrs.push((ip, ipn, ma)) - } - Ok(addrs) -} - -/// Listen address information. -#[derive(Debug)] -enum Addresses { - /// A specific address is used to listen. 
- One(Multiaddr), - /// A set of addresses is used to listen. - Many(Vec<(IpAddr, IpNet, Multiaddr)>) -} - -type Buffer = VecDeque>, io::Error>, io::Error>>; - -// If we listen on all interfaces, find out to which interface the given -// socket address belongs. In case we think the address is new, check -// all host interfaces again and report new and expired listen addresses. -fn check_for_interface_changes( - socket_addr: &SocketAddr, - listen_port: u16, - listen_addrs: &mut Vec<(IpAddr, IpNet, Multiaddr)>, - pending: &mut Buffer -) -> Result<(), io::Error> { - // Check for exact match: - if listen_addrs.iter().find(|(ip, ..)| ip == &socket_addr.ip()).is_some() { - return Ok(()) - } - - // No exact match => check netmask - if listen_addrs.iter().find(|(_, net, _)| net.contains(&socket_addr.ip())).is_some() { - return Ok(()) - } - - // The local IP address of this socket is new to us. - // We check for changes in the set of host addresses and report new - // and expired addresses. - // - // TODO: We do not detect expired addresses unless there is a new address. - let old_listen_addrs = std::mem::replace(listen_addrs, host_addresses(listen_port)?); - - // Check for addresses no longer in use. - for (ip, _, ma) in old_listen_addrs.iter() { - if listen_addrs.iter().find(|(i, ..)| i == ip).is_none() { - debug!("Expired listen address: {}", ma); - pending.push_back(Ok(ListenerEvent::AddressExpired(ma.clone()))); - } - } - - // Check for new addresses. - for (ip, _, ma) in listen_addrs.iter() { - if old_listen_addrs.iter().find(|(i, ..)| i == ip).is_none() { - debug!("New listen address: {}", ma); - pending.push_back(Ok(ListenerEvent::NewAddress(ma.clone()))); - } - } - - // We should now be able to find the local address, if not something - // is seriously wrong and we report an error. 
- if listen_addrs.iter() - .find(|(ip, net, _)| ip == &socket_addr.ip() || net.contains(&socket_addr.ip())) - .is_none() - { - let msg = format!("{} does not match any listen address", socket_addr.ip()); - return Err(io::Error::new(io::ErrorKind::Other, msg)) - } - - Ok(()) + Multiaddr::empty() + .with(ip.into()) + .with(Protocol::Tcp(port)) } #[cfg(test)] mod tests { - use futures::prelude::*; - use libp2p_core::{Transport, multiaddr::{Multiaddr, Protocol}, transport::ListenerEvent}; - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use super::multiaddr_to_socketaddr; - #[cfg(feature = "async-std")] - use super::TcpConfig; - - #[test] - #[cfg(feature = "async-std")] - fn wildcard_expansion() { - fn test(addr: Multiaddr) { - let mut listener = TcpConfig::new().listen_on(addr).expect("listener"); - - // Get the first address. - let addr = futures::executor::block_on_stream(listener.by_ref()) - .next() - .expect("some event") - .expect("no error") - .into_new_address() - .expect("listen address"); - - // Process all initial `NewAddress` events and make sure they - // do not contain wildcard address or port. 
- let server = listener - .take_while(|event| match event.as_ref().unwrap() { - ListenerEvent::NewAddress(a) => { - let mut iter = a.iter(); - match iter.next().expect("ip address") { - Protocol::Ip4(ip) => assert!(!ip.is_unspecified()), - Protocol::Ip6(ip) => assert!(!ip.is_unspecified()), - other => panic!("Unexpected protocol: {}", other) - } - if let Protocol::Tcp(port) = iter.next().expect("port") { - assert_ne!(0, port) - } else { - panic!("No TCP port in address: {}", a) - } - futures::future::ready(true) - } - _ => futures::future::ready(false) - }) - .for_each(|_| futures::future::ready(())); - - let client = TcpConfig::new().dial(addr).expect("dialer"); - async_std::task::block_on(futures::future::join(server, client)).1.unwrap(); - } - - test("/ip4/0.0.0.0/tcp/0".parse().unwrap()); - test("/ip6/::1/tcp/0".parse().unwrap()); - } + use futures::channel::mpsc; + use super::*; #[test] fn multiaddr_to_tcp_conversion() { - use std::net::Ipv6Addr; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; assert!( multiaddr_to_socketaddr(&"/ip4/127.0.0.1/udp/1234".parse::().unwrap()) @@ -602,45 +732,65 @@ mod tests { } #[test] - #[cfg(feature = "async-std")] fn communicating_between_dialer_and_listener() { - fn test(addr: Multiaddr) { - let (ready_tx, ready_rx) = futures::channel::oneshot::channel(); - let mut ready_tx = Some(ready_tx); + env_logger::try_init().ok(); - async_std::task::spawn(async move { - let tcp = TcpConfig::new(); - let mut listener = tcp.listen_on(addr).unwrap(); - - loop { - match listener.next().await.unwrap().unwrap() { - ListenerEvent::NewAddress(listen_addr) => { - ready_tx.take().unwrap().send(listen_addr).unwrap(); - }, - ListenerEvent::Upgrade { upgrade, .. 
} => { - let mut upgrade = upgrade.await.unwrap(); - let mut buf = [0u8; 3]; - upgrade.read_exact(&mut buf).await.unwrap(); - assert_eq!(buf, [1, 2, 3]); - upgrade.write_all(&[4, 5, 6]).await.unwrap(); - }, - _ => unreachable!() + async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { + let tcp = GenTcpConfig::::new(); + let mut listener = tcp.listen_on(addr).unwrap(); + loop { + match listener.next().await.unwrap().unwrap() { + ListenerEvent::NewAddress(listen_addr) => { + ready_tx.send(listen_addr).await.unwrap(); } + ListenerEvent::Upgrade { upgrade, .. } => { + let mut upgrade = upgrade.await.unwrap(); + let mut buf = [0u8; 3]; + upgrade.read_exact(&mut buf).await.unwrap(); + assert_eq!(buf, [1, 2, 3]); + upgrade.write_all(&[4, 5, 6]).await.unwrap(); + return + } + e => panic!("Unexpected listener event: {:?}", e), } - }); + } + } - async_std::task::block_on(async move { - let addr = ready_rx.await.unwrap(); - let tcp = TcpConfig::new(); + async fn dialer(mut ready_rx: mpsc::Receiver) { + let addr = ready_rx.next().await.unwrap(); + let tcp = GenTcpConfig::::new(); - // Obtain a future socket through dialing - let mut socket = tcp.dial(addr.clone()).unwrap().await.unwrap(); - socket.write_all(&[0x1, 0x2, 0x3]).await.unwrap(); + // Obtain a future socket through dialing + let mut socket = tcp.dial(addr.clone()).unwrap().await.unwrap(); + socket.write_all(&[0x1, 0x2, 0x3]).await.unwrap(); - let mut buf = [0u8; 3]; - socket.read_exact(&mut buf).await.unwrap(); - assert_eq!(buf, [4, 5, 6]); - }); + let mut buf = [0u8; 3]; + socket.read_exact(&mut buf).await.unwrap(); + assert_eq!(buf, [4, 5, 6]); + } + + fn test(addr: Multiaddr) { + #[cfg(feature = "async-io")] + { + let (ready_tx, ready_rx) = mpsc::channel(1); + let listener = listener::(addr.clone(), ready_tx); + let dialer = dialer::(ready_rx); + let listener = async_std::task::spawn(listener); + async_std::task::block_on(dialer); + async_std::task::block_on(listener); + } + + #[cfg(feature = "tokio")] 
+ { + let (ready_tx, ready_rx) = mpsc::channel(1); + let listener = listener::(addr.clone(), ready_tx); + let dialer = dialer::(ready_rx); + let rt = tokio_crate::runtime::Builder::new_current_thread().enable_io().build().unwrap(); + let tasks = tokio_crate::task::LocalSet::new(); + let listener = tasks.spawn_local(listener); + tasks.block_on(&rt, dialer); + tasks.block_on(&rt, listener).unwrap(); + } } test("/ip4/127.0.0.1/tcp/0".parse().unwrap()); @@ -648,49 +798,234 @@ mod tests { } #[test] - #[cfg(feature = "async-std")] - fn replace_port_0_in_returned_multiaddr_ipv4() { - let tcp = TcpConfig::new(); + fn wildcard_expansion() { + env_logger::try_init().ok(); - let addr = "/ip4/127.0.0.1/tcp/0".parse::().unwrap(); - assert!(addr.to_string().contains("tcp/0")); + async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { + let tcp = GenTcpConfig::::new(); + let mut listener = tcp.listen_on(addr).unwrap(); - let new_addr = futures::executor::block_on_stream(tcp.listen_on(addr).unwrap()) - .next() - .expect("some event") - .expect("no error") - .into_new_address() - .expect("listen address"); + loop { + match listener.next().await.unwrap().unwrap() { + ListenerEvent::NewAddress(a) => { + let mut iter = a.iter(); + match iter.next().expect("ip address") { + Protocol::Ip4(ip) => assert!(!ip.is_unspecified()), + Protocol::Ip6(ip) => assert!(!ip.is_unspecified()), + other => panic!("Unexpected protocol: {}", other), + } + if let Protocol::Tcp(port) = iter.next().expect("port") { + assert_ne!(0, port) + } else { + panic!("No TCP port in address: {}", a) + } + ready_tx.send(a).await.ok(); + return + } + _ => {} + } + } + } - assert!(!new_addr.to_string().contains("tcp/0")); + async fn dialer(mut ready_rx: mpsc::Receiver) { + let dest_addr = ready_rx.next().await.unwrap(); + let tcp = GenTcpConfig::::new(); + tcp.dial(dest_addr).unwrap().await.unwrap(); + } + + fn test(addr: Multiaddr) { + #[cfg(feature = "async-io")] + { + let (ready_tx, ready_rx) = 
mpsc::channel(1); + let listener = listener::(addr.clone(), ready_tx); + let dialer = dialer::(ready_rx); + let listener = async_std::task::spawn(listener); + async_std::task::block_on(dialer); + async_std::task::block_on(listener); + } + + #[cfg(feature = "tokio")] + { + let (ready_tx, ready_rx) = mpsc::channel(1); + let listener = listener::(addr.clone(), ready_tx); + let dialer = dialer::(ready_rx); + let rt = tokio_crate::runtime::Builder::new_current_thread().enable_io().build().unwrap(); + let tasks = tokio_crate::task::LocalSet::new(); + let listener = tasks.spawn_local(listener); + tasks.block_on(&rt, dialer); + tasks.block_on(&rt, listener).unwrap(); + } + } + + test("/ip4/0.0.0.0/tcp/0".parse().unwrap()); + test("/ip6/::1/tcp/0".parse().unwrap()); } #[test] - #[cfg(feature = "async-std")] - fn replace_port_0_in_returned_multiaddr_ipv6() { - let tcp = TcpConfig::new(); + fn port_reuse_dialing() { + env_logger::try_init().ok(); - let addr: Multiaddr = "/ip6/::1/tcp/0".parse().unwrap(); - assert!(addr.to_string().contains("tcp/0")); + async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { + let tcp = GenTcpConfig::::new(); + let mut listener = tcp.listen_on(addr).unwrap(); + loop { + match listener.next().await.unwrap().unwrap() { + ListenerEvent::NewAddress(listen_addr) => { + ready_tx.send(listen_addr).await.ok(); + } + ListenerEvent::Upgrade { upgrade, .. 
} => { + let mut upgrade = upgrade.await.unwrap(); + let mut buf = [0u8; 3]; + upgrade.read_exact(&mut buf).await.unwrap(); + assert_eq!(buf, [1, 2, 3]); + upgrade.write_all(&[4, 5, 6]).await.unwrap(); + return + } + e => panic!("Unexpected event: {:?}", e), + } + } + } - let new_addr = futures::executor::block_on_stream(tcp.listen_on(addr).unwrap()) - .next() - .expect("some event") - .expect("no error") - .into_new_address() - .expect("listen address"); + async fn dialer(addr: Multiaddr, mut ready_rx: mpsc::Receiver) { + let dest_addr = ready_rx.next().await.unwrap(); + let tcp = GenTcpConfig::::new().port_reuse(true); + let mut listener = tcp.clone().listen_on(addr).unwrap(); + match listener.next().await.unwrap().unwrap() { + ListenerEvent::NewAddress(_) => { + // Obtain a future socket through dialing + let mut socket = tcp.dial(dest_addr).unwrap().await.unwrap(); + socket.write_all(&[0x1, 0x2, 0x3]).await.unwrap(); + // socket.flush().await; + let mut buf = [0u8; 3]; + socket.read_exact(&mut buf).await.unwrap(); + assert_eq!(buf, [4, 5, 6]); + } + e => panic!("Unexpected listener event: {:?}", e) + } + } - assert!(!new_addr.to_string().contains("tcp/0")); + fn test(addr: Multiaddr) { + #[cfg(feature = "async-io")] + { + let (ready_tx, ready_rx) = mpsc::channel(1); + let listener = listener::(addr.clone(), ready_tx); + let dialer = dialer::(addr.clone(), ready_rx); + let listener = async_std::task::spawn(listener); + async_std::task::block_on(dialer); + async_std::task::block_on(listener); + } + + #[cfg(feature = "tokio")] + { + let (ready_tx, ready_rx) = mpsc::channel(1); + let listener = listener::(addr.clone(), ready_tx); + let dialer = dialer::(addr.clone(), ready_rx); + let rt = tokio_crate::runtime::Builder::new_current_thread().enable_io().build().unwrap(); + let tasks = tokio_crate::task::LocalSet::new(); + let listener = tasks.spawn_local(listener); + tasks.block_on(&rt, dialer); + tasks.block_on(&rt, listener).unwrap(); + } + } + + 
test("/ip4/127.0.0.1/tcp/0".parse().unwrap()); + test("/ip6/::1/tcp/0".parse().unwrap()); } #[test] - #[cfg(feature = "async-std")] - fn larger_addr_denied() { - let tcp = TcpConfig::new(); + fn port_reuse_listening() { + env_logger::try_init().ok(); - let addr = "/ip4/127.0.0.1/tcp/12345/tcp/12345" - .parse::() - .unwrap(); - assert!(tcp.listen_on(addr).is_err()); + async fn listen_twice(addr: Multiaddr) { + let tcp = GenTcpConfig::::new().port_reuse(true); + let mut listener1 = tcp.clone().listen_on(addr).unwrap(); + match listener1.next().await.unwrap().unwrap() { + ListenerEvent::NewAddress(addr1) => { + // Listen on the same address a second time. + let mut listener2 = tcp.clone().listen_on(addr1.clone()).unwrap(); + match listener2.next().await.unwrap().unwrap() { + ListenerEvent::NewAddress(addr2) => { + assert_eq!(addr1, addr2); + return + } + e => panic!("Unexpected listener event: {:?}", e), + } + } + e => panic!("Unexpected listener event: {:?}", e), + } + } + + fn test(addr: Multiaddr) { + #[cfg(feature = "async-io")] + { + let listener = listen_twice::(addr.clone()); + async_std::task::block_on(listener); + } + + #[cfg(feature = "tokio")] + { + let listener = listen_twice::(addr.clone()); + let rt = tokio_crate::runtime::Builder::new_current_thread().enable_io().build().unwrap(); + rt.block_on(listener); + } + } + + test("/ip4/127.0.0.1/tcp/0".parse().unwrap()); + } + + #[test] + fn listen_port_0() { + env_logger::try_init().ok(); + + async fn listen(addr: Multiaddr) -> Multiaddr { + GenTcpConfig::::new() + .listen_on(addr) + .unwrap() + .next() + .await + .expect("some event") + .expect("no error") + .into_new_address() + .expect("listen address") + } + + fn test(addr: Multiaddr) { + #[cfg(feature = "async-io")] + { + let new_addr = async_std::task::block_on(listen::(addr.clone())); + assert!(!new_addr.to_string().contains("tcp/0")); + } + + #[cfg(feature = "tokio")] + { + let rt = 
tokio_crate::runtime::Builder::new_current_thread().enable_io().build().unwrap(); + let new_addr = rt.block_on(listen::(addr.clone())); + assert!(!new_addr.to_string().contains("tcp/0")); + } + } + + test("/ip6/::1/tcp/0".parse().unwrap()); + test("/ip4/127.0.0.1/tcp/0".parse().unwrap()); + } + + #[test] + fn listen_invalid_addr() { + env_logger::try_init().ok(); + + fn test(addr: Multiaddr) { + #[cfg(feature = "async-io")] + { + let tcp = TcpConfig::new(); + assert!(tcp.listen_on(addr.clone()).is_err()); + } + + #[cfg(feature = "tokio")] + { + let tcp = TokioTcpConfig::new(); + assert!(tcp.listen_on(addr.clone()).is_err()); + } + } + + test("/ip4/127.0.0.1/tcp/12345/tcp/12345".parse().unwrap()); } } diff --git a/transports/tcp/src/provider.rs b/transports/tcp/src/provider.rs new file mode 100644 index 00000000..091a6691 --- /dev/null +++ b/transports/tcp/src/provider.rs @@ -0,0 +1,81 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! The interface for providers of non-blocking TCP implementations. + +#[cfg(feature = "async-io")] +pub mod async_io; + +#[cfg(feature = "tokio")] +pub mod tokio; + +use futures::io::{AsyncRead, AsyncWrite}; +use futures::future::BoxFuture; +use ipnet::IpNet; +use std::task::{Context, Poll}; +use std::{fmt, io}; +use std::net::{SocketAddr, TcpListener, TcpStream}; + +/// An event relating to a change of availability of an address +/// on a network interface. +pub enum IfEvent { + Up(IpNet), + Down(IpNet), +} + +/// An incoming connection returned from [`Provider::poll_accept()`]. +pub struct Incoming { + pub stream: S, + pub local_addr: SocketAddr, + pub remote_addr: SocketAddr, +} + +/// The interface for non-blocking TCP I/O providers. +pub trait Provider: Clone + Send + 'static { + /// The type of TCP streams obtained from [`Provider::new_stream`] + /// and [`Provider::poll_accept`]. + type Stream: AsyncRead + AsyncWrite + Send + Unpin + fmt::Debug; + /// The type of TCP listeners obtained from [`Provider::new_listener`]. + type Listener: Send + Unpin; + /// The type of network interface observers obtained from [`Provider::if_watcher`]. + type IfWatcher: Send + Unpin; + + /// Creates an instance of [`Self::IfWatcher`] that can be polled for + /// network interface changes via [`Self::poll_interfaces`]. + fn if_watcher() -> BoxFuture<'static, io::Result>; + + /// Creates a new listener wrapping the given [`TcpListener`] that + /// can be polled for incoming connections via [`Self::poll_accept()`]. + fn new_listener(_: TcpListener) -> io::Result; + + /// Creates a new stream for an outgoing connection, wrapping the + /// given [`TcpStream`]. 
The given `TcpStream` is initiating a + /// connection, but implementations must wait for the connection + /// setup to complete, i.e. for the stream to be writable. + fn new_stream(_: TcpStream) -> BoxFuture<'static, io::Result>; + + /// Polls a [`Self::Listener`] for an incoming connection, ensuring a task wakeup, + /// if necessary. + fn poll_accept(_: &mut Self::Listener, _: &mut Context<'_>) -> Poll>>; + + /// Polls a [`Self::IfWatcher`] for network interface changes, ensuring a task wakeup, + /// if necessary. + fn poll_interfaces(_: &mut Self::IfWatcher, _: &mut Context<'_>) -> Poll>; +} diff --git a/transports/tcp/src/provider/async_io.rs b/transports/tcp/src/provider/async_io.rs new file mode 100644 index 00000000..b4ce74d6 --- /dev/null +++ b/transports/tcp/src/provider/async_io.rs @@ -0,0 +1,82 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +use super::{Provider, IfEvent, Incoming}; + +use async_io_crate::Async; +use futures::{ + future::{BoxFuture, FutureExt}, +}; +use std::io; +use std::task::{Poll, Context}; +use std::net; + +#[derive(Copy, Clone)] +pub enum Tcp {} + +impl Provider for Tcp { + type Stream = Async; + type Listener = Async; + type IfWatcher = if_watch::IfWatcher; + + fn if_watcher() -> BoxFuture<'static, io::Result> { + if_watch::IfWatcher::new().boxed() + } + + fn new_listener(l: net::TcpListener) -> io::Result { + Async::new(l) + } + + fn new_stream(s: net::TcpStream) -> BoxFuture<'static, io::Result> { + async move { + let stream = Async::new(s)?; + stream.writable().await?; + Ok(stream) + }.boxed() + } + + fn poll_accept(l: &mut Self::Listener, cx: &mut Context<'_>) -> Poll>> { + let (stream, remote_addr) = loop { + match l.poll_readable(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), + Poll::Ready(Ok(())) => match l.accept().now_or_never() { + Some(Err(e)) => return Poll::Ready(Err(e)), + Some(Ok(res)) => break res, + None => { + // Since it doesn't do any harm, account for false positives of + // `poll_readable` just in case, i.e. try again. + } + } + } + }; + + let local_addr = stream.get_ref().local_addr()?; + + Poll::Ready(Ok(Incoming { stream, local_addr, remote_addr })) + } + + fn poll_interfaces(w: &mut Self::IfWatcher, cx: &mut Context<'_>) -> Poll> { + w.poll_unpin(cx).map_ok(|e| match e { + if_watch::IfEvent::Up(a) => IfEvent::Up(a), + if_watch::IfEvent::Down(a) => IfEvent::Down(a), + }) + } +} diff --git a/transports/tcp/src/provider/tokio.rs b/transports/tcp/src/provider/tokio.rs new file mode 100644 index 00000000..0e8136f2 --- /dev/null +++ b/transports/tcp/src/provider/tokio.rs @@ -0,0 +1,168 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +use super::{Provider, IfEvent, Incoming}; + +use futures::{ + future::{self, BoxFuture, FutureExt}, + prelude::*, +}; +use futures_timer::Delay; +use if_addrs::{IfAddr, get_if_addrs}; +use ipnet::{IpNet, Ipv4Net, Ipv6Net}; +use std::collections::HashSet; +use std::convert::TryFrom; +use std::io; +use std::task::{Poll, Context}; +use std::time::Duration; +use std::net; +use std::pin::Pin; + +#[derive(Copy, Clone)] +pub enum Tcp {} + +pub struct IfWatcher { + addrs: HashSet, + delay: Delay, + pending: Vec, +} + +impl Provider for Tcp { + type Stream = TcpStream; + type Listener = tokio_crate::net::TcpListener; + type IfWatcher = IfWatcher; + + fn if_watcher() -> BoxFuture<'static, io::Result> { + future::ready(Ok( + IfWatcher { + addrs: HashSet::new(), + delay: Delay::new(Duration::from_secs(0)), + pending: Vec::new(), + } + )).boxed() + } + + fn new_listener(l: net::TcpListener) -> io::Result { + tokio_crate::net::TcpListener::try_from(l) + } + + fn new_stream(s: net::TcpStream) -> BoxFuture<'static, io::Result> { + async move { + let stream = tokio_crate::net::TcpStream::try_from(s)?; + stream.writable().await?; + Ok(TcpStream(stream)) + }.boxed() + } + + fn poll_accept(l: &mut Self::Listener, cx: &mut Context<'_>) + -> Poll>> + { + let (stream, remote_addr) = match l.poll_accept(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), + Poll::Ready(Ok((stream, remote_addr))) => (stream, remote_addr) + }; + + let local_addr = stream.local_addr()?; + let stream = TcpStream(stream); + + Poll::Ready(Ok(Incoming { stream, local_addr, remote_addr })) + } + + fn poll_interfaces(w: &mut Self::IfWatcher, cx: &mut Context<'_>) -> Poll> { + loop { + if let Some(event) = w.pending.pop() { + return Poll::Ready(Ok(event)) + } + + match Pin::new(&mut w.delay).poll(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(()) => { + let ifs = get_if_addrs()?; + let addrs = ifs.into_iter().map(|iface| match iface.addr { + 
IfAddr::V4(ip4) => { + let prefix_len = (!u32::from_be_bytes(ip4.netmask.octets())).leading_zeros(); + let ipnet = Ipv4Net::new(ip4.ip, prefix_len as u8) + .expect("prefix_len can not exceed 32"); + IpNet::V4(ipnet) + } + IfAddr::V6(ip6) => { + let prefix_len = (!u128::from_be_bytes(ip6.netmask.octets())).leading_zeros(); + let ipnet = Ipv6Net::new(ip6.ip, prefix_len as u8) + .expect("prefix_len can not exceed 128"); + IpNet::V6(ipnet) + } + }).collect::>(); + + for down in w.addrs.difference(&addrs) { + w.pending.push(IfEvent::Down(*down)); + } + + for up in addrs.difference(&w.addrs) { + w.pending.push(IfEvent::Up(*up)); + } + + w.addrs = addrs; + w.delay.reset(Duration::from_secs(10)); + } + } + } + } +} + +/// A [`tokio_crate::net::TcpStream`] that implements [`AsyncRead`] and [`AsyncWrite`]. +#[derive(Debug)] +pub struct TcpStream(pub tokio_crate::net::TcpStream); + +impl Into for TcpStream { + fn into(self: TcpStream) -> tokio_crate::net::TcpStream { + self.0 + } +} + +impl AsyncRead for TcpStream { + fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8]) -> Poll> { + let mut read_buf = tokio_crate::io::ReadBuf::new(buf); + futures::ready!(tokio_crate::io::AsyncRead::poll_read(Pin::new(&mut self.0), cx, &mut read_buf))?; + Poll::Ready(Ok(read_buf.filled().len())) + } +} + +impl AsyncWrite for TcpStream { + fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { + tokio_crate::io::AsyncWrite::poll_write(Pin::new(&mut self.0), cx, buf) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + tokio_crate::io::AsyncWrite::poll_flush(Pin::new(&mut self.0), cx) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + tokio_crate::io::AsyncWrite::poll_shutdown(Pin::new(&mut self.0), cx) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[io::IoSlice<'_>] + ) -> Poll> { + tokio_crate::io::AsyncWrite::poll_write_vectored(Pin::new(&mut 
self.0), cx, bufs) + } +} diff --git a/transports/uds/CHANGELOG.md b/transports/uds/CHANGELOG.md index 8c721bc9..209187e6 100644 --- a/transports/uds/CHANGELOG.md +++ b/transports/uds/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.27.0 [2021-01-12] + +- Update dependencies. + # 0.26.0 [2020-12-17] - Update `libp2p-core`. diff --git a/transports/uds/Cargo.toml b/transports/uds/Cargo.toml index cdaf6961..827feb50 100644 --- a/transports/uds/Cargo.toml +++ b/transports/uds/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-uds" edition = "2018" description = "Unix domain sockets transport for libp2p" -version = "0.26.1" +version = "0.27.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -15,10 +15,10 @@ name = "libp2p_uds" [target.'cfg(all(unix, not(target_os = "emscripten")))'.dependencies] async-std = { version = "1.6.2", optional = true } -libp2p-core = { package = "fluence-fork-libp2p-core", version = "0.26.1", path = "../../core" } +libp2p-core = { package = "fluence-fork-libp2p-core", version = "0.27.1", path = "../../core" } log = "0.4.1" futures = "0.3.1" -tokio = { version = "0.3", default-features = false, features = ["net"], optional = true } +tokio = { version = "1.0.1", default-features = false, features = ["net"], optional = true } [target.'cfg(all(unix, not(target_os = "emscripten")))'.dev-dependencies] tempfile = "3.0" diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index 05efae63..ce698e22 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -109,6 +109,10 @@ impl Transport for $uds_config { Err(TransportError::MultiaddrNotSupported(addr)) } } + + fn address_translation(&self, _server: &Multiaddr, _observed: &Multiaddr) -> Option { + None + } } }; diff --git a/transports/wasm-ext/CHANGELOG.md b/transports/wasm-ext/CHANGELOG.md index 64c12e3d..bd36be01 100644 --- a/transports/wasm-ext/CHANGELOG.md +++ b/transports/wasm-ext/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.27.0 
[2021-01-12] + +- Update dependencies. + # 0.26.0 [2020-12-17] - Update `libp2p-core`. diff --git a/transports/wasm-ext/Cargo.toml b/transports/wasm-ext/Cargo.toml index 773202fa..1ae6065f 100644 --- a/transports/wasm-ext/Cargo.toml +++ b/transports/wasm-ext/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fluence-fork-libp2p-wasm-ext" -version = "0.26.1" +version = "0.27.0" authors = ["Pierre Krieger "] edition = "2018" description = "Allows passing in an external transport in a WASM environment" @@ -15,7 +15,7 @@ name = "libp2p_wasm_ext" [dependencies] futures = "0.3.1" js-sys = "0.3.19" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } parity-send-wrapper = "0.1.0" wasm-bindgen = "0.2.42" wasm-bindgen-futures = "0.4.4" diff --git a/transports/wasm-ext/src/lib.rs b/transports/wasm-ext/src/lib.rs index 8c0e5012..cec2ad1c 100644 --- a/transports/wasm-ext/src/lib.rs +++ b/transports/wasm-ext/src/lib.rs @@ -206,6 +206,10 @@ impl Transport for ExtTransport { inner: SendWrapper::new(promise.into()), }) } + + fn address_translation(&self, _server: &Multiaddr, _observed: &Multiaddr) -> Option { + None + } } /// Future that dial a remote through an external transport. @@ -480,11 +484,7 @@ impl Drop for Connection { /// Returns true if `err` is an error about an address not being supported. fn is_not_supported_error(err: &JsValue) -> bool { if let Some(err) = err.dyn_ref::() { - if String::from(err.name()) == "NotSupportedError" { - true - } else { - false - } + err.name() == "NotSupportedError" } else { false } diff --git a/transports/websocket/CHANGELOG.md b/transports/websocket/CHANGELOG.md index 279b25cd..9e4d5160 100644 --- a/transports/websocket/CHANGELOG.md +++ b/transports/websocket/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.28.0 [2021-01-12] + +- Update dependencies. + # 0.27.0 [2020-12-17] - Update `libp2p-core`. 
diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index a91e1f63..c00ee7d6 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -2,7 +2,7 @@ name = "fluence-fork-libp2p-websocket" edition = "2018" description = "WebSocket transport for libp2p" -version = "0.27.1" +version = "0.28.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,21 +13,19 @@ categories = ["network-programming", "asynchronous"] name = "libp2p_websocket" [dependencies] -async-tls = "0.11.0" +futures-rustls = "0.21" either = "1.5.3" futures = "0.3.1" -libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" } +libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" } log = "0.4.8" quicksink = "0.1" -rustls = "0.19.0" rw-stream-sink = "0.2.0" soketto = { version = "0.4.1", features = ["deflate"] } url = "2.1" -webpki = "0.21" webpki-roots = "0.21" [dev-dependencies] -libp2p-tcp = { path = "../tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" } +libp2p-tcp = { path = "../tcp", package = "fluence-fork-libp2p-tcp" } [package.metadata.workspaces] independent = true diff --git a/transports/websocket/src/framed.rs b/transports/websocket/src/framed.rs index 9f6d6efd..d5bd63b8 100644 --- a/transports/websocket/src/framed.rs +++ b/transports/websocket/src/framed.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use async_tls::{client, server}; +use futures_rustls::{webpki, client, server}; use crate::{error::Error, tls}; use either::Either; use futures::{future::BoxFuture, prelude::*, ready, stream::BoxStream}; @@ -262,6 +262,10 @@ where Ok(Box::pin(future)) } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.transport.address_translation(server, observed) + } } impl WsConfig @@ -306,7 +310,7 @@ where if use_tls { // begin TLS session let dns_name = dns_name.expect("for use_tls we have checked that dns_name is some"); trace!("starting TLS handshake with {}", address); - let stream = self.tls_config.client.connect(&dns_name, stream) + let stream = self.tls_config.client.connect(dns_name.as_ref(), stream) .map_err(|e| { debug!("TLS handshake with {} failed: {}", address, e); Error::Tls(tls::Error::from(e)) @@ -586,4 +590,3 @@ where .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) } } - diff --git a/transports/websocket/src/lib.rs b/transports/websocket/src/lib.rs index 10026a42..0ee346fd 100644 --- a/transports/websocket/src/lib.rs +++ b/transports/websocket/src/lib.rs @@ -113,6 +113,10 @@ where fn dial(self, addr: Multiaddr) -> Result> { self.transport.map(wrap_connection as WrapperFn).dial(addr) } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.transport.address_translation(server, observed) + } } /// Type alias corresponding to `framed::WsConfig::Listener`. diff --git a/transports/websocket/src/tls.rs b/transports/websocket/src/tls.rs index 7ffdd057..d72535cd 100644 --- a/transports/websocket/src/tls.rs +++ b/transports/websocket/src/tls.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use async_tls::{TlsConnector, TlsAcceptor}; +use futures_rustls::{rustls, webpki, TlsConnector, TlsAcceptor}; use std::{fmt, io, sync::Arc}; /// TLS configuration. 
@@ -130,6 +130,7 @@ pub(crate) fn dns_name_ref(name: &str) -> Result, Error> /// TLS related errors. #[derive(Debug)] +#[non_exhaustive] pub enum Error { /// An underlying I/O error. Io(io::Error), @@ -137,9 +138,6 @@ pub enum Error { Tls(Box), /// The DNS name was invalid. InvalidDnsName(String), - - #[doc(hidden)] - __Nonexhaustive } impl fmt::Display for Error { @@ -148,7 +146,6 @@ impl fmt::Display for Error { Error::Io(e) => write!(f, "i/o error: {}", e), Error::Tls(e) => write!(f, "tls error: {}", e), Error::InvalidDnsName(n) => write!(f, "invalid DNS name: {}", n), - Error::__Nonexhaustive => f.write_str("__Nonexhaustive") } } } @@ -158,7 +155,7 @@ impl std::error::Error for Error { match self { Error::Io(e) => Some(e), Error::Tls(e) => Some(&**e), - Error::InvalidDnsName(_) | Error::__Nonexhaustive => None + Error::InvalidDnsName(_) => None } } }