Merge pull request #48 from fluencelabs/merge_0.36

Merge 0.36
This commit is contained in:
folex 2021-03-09 18:13:27 +03:00 committed by GitHub
commit d1ed9e76bc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
173 changed files with 3031 additions and 4568 deletions

View File

@ -13,20 +13,20 @@ jobs:
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.6.0
uses: styfle/cancel-workflow-action@0.8.0
with:
access_token: ${{ github.token }}
- uses: actions/checkout@v2
- name: Cache CARGO_HOME
uses: actions/cache@v2
uses: actions/cache@v2.1.4
with:
path: ~/.cargo
key: cargo-home-${{ hashFiles('Cargo.toml') }}
- name: Cache cargo build
uses: actions/cache@v2
uses: actions/cache@v2.1.4
with:
path: target
key: cargo-build-target-${{ hashFiles('Cargo.toml') }}
@ -47,7 +47,7 @@ jobs:
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.6.0
uses: styfle/cancel-workflow-action@0.8.0
with:
access_token: ${{ github.token }}
@ -71,13 +71,13 @@ jobs:
run: apt-get install -y cmake
- name: Cache CARGO_HOME
uses: actions/cache@v2
uses: actions/cache@v2.1.4
with:
path: ~/.cargo
key: cargo-home-${{ hashFiles('Cargo.toml') }}
- name: Cache cargo build
uses: actions/cache@v2
uses: actions/cache@v2.1.4
with:
path: target
key: wasm-cargo-build-target-${{ hashFiles('Cargo.toml') }}
@ -95,7 +95,7 @@ jobs:
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.6.0
uses: styfle/cancel-workflow-action@0.8.0
with:
access_token: ${{ github.token }}
@ -109,7 +109,7 @@ jobs:
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.6.0
uses: styfle/cancel-workflow-action@0.8.0
with:
access_token: ${{ github.token }}
@ -123,13 +123,13 @@ jobs:
components: clippy
- name: Cache CARGO_HOME
uses: actions/cache@v2
uses: actions/cache@v2.1.4
with:
path: ~/.cargo
key: cargo-home-${{ hashFiles('Cargo.toml') }}
- name: Cache cargo build
uses: actions/cache@v2
uses: actions/cache@v2.1.4
with:
path: target
key: cargo-build-target-${{ hashFiles('Cargo.toml') }}
@ -138,14 +138,14 @@ jobs:
uses: actions-rs/cargo@v1
with:
command: clippy
args: -- -A clippy::mutable_key_type -A clippy::type_complexity
args: -- -A clippy::type_complexity -A clippy::pedantic -A clippy::style
run-benchmarks:
runs-on: ubuntu-latest
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.6.0
uses: styfle/cancel-workflow-action@0.8.0
with:
access_token: ${{ github.token }}
@ -158,13 +158,13 @@ jobs:
override: true
- name: Cache CARGO_HOME
uses: actions/cache@v2
uses: actions/cache@v2.1.4
with:
path: ~/.cargo
key: cargo-home-${{ hashFiles('Cargo.toml') }}
- name: Cache cargo build
uses: actions/cache@v2
uses: actions/cache@v2.1.4
with:
path: target
key: cargo-build-target-${{ hashFiles('Cargo.toml') }}
@ -183,20 +183,20 @@ jobs:
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.6.0
uses: styfle/cancel-workflow-action@0.8.0
with:
access_token: ${{ github.token }}
- uses: actions/checkout@v2
- name: Cache CARGO_HOME
uses: actions/cache@v2
uses: actions/cache@v2.1.4
with:
path: ~/.cargo
key: cargo-home-${{ hashFiles('Cargo.toml') }}
- name: Cache cargo build
uses: actions/cache@v2
uses: actions/cache@v2.1.4
with:
path: target
key: cargo-build-target-${{ hashFiles('Cargo.toml') }}

View File

@ -1,45 +1,84 @@
# Individual crates
## Main APIs
- [`libp2p-core` CHANGELOG](core/CHANGELOG.md)
- [`libp2p-deflate` CHANGELOG](protocols/deflate/CHANGELOG.md)
- [`libp2p-dns` CHANGELOG](transports/dns/CHANGELOG.md)
- [`libp2p-swarm` CHANGELOG](swarm/CHANGELOG.md)
- [`libp2p-swarm-derive` CHANGELOG](swarm-derive/CHANGELOG.md)
## Application Protocols
- [`libp2p-floodsub` CHANGELOG](protocols/floodsub/CHANGELOG.md)
- [`libp2p-gossipsub` CHANGELOG](protocols/gossipsub/CHANGELOG.md)
- [`libp2p-identify` CHANGELOG](protocols/identify/CHANGELOG.md)
- [`libp2p-kad` CHANGELOG](protocols/kad/CHANGELOG.md)
- [`libp2p-mdns` CHANGELOG](protocols/mdns/CHANGELOG.md)
- [`libp2p-mplex` CHANGELOG](muxers/mplex/CHANGELOG.md)
- [`libp2p-noise` CHANGELOG](protocols/noise/CHANGELOG.md)
- [`libp2p-ping` CHANGELOG](protocols/ping/CHANGELOG.md)
- [`libp2p-plaintext` CHANGELOG](protocols/plaintext/CHANGELOG.md)
- [`libp2p-pnet` CHANGELOG](protocols/pnet/CHANGELOG.md)
- [`libp2p-request-response` CHANGELOG](protocols/request-response/CHANGELOG.md)
- [`libp2p-secio` CHANGELOG](protocols/secio/CHANGELOG.md)
- [`libp2p-swarm` CHANGELOG](swarm/CHANGELOG.md)
## Transport Protocols & Upgrades
- [`libp2p-deflate` CHANGELOG](transports/deflate/CHANGELOG.md)
- [`libp2p-dns` CHANGELOG](transports/dns/CHANGELOG.md)
- [`libp2p-noise` CHANGELOG](transports/noise/CHANGELOG.md)
- [`libp2p-plaintext` CHANGELOG](transports/plaintext/CHANGELOG.md)
- [`libp2p-pnet` CHANGELOG](transports/pnet/CHANGELOG.md)
- [`libp2p-tcp` CHANGELOG](transports/tcp/CHANGELOG.md)
- [`libp2p-uds` CHANGELOG](transports/uds/CHANGELOG.md)
- [`libp2p-wasm-ext` CHANGELOG](transports/wasm-ext/CHANGELOG.md)
- [`libp2p-websocket` CHANGELOG](transports/websocket/CHANGELOG.md)
## Multiplexers
- [`libp2p-mplex` CHANGELOG](muxers/mplex/CHANGELOG.md)
- [`libp2p-yamux` CHANGELOG](muxers/yamux/CHANGELOG.md)
- [`multistream-select` CHANGELOG](misc/multistream-select/CHANGELOG.md)
## Utilities
- [`parity-multiaddr` CHANGELOG](misc/multiaddr/CHANGELOG.md)
- [`libp2p-core-derive` CHANGELOG](misc/core-derive/CHANGELOG.md)
- [`multistream-select` CHANGELOG](misc/multistream-select/CHANGELOG.md)
# Version 0.34.0 [unreleased]
# `libp2p` facade crate
- Update `libp2p-request-response`.
## Version 0.36.0 [unreleased]
# Version 0.33.0 [2020-12-17]
- Update libp2p crates.
- Do not leak default features from libp2p crates.
[PR 1986](https://github.com/libp2p/rust-libp2p/pull/1986).
## Version 0.35.1 [2021-02-17]
- Update `libp2p-yamux` to latest patch version.
## Version 0.35.0 [2021-02-15]
- Use `libp2p-swarm-derive`, the former `libp2p-core-derive`.
- Update `libp2p-deflate`, `libp2p-gossipsub`, `libp2p-mdns`, `libp2p-request-response`,
`libp2p-swarm` and `libp2p-tcp`.
## Version 0.34.0 [2021-01-12]
- Update `libp2p-core` and all dependent crates.
# Version 0.32.2 [2020-12-10]
- The `tcp-async-std` feature is now `tcp-async-io`, still
enabled by default.
## Version 0.33.0 [2020-12-17]
- Update `libp2p-core` and all dependent crates.
## Version 0.32.2 [2020-12-10]
- Update `libp2p-websocket`.
# Version 0.32.1 [2020-12-09]
## Version 0.32.1 [2020-12-09]
- Update minimum patch version of `libp2p-websocket`.
# Version 0.32.0 [2020-12-08]
## Version 0.32.0 [2020-12-08]
- Update `libp2p-request-response`.
@ -47,57 +86,57 @@
- Update `libp2p-websocket` minimum patch version.
# Version 0.31.2 [2020-12-02]
## Version 0.31.2 [2020-12-02]
- Bump minimum `libp2p-core` patch version.
# Version 0.31.1 [2020-11-26]
## Version 0.31.1 [2020-11-26]
- Bump minimum `libp2p-tcp` patch version.
# Version 0.31.0 [2020-11-25]
## Version 0.31.0 [2020-11-25]
- Update `multistream-select` and all dependent crates.
# Version 0.30.1 [2020-11-11]
## Version 0.30.1 [2020-11-11]
- Update `libp2p-plaintext`.
# Version 0.30.0 [2020-11-09]
## Version 0.30.0 [2020-11-09]
- Update `libp2p-mdns`, `libp2p-tcp` and `libp2p-uds` as well as `libp2p-core`
and all its dependers.
# Version 0.29.1 [2020-10-20]
## Version 0.29.1 [2020-10-20]
- Update `libp2p-core`.
# Version 0.29.0 [2020-10-16]
## Version 0.29.0 [2020-10-16]
- Update `libp2p-core`, `libp2p-floodsub`, `libp2p-gossipsub`, `libp2p-mplex`,
`libp2p-noise`, `libp2p-plaintext`, `libp2p-pnet`, `libp2p-request-response`,
`libp2p-swarm`, `libp2p-tcp`, `libp2p-websocket` and `parity-multiaddr`.
# Version 0.28.1 [2020-09-10]
## Version 0.28.1 [2020-09-10]
- Update to `libp2p-core` `0.22.1`.
# Version 0.28.0 [2020-09-09]
## Version 0.28.0 [2020-09-09]
- Update `libp2p-yamux` to `0.25.0`. *Step 4 of 4 in a multi-release
upgrade process.* See the `libp2p-yamux` CHANGELOG for details.
# Version 0.27.0 [2020-09-09]
## Version 0.27.0 [2020-09-09]
- Update `libp2p-yamux` to `0.24.0`. *Step 3 of 4 in a multi-release
upgrade process.* See the `libp2p-yamux` CHANGELOG for details.
# Version 0.26.0 [2020-09-09]
## Version 0.26.0 [2020-09-09]
- Update `libp2p-yamux` to `0.23.0`. *Step 2 of 4 in a multi-release
upgrade process.* See the `libp2p-yamux` CHANGELOG for details.
# Version 0.25.0 [2020-09-09]
## Version 0.25.0 [2020-09-09]
- Remove the deprecated `libp2p-secio` dependency. To continue to use
SECIO, add an explicit dependency on `libp2p-secio`. However,
@ -114,12 +153,12 @@ changelog for details about the `LegacyConfig`.
[PR 1714]: https://github.com/libp2p/rust-libp2p/pull/1714
# Version 0.24.0 [2020-08-18]
## Version 0.24.0 [2020-08-18]
- Update `libp2p-core`, `libp2p-gossipsub`, `libp2p-kad`, `libp2p-mdns`,
`libp2p-ping`, `libp2p-request-response`, `libp2p-swarm` and dependent crates.
# Version 0.23.0 (2020-08-03)
## Version 0.23.0 (2020-08-03)
**NOTE**: For a smooth upgrade path from `0.21` to `> 0.22`
on an existing deployment, this version must not be skipped
@ -130,7 +169,7 @@ changelog for details about the `LegacyConfig`.
- Refactored bandwidth logging ([PR 1670](https://github.com/libp2p/rust-libp2p/pull/1670)).
# Version 0.22.0 (2020-07-17)
## Version 0.22.0 (2020-07-17)
**NOTE**: For a smooth upgrade path from `0.21` to `> 0.22`
on an existing deployment using `libp2p-noise`, this version
@ -138,11 +177,11 @@ must not be skipped!
- Bump `libp2p-noise` dependency to `0.21`.
# Version 0.21.1 (2020-07-02)
## Version 0.21.1 (2020-07-02)
- Bump `libp2p-websockets` lower bound.
# Version 0.21.0 (2020-07-01)
## Version 0.21.0 (2020-07-01)
- Conditional compilation fixes for the `wasm32-wasi` target
([PR 1633](https://github.com/libp2p/rust-libp2p/pull/1633)).
@ -152,7 +191,7 @@ must not be skipped!
- Updated libp2p dependencies.
# Version 0.19.1 (2020-05-25)
## Version 0.19.1 (2020-05-25)
- Temporarily pin all `async-std` dependencies to `< 1.6`.
[PR 1589](https://github.com/libp2p/rust-libp2p/pull/1589)
@ -160,7 +199,7 @@ must not be skipped!
- `libp2p-core-derive`: Fully qualified std::result::Result in macro
[PR 1587](https://github.com/libp2p/rust-libp2p/pull/1587)
# Version 0.19.0 (2020-05-18)
## Version 0.19.0 (2020-05-18)
- `libp2p-core`, `libp2p-swarm`: Added support for multiple dialing
attempts per peer, with a configurable limit.
@ -218,12 +257,12 @@ must not be skipped!
be supported. IPv4 listener addresses are not affected by this change.
[PR 1555](https://github.com/libp2p/rust-libp2p/pull/1555)
# Version 0.18.1 (2020-04-17)
## Version 0.18.1 (2020-04-17)
- `libp2p-swarm`: Make sure inject_dial_failure is called in all situations.
[PR 1549](https://github.com/libp2p/rust-libp2p/pull/1549)
# Version 0.18.0 (2020-04-09)
## Version 0.18.0 (2020-04-09)
- `libp2p-core`: Treat connection limit errors as pending connection errors.
[PR 1546](https://github.com/libp2p/rust-libp2p/pull/1546)
@ -240,7 +279,7 @@ must not be skipped!
- `libp2p-wasm-ext`: Fix "parsed is null" errors being thrown.
[PR 1535](https://github.com/libp2p/rust-libp2p/pull/1535)
# Version 0.17.0 (2020-04-02)
## Version 0.17.0 (2020-04-02)
- `libp2p-core`: Finished "identity hashing" for peer IDs migration.
[PR 1460](https://github.com/libp2p/rust-libp2p/pull/1460)
@ -287,18 +326,18 @@ must not be skipped!
- `multihash`: Removed the crate in favour of the upstream crate.
[PR 1472](https://github.com/libp2p/rust-libp2p/pull/1472)
# Version 0.16.2 (2020-02-28)
## Version 0.16.2 (2020-02-28)
- Fixed yamux connections not properly closing and being stuck in the `CLOSE_WAIT` state.
- Added a `websocket_transport()` function in `libp2p-wasm-ext`, behind a Cargo feature.
- Fixed ambiguity in `IntoProtocolsHandler::select` vs `ProtocolsHandler::select` in the `NetworkBehaviour` custom derive.
# Version 0.16.1 (2020-02-18)
## Version 0.16.1 (2020-02-18)
- Fixed wrong representation of `PeerId`s being used in `Kademlia::get_closest_peers`.
- Implemented `FusedStream` for `Swarm`.
# Version 0.16.0 (2020-02-13)
## Version 0.16.0 (2020-02-13)
- Removed the `Substream` associated type from the `ProtocolsHandler` trait. The type of the substream is now always `libp2p::swarm::NegotiatedSubstream`.
- As a consequence of the previous change, most of the implementations of the `NetworkBehaviour` trait provided by libp2p (`Ping`, `Identify`, `Kademlia`, `Floodsub`, `Gossipsub`) have lost a generic parameter.
@ -314,7 +353,7 @@ must not be skipped!
- All crates prefixed with `libp2p-` now use the same version number.
- Added a new variant `ListenerEvent::Error` for listeners to report non-fatal errors. `libp2p-tcp` uses this variant to report errors that happen on remote sockets before they have been accepted and errors when trying to determine the local machine's IP address.
# Version 0.15.0 (2020-01-24)
## Version 0.15.0 (2020-01-24)
- Added `libp2p-gossipsub`.
- Added `SwarmBuilder::executor` to allow configuring which tasks executor to use.
@ -327,7 +366,7 @@ must not be skipped!
- Fixed `libp2p-kad` keeping connections alive when it shouldn't.
- Fixed `InboundUpgrade` not always properly implemented on `NoiseConfig`.
# Version 0.14.0-alpha.1 (2020-01-07)
## Version 0.14.0-alpha.1 (2020-01-07)
- Upgraded the crate to stable futures.
- Use varints instead of fixed sized (4 byte) integers to delimit plaintext 2.0 messages to align implementation with the specification.
@ -339,16 +378,16 @@ must not be skipped!
- Revamped the API of `libp2p_websockets::framed`.
- Added protocol string to `Error::UnknownProtocolString`.
# Version 0.13.2 (2020-01-02)
## Version 0.13.2 (2020-01-02)
- Fixed the `libp2p-noise` handshake not flushing the underlying stream before waiting for a response.
- Fixed semver issue with the `protobuf` crate.
# Version 0.13.1 (2019-11-13)
## Version 0.13.1 (2019-11-13)
- Maintenance release to bump dependencies and deal with an accidental breaking change in multihash 0.1.4.
# Version 0.13.0 (2019-11-05)
## Version 0.13.0 (2019-11-05)
- Reworked the transport upgrade API. See https://github.com/libp2p/rust-libp2p/pull/1240 for more information.
- Added a parameter allowing to choose the protocol negotiation protocol when upgrading a connection or a substream. See https://github.com/libp2p/rust-libp2p/pull/1245 for more information.
@ -361,7 +400,7 @@ must not be skipped!
- Added some `Debug` trait implementations.
- Fixed potential arithmetic overflows in `libp2p-kad` and `multistream-select`.
# Version 0.12.0 (2019-08-15)
## Version 0.12.0 (2019-08-15)
- In some situations, `multistream-select` will now assume that protocol negotiation immediately succeeds. If it turns out that it failed, an error is generated when reading or writing from/to the stream.
- Replaced `listen_addr` with `local_addr` in events related to incoming connections. The address no longer has to match a previously-reported address.
@ -372,7 +411,7 @@ must not be skipped!
- Added `Toggle::is_enabled()`.
- Removed `IdentifyTransport`.
# Version 0.11.0 (2019-07-18)
## Version 0.11.0 (2019-07-18)
- `libp2p-kad`: Completed the core functionality of the record storage API, thereby extending the `RecordStore` for provider records. All records expire by default and are subject to regular republication and caching as per the Kademlia spec(s). Expiration and publication intervals are configurable through the `KademliaConfig`.
- `libp2p-kad`: The routing table now never stores peers without a known (listen) address. In particular, on receiving a new inbound connection, the Kademlia behaviour now emits `KademliaEvent::UnroutablePeer` to indicate that in order for the peer to be added to the routing table and hence considered a reachable node in the DHT, a listen address of the peer must be discovered and reported via `Kademlia::add_address`. This is usually achieved through the use of the `Identify` protocol on the same connection(s).
@ -383,7 +422,7 @@ must not be skipped!
- Replaced unbounded channels with bounded ones at the boundary between the `Network` (formerly `RawSwarm`) and `NodeHandler`. The node handlers will now wait if the main task is busy, instead of continuing to push events to the channel.
- Fixed the `address_translation` function ignoring `/dns` addresses.
# Version 0.10.0 (2019-06-25)
## Version 0.10.0 (2019-06-25)
- `PollParameters` is now a trait instead of a struct.
- The `Swarm` can now be customized with connection information.
@ -392,12 +431,12 @@ must not be skipped!
- Improved the heuristics for determining external multiaddresses based on reports.
- Various fixes to Kademlia iterative queries and the WebSockets transport.
# Version 0.9.1 (2019-06-05)
## Version 0.9.1 (2019-06-05)
- `EitherOutput` now implements `Stream` and `Sink` if their variants also implement these traits.
- `libp2p::websocket::error::Error` now implements `Sync`.
# Version 0.9.0 (2019-06-04)
## Version 0.9.0 (2019-06-04)
- Major fixes and performance improvements to libp2p-kad.
- Initial prototype for record storage in libp2p-kad.
@ -409,11 +448,11 @@ must not be skipped!
- Added some utility functions in `core::identity::secp256k1`.
- It is now possible to inject an artificial connection in the `RawSwarm`.
# Version 0.8.1 (2019-05-15)
## Version 0.8.1 (2019-05-15)
- Fixed a vulnerability in ED25519 signatures verification in libp2p-core.
# Version 0.8.0 (2019-05-15)
## Version 0.8.0 (2019-05-15)
- Crate now successfully runs from within the browser when compiled to WASM.
- Modified the constructors of `NoiseConfig` to accept any type of public key. The Noise handshake has consequently been modified.
@ -429,11 +468,11 @@ must not be skipped!
- Added `multiaddr::from_url`.
- Added `OptionalTransport`.
# Version 0.7.1 (2019-05-15)
## Version 0.7.1 (2019-05-15)
- Fixed a vulnerability in ED25519 signatures verification in libp2p-core.
# Version 0.7.0 (2019-04-23)
## Version 0.7.0 (2019-04-23)
- Fixed the inactive connections shutdown mechanism not working.
- `Transport::listen_on` must now return a `Stream` that produces `ListenEvent`s. This makes it possible to notify about listened addresses at a later point in time.
@ -449,7 +488,7 @@ must not be skipped!
- Reworked the `PingEvent`.
- Renamed `KeepAlive::Forever` to `Yes` and `KeepAlive::Now` to `No`.
# Version 0.6.0 (2019-03-29)
## Version 0.6.0 (2019-03-29)
- Replaced `NetworkBehaviour::inject_dial_failure` with `inject_dial_failure` and
`inject_addr_reach_failure`. The former is called when we have finished trying to dial a node
@ -462,7 +501,7 @@ must not be skipped!
- Added `Swarm::external_addresses`.
- Added a `core::swarm::toggle::Toggle` that allows having a disabled `NetworkBehaviour`.
# Version 0.5.0 (2019-03-13)
## Version 0.5.0 (2019-03-13)
- Moved the `SecioKeypair` struct in `core/identity` and renamed it to `Keypair`.
- mplex now supports half-closed substreams.
@ -479,15 +518,15 @@ must not be skipped!
- Reworked some API of `core/nodes/node.rs` and `core/nodes/handled_node.rs`.
- The core now works even outside of a tokio context.
# Version 0.4.2 (2019-02-27)
## Version 0.4.2 (2019-02-27)
- Fixed periodic pinging not working.
# Version 0.4.1 (2019-02-20)
## Version 0.4.1 (2019-02-20)
- Fixed wrong version of libp2p-noise.
# Version 0.4.0 (2019-02-20)
## Version 0.4.0 (2019-02-20)
- The `multiaddr!` macro has been moved to the `multiaddr` crate and is now reexported under the name `build_multiaddr!`.
- Modified the functions in `upgrade::transfer` to be more convenient to use.
@ -503,12 +542,12 @@ must not be skipped!
- Added `IdentifyEvent::SendBack`, when we send back our information.
- Rewrote the `MemoryTransport` to be similar to the `TcpConfig`.
# Version 0.3.1 (2019-02-02)
## Version 0.3.1 (2019-02-02)
- Added `NetworkBehaviour::inject_replaced` that is called whenever we replace a connection with a different connection to the same peer.
- Fixed various issues with Kademlia.
# Version 0.3.0 (2019-01-30)
## Version 0.3.0 (2019-01-30)
- Removed the `topology` module and everything it contained, including the `Topology` trait.
- Added `libp2p-noise` that supports Noise handshakes, as an alternative to `libp2p-secio`.
@ -527,15 +566,15 @@ must not be skipped!
- Added `SecioKeypair::ed25519_raw_key()`.
- Fix improper connection shutdown in `ProtocolsHandler`.
# Version 0.2.2 (2019-01-14)
## Version 0.2.2 (2019-01-14)
- Fixed improper dependencies versions causing deriving `NetworkBehaviour` to generate an error.
# Version 0.2.1 (2019-01-14)
## Version 0.2.1 (2019-01-14)
- Added the `IntoNodeHandler` and `IntoProtocolsHandler` traits, allowing node handlers and protocol handlers to know the `PeerId` of the node they are interacting with.
# Version 0.2 (2019-01-10)
## Version 0.2 (2019-01-10)
- The `Transport` trait now has an `Error` associated type instead of always using `std::io::Error`.
- Merged `PeriodicPing` and `PingListen` into one `Ping` behaviour.

View File

@ -2,7 +2,7 @@
name = "fluence-fork-libp2p"
edition = "2018"
description = "Peer-to-peer networking library"
version = "0.34.1"
version = "0.36.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -28,7 +28,7 @@ default = [
"pnet",
"request-response",
"secp256k1",
"tcp-async-std",
"tcp-async-io",
"uds",
"wasm-ext",
"websocket",
@ -47,7 +47,7 @@ ping = ["libp2p-ping"]
plaintext = ["libp2p-plaintext"]
pnet = ["libp2p-pnet"]
request-response = ["libp2p-request-response"]
tcp-async-std = ["libp2p-tcp", "libp2p-tcp/async-std"]
tcp-async-io = ["libp2p-tcp", "libp2p-tcp/async-io"]
tcp-tokio = ["libp2p-tcp", "libp2p-tcp/tokio"]
uds = ["libp2p-uds"]
wasm-ext = ["libp2p-wasm-ext"]
@ -61,67 +61,66 @@ all-features = true
[dependencies]
atomic = "0.5.0"
bytes = "0.5"
bytes = "1"
futures = "0.3.1"
lazy_static = "1.2"
libp2p-core = { version = "0.26.1", path = "core", package = "fluence-fork-libp2p-core" }
libp2p-core-derive = { version = "0.21.1", path = "misc/core-derive", package = "fluence-fork-libp2p-core-derive" }
libp2p-floodsub = { version = "0.26.1", path = "protocols/floodsub", optional = true, package = "fluence-fork-libp2p-floodsub" }
libp2p-gossipsub = { version = "0.26.1", path = "./protocols/gossipsub", optional = true, package = "fluence-fork-libp2p-gossipsub" }
libp2p-identify = { version = "0.26.1", path = "protocols/identify", optional = true, package = "fluence-fork-libp2p-identify" }
libp2p-kad = { version = "0.27.0", path = "protocols/kad", optional = true, package = "fluence-fork-libp2p-kad" }
libp2p-mplex = { version = "0.26.1", path = "muxers/mplex", optional = true, package = "fluence-fork-libp2p-mplex" }
libp2p-noise = { version = "0.28.1", path = "protocols/noise", optional = true, package = "fluence-fork-libp2p-noise" }
libp2p-ping = { version = "0.26.1", path = "protocols/ping", optional = true, package = "fluence-fork-libp2p-ping" }
libp2p-plaintext = { version = "0.26.1", path = "protocols/plaintext", optional = true, package = "fluence-fork-libp2p-plaintext" }
libp2p-pnet = { version = "0.20.1", path = "protocols/pnet", optional = true, package = "fluence-fork-libp2p-pnet" }
libp2p-request-response = { version = "0.9.1", path = "protocols/request-response", optional = true, package = "fluence-fork-libp2p-request-response" }
libp2p-swarm = { version = "0.26.1", path = "swarm", package = "fluence-fork-libp2p-swarm" }
libp2p-uds = { version = "0.26.1", path = "transports/uds", optional = true, package = "fluence-fork-libp2p-uds" }
libp2p-wasm-ext = { version = "0.26.1", path = "transports/wasm-ext", optional = true, package = "fluence-fork-libp2p-wasm-ext" }
libp2p-yamux = { version = "0.29.1", path = "muxers/yamux", optional = true, package = "fluence-fork-libp2p-yamux" }
multiaddr = { package = "fluence-fork-parity-multiaddr", version = "0.10.1", path = "misc/multiaddr" }
libp2p-core = { version = "0.27.1", path = "core", default-features = false, package = "fluence-fork-libp2p-core" }
libp2p-floodsub = { version = "0.28.0", path = "protocols/floodsub", optional = true, package = "fluence-fork-libp2p-floodsub" }
libp2p-gossipsub = { version = "0.29.0", path = "./protocols/gossipsub", optional = true, package = "fluence-fork-libp2p-gossipsub" }
libp2p-identify = { version = "0.28.0", path = "protocols/identify", optional = true, package = "fluence-fork-libp2p-identify" }
libp2p-kad = { version = "0.29.0", path = "protocols/kad", optional = true, package = "fluence-fork-libp2p-kad" }
libp2p-mplex = { version = "0.27.1", path = "muxers/mplex", optional = true, package = "fluence-fork-libp2p-mplex" }
libp2p-noise = { version = "0.29.0", path = "transports/noise", optional = true, package = "fluence-fork-libp2p-noise" }
libp2p-ping = { version = "0.28.0", path = "protocols/ping", optional = true, package = "fluence-fork-libp2p-ping" }
libp2p-plaintext = { version = "0.27.1", path = "transports/plaintext", optional = true, package = "fluence-fork-libp2p-plaintext" }
libp2p-pnet = { version = "0.20.0", path = "transports/pnet", optional = true, package = "fluence-fork-libp2p-pnet" }
libp2p-request-response = { version = "0.10.0", path = "protocols/request-response", optional = true, package = "fluence-fork-libp2p-request-response" }
libp2p-swarm = { version = "0.28.0", path = "swarm", package = "fluence-fork-libp2p-swarm" }
libp2p-swarm-derive = { version = "0.22.0", path = "swarm-derive", package = "fluence-fork-libp2p-swarm-derive" }
libp2p-uds = { version = "0.27.0", path = "transports/uds", optional = true, package = "fluence-fork-libp2p-uds" }
libp2p-wasm-ext = { version = "0.27.0", path = "transports/wasm-ext", default-features = false, optional = true, package = "fluence-fork-libp2p-wasm-ext" }
libp2p-yamux = { version = "0.30.1", path = "muxers/yamux", optional = true, package = "fluence-fork-libp2p-yamux" }
multiaddr = { package = "fluence-fork-parity-multiaddr", version = "0.11.1", path = "misc/multiaddr" }
parking_lot = "0.11.0"
pin-project = "1.0.0"
smallvec = "1.0"
wasm-timer = "0.2.4"
[target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies]
libp2p-deflate = { package = "fluence-fork-libp2p-deflate", version = "0.26.1", path = "protocols/deflate", optional = true }
libp2p-dns = { package = "fluence-fork-libp2p-dns", version = "0.26.1", path = "transports/dns", optional = true }
libp2p-mdns = { package = "fluence-fork-libp2p-mdns", version = "0.27.0", path = "protocols/mdns", optional = true }
libp2p-tcp = { package = "fluence-fork-libp2p-tcp", version = "0.26.1", path = "transports/tcp", optional = true }
libp2p-websocket = { package = "fluence-fork-libp2p-websocket", version = "0.27.0", path = "transports/websocket", optional = true }
libp2p-deflate = { version = "0.27.1", path = "transports/deflate", optional = true, package = "fluence-fork-libp2p-deflate" }
libp2p-dns = { version = "0.27.0", path = "transports/dns", optional = true, package = "fluence-fork-libp2p-dns" }
libp2p-mdns = { version = "0.29.0", path = "protocols/mdns", optional = true, package = "fluence-fork-libp2p-mdns" }
libp2p-tcp = { version = "0.27.1", path = "transports/tcp", default-features = false, optional = true, package = "fluence-fork-libp2p-tcp" }
libp2p-websocket = { version = "0.28.0", path = "transports/websocket", optional = true, package = "fluence-fork-libp2p-websocket" }
[dev-dependencies]
async-std = "1.6.2"
async-std = { version = "1.6.2", features = ["attributes"] }
env_logger = "0.8.1"
tokio = { version = "0.3", features = ["io-util", "io-std", "stream", "macros", "rt", "rt-multi-thread"] }
trust-graph = "0.2.0"
tokio = { version = "1.0.1", features = ["io-util", "io-std", "macros", "rt", "rt-multi-thread"] }
trust-graph = "0.2.5"
[workspace]
members = [
"core",
"misc/core-derive",
"misc/multiaddr",
"misc/multistream-select",
"misc/peer-id-generator",
"muxers/mplex",
"muxers/yamux",
"protocols/deflate",
"protocols/floodsub",
"protocols/gossipsub",
"protocols/identify",
"protocols/kad",
"protocols/mdns",
"protocols/noise",
"protocols/ping",
"protocols/plaintext",
"protocols/pnet",
"protocols/request-response",
"protocols/secio",
"swarm",
"swarm-derive",
"transports/deflate",
"transports/dns",
"transports/noise",
"transports/plaintext",
"transports/pnet",
"transports/tcp",
"transports/uds",
"transports/websocket",

View File

@ -2,8 +2,6 @@
<a href="http://libp2p.io/"><img src="https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square" /></a>
<a href="http://webchat.freenode.net/?channels=%23libp2p"><img src="https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square" /></a>
<a href="https://riot.permaweb.io/#/room/#libp2p:permaweb.io"><img src="https://img.shields.io/badge/matrix-%23libp2p%3Apermaweb.io-blue.svg?style=flat-square" /> </a>
<a href="https://discord.gg/66KBrm2"><img src="https://img.shields.io/discord/475789330380488707?color=blueviolet&label=discord&style=flat-square" /></a>
[![dependency status](https://deps.rs/repo/github/libp2p/rust-libp2p/status.svg?style=flat-square)](https://deps.rs/repo/github/libp2p/rust-libp2p)
This repository is the central place for Rust development of the [libp2p](https://libp2p.io) spec.
@ -23,6 +21,34 @@ Where to ask questions?
- In the #libp2p IRC channel on freenode.
- By opening an issue in this repository.
## Repository Structure
The main components of this repository are structured as follows:
* `core/`: The implementation of `libp2p-core` with its `Network`,
`Transport` and `StreamMuxer` API on which almost all other crates depend.
* `transports/`: Implementations of transport protocols (e.g. TCP) and protocol upgrades
(e.g. for authenticated encryption, compression, ...) based on the `libp2p-core` `Transport`
API.
* `muxers/`: Implementations of the `StreamMuxer` interface of `libp2p-core`,
e.g. (sub)stream multiplexing protocols on top of (typically TCP) connections.
Multiplexing protocols are (mandatory) `Transport` upgrades.
* `swarm/`: The implementation of `libp2p-swarm` building on `libp2p-core`
with the central interfaces `NetworkBehaviour` and `ProtocolsHandler` used
to implement application protocols (see `protocols/`).
* `protocols/`: Implementations of application protocols based on the
`libp2p-swarm` APIs.
* `misc/`: Utility libraries.
* `examples/`: Worked examples of built-in application protocols (see `protocols/`)
with common `Transport` configurations.
## Notable users
(open a pull request if you want your project to be added here)
@ -31,6 +57,7 @@ Where to ask questions?
- https://github.com/paritytech/substrate
- https://github.com/sigp/lighthouse
- https://github.com/golemfactory/golem-libp2p
- https://github.com/comit-network/comit-rs
- https://github.com/comit-network
- https://github.com/rs-ipfs/rust-ipfs
- https://github.com/marcopoloprotocol/marcopolo
- https://github.com/ChainSafe/forest

View File

@ -1,3 +1,15 @@
# 0.27.1 [2021-02-15]
- Update dependencies.
# 0.27.0 [2021-01-12]
- (Re)add `Transport::address_translation` to permit transport-specific
translations of observed addresses onto listening addresses.
[PR 1887](https://github.com/libp2p/rust-libp2p/pull/1887)
- Update dependencies.
# 0.26.0 [2020-12-17]
- Make `PeerId` be `Copy`, including small `PeerId` API changes.

View File

@ -2,7 +2,7 @@
name = "fluence-fork-libp2p-core"
edition = "2018"
description = "Core traits and structs of libp2p"
version = "0.26.1"
version = "0.27.1"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -23,18 +23,18 @@ futures-timer = "3"
lazy_static = "1.2"
libsecp256k1 = { version = "0.3.1", optional = true }
log = "0.4"
multiaddr = { package = "fluence-fork-parity-multiaddr", version = "0.10.1", path = "../misc/multiaddr" }
multiaddr = { package = "fluence-fork-parity-multiaddr", version = "0.11", path = "../misc/multiaddr" }
multihash = { version = "0.13", default-features = false, features = ["std", "multihash-impl", "identity", "sha2"] }
multistream-select = { version = "0.9.2", path = "../misc/multistream-select", package = "fluence-fork-multistream-select" }
multistream-select = { version = "0.10", path = "../misc/multistream-select", package = "fluence-fork-multistream-select" }
parking_lot = "0.11.0"
pin-project = "1.0.0"
prost = "0.6.1"
prost = "0.7"
rand = "0.7"
rw-stream-sink = "0.2.0"
sha2 = "0.9.1"
smallvec = "1.0"
thiserror = "1.0"
unsigned-varint = "0.5"
unsigned-varint = "0.7"
void = "1"
zeroize = "1"
serde = { version = "1.0.114", default-features = false }
@ -43,17 +43,17 @@ serde = { version = "1.0.114", default-features = false }
ring = { version = "0.16.9", features = ["alloc", "std"], default-features = false }
[dev-dependencies]
async-std = "1.6.2"
async-std = { version = "1.6.2", features = ["attributes"] }
criterion = "0.3"
libp2p-mplex = { path = "../muxers/mplex", package = "fluence-fork-libp2p-mplex" }
libp2p-noise = { path = "../protocols/noise", package = "fluence-fork-libp2p-noise" }
libp2p-tcp = { path = "../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" }
libp2p-noise = { path = "../transports/noise", package = "fluence-fork-libp2p-noise" }
libp2p-tcp = { path = "../transports/tcp", package = "fluence-fork-libp2p-tcp" }
multihash = { version = "0.13", default-features = false, features = ["arb"] }
quickcheck = "0.9.0"
wasm-timer = "0.2"
[build-dependencies]
prost-build = "0.6"
prost-build = "0.7"
[features]
default = ["secp256k1"]

View File

@ -428,6 +428,8 @@ mod tests {
fn dial(self, _: Multiaddr) -> Result<Self::Dial, transport::TransportError<Self::Error>> {
panic!()
}
fn address_translation(&self, _: &Multiaddr, _: &Multiaddr) -> Option<Multiaddr> { None }
}
async_std::task::block_on(async move {
@ -466,6 +468,8 @@ mod tests {
fn dial(self, _: Multiaddr) -> Result<Self::Dial, transport::TransportError<Self::Error>> {
panic!()
}
fn address_translation(&self, _: &Multiaddr, _: &Multiaddr) -> Option<Multiaddr> { None }
}
async_std::task::block_on(async move {

View File

@ -182,13 +182,13 @@ where
},
PoolEvent::ConnectionEvent { ref connection, ref event } => {
f.debug_struct("PoolEvent::ConnectionEvent")
.field("peer", connection.peer_id())
.field("peer", &connection.peer_id())
.field("event", event)
.finish()
},
PoolEvent::AddressChange { ref connection, ref new_endpoint, ref old_endpoint } => {
f.debug_struct("PoolEvent::AddressChange")
.field("peer", connection.peer_id())
.field("peer", &connection.peer_id())
.field("new_endpoint", new_endpoint)
.field("old_endpoint", old_endpoint)
.finish()
@ -325,8 +325,8 @@ impl<TInEvent, TOutEvent, THandler, TTransErr, THandlerErr>
// "established" connection.
let future = future.and_then({
let endpoint = endpoint.clone();
let expected_peer = peer.clone();
let local_id = self.local_id.clone();
let expected_peer = peer;
let local_id = self.local_id;
move |(peer_id, muxer)| {
if let Some(peer) = expected_peer {
if peer != peer_id {
@ -376,7 +376,7 @@ impl<TInEvent, TOutEvent, THandler, TTransErr, THandlerErr>
self.counters.check_max_established_per_peer(self.num_peer_established(&i.peer_id))?;
let id = self.manager.add(c, i.clone());
self.counters.inc_established(&i.endpoint);
self.established.entry(i.peer_id.clone()).or_default().insert(id, i.endpoint);
self.established.entry(i.peer_id).or_default().insert(id, i.endpoint);
Ok(id)
}
@ -667,7 +667,7 @@ impl<TInEvent, TOutEvent, THandler, TTransErr, THandlerErr>
}
// Add the connection to the pool.
let peer = entry.connected().peer_id.clone();
let peer = entry.connected().peer_id;
let conns = self.established.entry(peer).or_default();
let num_established = NonZeroU32::new(u32::try_from(conns.len() + 1).unwrap())
.expect("n + 1 is always non-zero; qed");
@ -786,8 +786,8 @@ impl<TInEvent> EstablishedConnection<'_, TInEvent> {
}
/// Returns the identity of the connected peer.
pub fn peer_id(&self) -> &PeerId {
&self.entry.connected().peer_id
pub fn peer_id(&self) -> PeerId {
self.entry.connected().peer_id
}
/// Returns the local connection ID.
@ -842,6 +842,7 @@ where
I: Iterator<Item = ConnectionId>
{
/// Obtains the next connection, if any.
#[allow(clippy::should_implement_trait)]
pub fn next(&mut self) -> Option<EstablishedConnection<'_, TInEvent>>
{
while let Some(id) = self.ids.next() {

View File

@ -477,4 +477,11 @@ where
},
}
}
fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr> {
match self {
EitherTransport::Left(a) => a.address_translation(server, observed),
EitherTransport::Right(b) => b.address_translation(server, observed),
}
}
}

View File

@ -30,7 +30,6 @@ use crate::{
Executor,
Multiaddr,
PeerId,
address_translation,
connection::{
ConnectionId,
ConnectionLimit,
@ -145,11 +144,10 @@ where
local_peer_id: PeerId,
config: NetworkConfig,
) -> Self {
let pool_local_id = local_peer_id.clone();
Network {
local_peer_id,
listeners: ListenersStream::new(transport),
pool: Pool::new(pool_local_id, config.manager_config, config.limits),
pool: Pool::new(local_peer_id, config.manager_config, config.limits),
dialing: Default::default(),
}
}
@ -176,30 +174,27 @@ where
self.listeners.listen_addrs()
}
/// Call this function in order to know which address remotes should dial to
/// access your local node.
/// Maps the given `observed_addr`, representing an address of the local
/// node observed by a remote peer, onto the locally known listen addresses
/// to yield one or more addresses of the local node that may be publicly
/// reachable.
///
/// When receiving an observed address on a tcp connection that we initiated, the observed
/// address contains our tcp dial port, not our tcp listen port. We know which port we are
/// listening on, thereby we can replace the port within the observed address.
///
/// When receiving an observed address on a tcp connection that we did **not** initiate, the
/// observed address should contain our listening port. In case it differs from our listening
/// port there might be a proxy along the path.
///
/// # Arguments
///
/// * `observed_addr` - should be an address a remote observes you as, which can be obtained for
/// example with the identify protocol.
/// I.e. this method incorporates the view of other peers into the listen
/// addresses seen by the local node to account for possible IP and port
/// mappings performed by intermediate network devices in an effort to
/// obtain addresses for the local peer that are also reachable for peers
/// other than the peer who reported the `observed_addr`.
///
/// The translation is transport-specific. See [`Transport::address_translation`].
pub fn address_translation<'a>(&'a self, observed_addr: &'a Multiaddr)
-> impl Iterator<Item = Multiaddr> + 'a
where
TMuxer: 'a,
THandler: 'a,
{
let transport = self.listeners.transport();
let mut addrs: Vec<_> = self.listen_addrs()
.filter_map(move |server| address_translation(server, observed_addr))
.filter_map(move |server| transport.address_translation(server, observed_addr))
.collect();
// remove duplicates
@ -384,7 +379,7 @@ where
let event = match self.pool.poll(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(PoolEvent::ConnectionEstablished { connection, num_established }) => {
if let hash_map::Entry::Occupied(mut e) = self.dialing.entry(connection.peer_id().clone()) {
if let hash_map::Entry::Occupied(mut e) = self.dialing.entry(connection.peer_id()) {
e.get_mut().retain(|s| s.current.0 != connection.id());
if e.get().is_empty() {
e.remove();
@ -530,7 +525,7 @@ where
if let Some(pos) = attempts.iter().position(|s| s.current.0 == id) {
let attempt = attempts.remove(pos);
let last = attempts.is_empty();
Some((peer.clone(), attempt, last))
Some((*peer, attempt, last))
} else {
None
}
@ -549,7 +544,7 @@ where
if let Some(handler) = handler {
let next_attempt = attempt.remaining.remove(0);
let opts = DialingOpts {
peer: peer_id.clone(),
peer: peer_id,
handler,
address: next_attempt,
remaining: attempt.remaining

View File

@ -223,7 +223,7 @@ where
};
let id = network.dial_peer(DialingOpts {
peer: peer_id.clone(),
peer: peer_id,
handler,
address,
remaining: remaining.into_iter().collect(),
@ -435,7 +435,7 @@ where
pub fn attempt(&mut self, id: ConnectionId)
-> Option<DialingAttempt<'_, TInEvent>>
{
if let hash_map::Entry::Occupied(attempts) = self.network.dialing.entry(self.peer_id.clone()) {
if let hash_map::Entry::Occupied(attempts) = self.network.dialing.entry(self.peer_id) {
if let Some(pos) = attempts.get().iter().position(|s| s.current.0 == id) {
if let Some(inner) = self.network.pool.get_outgoing(id) {
return Some(DialingAttempt { pos, inner, attempts })
@ -662,7 +662,8 @@ impl<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr>
}
/// Obtains the next dialing connection, if any.
pub fn next<'b>(&'b mut self) -> Option<DialingAttempt<'b, TInEvent>> {
#[allow(clippy::should_implement_trait)]
pub fn next(&mut self) -> Option<DialingAttempt<'_, TInEvent>> {
// If the number of elements reduced, the current `DialingAttempt` has been
// aborted and iteration needs to continue from the previous position to
// account for the removed element.
@ -676,7 +677,7 @@ impl<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr>
return None
}
if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(self.peer_id.clone()) {
if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(*self.peer_id) {
let id = attempts.get()[self.pos].current.0;
if let Some(inner) = self.pool.get_outgoing(id) {
let conn = DialingAttempt { pos: self.pos, inner, attempts };
@ -697,7 +698,7 @@ impl<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr>
return None
}
if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(self.peer_id.clone()) {
if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(*self.peer_id) {
let id = attempts.get()[self.pos].current.0;
if let Some(inner) = self.pool.get_outgoing(id) {
return Some(DialingAttempt { pos: self.pos, inner, attempts })

View File

@ -128,6 +128,11 @@ pub trait Transport {
where
Self: Sized;
/// Performs a transport-specific mapping of an address `observed` by
/// a remote onto a local `listen` address to yield an address for
/// the local node that may be reachable for other peers.
fn address_translation(&self, listen: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr>;
/// Boxes the transport, including custom transport errors.
fn boxed(self) -> boxed::Boxed<Self::Output>
where

View File

@ -69,6 +69,10 @@ where
};
Ok(future)
}
fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr> {
self.transport.address_translation(server, observed)
}
}
/// Custom `Stream` to avoid boxing.

View File

@ -51,6 +51,7 @@ type ListenerUpgrade<O> = Pin<Box<dyn Future<Output = io::Result<O>> + Send>>;
trait Abstract<O> {
fn listen_on(&self, addr: Multiaddr) -> Result<Listener<O>, TransportError<io::Error>>;
fn dial(&self, addr: Multiaddr) -> Result<Dial<O>, TransportError<io::Error>>;
fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr>;
}
impl<T, O> Abstract<O> for T
@ -78,6 +79,10 @@ where
.map_err(|e| e.map(box_err))?;
Ok(Box::pin(fut) as Dial<_>)
}
fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr> {
Transport::address_translation(self, server, observed)
}
}
impl<O> fmt::Debug for Boxed<O> {
@ -108,6 +113,10 @@ impl<O> Transport for Boxed<O> {
fn dial(self, addr: Multiaddr) -> Result<Self::Dial, TransportError<Self::Error>> {
self.inner.dial(addr)
}
fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr> {
self.inner.address_translation(server, observed)
}
}
fn box_err<E: Error + Send + Sync + 'static>(e: E) -> io::Error {

View File

@ -74,4 +74,12 @@ where
Err(TransportError::MultiaddrNotSupported(addr))
}
fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr> {
if let Some(addr) = self.0.address_translation(server, observed) {
Some(addr)
} else {
self.1.address_translation(server, observed)
}
}
}

View File

@ -67,6 +67,10 @@ impl<TOut> Transport for DummyTransport<TOut> {
fn dial(self, addr: Multiaddr) -> Result<Self::Dial, TransportError<Self::Error>> {
Err(TransportError::MultiaddrNotSupported(addr))
}
fn address_translation(&self, _server: &Multiaddr, _observed: &Multiaddr) -> Option<Multiaddr> {
None
}
}
/// Implementation of `AsyncRead` and `AsyncWrite`. Not meant to be instantiated.

View File

@ -57,6 +57,10 @@ where
let p = ConnectedPoint::Dialer { address: addr };
Ok(MapFuture { inner: future, args: Some((self.fun, p)) })
}
fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr> {
self.transport.address_translation(server, observed)
}
}
/// Custom `Stream` implementation to avoid boxing.

View File

@ -64,6 +64,10 @@ where
Err(err) => Err(err.map(map)),
}
}
fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr> {
self.transport.address_translation(server, observed)
}
}
/// Listening stream for `MapErr`.

View File

@ -191,6 +191,10 @@ impl Transport for MemoryTransport {
DialFuture::new(port).ok_or(TransportError::Other(MemoryTransportError::Unreachable))
}
fn address_translation(&self, _server: &Multiaddr, _observed: &Multiaddr) -> Option<Multiaddr> {
None
}
}
/// Error that can be produced from the `MemoryTransport`.

View File

@ -74,4 +74,12 @@ where
Err(TransportError::MultiaddrNotSupported(addr))
}
}
fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr> {
if let Some(inner) = &self.0 {
inner.address_translation(server, observed)
} else {
None
}
}
}

View File

@ -101,6 +101,10 @@ where
timer: Delay::new(self.outgoing_timeout),
})
}
fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr> {
self.inner.address_translation(server, observed)
}
}
// TODO: can be removed and replaced with an `impl Stream` once impl Trait is fully stable

View File

@ -334,6 +334,10 @@ where
fn listen_on(self, addr: Multiaddr) -> Result<Self::Listener, TransportError<Self::Error>> {
self.0.listen_on(addr)
}
fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr> {
self.0.address_translation(server, observed)
}
}
/// An inbound or outbound upgrade.
@ -383,6 +387,10 @@ where
upgrade: self.upgrade
})
}
fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option<Multiaddr> {
self.inner.address_translation(server, observed)
}
}
/// Errors produced by a transport upgrade.

View File

@ -41,10 +41,12 @@ fn deny_incoming_connec() {
swarm1.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap();
let address = async_std::task::block_on(future::poll_fn(|cx| {
if let Poll::Ready(NetworkEvent::NewListenerAddress { listen_addr, .. }) = swarm1.poll(cx) {
Poll::Ready(listen_addr)
} else {
panic!("Was expecting the listen address to be reported")
match swarm1.poll(cx) {
Poll::Ready(NetworkEvent::NewListenerAddress { listen_addr, .. }) => {
Poll::Ready(listen_addr)
}
Poll::Pending => Poll::Pending,
_ => panic!("Was expecting the listen address to be reported"),
}
}));
@ -67,7 +69,7 @@ fn deny_incoming_connec() {
multiaddr,
error: PendingConnectionError::Transport(_)
}) => {
assert_eq!(peer_id, *swarm1.local_peer_id());
assert_eq!(&peer_id, swarm1.local_peer_id());
assert_eq!(multiaddr, address);
return Poll::Ready(Ok(()));
},
@ -95,15 +97,15 @@ fn dial_self() {
let mut swarm = test_network(NetworkConfig::default());
swarm.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap();
let (local_address, mut swarm) = async_std::task::block_on(
future::lazy(move |cx| {
if let Poll::Ready(NetworkEvent::NewListenerAddress { listen_addr, .. }) = swarm.poll(cx) {
Ok::<_, void::Void>((listen_addr, swarm))
} else {
panic!("Was expecting the listen address to be reported")
let local_address = async_std::task::block_on(future::poll_fn(|cx| {
match swarm.poll(cx) {
Poll::Ready(NetworkEvent::NewListenerAddress { listen_addr, .. }) => {
Poll::Ready(listen_addr)
}
}))
.unwrap();
Poll::Pending => Poll::Pending,
_ => panic!("Was expecting the listen address to be reported"),
}
}));
swarm.dial(&local_address, TestHandler()).unwrap();

View File

@ -36,7 +36,6 @@
//! --features="floodsub mplex noise tcp-tokio mdns-tokio"
//! ```
use futures::prelude::*;
use libp2p::{
Multiaddr,
NetworkBehaviour,
@ -121,7 +120,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
// Create a Swarm to manage peers and events.
let mut swarm = {
let mdns = Mdns::new().await?;
let mdns = Mdns::new(Default::default()).await?;
let mut behaviour = MyBehaviour {
floodsub: Floodsub::new(peer_id.clone()),
mdns,
@ -154,10 +153,15 @@ async fn main() -> Result<(), Box<dyn Error>> {
loop {
let to_publish = {
tokio::select! {
line = stdin.try_next() => Some((floodsub_topic.clone(), line?.expect("Stdin closed"))),
line = stdin.next_line() => {
let line = line?.expect("stdin closed");
Some((floodsub_topic.clone(), line))
}
event = swarm.next() => {
println!("New Event: {:?}", event);
None
// All events are handled by the `NetworkBehaviourEventProcess`es.
// I.e. the `swarm.next()` future drives the `Swarm` without ever
// terminating.
panic!("Unexpected event: {:?}", event);
}
}
};

View File

@ -58,7 +58,7 @@ use libp2p::{
NetworkBehaviour,
identity,
floodsub::{self, Floodsub, FloodsubEvent},
mdns::{Mdns, MdnsEvent},
mdns::{Mdns, MdnsConfig, MdnsEvent},
swarm::NetworkBehaviourEventProcess
};
use std::{error::Error, task::{Context, Poll}};
@ -121,7 +121,7 @@ fn main() -> Result<(), Box<dyn Error>> {
// Create a Swarm to manage peers and events
let mut swarm = {
let mdns = task::block_on(Mdns::new())?;
let mdns = task::block_on(Mdns::new(MdnsConfig::default()))?;
let mut behaviour = MyBehaviour {
floodsub: Floodsub::new(local_peer_id.clone()),
mdns,

View File

@ -60,11 +60,11 @@ use libp2p::{
Swarm,
build_development_transport,
identity,
mdns::{Mdns, MdnsEvent},
mdns::{Mdns, MdnsConfig, MdnsEvent},
swarm::NetworkBehaviourEventProcess
};
use std::{error::Error, task::{Context, Poll}};
use trust_graph::TrustGraph;
use trust_graph::{TrustGraph, InMemoryStorage};
fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
@ -159,8 +159,12 @@ fn main() -> Result<(), Box<dyn Error>> {
libp2p::identity::Keypair::Ed25519(kp) => kp,
_ => unreachable!("only ed25519 supported"),
};
let kademlia = Kademlia::new(local_key, local_peer_id.clone(), store, TrustGraph::new(vec![]));
let mdns = task::block_on(Mdns::new())?;
let trust = {
let storage = InMemoryStorage::new_in_memory(vec![]);
TrustGraph::new(storage)
};
let kademlia = Kademlia::new(local_key, local_peer_id.clone(), store, trust);
let mdns = task::block_on(Mdns::new(MdnsConfig::default()))?;
let behaviour = MyBehaviour { kademlia, mdns };
Swarm::new(transport, behaviour, local_peer_id)
};

View File

@ -18,43 +18,42 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use async_std::task;
use libp2p::mdns::service::{MdnsPacket, MdnsService};
use libp2p::{identity, mdns::{Mdns, MdnsConfig, MdnsEvent}, PeerId, Swarm};
use std::error::Error;
fn main() -> Result<(), Box<dyn Error>> {
// This example provides passive discovery of the libp2p nodes on the
// network that send mDNS queries and answers.
task::block_on(async move {
let mut service = MdnsService::new().await?;
loop {
let (srv, packet) = service.next().await;
match packet {
MdnsPacket::Query(query) => {
// We detected a libp2p mDNS query on the network. In a real application, you
// probably want to answer this query by doing `query.respond(...)`.
println!("Detected query from {:?}", query.remote_addr());
}
MdnsPacket::Response(response) => {
// We detected a libp2p mDNS response on the network. Responses are for
// everyone and not just for the requester, which makes it possible to
// passively listen.
for peer in response.discovered_peers() {
println!("Discovered peer {:?}", peer.id());
// These are the self-reported addresses of the peer we just discovered.
for addr in peer.addresses() {
println!(" Address = {:?}", addr);
}
}
}
MdnsPacket::ServiceDiscovery(query) => {
// The last possibility is a service detection query from DNS-SD.
// Just like `Query`, in a real application you probably want to call
// `query.respond`.
println!("Detected service query from {:?}", query.remote_addr());
#[async_std::main]
async fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
// Create a random PeerId.
let id_keys = identity::Keypair::generate_ed25519();
let peer_id = PeerId::from(id_keys.public());
println!("Local peer id: {:?}", peer_id);
// Create a transport.
let transport = libp2p::build_development_transport(id_keys)?;
// Create an MDNS network behaviour.
let behaviour = Mdns::new(MdnsConfig::default()).await?;
// Create a Swarm that establishes connections through the given transport.
// Note that the MDNS behaviour itself will not actually initiate any connections,
// as it only uses UDP.
let mut swarm = Swarm::new(transport, behaviour, peer_id);
Swarm::listen_on(&mut swarm, "/ip4/0.0.0.0/tcp/0".parse()?)?;
loop {
match swarm.next().await {
MdnsEvent::Discovered(peers) => {
for (peer, addr) in peers {
println!("discovered {} {}", peer, addr);
}
}
MdnsEvent::Expired(expired) => {
for (peer, addr) in expired {
println!("expired {} {}", peer, addr);
}
}
service = srv
}
})
}
}

View File

@ -1,3 +1,16 @@
# 0.11.1 [2021-02-15]
- Update dependencies
# 0.11.0 [2021-01-12]
- Update dependencies
# 0.10.1 [2021-01-12]
- Fix compilation with serde-1.0.119.
[PR 1912](https://github.com/libp2p/rust-libp2p/pull/1912)
# 0.10.0 [2020-11-25]
- Upgrade multihash to `0.13`.

View File

@ -6,7 +6,7 @@ description = "Implementation of the multiaddr format"
homepage = "https://github.com/libp2p/rust-libp2p"
keywords = ["multiaddr", "ipfs"]
license = "MIT"
version = "0.10.1"
version = "0.11.1"
[features]
default = ["url"]
@ -23,7 +23,7 @@ multihash = { version = "0.13", default-features = false, features = ["std", "mu
percent-encoding = "2.1.0"
serde = "1.0.70"
static_assertions = "1.1"
unsigned-varint = "0.5"
unsigned-varint = "0.7"
url = { version = "2.1.0", optional = true, default-features = false }
[dev-dependencies]

View File

@ -1,6 +1,4 @@
use std::borrow::Cow;
use std::fmt::Debug;
use std::fmt;
use std::{borrow::Cow, fmt};
/// Represents an Onion v3 address
#[derive(Clone)]
@ -43,7 +41,7 @@ impl<'a> From<(&'a [u8; 35], u16)> for Onion3Addr<'a> {
}
}
impl Debug for Onion3Addr<'_> {
impl fmt::Debug for Onion3Addr<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_tuple("Onion3Addr")
.field(&format!("{:02x?}", &self.0[..]))

View File

@ -1,3 +1,17 @@
# 0.10.2 [2021-03-01]
- Re-enable "parallel negotiation" if the dialer has 3 or more
alternative protocols.
[PR 1934](https://github.com/libp2p/rust-libp2p/pull/1934)
# 0.10.1 [2021-02-15]
- Update dependencies.
# 0.10.0 [2021-01-12]
- Update dependencies.
# 0.9.1 [2020-12-02]
- Ensure uniform outcomes for failed negotiations with both

View File

@ -1,7 +1,7 @@
[package]
name = "fluence-fork-multistream-select"
description = "Multistream-select negotiation protocol for libp2p"
version = "0.9.2"
version = "0.10.2"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -13,19 +13,19 @@ edition = "2018"
name = "multistream_select"
[dependencies]
bytes = "0.5"
bytes = "1"
futures = "0.3"
log = "0.4"
pin-project = "1.0.0"
smallvec = "1.0"
unsigned-varint = "0.5"
unsigned-varint = "0.7"
[dev-dependencies]
async-std = "1.6.2"
env_logger = "0.8"
libp2p-core = { path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-mplex = { path = "../../muxers/mplex", package = "fluence-fork-libp2p-mplex" }
libp2p-plaintext = { path = "../../protocols/plaintext", package = "fluence-fork-libp2p-plaintext" }
libp2p-plaintext = { path = "../../transports/plaintext", package = "fluence-fork-libp2p-plaintext" }
quickcheck = "0.9.0"
rand = "0.7.2"
rw-stream-sink = "0.2.1"

View File

@ -56,17 +56,12 @@ where
I::Item: AsRef<[u8]>
{
let iter = protocols.into_iter();
// NOTE: Temporarily disabled "parallel" negotiation in order to correct the
// "ls" responses towards interoperability and (new) spec compliance.
// See https://github.com/libp2p/rust-libp2p/issues/1795.
Either::Left(dialer_select_proto_serial(inner, iter, version))
// We choose between the "serial" and "parallel" strategies based on the number of protocols.
// if iter.size_hint().1.map(|n| n <= 3).unwrap_or(false) {
// Either::Left(dialer_select_proto_serial(inner, iter, version))
// } else {
// Either::Right(dialer_select_proto_parallel(inner, iter, version))
// }
if iter.size_hint().1.map(|n| n <= 3).unwrap_or(false) {
Either::Left(dialer_select_proto_serial(inner, iter, version))
} else {
Either::Right(dialer_select_proto_parallel(inner, iter, version))
}
}
/// Future, returned by `dialer_select_proto`, which selects a protocol and dialer

View File

@ -1,3 +1,11 @@
# 0.27.1 [2021-02-15]
- Update dependencies.
# 0.27.0 [2021-01-12]
- Update dependencies.
# 0.26.0 [2020-12-17]
- Update `libp2p-core`.

View File

@ -2,7 +2,7 @@
name = "fluence-fork-libp2p-mplex"
edition = "2018"
description = "Mplex multiplexing protocol for libp2p"
version = "0.26.1"
version = "0.27.1"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -13,24 +13,24 @@ categories = ["network-programming", "asynchronous"]
name = "libp2p_mplex"
[dependencies]
bytes = "0.5"
bytes = "1"
futures = "0.3.1"
futures_codec = "0.4.1"
libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" }
asynchronous-codec = "0.6"
libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" }
log = "0.4"
nohash-hasher = "0.2"
parking_lot = "0.11"
rand = "0.7"
smallvec = "1.4"
unsigned-varint = { version = "0.5", features = ["futures-codec"] }
unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] }
[dev-dependencies]
async-std = "1.7.0"
criterion = "0.3"
env_logger = "0.8"
futures = "0.3"
libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" }
libp2p-plaintext = { path = "../../protocols/plaintext", package = "fluence-fork-libp2p-plaintext" }
libp2p-tcp = { path = "../../transports/tcp", package = "fluence-fork-libp2p-tcp" }
libp2p-plaintext = { path = "../../transports/plaintext", package = "fluence-fork-libp2p-plaintext" }
quickcheck = "0.9"
rand = "0.7"

View File

@ -19,7 +19,7 @@
// DEALINGS IN THE SOFTWARE.
use bytes::{BufMut, Bytes, BytesMut};
use futures_codec::{Decoder, Encoder};
use asynchronous_codec::{Decoder, Encoder};
use libp2p_core::Endpoint;
use std::{fmt, hash::{Hash, Hasher}, io, mem};
use unsigned_varint::{codec, encode};

View File

@ -24,7 +24,7 @@ use crate::codec::{Codec, Frame, LocalStreamId, RemoteStreamId};
use log::{debug, trace};
use futures::{prelude::*, ready, stream::Fuse};
use futures::task::{AtomicWaker, ArcWake, waker_ref, WakerRef};
use futures_codec::Framed;
use asynchronous_codec::Framed;
use nohash_hasher::{IntMap, IntSet};
use parking_lot::Mutex;
use smallvec::SmallVec;
@ -321,7 +321,7 @@ where
// Remove the substream, scheduling pending frames as necessary.
match self.substreams.remove(&id) {
None => return,
None => {},
Some(state) => {
// If we fell below the substream limit, notify tasks that had
// interest in opening an outbound substream earlier.
@ -442,7 +442,7 @@ where
// Read the next frame.
match ready!(self.poll_read_frame(cx, Some(id)))? {
Frame::Data { data, stream_id } if stream_id.into_local() == id => {
return Poll::Ready(Ok(Some(data.clone())))
return Poll::Ready(Ok(Some(data)))
},
Frame::Data { stream_id, data } => {
// The data frame is for a different stream than the one
@ -595,18 +595,16 @@ where
// this task again to have a chance at progress.
trace!("{}: No task to read from blocked stream. Waking current task.", self.id);
cx.waker().clone().wake();
} else if let Some(id) = stream_id {
// We woke some other task, but are still interested in
// reading `Data` frames from the current stream when unblocked.
debug_assert!(blocked_id != &id, "Unexpected attempt at reading a new \
frame from a substream with a full buffer.");
let _ = NotifierRead::register_read_stream(&self.notifier_read, cx.waker(), id);
} else {
if let Some(id) = stream_id {
// We woke some other task, but are still interested in
// reading `Data` frames from the current stream when unblocked.
debug_assert!(blocked_id != &id, "Unexpected attempt at reading a new \
frame from a substream with a full buffer.");
let _ = NotifierRead::register_read_stream(&self.notifier_read, cx.waker(), id);
} else {
// We woke some other task but are still interested in
// reading new `Open` frames when unblocked.
let _ = NotifierRead::register_next_stream(&self.notifier_read, cx.waker());
}
// We woke some other task but are still interested in
// reading new `Open` frames when unblocked.
let _ = NotifierRead::register_next_stream(&self.notifier_read, cx.waker());
}
return Poll::Pending
@ -932,7 +930,7 @@ impl NotifierRead {
impl ArcWake for NotifierRead {
fn wake_by_ref(this: &Arc<Self>) {
let wakers = mem::replace(&mut *this.read_stream.lock(), Default::default());
let wakers = mem::take(&mut *this.read_stream.lock());
for (_, waker) in wakers {
waker.wake();
}
@ -963,7 +961,7 @@ impl NotifierWrite {
impl ArcWake for NotifierWrite {
fn wake_by_ref(this: &Arc<Self>) {
let wakers = mem::replace(&mut *this.pending.lock(), Default::default());
let wakers = mem::take(&mut *this.pending.lock());
for waker in wakers {
waker.wake();
}
@ -985,7 +983,7 @@ impl NotifierOpen {
}
fn wake_all(&mut self) {
let wakers = mem::replace(&mut self.pending, Default::default());
let wakers = mem::take(&mut self.pending);
for waker in wakers {
waker.wake();
}
@ -1006,7 +1004,7 @@ mod tests {
use async_std::task;
use bytes::BytesMut;
use futures::prelude::*;
use futures_codec::{Decoder, Encoder};
use asynchronous_codec::{Decoder, Encoder};
use quickcheck::*;
use rand::prelude::*;
use std::collections::HashSet;

View File

@ -106,7 +106,7 @@ where
-> Poll<Result<Self::Substream, io::Error>>
{
let stream_id = ready!(self.io.lock().poll_open_stream(cx))?;
return Poll::Ready(Ok(Substream::new(stream_id)))
Poll::Ready(Ok(Substream::new(stream_id)))
}
fn destroy_outbound(&self, _substream: Self::OutboundSubstream) {

View File

@ -1,3 +1,11 @@
# 0.30.1 [2021-02-17]
- Update `yamux` to `0.8.1`.
# 0.30.0 [2021-01-12]
- Update dependencies.
# 0.29.0 [2020-12-17]
- Update `libp2p-core`.

View File

@ -2,7 +2,7 @@
name = "fluence-fork-libp2p-yamux"
edition = "2018"
description = "Yamux multiplexing protocol for libp2p"
version = "0.29.1"
version = "0.30.1"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -14,10 +14,10 @@ name = "libp2p_yamux"
[dependencies]
futures = "0.3.1"
libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" }
parking_lot = "0.11"
thiserror = "1.0"
yamux = "0.8.0"
yamux = "0.8.1"
[package.metadata.workspaces]
independent = true

View File

@ -1,3 +1,11 @@
# 0.28.0 [unreleased]
- Update `libp2p-swarm`.
# 0.27.0 [2021-01-12]
- Update dependencies.
# 0.26.0 [2020-12-17]
- Update `libp2p-swarm` and `libp2p-core`.

View File

@ -2,7 +2,7 @@
name = "fluence-fork-libp2p-floodsub"
edition = "2018"
description = "Floodsub protocol for libp2p"
version = "0.26.1"
version = "0.28.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -16,15 +16,15 @@ name = "libp2p_floodsub"
cuckoofilter = "0.5.0"
fnv = "1.0"
futures = "0.3.1"
libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
log = "0.4"
prost = "0.6.1"
prost = "0.7"
rand = "0.7"
smallvec = "1.0"
[build-dependencies]
prost-build = "0.6"
prost-build = "0.7"
[package.metadata.workspaces]

View File

@ -34,7 +34,6 @@ use libp2p_swarm::{
DialPeerCondition,
};
use log::warn;
use rand;
use smallvec::SmallVec;
use std::{collections::VecDeque, iter};
use std::collections::hash_map::{DefaultHasher, HashMap};
@ -89,7 +88,7 @@ impl Floodsub {
if self.connected_peers.contains_key(&peer_id) {
for topic in self.subscribed_topics.iter().cloned() {
self.events.push_back(NetworkBehaviourAction::NotifyHandler {
peer_id: peer_id.clone(),
peer_id,
handler: NotifyHandler::Any,
event: FloodsubRpc {
messages: Vec::new(),
@ -102,7 +101,7 @@ impl Floodsub {
}
}
if self.target_peers.insert(peer_id.clone()) {
if self.target_peers.insert(peer_id) {
self.events.push_back(NetworkBehaviourAction::DialPeer {
peer_id, condition: DialPeerCondition::Disconnected
});
@ -125,7 +124,7 @@ impl Floodsub {
for peer in self.connected_peers.keys() {
self.events.push_back(NetworkBehaviourAction::NotifyHandler {
peer_id: peer.clone(),
peer_id: *peer,
handler: NotifyHandler::Any,
event: FloodsubRpc {
messages: Vec::new(),
@ -156,7 +155,7 @@ impl Floodsub {
for peer in self.connected_peers.keys() {
self.events.push_back(NetworkBehaviourAction::NotifyHandler {
peer_id: peer.clone(),
peer_id: *peer,
handler: NotifyHandler::Any,
event: FloodsubRpc {
messages: Vec::new(),
@ -196,7 +195,7 @@ impl Floodsub {
fn publish_many_inner(&mut self, topic: impl IntoIterator<Item = impl Into<Topic>>, data: impl Into<Vec<u8>>, check_self_subscriptions: bool) {
let message = FloodsubMessage {
source: self.config.local_peer_id.clone(),
source: self.config.local_peer_id,
data: data.into(),
// If the sequence numbers are predictable, then an attacker could flood the network
// with packets with the predetermined sequence numbers and absorb our legitimate
@ -231,7 +230,7 @@ impl Floodsub {
}
self.events.push_back(NetworkBehaviourAction::NotifyHandler {
peer_id: peer_id.clone(),
peer_id: *peer_id,
handler: NotifyHandler::Any,
event: FloodsubRpc {
subscriptions: Vec::new(),
@ -259,7 +258,7 @@ impl NetworkBehaviour for Floodsub {
if self.target_peers.contains(id) {
for topic in self.subscribed_topics.iter().cloned() {
self.events.push_back(NetworkBehaviourAction::NotifyHandler {
peer_id: id.clone(),
peer_id: *id,
handler: NotifyHandler::Any,
event: FloodsubRpc {
messages: Vec::new(),
@ -272,7 +271,7 @@ impl NetworkBehaviour for Floodsub {
}
}
self.connected_peers.insert(id.clone(), SmallVec::new());
self.connected_peers.insert(*id, SmallVec::new());
}
fn inject_disconnected(&mut self, id: &PeerId) {
@ -283,7 +282,7 @@ impl NetworkBehaviour for Floodsub {
// try to reconnect.
if self.target_peers.contains(id) {
self.events.push_back(NetworkBehaviourAction::DialPeer {
peer_id: id.clone(),
peer_id: *id,
condition: DialPeerCondition::Disconnected
});
}
@ -312,7 +311,7 @@ impl NetworkBehaviour for Floodsub {
remote_peer_topics.push(subscription.topic.clone());
}
self.events.push_back(NetworkBehaviourAction::GenerateEvent(FloodsubEvent::Subscribed {
peer_id: propagation_source.clone(),
peer_id: propagation_source,
topic: subscription.topic,
}));
}
@ -321,7 +320,7 @@ impl NetworkBehaviour for Floodsub {
remote_peer_topics.remove(pos);
}
self.events.push_back(NetworkBehaviourAction::GenerateEvent(FloodsubEvent::Unsubscribed {
peer_id: propagation_source.clone(),
peer_id: propagation_source,
topic: subscription.topic,
}));
}
@ -364,7 +363,7 @@ impl NetworkBehaviour for Floodsub {
if let Some(pos) = rpcs_to_dispatch.iter().position(|(p, _)| p == peer_id) {
rpcs_to_dispatch[pos].1.messages.push(message.clone());
} else {
rpcs_to_dispatch.push((peer_id.clone(), FloodsubRpc {
rpcs_to_dispatch.push((*peer_id, FloodsubRpc {
subscriptions: Vec::new(),
messages: vec![message.clone()],
}));

View File

@ -1,3 +1,21 @@
# 0.29.0 [unreleased]
- Update `libp2p-swarm`.
# 0.28.0 [2021-02-15]
- Prevent non-published messages being added to caches.
[PR 1930](https://github.com/libp2p/rust-libp2p/pull/1930)
- Update dependencies.
# 0.27.0 [2021-01-12]
- Update dependencies.
- Implement Gossipsub v1.1 specification.
[PR 1720](https://github.com/libp2p/rust-libp2p/pull/1720)
# 0.26.0 [2020-12-17]
- Update `libp2p-swarm` and `libp2p-core`.

View File

@ -2,7 +2,7 @@
name = "fluence-fork-libp2p-gossipsub"
edition = "2018"
description = "Gossipsub protocol for libp2p"
version = "0.26.1"
version = "0.29.0"
authors = ["Age Manning <Age@AgeManning.com>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -13,37 +13,37 @@ categories = ["network-programming", "asynchronous"]
name = "libp2p_gossipsub"
[dependencies]
libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" }
bytes = "0.5.6"
libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" }
bytes = "1.0"
byteorder = "1.3.4"
fnv = "1.0.7"
futures = "0.3.5"
rand = "0.7.3"
futures_codec = "0.4.1"
asynchronous-codec = "0.6"
wasm-timer = "0.2.4"
unsigned-varint = { version = "0.5.0", features = ["futures-codec"] }
unsigned-varint = { version = "0.7.0", features = ["asynchronous_codec"] }
log = "0.4.11"
sha2 = "0.9.1"
base64 = "0.13.0"
smallvec = "1.4.2"
prost = "0.6.1"
prost = "0.7"
hex_fmt = "0.3.0"
regex = "1.4.0"
[dev-dependencies]
async-std = "1.6.3"
env_logger = "0.8.1"
libp2p-plaintext = { path = "../plaintext", package = "fluence-fork-libp2p-plaintext" }
libp2p-plaintext = { path = "../../transports/plaintext", package = "fluence-fork-libp2p-plaintext" }
libp2p-yamux = { path = "../../muxers/yamux", package = "fluence-fork-libp2p-yamux" }
libp2p-mplex = { path = "../../muxers/mplex", package = "fluence-fork-libp2p-mplex" }
libp2p-noise = { path = "../../protocols/noise", package = "fluence-fork-libp2p-noise" }
libp2p-noise = { path = "../../transports/noise", package = "fluence-fork-libp2p-noise" }
quickcheck = "0.9.2"
hex = "0.4.2"
derive_builder = "0.9.0"
[build-dependencies]
prost-build = "0.6.1"
prost-build = "0.7"
[package.metadata.workspaces]
independent = true

View File

@ -78,7 +78,7 @@ impl BackoffStorage {
backoffs_by_heartbeat: &mut Vec<HashSet<_>>,
heartbeat_interval,
backoff_slack| {
let pair = (topic.clone(), peer.clone());
let pair = (topic.clone(), *peer);
let index = (heartbeat_index.0
+ Self::heartbeats(&time, heartbeat_interval)
+ backoff_slack as usize)
@ -90,12 +90,12 @@ impl BackoffStorage {
.backoffs
.entry(topic.clone())
.or_insert_with(HashMap::new)
.entry(peer.clone())
.entry(*peer)
{
Entry::Occupied(mut o) => {
let (backoff, index) = o.get();
if backoff < &instant {
let pair = (topic.clone(), peer.clone());
let pair = (topic.clone(), *peer);
if let Some(s) = self.backoffs_by_heartbeat.get_mut(index.0) {
s.remove(&pair);
}

View File

@ -574,7 +574,7 @@ where
// calculate the message id from the un-transformed data
let msg_id = self.config.message_id(&GossipsubMessage {
source: raw_message.source.clone(),
source: raw_message.source,
data, // the uncompressed form
sequence_number: raw_message.sequence_number,
topic: raw_message.topic.clone(),
@ -591,15 +591,11 @@ where
// check that the size doesn't exceed the max transmission size
if event.encoded_len() > self.config.max_transmit_size() {
// NOTE: The size limit can be reached by excessive topics or an excessive message.
// This is an estimate that should be within 10% of the true encoded value. It is
// possible to have a message that exceeds the RPC limit and is not caught here. A
// warning log will be emitted in this case.
return Err(PublishError::MessageTooLarge);
}
// Add published message to the duplicate cache.
if !self.duplicate_cache.insert(msg_id.clone()) {
// Check the if the message has been published before
if self.duplicate_cache.contains(&msg_id) {
// This message has already been seen. We don't re-publish messages that have already
// been published on the network.
warn!(
@ -609,24 +605,13 @@ where
return Err(PublishError::Duplicate);
}
// If the message isn't a duplicate add it to the memcache.
self.mcache.put(&msg_id, raw_message.clone());
debug!("Publishing message: {:?}", msg_id);
// If the message is anonymous or has a random author add it to the published message ids
// cache.
if let PublishConfig::RandomAuthor | PublishConfig::Anonymous = self.publish_config {
if !self.config.allow_self_origin() {
self.published_message_ids.insert(msg_id.clone());
}
}
let topic_hash = raw_message.topic.clone();
// If we are not flood publishing forward the message to mesh peers.
let mesh_peers_sent =
!self.config.flood_publish() && self.forward_msg(&msg_id, raw_message, None)?;
!self.config.flood_publish() && self.forward_msg(&msg_id, raw_message.clone(), None)?;
let mut recipient_peers = HashSet::new();
if let Some(set) = self.topic_peers.get(&topic_hash) {
@ -644,7 +629,7 @@ where
// Explicit peers
for peer in &self.explicit_peers {
if set.contains(peer) {
recipient_peers.insert(peer.clone());
recipient_peers.insert(*peer);
}
}
@ -655,7 +640,7 @@ where
.score_below_threshold(peer, |ts| ts.publish_threshold)
.0
{
recipient_peers.insert(peer.clone());
recipient_peers.insert(*peer);
}
}
@ -665,7 +650,7 @@ where
// If we have fanout peers add them to the map.
if self.fanout.contains_key(&topic_hash) {
for peer in self.fanout.get(&topic_hash).expect("Topic must exist") {
recipient_peers.insert(peer.clone());
recipient_peers.insert(*peer);
}
} else {
// We have no fanout peers, select mesh_n of them and add them to the fanout
@ -688,7 +673,7 @@ where
self.fanout.insert(topic_hash.clone(), new_peers.clone());
for peer in new_peers {
debug!("Peer added to fanout: {:?}", peer);
recipient_peers.insert(peer.clone());
recipient_peers.insert(peer);
}
}
// We are publishing to fanout peers - update the time we published
@ -702,10 +687,23 @@ where
return Err(PublishError::InsufficientPeers);
}
// If the message isn't a duplicate and we have sent it to some peers add it to the
// duplicate cache and memcache.
self.duplicate_cache.insert(msg_id.clone());
self.mcache.put(&msg_id, raw_message);
// If the message is anonymous or has a random author add it to the published message ids
// cache.
if let PublishConfig::RandomAuthor | PublishConfig::Anonymous = self.publish_config {
if !self.config.allow_self_origin() {
self.published_message_ids.insert(msg_id.clone());
}
}
// Send to peers we know are subscribed to the topic.
for peer_id in recipient_peers.iter() {
debug!("Sending message to peer: {:?}", peer_id);
self.send_message(peer_id.clone(), event.clone())?;
self.send_message(*peer_id, event.clone())?;
}
info!("Published message: {:?}", &msg_id);
@ -777,7 +775,7 @@ where
pub fn add_explicit_peer(&mut self, peer_id: &PeerId) {
debug!("Adding explicit peer {}", peer_id);
self.explicit_peers.insert(peer_id.clone());
self.explicit_peers.insert(*peer_id);
self.check_explicit_peer_connection(peer_id);
}
@ -792,7 +790,7 @@ where
/// Blacklists a peer. All messages from this peer will be rejected and any message that was
/// created by this peer will be rejected.
pub fn blacklist_peer(&mut self, peer_id: &PeerId) {
if self.blacklisted_peers.insert(peer_id.clone()) {
if self.blacklisted_peers.insert(*peer_id) {
debug!("Peer has been blacklisted: {}", peer_id);
}
}
@ -944,7 +942,7 @@ where
}
Self::control_pool_add(
&mut self.control_pool,
peer_id.clone(),
peer_id,
GossipsubControlAction::Graft {
topic_hash: topic_hash.clone(),
},
@ -1019,7 +1017,7 @@ where
// Send a PRUNE control message
info!("LEAVE: Sending PRUNE to peer: {:?}", peer);
let control = self.make_prune(topic_hash, &peer, self.config.do_px());
Self::control_pool_add(&mut self.control_pool, peer.clone(), control);
Self::control_pool_add(&mut self.control_pool, peer, control);
}
}
debug!("Completed LEAVE for topic: {:?}", topic_hash);
@ -1031,7 +1029,7 @@ where
// Connect to peer
debug!("Connecting to explicit peer {:?}", peer_id);
self.events.push_back(NetworkBehaviourAction::DialPeer {
peer_id: peer_id.clone(),
peer_id: *peer_id,
condition: DialPeerCondition::Disconnected,
});
}
@ -1078,7 +1076,7 @@ where
// IHAVE flood protection
let peer_have = self
.count_received_ihave
.entry(peer_id.clone())
.entry(*peer_id)
.or_insert(0);
*peer_have += 1;
if *peer_have > self.config.max_ihave_messages() {
@ -1124,7 +1122,7 @@ where
}
if !iwant_ids.is_empty() {
let iasked = self.count_sent_iwant.entry(peer_id.clone()).or_insert(0);
let iasked = self.count_sent_iwant.entry(*peer_id).or_insert(0);
let mut iask = iwant_ids.len();
if *iasked + iask > self.config.max_ihave_length() {
iask = self.config.max_ihave_length().saturating_sub(*iasked);
@ -1149,7 +1147,7 @@ where
let message_ids = iwant_ids_vec.into_iter().cloned().collect::<Vec<_>>();
if let Some((_, _, _, gossip_promises)) = &mut self.peer_score {
gossip_promises.add_promise(
peer_id.clone(),
*peer_id,
&message_ids,
Instant::now() + self.config.iwant_followup_time(),
);
@ -1161,7 +1159,7 @@ where
Self::control_pool_add(
&mut self.control_pool,
peer_id.clone(),
*peer_id,
GossipsubControlAction::IWant { message_ids },
);
}
@ -1205,11 +1203,11 @@ where
// Send the messages to the peer
let message_list = cached_messages
.into_iter()
.map(|entry| RawGossipsubMessage::from(entry.1))
.map(|entry| entry.1)
.collect();
if self
.send_message(
peer_id.clone(),
*peer_id,
GossipsubRpc {
subscriptions: Vec::new(),
messages: message_list,
@ -1313,7 +1311,7 @@ where
"GRAFT: Mesh link added for peer: {:?} in topic: {:?}",
peer_id, &topic_hash
);
peers.insert(peer_id.clone());
peers.insert(*peer_id);
if let Some((peer_score, ..)) = &mut self.peer_score {
peer_score.graft(peer_id, topic_hash);
@ -1345,7 +1343,7 @@ where
if self
.send_message(
peer_id.clone(),
*peer_id,
GossipsubRpc {
subscriptions: Vec::new(),
messages: Vec::new(),
@ -1455,7 +1453,7 @@ where
// it, see https://github.com/libp2p/specs/pull/217
if let Some(peer_id) = p.peer_id {
// mark as px peer
self.px_peers.insert(peer_id.clone());
self.px_peers.insert(peer_id);
// dial peer
self.events.push_back(NetworkBehaviourAction::DialPeer {
@ -1609,7 +1607,7 @@ where
if !self.duplicate_cache.insert(msg_id.clone()) {
debug!(
"Message already received, ignoring. Message: {}",
msg_id.clone()
msg_id
);
if let Some((peer_score, ..)) = &mut self.peer_score {
peer_score.duplicated_message(propagation_source, &msg_id, &message.topic);
@ -1636,7 +1634,7 @@ where
debug!("Sending received message to user");
self.events.push_back(NetworkBehaviourAction::GenerateEvent(
GossipsubEvent::Message {
propagation_source: propagation_source.clone(),
propagation_source: *propagation_source,
message_id: msg_id.clone(),
message,
},
@ -1742,7 +1740,7 @@ where
match subscription.action {
GossipsubSubscriptionAction::Subscribe => {
if peer_list.insert(propagation_source.clone()) {
if peer_list.insert(*propagation_source) {
debug!(
"SUBSCRIPTION: Adding gossip peer: {} to topic: {:?}",
propagation_source.to_string(),
@ -1772,7 +1770,7 @@ where
{
if let Some(peers) = self.mesh.get_mut(&subscription.topic_hash) {
if peers.len() < self.config.mesh_n_low()
&& peers.insert(propagation_source.clone())
&& peers.insert(*propagation_source)
{
debug!(
"SUBSCRIPTION: Adding peer {} to the mesh for topic {:?}",
@ -1798,7 +1796,7 @@ where
// generates a subscription event to be polled
application_event.push(NetworkBehaviourAction::GenerateEvent(
GossipsubEvent::Subscribed {
peer_id: propagation_source.clone(),
peer_id: *propagation_source,
topic: subscription.topic_hash.clone(),
},
));
@ -1814,11 +1812,11 @@ where
// remove topic from the peer_topics mapping
subscribed_topics.remove(&subscription.topic_hash);
unsubscribed_peers
.push((propagation_source.clone(), subscription.topic_hash.clone()));
.push((*propagation_source, subscription.topic_hash.clone()));
// generate an unsubscribe event to be polled
application_event.push(NetworkBehaviourAction::GenerateEvent(
GossipsubEvent::Unsubscribed {
peer_id: propagation_source.clone(),
peer_id: *propagation_source,
topic: subscription.topic_hash.clone(),
},
));
@ -1836,7 +1834,7 @@ where
if !grafts.is_empty()
&& self
.send_message(
propagation_source.clone(),
*propagation_source,
GossipsubRpc {
subscriptions: Vec::new(),
messages: Vec::new(),
@ -1901,7 +1899,7 @@ where
let peer_score = &self.peer_score;
let mut score = |p: &PeerId| match peer_score {
Some((peer_score, ..)) => *scores
.entry(p.clone())
.entry(*p)
.or_insert_with(|| peer_score.score(p)),
_ => 0.0,
};
@ -1928,9 +1926,9 @@ where
topic_hash
);
let current_topic = to_prune.entry(p.clone()).or_insert_with(Vec::new);
let current_topic = to_prune.entry(*p).or_insert_with(Vec::new);
current_topic.push(topic_hash.clone());
no_px.insert(p.clone());
no_px.insert(*p);
true
} else {
false
@ -1965,7 +1963,7 @@ where
},
);
for peer in &peer_list {
let current_topic = to_graft.entry(peer.clone()).or_insert_with(Vec::new);
let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new);
current_topic.push(topic_hash.clone());
}
// update the mesh
@ -2048,7 +2046,7 @@ where
},
);
for peer in &peer_list {
let current_topic = to_graft.entry(peer.clone()).or_insert_with(Vec::new);
let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new);
current_topic.push(topic_hash.clone());
}
// update the mesh
@ -2104,8 +2102,7 @@ where
},
);
for peer in &peer_list {
let current_topic =
to_graft.entry(peer.clone()).or_insert_with(Vec::new);
let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new);
current_topic.push(topic_hash.clone());
}
// update the mesh
@ -2153,12 +2150,12 @@ where
"HEARTBEAT: Peer removed from fanout for topic: {:?}",
topic_hash
);
to_remove_peers.push(peer.clone());
to_remove_peers.push(*peer);
}
}
None => {
// remove if the peer has disconnected
to_remove_peers.push(peer.clone());
to_remove_peers.push(*peer);
}
}
}
@ -2207,7 +2204,7 @@ where
.iter()
.map(|p| {
(
p.clone(),
*p,
peer_score
.as_ref()
.expect("peer_score.is_some()")
@ -2297,7 +2294,7 @@ where
// send an IHAVE message
Self::control_pool_add(
&mut self.control_pool,
peer.clone(),
peer,
GossipsubControlAction::IHave {
topic_hash: topic_hash.clone(),
message_ids: peer_message_ids,
@ -2348,7 +2345,7 @@ where
// send the control messages
if self
.send_message(
peer.clone(),
*peer,
GossipsubRpc {
subscriptions: Vec::new(),
messages: Vec::new(),
@ -2376,7 +2373,7 @@ where
.collect();
if self
.send_message(
peer.clone(),
*peer,
GossipsubRpc {
subscriptions: Vec::new(),
messages: Vec::new(),
@ -2416,7 +2413,7 @@ where
if let Some(mesh_peers) = self.mesh.get(&topic) {
for peer_id in mesh_peers {
if Some(peer_id) != propagation_source && Some(peer_id) != message.source.as_ref() {
recipient_peers.insert(peer_id.clone());
recipient_peers.insert(*peer_id);
}
}
}
@ -2428,7 +2425,7 @@ where
&& Some(p) != message.source.as_ref()
&& topics.contains(&message.topic)
{
recipient_peers.insert(p.clone());
recipient_peers.insert(*p);
}
}
}
@ -2438,7 +2435,7 @@ where
let event = Arc::new(
GossipsubRpc {
subscriptions: Vec::new(),
messages: vec![RawGossipsubMessage::from(message.clone())],
messages: vec![message.clone()],
control_msgs: Vec::new(),
}
.into_protobuf(),
@ -2446,7 +2443,7 @@ where
for peer in recipient_peers.iter() {
debug!("Sending message: {:?} to peer {:?}", msg_id, peer);
self.send_message(peer.clone(), event.clone())?;
self.send_message(*peer, event.clone())?;
}
debug!("Completed forwarding message");
Ok(true)
@ -2492,7 +2489,7 @@ where
};
Ok(RawGossipsubMessage {
source: Some(author.clone()),
source: Some(*author),
data,
// To be interoperable with the go-implementation this is treated as a 64-bit
// big-endian uint.
@ -2505,7 +2502,7 @@ where
}
PublishConfig::Author(peer_id) => {
Ok(RawGossipsubMessage {
source: Some(peer_id.clone()),
source: Some(*peer_id),
data,
// To be interoperable with the go-implementation this is treated as a 64-bit
// big-endian uint.
@ -2593,7 +2590,7 @@ where
for message in messages {
self.events
.push_back(NetworkBehaviourAction::NotifyHandler {
peer_id: peer_id.clone(),
peer_id,
event: message,
handler: NotifyHandler::Any,
})
@ -2776,7 +2773,7 @@ where
// send our subscriptions to the peer
if self
.send_message(
peer_id.clone(),
*peer_id,
GossipsubRpc {
messages: Vec::new(),
subscriptions,
@ -2791,7 +2788,7 @@ where
}
// Insert an empty set of the topics of this peer until known.
self.peer_topics.insert(peer_id.clone(), Default::default());
self.peer_topics.insert(*peer_id, Default::default());
// By default we assume a peer is only a floodsub peer.
//
@ -2799,11 +2796,11 @@ where
// update the type of peer that this is in order to determine which kind of routing should
// occur.
self.peer_protocols
.entry(peer_id.clone())
.entry(*peer_id)
.or_insert(PeerKind::Floodsub);
if let Some((peer_score, ..)) = &mut self.peer_score {
peer_score.add_peer(peer_id.clone());
peer_score.add_peer(*peer_id);
}
}
@ -2888,7 +2885,7 @@ where
if !self.peer_topics.contains_key(peer_id) && !self.px_peers.contains(peer_id) {
// The first connection is outbound and it is not a peer from peer exchange => mark
// it as outbound peer
self.outbound_peers.insert(peer_id.clone());
self.outbound_peers.insert(*peer_id);
}
}
@ -3251,7 +3248,7 @@ impl fmt::Debug for PublishConfig {
mod local_test {
use super::*;
use crate::IdentTopic;
use futures_codec::Encoder;
use asynchronous_codec::Encoder;
use quickcheck::*;
use rand::Rng;

View File

@ -393,7 +393,7 @@ impl Default for GossipsubConfigBuilder {
let mut source_string = if let Some(peer_id) = message.source.as_ref() {
peer_id.to_base58()
} else {
PeerId::from_bytes(&vec![0, 1, 0])
PeerId::from_bytes(&[0, 1, 0])
.expect("Valid peer id")
.to_base58()
};

View File

@ -83,7 +83,7 @@ impl GossipPromises {
self.promises.retain(|msg, peers| {
peers.retain(|peer_id, expires| {
if *expires < now {
let count = result.entry(peer_id.clone()).or_insert(0);
let count = result.entry(*peer_id).or_insert(0);
*count += 1;
debug!(
"The peer {} broke the promise to deliver message {} in time!",

View File

@ -24,7 +24,7 @@ use crate::protocol::{GossipsubCodec, ProtocolConfig};
use crate::types::{GossipsubRpc, PeerKind, RawGossipsubMessage};
use futures::prelude::*;
use futures::StreamExt;
use futures_codec::Framed;
use asynchronous_codec::Framed;
use libp2p_core::upgrade::{InboundUpgrade, NegotiationError, OutboundUpgrade, UpgradeError};
use libp2p_swarm::protocols_handler::{
KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,

View File

@ -19,7 +19,7 @@
// DEALINGS IN THE SOFTWARE.
//! Gossipsub is a P2P pubsub (publish/subscription) routing layer designed to extend upon
//! flooodsub and meshsub routing protocols.
//! floodsub and meshsub routing protocols.
//!
//! # Overview
//!

View File

@ -110,7 +110,7 @@ impl MessageCache {
let count = iwant_counts
.entry(message_id.clone())
.or_default()
.entry(peer.clone())
.entry(*peer)
.or_default();
*count += 1;
*count

View File

@ -432,7 +432,7 @@ impl PeerScore {
/// Adds a new ip to a peer, if the peer is not yet known creates a new peer_stats entry for it
pub fn add_ip(&mut self, peer_id: &PeerId, ip: IpAddr) {
trace!("Add ip for peer {}, ip: {}", peer_id, ip);
let peer_stats = self.peer_stats.entry(peer_id.clone()).or_default();
let peer_stats = self.peer_stats.entry(*peer_id).or_default();
// Mark the peer as connected (currently the default is connected, but we don't want to
// rely on the default).
@ -443,7 +443,7 @@ impl PeerScore {
self.peer_ips
.entry(ip)
.or_insert_with(HashSet::new)
.insert(peer_id.clone());
.insert(*peer_id);
}
/// Removes an ip from a peer
@ -474,7 +474,7 @@ impl PeerScore {
pub fn remove_peer(&mut self, peer_id: &PeerId) {
// we only retain non-positive scores of peers
if self.score(peer_id) > 0f64 {
if let hash_map::Entry::Occupied(entry) = self.peer_stats.entry(peer_id.clone()) {
if let hash_map::Entry::Occupied(entry) = self.peer_stats.entry(*peer_id) {
Self::remove_ips_for_peer(entry.get(), &mut self.peer_ips, peer_id);
entry.remove();
}
@ -692,11 +692,11 @@ impl PeerScore {
DeliveryStatus::Unknown => {
// the message is being validated; track the peer delivery and wait for
// the Deliver/Reject notification.
record.peers.insert(from.clone());
record.peers.insert(*from);
}
DeliveryStatus::Valid(validated) => {
// mark the peer delivery time to only count a duplicate delivery once.
record.peers.insert(from.clone());
record.peers.insert(*from);
self.mark_duplicate_message_delivery(from, topic_hash, Some(validated));
}
DeliveryStatus::Invalid => {

View File

@ -32,7 +32,7 @@ use bytes::Bytes;
use bytes::BytesMut;
use futures::future;
use futures::prelude::*;
use futures_codec::{Decoder, Encoder, Framed};
use asynchronous_codec::{Decoder, Encoder, Framed};
use libp2p_core::{
identity::PublicKey, InboundUpgrade, OutboundUpgrade, PeerId, ProtocolName, UpgradeInfo,
};

View File

@ -1,3 +1,11 @@
# 0.28.0 [unreleased]
- Update `libp2p-swarm`.
# 0.27.0 [2021-01-12]
- Update dependencies.
# 0.26.0 [2020-12-17]
- Update `libp2p-swarm` and `libp2p-core`.

View File

@ -2,7 +2,7 @@
name = "fluence-fork-libp2p-identify"
edition = "2018"
description = "Nodes identifcation protocol for libp2p"
version = "0.26.1"
version = "0.28.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -14,22 +14,21 @@ name = "libp2p_identify"
[dependencies]
futures = "0.3.1"
libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
log = "0.4.1"
prost = "0.6.1"
prost = "0.7"
smallvec = "1.0"
wasm-timer = "0.2"
[dev-dependencies]
async-std = "1.6.2"
libp2p-mplex = { path = "../../muxers/mplex", package = "fluence-fork-libp2p-mplex" }
libp2p-noise = { path = "../../protocols/noise", package = "fluence-fork-libp2p-noise" }
libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" }
libp2p-noise = { path = "../../transports/noise", package = "fluence-fork-libp2p-noise" }
libp2p-tcp = { path = "../../transports/tcp", package = "fluence-fork-libp2p-tcp" }
[build-dependencies]
prost-build = "0.6"
prost-build = "0.7"
[package.metadata.workspaces]
independent = true

View File

@ -117,7 +117,7 @@ impl NetworkBehaviour for Identify {
ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr.clone(),
};
self.observed_addresses.entry(peer_id.clone()).or_default().insert(*conn, addr);
self.observed_addresses.entry(*peer_id).or_default().insert(*conn, addr);
}
fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, _: &ConnectedPoint) {

View File

@ -35,13 +35,12 @@ use std::{fmt, io, iter, pin::Pin};
pub struct IdentifyProtocolConfig;
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct RemoteInfo {
/// Information about the remote.
pub info: IdentifyInfo,
/// Address the remote sees for us.
pub observed_addr: Multiaddr,
_priv: ()
}
/// The substream on which a reply is expected to be sent.
@ -80,7 +79,7 @@ where
agent_version: Some(info.agent_version),
protocol_version: Some(info.protocol_version),
public_key: Some(pubkey_bytes),
listen_addrs: listen_addrs,
listen_addrs,
observed_addr: Some(observed_addr.to_vec()),
protocols: info.protocols
};
@ -158,8 +157,7 @@ where
Ok(RemoteInfo {
info,
observed_addr: observed_addr.clone(),
_priv: ()
observed_addr,
})
})
}

View File

@ -1,3 +1,20 @@
# 0.29.0 [unreleased]
- Update `libp2p-swarm`.
# 0.28.1 [2021-02-15]
- Update dependencies.
# 0.28.0 [2021-01-12]
- Update dependencies.
# 0.27.1 [2021-01-11]
- Add From impls for `kbucket::Key`.
[PR 1909](https://github.com/libp2p/rust-libp2p/pull/1909).
# 0.27.0 [2020-12-17]
- Update `libp2p-core` and `libp2p-swarm`.

View File

@ -2,7 +2,7 @@
name = "fluence-fork-libp2p-kad"
edition = "2018"
description = "Kademlia protocol for libp2p"
version = "0.27.1"
version = "0.29.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -14,37 +14,38 @@ name = "libp2p_kad"
[dependencies]
arrayvec = "0.5.1"
bytes = "0.5"
bytes = "1"
either = "1.5"
fnv = "1.0"
futures_codec = "0.4"
asynchronous-codec = "0.6"
futures = "0.3.1"
log = "0.4"
libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
prost = "0.6.1"
libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
prost = "0.7"
rand = "0.7.2"
sha2 = "0.9.1"
smallvec = "1.0"
wasm-timer = "0.2"
uint = "0.8"
unsigned-varint = { version = "0.5", features = ["futures-codec"] }
uint = "0.9"
unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] }
void = "1.0"
bs58 = "0.3.0"
derivative = "2.0.2"
trust-graph = "0.2.0"
trust-graph = "0.2.5"
fluence-identity = "0.2.4"
prometheus = "0.9.0"
[dev-dependencies]
futures-timer = "3.0"
libp2p-noise = { path = "../noise", package = "fluence-fork-libp2p-noise" }
libp2p-noise = { path = "../../transports/noise", package = "fluence-fork-libp2p-noise" }
libp2p-yamux = { path = "../../muxers/yamux", package = "fluence-fork-libp2p-yamux" }
quickcheck = "0.9.0"
env_logger = "0.7.1"
[build-dependencies]
prost-build = "0.6"
prost-build = "0.7"
[package.metadata.workspaces]

View File

@ -34,6 +34,7 @@ pub enum Remove {
KeepLast = 1
}
#[allow(clippy::len_without_is_empty)]
impl Addresses {
/// Creates a new list of addresses.
pub fn new(addr: Multiaddr) -> Addresses {

View File

@ -58,12 +58,14 @@ use std::task::{Context, Poll};
use std::vec;
use wasm_timer::Instant;
use libp2p_core::identity::ed25519::{Keypair, PublicKey};
use trust_graph::{TrustGraph, Certificate};
use trust_graph::{Certificate};
use derivative::Derivative;
use crate::metrics::Metrics;
pub use crate::query::QueryStats;
type TrustGraph = trust_graph::TrustGraph<trust_graph::InMemoryStorage>;
/// `Kademlia` is a `NetworkBehaviour` that implements the libp2p
/// Kademlia protocol.
pub struct Kademlia<TStore> {
@ -363,7 +365,7 @@ where
.record_replication_interval
.or(config.record_publication_interval)
.map(|interval| PutRecordJob::new(
id.clone(),
id,
interval,
config.record_publication_interval,
config.record_ttl,
@ -398,7 +400,7 @@ where
}
/// Gets an iterator over immutable references to all running queries.
pub fn iter_queries<'a>(&'a self) -> impl Iterator<Item = QueryRef<'a>> {
pub fn iter_queries(&self) -> impl Iterator<Item = QueryRef<'_>> {
self.queries.iter().filter_map(|query|
if !query.is_finished() {
Some(QueryRef { query })
@ -408,7 +410,7 @@ where
}
/// Gets an iterator over mutable references to all running queries.
pub fn iter_queries_mut<'a>(&'a mut self) -> impl Iterator<Item = QueryMut<'a>> {
pub fn iter_queries_mut(&mut self) -> impl Iterator<Item = QueryMut<'_>> {
self.queries.iter_mut().filter_map(|query|
if !query.is_finished() {
Some(QueryMut { query })
@ -418,7 +420,7 @@ where
}
/// Gets an immutable reference to a running query, if it exists.
pub fn query<'a>(&'a self, id: &QueryId) -> Option<QueryRef<'a>> {
pub fn query(&self, id: &QueryId) -> Option<QueryRef<'_>> {
self.queries.get(id).and_then(|query|
if !query.is_finished() {
Some(QueryRef { query })
@ -461,7 +463,7 @@ where
if entry.value().insert(address) {
self.queued_events.push_back(NetworkBehaviourAction::GenerateEvent(
KademliaEvent::RoutingUpdated {
peer: peer.clone(),
peer: *peer,
addresses: entry.value().clone().into(),
old_peer: None,
}
@ -647,7 +649,7 @@ where
/// with an explicit expiration will always expire at that instant and until then
/// is subject to regular (re-)replication and (re-)publication.
pub fn put_record(&mut self, mut record: Record, quorum: Quorum) -> Result<QueryId, store::Error> {
record.publisher = Some(self.kbuckets.local_key().preimage().clone());
record.publisher = Some(*self.kbuckets.local_key().preimage());
self.store.put(record.clone())?;
self.metrics.store_put();
record.expires = record.expires.or_else(||
@ -712,7 +714,7 @@ where
pub fn bootstrap(&mut self) -> Result<QueryId, NoKnownPeers> {
let local_key = self.kbuckets.local_key().clone();
let info = QueryInfo::Bootstrap {
peer: local_key.preimage().clone(),
peer: *local_key.preimage(),
remaining: None
};
let peers = Self::closest_keys(&mut self.kbuckets, &local_key).collect::<Vec<_>>();
@ -756,7 +758,7 @@ where
// TODO: calculate weight for self?
let record = ProviderRecord::new(
key.clone(),
self.kbuckets.local_key().preimage().clone(),
*self.kbuckets.local_key().preimage(),
local_addrs);
self.store.add_provider(record)?;
let target = kbucket::Key::new(key.clone());
@ -766,7 +768,7 @@ where
bs58::encode(target.as_ref()).into_string(), // sha256
);
let provider_key = self.kbuckets.local_public_key();
let certificates = self.trust.get_all_certs(&provider_key, &[]);
let certificates = self.get_certificates(&provider_key);
let peers = Self::closest_keys(&mut self.kbuckets, &target);
let context = AddProviderContext::Publish;
let info = QueryInfo::AddProvider {
@ -843,20 +845,18 @@ where
}
}
let local_id = self.kbuckets.local_key().preimage().clone();
let others_iter = peers.filter(|p| p.node_id != local_id);
let local_id = self.kbuckets.local_key().preimage();
let others_iter = peers.filter(|p| &p.node_id != local_id);
let trust = &self.trust;
if let Some(query) = self.queries.get_mut(query_id) {
log::trace!("Request to {:?} in query {:?} succeeded.", source, query_id);
for peer in others_iter.clone() {
log::trace!("Peer {:?} reported by {:?} in query {:?}.",
peer, source, query_id);
query.inner.contacts.insert(peer.node_id.clone(), peer.clone().into());
log::trace!("Peer {:?} reported by {:?} in query {:?}.", peer, source, query_id);
query.inner.contacts.insert(peer.node_id, peer.clone().into());
}
query.on_success(source, others_iter.map(|kp| WeightedPeer {
peer_id: kp.node_id.clone().into(),
weight: trust.weight(&kp.public_key).unwrap_or_default()
weight: get_weight(trust, &kp.public_key),
}))
}
}
@ -875,7 +875,7 @@ where
.map(KadPeer::from)
.collect();
peers.iter_mut().for_each(|mut peer|
peer.certificates = self.trust.get_all_certs(&peer.public_key, &[])
peer.certificates = self.get_certificates(&peer.public_key)
);
peers
}
@ -908,14 +908,12 @@ where
// The provider is either the local node and we fill in
// the local addresses on demand,
let self_key = kbuckets.local_public_key();
let certificates = trust.get_all_certs(&self_key, &[]);
let multiaddrs = local_addrs.iter().cloned().collect::<Vec<_>>();
Some(KadPeer {
public_key: self_key,
node_id,
multiaddrs,
connection_ty,
certificates
multiaddrs: local_addrs.iter().cloned().collect::<Vec<_>>(),
certificates: get_certificates(&trust, &self_key),
public_key: self_key,
})
} else {
let key = kbucket::Key::from(node_id);
@ -928,16 +926,16 @@ where
} else {
p.addresses
};
let certificates = node_id.as_public_key().and_then(|provider_pk|
match provider_pk {
libp2p_core::identity::PublicKey::Ed25519(pk) =>
Some(trust.get_all_certs(pk, &[])),
let certificates = {
match node_id.as_public_key() {
Some(libp2p_core::identity::PublicKey::Ed25519(pk)) =>
get_certificates(&trust, &pk),
key => {
log::warn!("Provider {} has a non-Ed25519 public key: {:?}", node_id, key);
None
vec![]
}
}
).unwrap_or_default();
};
KadPeer {
node_id,
@ -970,7 +968,7 @@ where
/// Starts an iterative `ADD_PROVIDER` query for the given key.
fn start_add_provider(&mut self, key: record::Key, context: AddProviderContext) {
let provider_key = self.kbuckets.local_public_key();
let certificates = self.trust.get_all_certs(&provider_key, &[]);
let certificates = self.get_certificates(&provider_key);
let info = QueryInfo::AddProvider {
context,
key: key.clone(),
@ -1072,7 +1070,7 @@ where
{
let addresses = contact.addresses.clone();
let peer = entry.key().preimage().clone();
let weight = trust.weight(contact.public_key.clone()).unwrap_or(0);
let weight = get_weight(&trust, &contact.public_key);
debug!(
"Calculated weight for {} pk {}: {}",
entry.key().preimage(),
@ -1221,10 +1219,10 @@ where
phase: AddProviderPhase::GetClosestPeers,
..
} => {
let provider_id = params.local_peer_id().clone();
let provider_id = *params.local_peer_id();
let external_addresses = params.external_addresses().map(|r| r.addr).collect();
let provider_key = self.kbuckets.local_public_key();
let certificates = self.trust.get_all_certs(&provider_key, &[]);
let certificates = self.get_certificates(&provider_key);
let inner = QueryInner::new(QueryInfo::AddProvider {
context,
key,
@ -1241,7 +1239,7 @@ where
let peers = result.peers.into_iter().map(|peer_id| {
let weight = contacts
.get(&peer_id)
.and_then(|c| trust.weight(&c.public_key))
.map(|c| get_weight(&trust, &c.public_key))
.unwrap_or_default();
WeightedPeer {
peer_id: peer_id.into(),
@ -1299,7 +1297,8 @@ where
let trust = &self.trust;
let weight =
result.inner.contacts.get(peer_id)
.and_then(|c| trust.weight(&c.public_key)).unwrap_or_default();
.map(|c| get_weight(&trust, &c.public_key))
.unwrap_or_default();
let peer = WeightedPeer {
weight,
peer_id: cache_key
@ -1342,9 +1341,10 @@ where
let trust = &self.trust;
let peers = result.peers.into_iter().map(|peer_id| {
let weight =
contacts.get(&peer_id).and_then(|c|
trust.weight(&c.public_key)
).unwrap_or_default();
contacts
.get(&peer_id)
.map(|c| get_weight(&trust, &c.public_key))
.unwrap_or_default();
WeightedPeer {
peer_id: peer_id.into(),
@ -1738,6 +1738,26 @@ where
log!("\n{}", buckets);
}
}
fn get_certificates(&self, key: &PublicKey) -> Vec<Certificate> {
get_certificates(&self.trust, key)
}
fn get_weight(&self, key: &PublicKey) -> u32 {
get_weight(&self.trust, key)
}
}
fn get_certificates(trust: &TrustGraph, key: &PublicKey) -> Vec<Certificate> {
fluence_identity::PublicKey::from_libp2p(&key).map(|key|
trust.get_all_certs(&key, &[]).unwrap_or_default()
).unwrap_or_default()
}
fn get_weight(trust: &TrustGraph, key: &PublicKey) -> u32 {
fluence_identity::PublicKey::from_libp2p(&key).map(|key|
trust.weight(&key).unwrap_or_default().unwrap_or_default()
).unwrap_or(0)
}
/// Exponentially decrease the given duration (base 2).
@ -1804,7 +1824,7 @@ where
});
}
self.connected_peers.insert(peer.clone());
self.connected_peers.insert(*peer);
self.metrics.node_connected();
}
@ -1913,7 +1933,7 @@ where
for query in self.queries.iter_mut() {
query.on_failure(id);
}
self.connection_updated(id.clone(), None, NodeStatus::Disconnected);
self.connection_updated(*id, None, NodeStatus::Disconnected);
self.connected_peers.remove(id);
}
@ -1932,7 +1952,7 @@ where
// since the remote address on an inbound connection may be specific
// to that connection (e.g. typically the TCP port numbers).
let new_address = match endpoint {
ConnectedPoint::Dialer { address } => Some(address.clone()),
ConnectedPoint::Dialer { address } => Some(address),
ConnectedPoint::Listener { .. } => None,
};
@ -2080,7 +2100,7 @@ where
key, records, quorum, cache_at
} = &mut query.inner.info {
if let Some(record) = record {
records.push(PeerRecord{ peer: Some(source.clone()), record });
records.push(PeerRecord{ peer: Some(source), record });
let quorum = quorum.get();
if records.len() >= quorum {
@ -2104,7 +2124,7 @@ where
// closest node to the key that did *not* return the
// value is tracked in order to cache the record on
// that node if the query turns out to be successful.
let source_key = kbucket::Key::from(source.clone());
let source_key = kbucket::Key::from(source);
if let Some(cache_key) = cache_at {
let key = kbucket::Key::new(key.clone());
if source_key.distance(&key) < cache_key.distance(&key) {
@ -2135,7 +2155,7 @@ where
if let QueryInfo::PutRecord {
phase: PutRecordPhase::PutRecord { success, .. }, quorum, ..
} = &mut query.inner.info {
success.push(source.clone());
success.push(source);
let quorum = quorum.get();
if success.len() >= quorum {
@ -2270,7 +2290,7 @@ where
peer_id, event, handler: NotifyHandler::Any
});
} else if &peer_id != self.kbuckets.local_key().preimage() {
query.inner.pending_rpcs.push((peer_id.clone(), event));
query.inner.pending_rpcs.push((peer_id, event));
self.queued_events.push_back(NetworkBehaviourAction::DialPeer {
peer_id, condition: DialPeerCondition::Disconnected
});
@ -2801,7 +2821,7 @@ impl QueryInfo {
key: key.clone(),
provider: crate::protocol::KadPeer {
public_key: provider_key.clone(),
node_id: provider_id.clone(),
node_id: *provider_id,
multiaddrs: external_addresses.clone(),
connection_ty: crate::protocol::KadConnectionType::Connected,
certificates: certificates.clone(),

View File

@ -48,6 +48,7 @@ use quickcheck::*;
use rand::{Rng, random, thread_rng, rngs::StdRng, SeedableRng};
use std::{collections::{HashSet, HashMap}, time::Duration, num::NonZeroUsize, u64};
use libp2p_core::identity::ed25519;
use trust_graph::InMemoryStorage;
type TestSwarm = Swarm<Kademlia<MemoryStore>>;
@ -67,8 +68,12 @@ fn build_node_with_config(cfg: KademliaConfig) -> (ed25519::Keypair, Multiaddr,
.boxed();
let local_id = local_public_key.clone().into_peer_id();
let trust = {
let pk = fluence_identity::PublicKey::from_libp2p(&ed25519_key.public()).unwrap();
let storage = InMemoryStorage::new_in_memory(vec![(pk, 1)]);
TrustGraph::new(storage)
};
let store = MemoryStore::new(local_id.clone());
let trust = TrustGraph::new(vec![(ed25519_key.public(), 1)]);
let behaviour = Kademlia::with_config(ed25519_key.clone(), local_id.clone(), store, cfg.clone(), trust);
let mut swarm = Swarm::new(transport, behaviour, local_id);
@ -172,6 +177,7 @@ fn bootstrap() {
).into_iter()
.map(|(_, _a, s)| s)
.collect::<Vec<_>>();
let swarm_ids: Vec<_> = swarms.iter()
.map(Swarm::local_peer_id)
.cloned()
@ -477,7 +483,7 @@ fn put_record() {
// Connect `single_swarm` to three bootnodes.
for i in 0..3 {
single_swarm.2.add_address(
Swarm::local_peer_id(&fully_connected_swarms[0].2),
&Swarm::local_peer_id(&fully_connected_swarms[0].2),
fully_connected_swarms[i].1.clone(),
fully_connected_swarms[i].0.public(),
);
@ -758,7 +764,7 @@ fn add_provider() {
// Connect `single_swarm` to three bootnodes.
for i in 0..3 {
single_swarm.2.add_address(
Swarm::local_peer_id(&fully_connected_swarms[0].2),
&Swarm::local_peer_id(&fully_connected_swarms[0].2),
fully_connected_swarms[i].1.clone(),
fully_connected_swarms[i].0.public(),
);
@ -960,8 +966,8 @@ fn disjoint_query_does_not_finish_before_all_paths_did() {
trudy.2.store.put(record_trudy.clone()).unwrap();
// Make `trudy` and `bob` known to `alice`.
alice.2.add_address(Swarm::local_peer_id(&trudy.2), trudy.1.clone(), trudy.0.public());
alice.2.add_address(Swarm::local_peer_id(&bob.2), bob.1.clone(), bob.0.public());
alice.2.add_address(&Swarm::local_peer_id(&trudy.2), trudy.1.clone(), trudy.0.public());
alice.2.add_address(&Swarm::local_peer_id(&bob.2), bob.1.clone(), bob.0.public());
// Drop the swarm addresses.
let (mut alice, mut bob, mut trudy) = (alice.2, bob.2, trudy.2);
@ -1191,7 +1197,8 @@ fn make_swarms(total: usize, config: KademliaConfig) -> Vec<(Keypair, Multiaddr,
#[cfg(test)]
mod certificates {
use super::*;
use trust_graph::{KeyPair, current_time};
use trust_graph::current_time;
use fluence_identity::{KeyPair, PublicKey};
fn gen_root_cert(from: &KeyPair, to: PublicKey) -> Certificate {
let cur_time = current_time();
@ -1223,7 +1230,7 @@ mod certificates {
}
fn bs(pk: PublicKey) -> String {
bs58::encode(pk.encode()).into_string()
bs58::encode(pk.to_bytes()).into_string()
}
#[test]
@ -1239,15 +1246,19 @@ mod certificates {
// Set same weights to all nodes, so they store each other's certificates
let weights = swarms.iter().map(|(kp, _, _)| (kp.public(), 1)).collect::<Vec<_>>();
for swarm in swarms.iter_mut() {
swarm.2.trust.add_root_weights(weights.clone());
for (pk, weight) in weights.iter() {
let pk = fluence_identity::PublicKey::from_libp2p(&pk).unwrap();
swarm.2.trust.add_root_weight(pk, *weight);
}
}
let mut swarms = swarms.into_iter();
let (first_kp, _, first) = swarms.next().unwrap();
// issue certs from each swarm to the first swarm, so all swarms trust the first one
let mut swarms = swarms.map(|(kp, _, mut swarm)| {
let pk = fluence_identity::PublicKey::from_libp2p(&first_kp.public()).unwrap();
// root cert, its chain is [self-signed: swarm -> swarm, swarm -> first]
let root = gen_root_cert(&kp.clone().into(), first_kp.public());
let root = gen_root_cert(&kp.clone().into(), pk);
swarm.trust.add(&root, current_time()).unwrap();
SwarmWithKeypair { swarm, kp }
});
@ -1258,16 +1269,25 @@ mod certificates {
// issue cert from the first swarm to the second (will be later disseminated via kademlia)
// chain: 0 -> 1
let cert_0_1 = gen_root_cert(&swarm0.kp.clone().into(), swarm1.kp.public());
let cert_0_1 = {
let pk = fluence_identity::PublicKey::from_libp2p(&swarm1.kp.public()).unwrap();
gen_root_cert(&swarm0.kp.clone().into(), pk)
};
swarm0.swarm.trust.add(&cert_0_1, current_time()).unwrap();
let cert_0_1_check = swarm0.swarm.trust.get_all_certs(&swarm1.kp.public(), &[]);
let cert_0_1_check = {
let pk = fluence_identity::PublicKey::from_libp2p(&swarm1.kp.public()).unwrap();
swarm0.swarm.trust.get_all_certs(pk, &[]).unwrap()
};
assert_eq!(cert_0_1_check.len(), 1);
let cert_0_1_check = cert_0_1_check.into_iter().nth(0).unwrap();
assert_eq!(cert_0_1, cert_0_1_check);
// check that this certificate (with root prepended) can be added to trust graph of any other node
// chain: (2 -> 0)
let mut cert_2_0_1 = gen_root_cert(&swarm2.kp.clone().into(), swarm0.kp.public());
let mut cert_2_0_1 = {
let pk = fluence_identity::PublicKey::from_libp2p(&swarm0.kp.public()).unwrap();
gen_root_cert(&swarm2.kp.clone().into(), pk)
};
// chain: (2 -> 0) ++ (0 -> 1)
cert_2_0_1.chain.extend_from_slice(&cert_0_1.chain[1..]);
swarm2.swarm.trust.add(cert_2_0_1, current_time()).unwrap();
@ -1305,13 +1325,26 @@ mod certificates {
// check that certificates for `swarm[1].kp` were disseminated
for swarm in swarms.iter().skip(2) {
let disseminated = swarm.swarm.trust.get_all_certs(kp_1.clone(), &[]);
let disseminated = {
let pk = fluence_identity::PublicKey::from_libp2p(&kp_1).unwrap();
swarm.swarm.trust.get_all_certs(&pk, &[]).unwrap()
};
// take only certificate converging to current `swarm` public key
let disseminated = disseminated.into_iter().find(|c| &c.chain[0].issued_for == &swarm.kp.public()).unwrap();
let disseminated = {
let pk = fluence_identity::PublicKey::from_libp2p(&swarm.kp.public()).unwrap();
disseminated.into_iter().find(|c| &c.chain[0].issued_for == &pk).unwrap()
};
// swarm -> swarm0 -> swarm1
assert_eq!(disseminated.chain.len(), 3);
let pubkeys = disseminated.chain.iter().map(|c| &c.issued_for).collect::<Vec<_>>();
assert_eq!(pubkeys, vec![&swarm.kp.public(), &swarms[0].kp.public(), &swarms[1].kp.public()]);
assert_eq!(
pubkeys,
vec![
&fluence_identity::PublicKey::from_libp2p(&swarm.kp.public()).unwrap(),
&fluence_identity::PublicKey::from_libp2p(&swarms[0].kp.public()).unwrap(),
&fluence_identity::PublicKey::from_libp2p(&swarms[1].kp.public()).unwrap(),
]
);
// last trust in the certificate must be equal to previously generated (0 -> 1) trust
let last = disseminated.chain.last().unwrap();

View File

@ -531,7 +531,7 @@ where
}
KademliaHandlerIn::FindNodeReq { key, user_data } => {
let msg = KadRequestMsg::FindNode { key };
self.substreams.push(SubstreamState::OutPendingOpen(msg, Some(user_data.clone())));
self.substreams.push(SubstreamState::OutPendingOpen(msg, Some(user_data)));
}
KademliaHandlerIn::FindNodeRes {
closer_peers,
@ -550,7 +550,7 @@ where
};
let msg = KadResponseMsg::FindNode {
closer_peers: closer_peers.clone(),
closer_peers,
};
self.substreams
.push(SubstreamState::InPendingSend(conn_id, substream, msg));
@ -559,7 +559,7 @@ where
KademliaHandlerIn::GetProvidersReq { key, user_data } => {
let msg = KadRequestMsg::GetProviders { key };
self.substreams
.push(SubstreamState::OutPendingOpen(msg, Some(user_data.clone())));
.push(SubstreamState::OutPendingOpen(msg, Some(user_data)));
}
KademliaHandlerIn::GetProvidersRes {
closer_peers,
@ -582,8 +582,8 @@ where
};
let msg = KadResponseMsg::GetProviders {
closer_peers: closer_peers.clone(),
provider_peers: provider_peers.clone(),
closer_peers,
provider_peers,
};
self.substreams
.push(SubstreamState::InPendingSend(conn_id, substream, msg));
@ -622,7 +622,7 @@ where
let msg = KadResponseMsg::GetValue {
record,
closer_peers: closer_peers.clone(),
closer_peers,
};
self.substreams
.push(SubstreamState::InPendingSend(conn_id, substream, msg));

View File

@ -224,15 +224,11 @@ impl PutRecordJob {
}
if let PeriodicJobState::Running(records) = &mut self.inner.state {
loop {
if let Some(r) = records.next() {
if r.is_expired(now) {
store.remove(&r.key)
} else {
return Poll::Ready(r)
}
for r in records {
if r.is_expired(now) {
store.remove(&r.key)
} else {
break
return Poll::Ready(r)
}
}
@ -301,15 +297,11 @@ impl AddProviderJob {
}
if let PeriodicJobState::Running(keys) = &mut self.inner.state {
loop {
if let Some(r) = keys.next() {
if r.is_expired(now) {
store.remove_provider(&r.key, &r.provider)
} else {
return Poll::Ready(r)
}
for r in keys {
if r.is_expired(now) {
store.remove_provider(&r.key, &r.provider)
} else {
break
return Poll::Ready(r)
}
}

View File

@ -68,6 +68,8 @@
mod bucket;
mod entry;
#[allow(clippy::ptr_offset_with_cast)]
#[allow(clippy::assign_op_pattern)]
mod key;
mod sub_bucket;
mod swamp;

View File

@ -25,6 +25,7 @@ use sha2::{Digest, Sha256};
use std::borrow::Borrow;
use std::hash::{Hash, Hasher};
use uint::*;
use crate::record;
construct_uint! {
/// 256-bit unsigned integer.
@ -54,7 +55,7 @@ impl<T> Key<T> {
/// [`Key::into_preimage`].
pub fn new(preimage: T) -> Key<T>
where
T: Borrow<[u8]>,
T: Borrow<[u8]>
{
let bytes = KeyBytes::new(preimage.borrow());
Key { preimage, bytes }
@ -73,7 +74,7 @@ impl<T> Key<T> {
/// Computes the distance of the keys according to the XOR metric.
pub fn distance<U>(&self, other: &U) -> Distance
where
U: AsRef<KeyBytes>,
U: AsRef<KeyBytes>
{
self.bytes.distance(other)
}
@ -114,6 +115,18 @@ impl From<PeerId> for Key<PeerId> {
}
}
impl From<Vec<u8>> for Key<Vec<u8>> {
fn from(b: Vec<u8>) -> Self {
Key::new(b)
}
}
impl From<record::Key> for Key<record::Key> {
fn from(k: record::Key) -> Self {
Key::new(k)
}
}
impl<T> AsRef<KeyBytes> for Key<T> {
fn as_ref(&self) -> &KeyBytes {
&self.bytes
@ -143,7 +156,7 @@ impl KeyBytes {
/// value through a random oracle.
pub fn new<T>(value: T) -> Self
where
T: Borrow<[u8]>,
T: Borrow<[u8]>
{
KeyBytes(Sha256::digest(value.borrow()))
}
@ -151,7 +164,7 @@ impl KeyBytes {
/// Computes the distance of the keys according to the XOR metric.
pub fn distance<U>(&self, other: &U) -> Distance
where
U: AsRef<KeyBytes>,
U: AsRef<KeyBytes>
{
let a = U256::from(self.0.as_slice());
let b = U256::from(other.as_ref().0.as_slice());

View File

@ -31,7 +31,7 @@ use codec::UviBytes;
use crate::dht_proto as proto;
use crate::record::{self, Record};
use futures::prelude::*;
use futures_codec::Framed;
use asynchronous_codec::Framed;
use libp2p_core::{Multiaddr, PeerId};
use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
use prost::Message;
@ -133,13 +133,16 @@ impl TryFrom<proto::message::Peer> for KadPeer {
for cert in peer.certificates.into_iter() {
let mut chain = Vec::with_capacity(cert.chain.len());
for trust in cert.chain.into_iter() {
let issued_for = PublicKey::decode(trust.issued_for.as_slice())
let issued_for = fluence_identity::PublicKey::from_bytes(trust.issued_for.as_slice())
.map_err(|e|
invalid_data(format!("invalid issued_for: {}", e).as_str())
)?;
let expires_at: Duration = Duration::from_secs(trust.expires_at_secs);
let issued_at: Duration = Duration::from_secs(trust.issued_at_secs);
let signature: Vec<u8> = trust.signature;
let signature = fluence_identity::Signature::from_bytes(&trust.signature)
.map_err(|e|
invalid_data(format!("invalid signature: {}", e).as_str())
)?;
let trust = Trust::new(issued_for, expires_at, issued_at, signature);
chain.push(trust);
@ -163,9 +166,9 @@ impl Into<proto::message::Peer> for KadPeer {
proto::Certificate {
chain: cert.chain.into_iter().map(|trust| {
proto::Trust {
issued_for: trust.issued_for.encode().to_vec(),
issued_for: trust.issued_for.to_bytes().to_vec(),
expires_at_secs: trust.expires_at.as_secs(),
signature: trust.signature,
signature: trust.signature.to_bytes().to_vec(),
issued_at_secs: trust.issued_at.as_secs(),
}
}).collect(),

View File

@ -238,9 +238,9 @@ impl<TInner> QueryPool<TInner> {
}
if self.queries.is_empty() {
return QueryPoolState::Idle
QueryPoolState::Idle
} else {
return QueryPoolState::Waiting(None)
QueryPoolState::Waiting(None)
}
}
}

View File

@ -154,7 +154,7 @@ impl ClosestPeersIter {
return false
}
let key = Key::from(peer.clone());
let key = Key::from(*peer);
let distance = key.distance(&self.target);
// Mark the peer as succeeded.
@ -233,7 +233,7 @@ impl ClosestPeersIter {
return false
}
let key = Key::from(peer.clone());
let key = Key::from(*peer);
let distance = key.distance(&self.target);
match self.closest_peers.entry(distance) {

View File

@ -131,7 +131,7 @@ impl FixedPeersIter {
pub fn next(&mut self) -> PeersIterState<'_> {
match &mut self.state {
State::Finished => return PeersIterState::Finished,
State::Finished => PeersIterState::Finished,
State::Waiting { num_waiting } => {
if *num_waiting >= self.parallelism.get() {
return PeersIterState::WaitingAtCapacity
@ -144,7 +144,7 @@ impl FixedPeersIter {
} else {
return PeersIterState::Waiting(None)
}
Some(p) => match self.peers.entry(p.clone()) {
Some(p) => match self.peers.entry(p) {
Entry::Occupied(_) => {} // skip duplicates
Entry::Vacant(e) => {
*num_waiting += 1;

View File

@ -205,7 +205,7 @@ impl<'a> RecordStore<'a> for MemoryStore {
let p = providers.remove(i);
self.provided.remove(&p);
}
if providers.len() == 0 {
if providers.is_empty() {
e.remove();
}
}

View File

@ -1,3 +1,28 @@
# 0.29.0 [unreleased]
- Introduce `MdnsConfig` with configurable TTL of discovered peer
records and configurable multicast query interval. The default
query interval is increased from 20 seconds to 5 minutes, to
significantly reduce bandwidth usage. To ensure timely peer
discovery in the majority of cases, a multicast query is
initiated whenever a change on a network interface is detected,
which includes MDNS initialisation at node startup. If necessary
the MDNS query interval can be reduced via the `MdnsConfig`.
The `MdnsService` has been removed from the public API, making
it compulsory that all uses occur through the `Mdns` `NetworkBehaviour`.
An `MdnsConfig` must now be given to `Mdns::new()`.
[PR 1977](https://github.com/libp2p/rust-libp2p/pull/1977).
- Update `libp2p-swarm`.
# 0.28.1 [2021-02-15]
- Update dependencies.
# 0.28.0 [2021-01-12]
- Update dependencies.
# 0.27.0 [2020-12-17]
- Update `libp2p-swarm` and `libp2p-core`.

View File

@ -1,7 +1,7 @@
[package]
name = "fluence-fork-libp2p-mdns"
edition = "2018"
version = "0.27.1"
version = "0.29.0"
description = "Implementation of the libp2p mDNS discovery method"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
@ -13,24 +13,24 @@ categories = ["network-programming", "asynchronous"]
name = "libp2p_mdns"
[dependencies]
async-io = "1.3.0"
data-encoding = "2.3.1"
async-io = "1.3.1"
data-encoding = "2.3.2"
dns-parser = "0.8.0"
futures = "0.3.8"
if-watch = "0.1.6"
futures = "0.3.13"
if-watch = "0.2.0"
lazy_static = "1.4.0"
libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
log = "0.4.11"
rand = "0.7.3"
smallvec = "1.5.0"
socket2 = { version = "0.3.17", features = ["reuseport"] }
libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
log = "0.4.14"
rand = "0.8.3"
smallvec = "1.6.1"
socket2 = { version = "0.3.19", features = ["reuseport"] }
void = "1.0.2"
[dev-dependencies]
async-std = "1.7.0"
async-std = "1.9.0"
if-addrs = "0.6.5"
tokio = { version = "0.3.4", default-features = false, features = ["rt", "rt-multi-thread"] }
tokio = { version = "1.2.0", default-features = false, features = ["rt", "rt-multi-thread"] }
[package.metadata.workspaces]
independent = true

View File

@ -18,33 +18,80 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::service::{MdnsPacket, MdnsService, build_query_response, build_service_discovery_response};
use async_io::Timer;
use crate::dns::{build_query, build_query_response, build_service_discovery_response};
use crate::query::MdnsPacket;
use async_io::{Async, Timer};
use futures::prelude::*;
use if_watch::{IfEvent, IfWatcher};
use lazy_static::lazy_static;
use libp2p_core::{
Multiaddr,
PeerId,
address_translation,
connection::ConnectionId,
multiaddr::Protocol
address_translation, connection::ConnectionId, multiaddr::Protocol, Multiaddr, PeerId,
};
use libp2p_swarm::{
NetworkBehaviour,
NetworkBehaviourAction,
PollParameters,
ProtocolsHandler,
protocols_handler::DummyProtocolsHandler
protocols_handler::DummyProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction,
PollParameters, ProtocolsHandler,
};
use smallvec::SmallVec;
use std::{cmp, fmt, io, iter, mem, pin::Pin, time::{Duration, Instant}, task::Context, task::Poll};
use socket2::{Domain, Socket, Type};
use std::{
cmp,
collections::VecDeque,
fmt, io, iter,
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
pin::Pin,
task::Context,
task::Poll,
time::{Duration, Instant},
};
const MDNS_RESPONSE_TTL: std::time::Duration = Duration::from_secs(5 * 60);
lazy_static! {
static ref IPV4_MDNS_MULTICAST_ADDRESS: SocketAddr =
SocketAddr::from((Ipv4Addr::new(224, 0, 0, 251), 5353));
}
pub struct MdnsConfig {
/// TTL to use for mdns records.
pub ttl: Duration,
/// Interval at which to poll the network for new peers. This isn't
/// necessary during normal operation but avoids the case that an
/// initial packet was lost and not discovering any peers until a new
/// peer joins the network. Receiving an mdns packet resets the timer
/// preventing unnecessary traffic.
pub query_interval: Duration,
}
impl Default for MdnsConfig {
fn default() -> Self {
Self {
ttl: Duration::from_secs(6 * 60),
query_interval: Duration::from_secs(5 * 60),
}
}
}
/// A `NetworkBehaviour` for mDNS. Automatically discovers peers on the local network and adds
/// them to the topology.
#[derive(Debug)]
pub struct Mdns {
/// The inner service.
service: MdnsBusyWrapper,
/// Main socket for listening.
recv_socket: Async<UdpSocket>,
/// Query socket for making queries.
send_socket: Async<UdpSocket>,
/// Iface watcher.
if_watch: IfWatcher,
/// Buffer used for receiving data from the main socket.
/// RFC6762 discourages packets larger than the interface MTU, but allows sizes of up to 9000
/// bytes, if it can be ensured that all participating devices can handle such large packets.
/// For computers with several interfaces and IP addresses responses can easily reach sizes in
/// the range of 3000 bytes, so 4096 seems sensible for now. For more information see
/// [rfc6762](https://tools.ietf.org/html/rfc6762#page-46).
recv_buffer: [u8; 4096],
/// Buffers pending to send on the main socket.
send_buffer: VecDeque<Vec<u8>>,
/// List of nodes that we have discovered, the address, and when their TTL expires.
///
@ -56,45 +103,55 @@ pub struct Mdns {
///
/// `None` if `discovered_nodes` is empty.
closest_expiration: Option<Timer>,
}
/// `MdnsService::next` takes ownership of `self`, returning a future that resolves with both itself
/// and a `MdnsPacket` (similar to the old Tokio socket send style). The two states are thus `Free`
/// with an `MdnsService` or `Busy` with a future returning the original `MdnsService` and an
/// `MdnsPacket`.
enum MdnsBusyWrapper {
Free(MdnsService),
Busy(Pin<Box<dyn Future<Output = (MdnsService, MdnsPacket)> + Send>>),
Poisoned,
}
/// Queued events.
events: VecDeque<MdnsEvent>,
impl fmt::Debug for MdnsBusyWrapper {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Free(service) => {
fmt.debug_struct("MdnsBusyWrapper::Free")
.field("service", service)
.finish()
},
Self::Busy(_) => {
fmt.debug_struct("MdnsBusyWrapper::Busy")
.finish()
}
Self::Poisoned => {
fmt.debug_struct("MdnsBusyWrapper::Poisoned")
.finish()
}
}
}
/// Discovery interval.
query_interval: Duration,
/// Record ttl.
ttl: Duration,
/// Discovery timer.
timeout: Timer,
}
impl Mdns {
/// Builds a new `Mdns` behaviour.
pub async fn new() -> io::Result<Self> {
pub async fn new(config: MdnsConfig) -> io::Result<Self> {
let recv_socket = {
let socket = Socket::new(
Domain::ipv4(),
Type::dgram(),
Some(socket2::Protocol::udp()),
)?;
socket.set_reuse_address(true)?;
#[cfg(unix)]
socket.set_reuse_port(true)?;
socket.bind(&SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 5353).into())?;
let socket = socket.into_udp_socket();
socket.set_multicast_loop_v4(true)?;
socket.set_multicast_ttl_v4(255)?;
Async::new(socket)?
};
let send_socket = {
let socket = UdpSocket::bind((Ipv4Addr::UNSPECIFIED, 0))?;
Async::new(socket)?
};
let if_watch = if_watch::IfWatcher::new().await?;
Ok(Self {
service: MdnsBusyWrapper::Free(MdnsService::new().await?),
recv_socket,
send_socket,
if_watch,
recv_buffer: [0; 4096],
send_buffer: Default::default(),
discovered_nodes: SmallVec::new(),
closest_expiration: None,
events: Default::default(),
query_interval: config.query_interval,
ttl: config.ttl,
timeout: Timer::interval(config.query_interval),
})
}
@ -107,6 +164,77 @@ impl Mdns {
pub fn discovered_nodes(&self) -> impl ExactSizeIterator<Item = &PeerId> {
self.discovered_nodes.iter().map(|(p, _, _)| p)
}
/// Processes one parsed mDNS packet: answers queries, records discovered
/// peers from responses, and replies to service-discovery meta-queries.
fn inject_mdns_packet(&mut self, packet: MdnsPacket, params: &impl PollParameters) {
    // Any inbound traffic pushes the next periodic query a full interval out.
    self.timeout.set_interval(self.query_interval);
    match packet {
        MdnsPacket::Query(query) => {
            // Answer with our own peer id and listen addresses; the reply may
            // span several packets.
            for packet in build_query_response(
                query.query_id(),
                *params.local_peer_id(),
                params.listened_addresses(),
                self.ttl,
            ) {
                self.send_buffer.push_back(packet);
            }
        }
        MdnsPacket::Response(response) => {
            // We replace the IP address with the address we observe the
            // remote as and the address they listen on.
            let obs_ip = Protocol::from(response.remote_addr().ip());
            let obs_port = Protocol::Udp(response.remote_addr().port());
            let observed: Multiaddr = iter::once(obs_ip).chain(iter::once(obs_port)).collect();
            let mut discovered: SmallVec<[_; 4]> = SmallVec::new();
            for peer in response.discovered_peers() {
                // Skip responses that echo our own announcements.
                if peer.id() == params.local_peer_id() {
                    continue;
                }
                let new_expiration = Instant::now() + peer.ttl();
                let mut addrs: Vec<Multiaddr> = Vec::new();
                for addr in peer.addresses() {
                    // Keep both the as-observed translation and the address
                    // the peer itself reported.
                    if let Some(new_addr) = address_translation(&addr, &observed) {
                        addrs.push(new_addr.clone())
                    }
                    addrs.push(addr.clone())
                }
                for addr in addrs {
                    // Known (peer, addr) pairs only get their expiration
                    // extended; new pairs are appended.
                    if let Some((_, _, cur_expires)) = self
                        .discovered_nodes
                        .iter_mut()
                        .find(|(p, a, _)| p == peer.id() && *a == addr)
                    {
                        *cur_expires = cmp::max(*cur_expires, new_expiration);
                    } else {
                        self.discovered_nodes
                            .push((*peer.id(), addr.clone(), new_expiration));
                    }
                    discovered.push((*peer.id(), addr));
                }
            }
            // Re-derive the soonest expiration so polling can expire entries
            // on time.
            self.closest_expiration = self
                .discovered_nodes
                .iter()
                .fold(None, |exp, &(_, _, elem_exp)| {
                    Some(exp.map(|exp| cmp::min(exp, elem_exp)).unwrap_or(elem_exp))
                })
                .map(Timer::at);
            self.events
                .push_back(MdnsEvent::Discovered(DiscoveredAddrsIter {
                    inner: discovered.into_iter(),
                }));
        }
        MdnsPacket::ServiceDiscovery(disc) => {
            // DNS-SD meta-query: advertise that we provide the p2p service.
            let resp = build_service_discovery_response(disc.query_id(), self.ttl);
            self.send_buffer.push_back(resp);
        }
    }
}
}
impl NetworkBehaviour for Mdns {
@ -149,138 +277,102 @@ impl NetworkBehaviour for Mdns {
Self::OutEvent,
>,
> {
// Remove expired peers.
if let Some(ref mut closest_expiration) = self.closest_expiration {
match Pin::new(closest_expiration).poll(cx) {
Poll::Ready(now) => {
let mut expired = SmallVec::<[(PeerId, Multiaddr); 4]>::new();
while let Some(pos) = self.discovered_nodes.iter().position(|(_, _, exp)| *exp < now) {
let (peer_id, addr, _) = self.discovered_nodes.remove(pos);
expired.push((peer_id, addr));
while let Poll::Ready(event) = Pin::new(&mut self.if_watch).poll(cx) {
let multicast = From::from([224, 0, 0, 251]);
let socket = self.recv_socket.get_ref();
match event {
Ok(IfEvent::Up(inet)) => {
if inet.addr().is_loopback() {
continue;
}
if !expired.is_empty() {
let event = MdnsEvent::Expired(ExpiredAddrsIter {
inner: expired.into_iter(),
});
return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event));
if let IpAddr::V4(addr) = inet.addr() {
log::trace!("joining multicast on iface {}", addr);
if let Err(err) = socket.join_multicast_v4(&multicast, &addr) {
log::error!("join multicast failed: {}", err);
} else {
self.send_buffer.push_back(build_query());
}
}
},
Poll::Pending => (),
}
Ok(IfEvent::Down(inet)) => {
if inet.addr().is_loopback() {
continue;
}
if let IpAddr::V4(addr) = inet.addr() {
log::trace!("leaving multicast on iface {}", addr);
if let Err(err) = socket.leave_multicast_v4(&multicast, &addr) {
log::error!("leave multicast failed: {}", err);
}
}
}
Err(err) => log::error!("if watch returned an error: {}", err),
}
}
// Polling the mDNS service, and obtain the list of nodes discovered this round.
let discovered = loop {
let service = mem::replace(&mut self.service, MdnsBusyWrapper::Poisoned);
let packet = match service {
MdnsBusyWrapper::Free(service) => {
self.service = MdnsBusyWrapper::Busy(Box::pin(service.next()));
continue;
},
MdnsBusyWrapper::Busy(mut fut) => {
match fut.as_mut().poll(cx) {
Poll::Ready((service, packet)) => {
self.service = MdnsBusyWrapper::Free(service);
packet
},
Poll::Pending => {
self.service = MdnsBusyWrapper::Busy(fut);
return Poll::Pending;
}
// Poll receive socket.
while self.recv_socket.poll_readable(cx).is_ready() {
match self
.recv_socket
.recv_from(&mut self.recv_buffer)
.now_or_never()
{
Some(Ok((len, from))) => {
if let Some(packet) = MdnsPacket::new_from_bytes(&self.recv_buffer[..len], from)
{
self.inject_mdns_packet(packet, params);
}
},
MdnsBusyWrapper::Poisoned => panic!("Mdns poisoned"),
};
match packet {
MdnsPacket::Query(query) => {
// MaybeBusyMdnsService should always be Free.
if let MdnsBusyWrapper::Free(ref mut service) = self.service {
for packet in build_query_response(
query.query_id(),
params.local_peer_id().clone(),
params.listened_addresses().into_iter(),
MDNS_RESPONSE_TTL,
) {
service.enqueue_response(packet)
}
} else { debug_assert!(false); }
},
MdnsPacket::Response(response) => {
// We replace the IP address with the address we observe the
// remote as and the address they listen on.
let obs_ip = Protocol::from(response.remote_addr().ip());
let obs_port = Protocol::Udp(response.remote_addr().port());
let observed: Multiaddr = iter::once(obs_ip)
.chain(iter::once(obs_port))
.collect();
let mut discovered: SmallVec<[_; 4]> = SmallVec::new();
for peer in response.discovered_peers() {
if peer.id() == params.local_peer_id() {
continue;
}
let new_expiration = Instant::now() + peer.ttl();
let mut addrs: Vec<Multiaddr> = Vec::new();
for addr in peer.addresses() {
if let Some(new_addr) = address_translation(&addr, &observed) {
addrs.push(new_addr.clone())
}
addrs.push(addr.clone())
}
for addr in addrs {
if let Some((_, _, cur_expires)) = self.discovered_nodes.iter_mut()
.find(|(p, a, _)| p == peer.id() && *a == addr)
{
*cur_expires = cmp::max(*cur_expires, new_expiration);
} else {
self.discovered_nodes.push((peer.id().clone(), addr.clone(), new_expiration));
}
discovered.push((peer.id().clone(), addr));
}
}
break discovered;
},
MdnsPacket::ServiceDiscovery(disc) => {
// MaybeBusyMdnsService should always be Free.
if let MdnsBusyWrapper::Free(ref mut service) = self.service {
let resp = build_service_discovery_response(
disc.query_id(),
MDNS_RESPONSE_TTL,
);
service.enqueue_response(resp);
} else { debug_assert!(false); }
},
}
Some(Err(err)) => log::error!("Failed reading datagram: {}", err),
_ => {}
}
};
}
if Pin::new(&mut self.timeout).poll_next(cx).is_ready() {
self.send_buffer.push_back(build_query());
}
// Send responses.
if !self.send_buffer.is_empty() {
while self.send_socket.poll_writable(cx).is_ready() {
if let Some(packet) = self.send_buffer.pop_front() {
match self
.send_socket
.send_to(&packet, *IPV4_MDNS_MULTICAST_ADDRESS)
.now_or_never()
{
Some(Ok(_)) => {}
Some(Err(err)) => log::error!("{}", err),
None => self.send_buffer.push_front(packet),
}
} else {
break;
}
}
}
// Emit discovered event.
if let Some(event) = self.events.pop_front() {
return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event));
}
// Emit expired event.
if let Some(ref mut closest_expiration) = self.closest_expiration {
if let Poll::Ready(now) = Pin::new(closest_expiration).poll(cx) {
let mut expired = SmallVec::<[(PeerId, Multiaddr); 4]>::new();
while let Some(pos) = self
.discovered_nodes
.iter()
.position(|(_, _, exp)| *exp < now)
{
let (peer_id, addr, _) = self.discovered_nodes.remove(pos);
expired.push((peer_id, addr));
}
// Getting this far implies that we discovered new nodes. As the final step, we need to
// refresh `closest_expiration`.
self.closest_expiration = self.discovered_nodes.iter()
.fold(None, |exp, &(_, _, elem_exp)| {
Some(exp.map(|exp| cmp::min(exp, elem_exp)).unwrap_or(elem_exp))
})
.map(Timer::at);
if !expired.is_empty() {
let event = MdnsEvent::Expired(ExpiredAddrsIter {
inner: expired.into_iter(),
});
Poll::Ready(NetworkBehaviourAction::GenerateEvent(MdnsEvent::Discovered(DiscoveredAddrsIter {
inner: discovered.into_iter(),
})))
}
}
impl fmt::Debug for Mdns {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Mdns")
.field("service", &self.service)
.finish()
return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event));
}
}
}
Poll::Pending
}
}
@ -299,7 +391,7 @@ pub enum MdnsEvent {
/// Iterator that produces the list of addresses that have been discovered.
pub struct DiscoveredAddrsIter {
inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]>
inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]>,
}
impl Iterator for DiscoveredAddrsIter {
@ -316,19 +408,17 @@ impl Iterator for DiscoveredAddrsIter {
}
}
impl ExactSizeIterator for DiscoveredAddrsIter {
}
impl ExactSizeIterator for DiscoveredAddrsIter {}
impl fmt::Debug for DiscoveredAddrsIter {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("DiscoveredAddrsIter")
.finish()
fmt.debug_struct("DiscoveredAddrsIter").finish()
}
}
/// Iterator that produces the list of addresses that have expired.
pub struct ExpiredAddrsIter {
inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]>
inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]>,
}
impl Iterator for ExpiredAddrsIter {
@ -345,12 +435,10 @@ impl Iterator for ExpiredAddrsIter {
}
}
impl ExactSizeIterator for ExpiredAddrsIter {
}
impl ExactSizeIterator for ExpiredAddrsIter {}
impl fmt::Debug for ExpiredAddrsIter {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("ExpiredAddrsIter")
.finish()
fmt.debug_struct("ExpiredAddrsIter").finish()
}
}

View File

@ -114,7 +114,7 @@ pub fn build_query_response(
let ttl = duration_to_secs(ttl);
// Add a limit to 2^16-1 addresses, as the protocol limits to this number.
let mut addresses = addresses.take(65535);
let addresses = addresses.take(65535);
let peer_id_bytes = encode_peer_id(&peer_id);
debug_assert!(peer_id_bytes.len() <= 0xffff);
@ -127,7 +127,7 @@ pub fn build_query_response(
// Encode the addresses as TXT records, and multiple TXT records into a
// response packet.
while let Some(addr) = addresses.next() {
for addr in addresses {
let txt_to_send = format!("dnsaddr={}/p2p/{}", addr.to_string(), peer_id.to_base58());
let mut txt_record = Vec::with_capacity(txt_to_send.len());
match append_txt_record(&mut txt_record, &peer_id_bytes, ttl, &txt_to_send) {
@ -203,7 +203,7 @@ pub fn build_service_discovery_response(id: u16, ttl: Duration) -> MdnsPacket {
}
/// Constructs an MDNS query response packet for an address lookup.
fn query_response_packet(id: u16, peer_id: &Vec<u8>, records: &Vec<Vec<u8>>, ttl: u32) -> MdnsPacket {
fn query_response_packet(id: u16, peer_id: &[u8], records: &[Vec<u8>], ttl: u32) -> MdnsPacket {
let mut out = Vec::with_capacity(records.len() * MAX_TXT_RECORD_SIZE);
append_u16(&mut out, id);
@ -264,7 +264,9 @@ fn append_u16(out: &mut Vec<u8>, value: u16) {
/// be compatible with RFC 1035.
fn segment_peer_id(peer_id: String) -> String {
// Guard for the most common case
if peer_id.len() <= MAX_LABEL_LENGTH { return peer_id }
if peer_id.len() <= MAX_LABEL_LENGTH {
return peer_id;
}
// This will only perform one allocation except in extreme circumstances.
let mut out = String::with_capacity(peer_id.len() + 8);
@ -347,7 +349,7 @@ fn append_character_string(out: &mut Vec<u8>, ascii_str: &str) -> Result<(), Mdn
}
/// Appends a TXT record to `out`.
fn append_txt_record<'a>(
fn append_txt_record(
out: &mut Vec<u8>,
name: &[u8],
ttl_secs: u32,
@ -391,8 +393,10 @@ impl fmt::Display for MdnsResponseError {
MdnsResponseError::TxtRecordTooLong => {
write!(f, "TXT record invalid because it is too long")
}
MdnsResponseError::NonAsciiMultiaddr =>
write!(f, "A multiaddr contains non-ASCII characters when serialized"),
MdnsResponseError::NonAsciiMultiaddr => write!(
f,
"A multiaddr contains non-ASCII characters when serialized"
),
}
}
}
@ -414,7 +418,9 @@ mod tests {
#[test]
fn build_query_response_correct() {
let my_peer_id = identity::Keypair::generate_ed25519().public().into_peer_id();
let my_peer_id = identity::Keypair::generate_ed25519()
.public()
.into_peer_id();
let addr1 = "/ip4/1.2.3.4/tcp/5000".parse().unwrap();
let addr2 = "/ip6/::1/udp/10000".parse().unwrap();
let packets = build_query_response(
@ -446,7 +452,10 @@ mod tests {
assert_eq!(segment_peer_id(str_63.clone()), str_63);
assert_eq!(segment_peer_id(str_64), [&str_63, "x"].join("."));
assert_eq!(segment_peer_id(str_126), [&str_63, str_63.as_str()].join("."));
assert_eq!(
segment_peer_id(str_126),
[&str_63, str_63.as_str()].join(".")
);
assert_eq!(segment_peer_id(str_127), [&str_63, &str_63, "x"].join("."));
}

View File

@ -35,12 +35,8 @@ const SERVICE_NAME: &[u8] = b"_p2p._udp.local";
/// The meta query for looking up the `SERVICE_NAME`.
const META_QUERY_SERVICE: &[u8] = b"_services._dns-sd._udp.local";
pub use crate::{
behaviour::{Mdns, MdnsEvent},
service::MdnsService,
};
pub use crate::behaviour::{Mdns, MdnsConfig, MdnsEvent};
mod behaviour;
mod dns;
pub mod service;
mod query;

305
protocols/mdns/src/query.rs Normal file
View File

@ -0,0 +1,305 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::{dns, META_QUERY_SERVICE, SERVICE_NAME};
use dns_parser::{Packet, RData};
use libp2p_core::{
multiaddr::{Multiaddr, Protocol},
PeerId,
};
use std::{convert::TryFrom, fmt, net::SocketAddr, str, time::Duration};
/// A valid mDNS packet received by the service.
///
/// Produced by [`MdnsPacket::new_from_bytes`] from a raw UDP datagram.
#[derive(Debug)]
pub enum MdnsPacket {
    /// A query made by a remote for our service name.
    Query(MdnsQuery),
    /// A response sent by a remote in response to one of our queries.
    Response(MdnsResponse),
    /// A request for service discovery (the DNS-SD meta-query).
    ServiceDiscovery(MdnsServiceDiscovery),
}
impl MdnsPacket {
    /// Attempts to parse `buf`, received from `from`, as an mDNS packet.
    ///
    /// Returns `None` for unparseable data and for queries that concern
    /// neither our service name nor the service-discovery meta-query.
    pub fn new_from_bytes(buf: &[u8], from: SocketAddr) -> Option<MdnsPacket> {
        let packet = match Packet::parse(buf) {
            Ok(packet) => packet,
            Err(err) => {
                log::debug!("Parsing mdns packet failed: {:?}", err);
                return None;
            }
        };
        // Anything that is not a query is treated as a response packet.
        if !packet.header.query {
            return Some(MdnsPacket::Response(MdnsResponse::new(packet, from)));
        }
        // Helper: does any question in the packet ask for `name`?
        let asks_for = |name: &[u8]| {
            packet
                .questions
                .iter()
                .any(|q| q.qname.to_string().as_bytes() == name)
        };
        if asks_for(SERVICE_NAME) {
            Some(MdnsPacket::Query(MdnsQuery {
                from,
                query_id: packet.header.id,
            }))
        } else if asks_for(META_QUERY_SERVICE) {
            // TODO: what if multiple questions, one with SERVICE_NAME and one with META_QUERY_SERVICE?
            Some(MdnsPacket::ServiceDiscovery(MdnsServiceDiscovery {
                from,
                query_id: packet.header.id,
            }))
        } else {
            None
        }
    }
}
/// A received mDNS query.
pub struct MdnsQuery {
    /// Address of the sender of the query.
    from: SocketAddr,
    /// Id of the received DNS query. We need to pass this ID back in the results.
    query_id: u16,
}
impl MdnsQuery {
    /// Source address of the packet.
    pub fn remote_addr(&self) -> &SocketAddr {
        &self.from
    }
    /// Query id of the packet; must be echoed back in any response we build.
    pub fn query_id(&self) -> u16 {
        self.query_id
    }
}
impl fmt::Debug for MdnsQuery {
    /// Shows the query's sender address and DNS query id.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MdnsQuery")
            .field("from", &self.from)
            .field("query_id", &self.query_id)
            .finish()
    }
}
/// A received mDNS service discovery (DNS-SD meta) query.
pub struct MdnsServiceDiscovery {
    /// Address of the sender of the query.
    from: SocketAddr,
    /// Id of the received DNS query. We need to pass this ID back in the results.
    query_id: u16,
}
impl MdnsServiceDiscovery {
    /// Source address of the packet.
    pub fn remote_addr(&self) -> &SocketAddr {
        &self.from
    }
    /// Query id of the packet; must be echoed back in any response we build.
    pub fn query_id(&self) -> u16 {
        self.query_id
    }
}
impl fmt::Debug for MdnsServiceDiscovery {
    /// Shows the meta-query's sender address and DNS query id.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MdnsServiceDiscovery")
            .field("from", &self.from)
            .field("query_id", &self.query_id)
            .finish()
    }
}
/// A received mDNS response.
pub struct MdnsResponse {
    /// Peers announced in the response (may include ourselves, since we
    /// also receive our own looped-back responses).
    peers: Vec<MdnsPeer>,
    /// Address the response was received from.
    from: SocketAddr,
}
impl MdnsResponse {
    /// Creates a new `MdnsResponse` based on the provided `Packet`.
    ///
    /// Each PTR answer for our service name contributes one peer; answers
    /// whose label does not decode to a valid peer id are dropped.
    pub fn new(packet: Packet<'_>, from: SocketAddr) -> MdnsResponse {
        let peers = packet
            .answers
            .iter()
            .filter_map(|record| {
                if record.name.to_string().as_bytes() != SERVICE_NAME {
                    return None;
                }
                let record_value = match record.data {
                    RData::PTR(record) => record.0.to_string(),
                    _ => return None,
                };
                // The first label of the PTR target encodes the peer id; a
                // long id may have been segmented with '.' separators, which
                // we strip before decoding.
                let mut peer_name = record_value.rsplitn(4, |c| c == '.').last()?.to_owned();
                peer_name.retain(|c| c != '.');
                let bytes = data_encoding::BASE32_DNSCURVE
                    .decode(peer_name.as_bytes())
                    .ok()?;
                let peer_id = PeerId::from_bytes(&bytes).ok()?;
                Some(MdnsPeer::new(&packet, record_value, peer_id, record.ttl))
            })
            .collect();
        MdnsResponse { peers, from }
    }

    /// Returns the list of peers that have been reported in this packet.
    ///
    /// > **Note**: Keep in mind that this will also contain the responses we sent ourselves.
    pub fn discovered_peers(&self) -> impl Iterator<Item = &MdnsPeer> {
        self.peers.iter()
    }

    /// Source address of the packet.
    #[inline]
    pub fn remote_addr(&self) -> &SocketAddr {
        &self.from
    }
}
impl fmt::Debug for MdnsResponse {
    /// Shows only the address the response came from; peers are elided.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MdnsResponse").field("from", &self.from).finish()
    }
}
/// A peer discovered by the service.
pub struct MdnsPeer {
    /// Addresses the peer announced in its TXT records (validated and with
    /// the trailing `/p2p/...` component removed).
    addrs: Vec<Multiaddr>,
    /// Id of the peer.
    peer_id: PeerId,
    /// TTL of the record in seconds.
    ttl: u32,
}
impl MdnsPeer {
    /// Creates a new `MdnsPeer` based on the provided `Packet`.
    ///
    /// `record_value` is the PTR target naming this peer; only additional
    /// TXT records with exactly that name are considered. Each TXT entry of
    /// the form `dnsaddr=<multiaddr>/p2p/<peer-id>` contributes one address;
    /// entries whose `/p2p/` suffix does not match `my_peer_id` are dropped,
    /// and the suffix is popped off the stored address.
    ///
    /// NOTE(review): despite the name, `my_peer_id` is the id decoded from
    /// this peer's own record (see `MdnsResponse::new`), not the local peer.
    pub fn new(
        packet: &Packet<'_>,
        record_value: String,
        my_peer_id: PeerId,
        ttl: u32,
    ) -> MdnsPeer {
        let addrs = packet
            .additional
            .iter()
            .filter_map(|add_record| {
                // Only TXT records attached to this peer's PTR target.
                if add_record.name.to_string() != record_value {
                    return None;
                }
                if let RData::TXT(ref txt) = add_record.data {
                    Some(txt)
                } else {
                    None
                }
            })
            .flat_map(|txt| txt.iter())
            .filter_map(|txt| {
                // TODO: wrong, txt can be multiple character strings
                let addr = match dns::decode_character_string(txt) {
                    Ok(a) => a,
                    Err(_) => return None,
                };
                if !addr.starts_with(b"dnsaddr=") {
                    return None;
                }
                // Skip the 8-byte "dnsaddr=" prefix and parse the rest as a
                // multiaddr.
                let addr = match str::from_utf8(&addr[8..]) {
                    Ok(a) => a,
                    Err(_) => return None,
                };
                let mut addr = match addr.parse::<Multiaddr>() {
                    Ok(a) => a,
                    Err(_) => return None,
                };
                // Pop the trailing `/p2p/...` component and keep the address
                // only if it names the peer this record belongs to.
                match addr.pop() {
                    Some(Protocol::P2p(peer_id)) => {
                        if let Ok(peer_id) = PeerId::try_from(peer_id) {
                            if peer_id != my_peer_id {
                                return None;
                            }
                        } else {
                            return None;
                        }
                    }
                    _ => return None,
                };
                Some(addr)
            })
            .collect();
        MdnsPeer {
            addrs,
            peer_id: my_peer_id,
            ttl,
        }
    }
    /// Returns the id of the peer.
    #[inline]
    pub fn id(&self) -> &PeerId {
        &self.peer_id
    }
    /// Returns the requested time-to-live for the record.
    #[inline]
    pub fn ttl(&self) -> Duration {
        Duration::from_secs(u64::from(self.ttl))
    }
    /// Returns the list of addresses the peer says it is listening on.
    ///
    /// Filters out invalid addresses.
    pub fn addresses(&self) -> &Vec<Multiaddr> {
        &self.addrs
    }
}
impl fmt::Debug for MdnsPeer {
    /// Shows only the peer id; addresses and TTL are elided.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MdnsPeer").field("peer_id", &self.peer_id).finish()
    }
}

View File

@ -1,709 +0,0 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::{SERVICE_NAME, META_QUERY_SERVICE, dns};
use async_io::{Async, Timer};
use dns_parser::{Packet, RData};
use futures::{prelude::*, select};
use if_watch::{IfEvent, IfWatcher};
use lazy_static::lazy_static;
use libp2p_core::{multiaddr::{Multiaddr, Protocol}, PeerId};
use log::warn;
use socket2::{Socket, Domain, Type};
use std::{convert::TryFrom, fmt, io, net::{IpAddr, Ipv4Addr, UdpSocket, SocketAddr}, str, time::{Duration, Instant}};
pub use dns::{build_query_response, build_service_discovery_response};
lazy_static! {
static ref IPV4_MDNS_MULTICAST_ADDRESS: SocketAddr = SocketAddr::from((
Ipv4Addr::new(224, 0, 0, 251),
5353,
));
}
/// A running service that discovers libp2p peers and responds to other libp2p peers' queries on
/// the local network.
///
/// # Usage
///
/// In order to use mDNS to discover peers on the local network, use the `MdnsService`. This is
/// done by creating a `MdnsService` then polling it in the same way as you would poll a stream.
///
/// Polling the `MdnsService` can produce either an `MdnsQuery`, corresponding to an mDNS query
/// received by another node on the local network, or an `MdnsResponse` corresponding to a response
/// to a query previously emitted locally. The `MdnsService` will automatically produce queries,
/// which means that you will receive responses automatically.
///
/// When you receive an `MdnsQuery`, use the `respond` method to send back an answer to the node
/// that emitted the query.
///
/// When you receive an `MdnsResponse`, use the provided methods to query the information received
/// in the response.
///
/// # Example
///
/// ```rust
/// # use futures::prelude::*;
/// # use futures::executor::block_on;
/// # use libp2p_core::{identity, Multiaddr, PeerId};
/// # use libp2p_mdns::service::{MdnsPacket, build_query_response, build_service_discovery_response};
/// # use std::{io, time::Duration, task::Poll};
/// # fn main() {
/// # let my_peer_id = PeerId::from(identity::Keypair::generate_ed25519().public());
/// # let my_listened_addrs: Vec<Multiaddr> = vec![];
/// # async {
/// # let mut service = libp2p_mdns::service::MdnsService::new().await.unwrap();
/// let _future_to_poll = async {
/// let (mut service, packet) = service.next().await;
///
/// match packet {
/// MdnsPacket::Query(query) => {
/// println!("Query from {:?}", query.remote_addr());
/// let packets = build_query_response(
/// query.query_id(),
/// my_peer_id.clone(),
/// vec![].into_iter(),
/// Duration::from_secs(120),
/// );
/// for packet in packets {
/// service.enqueue_response(packet);
/// }
/// }
/// MdnsPacket::Response(response) => {
/// for peer in response.discovered_peers() {
/// println!("Discovered peer {:?}", peer.id());
/// for addr in peer.addresses() {
/// println!("Address = {:?}", addr);
/// }
/// }
/// }
/// MdnsPacket::ServiceDiscovery(disc) => {
/// let resp = build_service_discovery_response(
/// disc.query_id(),
/// Duration::from_secs(120),
/// );
/// service.enqueue_response(resp);
/// }
/// }
/// };
/// # };
/// # }
pub struct MdnsService {
    /// Main socket for listening (bound to the mDNS port).
    socket: Async<UdpSocket>,
    /// Socket for sending queries on the network.
    query_socket: Async<UdpSocket>,
    /// Interval for sending queries.
    query_interval: Timer,
    /// Whether we send queries on the network at all.
    /// Note that we still need to have an interval for querying, as we need to wake up the socket
    /// regularly to recover from errors. Otherwise we could simply use an `Option<Timer>`.
    silent: bool,
    /// Buffer used for receiving data from the main socket.
    /// RFC6762 discourages packets larger than the interface MTU, but allows sizes of up to 9000
    /// bytes, if it can be ensured that all participating devices can handle such large packets.
    /// For computers with several interfaces and IP addresses responses can easily reach sizes in
    /// the range of 3000 bytes, so 4096 seems sensible for now. For more information see
    /// [rfc6762](https://tools.ietf.org/html/rfc6762#page-46).
    recv_buffer: [u8; 4096],
    /// Buffers pending to send on the main socket.
    send_buffers: Vec<Vec<u8>>,
    /// Buffers pending to send on the query socket.
    query_send_buffers: Vec<Vec<u8>>,
    /// Watcher for network-interface up/down events, used to join/leave the
    /// multicast group per interface.
    if_watch: IfWatcher,
}
impl MdnsService {
/// Starts a new mDNS service.
pub async fn new() -> io::Result<Self> {
Self::new_inner(false).await
}
/// Same as `new`, but we don't automatically send queries on the network.
pub async fn silent() -> io::Result<Self> {
Self::new_inner(true).await
}
/// Starts a new mDNS service.
async fn new_inner(silent: bool) -> io::Result<Self> {
let socket = {
let socket = Socket::new(Domain::ipv4(), Type::dgram(), Some(socket2::Protocol::udp()))?;
socket.set_reuse_address(true)?;
#[cfg(unix)]
socket.set_reuse_port(true)?;
socket.bind(&SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 5353).into())?;
let socket = socket.into_udp_socket();
socket.set_multicast_loop_v4(true)?;
socket.set_multicast_ttl_v4(255)?;
Async::new(socket)?
};
// Given that we pass an IP address to bind, which does not need to be resolved, we can
// use std::net::UdpSocket::bind, instead of its async counterpart from async-std.
let query_socket = {
let socket = std::net::UdpSocket::bind((Ipv4Addr::UNSPECIFIED, 0))?;
Async::new(socket)?
};
let if_watch = if_watch::IfWatcher::new().await?;
Ok(Self {
socket,
query_socket,
query_interval: Timer::interval_at(Instant::now(), Duration::from_secs(20)),
silent,
recv_buffer: [0; 4096],
send_buffers: Vec::new(),
query_send_buffers: Vec::new(),
if_watch,
})
}
pub fn enqueue_response(&mut self, rsp: Vec<u8>) {
self.send_buffers.push(rsp);
}
/// Returns a future resolving to itself and the next received `MdnsPacket`.
//
// **Note**: Why does `next` take ownership of itself?
//
// `MdnsService::next` needs to be called from within `NetworkBehaviour`
// implementations. Given that traits cannot have async methods the
// respective `NetworkBehaviour` implementation needs to somehow keep the
// Future returned by `MdnsService::next` across classic `poll`
// invocations. The instance method `next` can either take a reference or
// ownership of itself:
//
// 1. Taking a reference - If `MdnsService::poll` takes a reference to
// `&self` the respective `NetworkBehaviour` implementation would need to
// keep both the Future as well as its `MdnsService` instance across poll
// invocations. Given that in this case the Future would have a reference
// to `MdnsService`, the `NetworkBehaviour` implementation struct would
// need to be self-referential which is not possible without unsafe code in
// Rust.
//
// 2. Taking ownership - Instead `MdnsService::next` takes ownership of
// self and returns it alongside an `MdnsPacket` once the actual future
// resolves, not forcing self-referential structures on the caller.
pub async fn next(mut self) -> (Self, MdnsPacket) {
loop {
// Flush the send buffer of the main socket.
while !self.send_buffers.is_empty() {
let to_send = self.send_buffers.remove(0);
match self.socket.send_to(&to_send, *IPV4_MDNS_MULTICAST_ADDRESS).await {
Ok(bytes_written) => {
debug_assert_eq!(bytes_written, to_send.len());
}
Err(_) => {
// Errors are non-fatal because they can happen for example if we lose
// connection to the network.
self.send_buffers.clear();
break;
}
}
}
// Flush the query send buffer.
while !self.query_send_buffers.is_empty() {
let to_send = self.query_send_buffers.remove(0);
match self.query_socket.send_to(&to_send, *IPV4_MDNS_MULTICAST_ADDRESS).await {
Ok(bytes_written) => {
debug_assert_eq!(bytes_written, to_send.len());
}
Err(_) => {
// Errors are non-fatal because they can happen for example if we lose
// connection to the network.
self.query_send_buffers.clear();
break;
}
}
}
select! {
res = self.socket.recv_from(&mut self.recv_buffer).fuse() => match res {
Ok((len, from)) => {
match MdnsPacket::new_from_bytes(&self.recv_buffer[..len], from) {
Some(packet) => return (self, packet),
None => {},
}
},
Err(_) => {
// Errors are non-fatal and can happen if we get disconnected from the network.
// The query interval will wake up the task at some point so that we can try again.
},
},
_ = self.query_interval.next().fuse() => {
// Ensure underlying task is woken up on the next interval tick.
while let Some(_) = self.query_interval.next().now_or_never() {};
if !self.silent {
let query = dns::build_query();
self.query_send_buffers.push(query.to_vec());
}
},
event = self.if_watch.next().fuse() => {
let multicast = From::from([224, 0, 0, 251]);
let socket = self.socket.get_ref();
match event {
Ok(IfEvent::Up(inet)) => {
if inet.addr().is_loopback() {
continue;
}
if let IpAddr::V4(addr) = inet.addr() {
log::trace!("joining multicast on iface {}", addr);
if let Err(err) = socket.join_multicast_v4(&multicast, &addr) {
log::error!("join multicast failed: {}", err);
}
}
}
Ok(IfEvent::Down(inet)) => {
if inet.addr().is_loopback() {
continue;
}
if let IpAddr::V4(addr) = inet.addr() {
log::trace!("leaving multicast on iface {}", addr);
if let Err(err) = socket.leave_multicast_v4(&multicast, &addr) {
log::error!("leave multicast failed: {}", err);
}
}
}
Err(err) => log::error!("if watch returned an error: {}", err),
}
}
};
}
}
}
impl fmt::Debug for MdnsService {
    /// Debug-formats the service, showing only the `silent` flag.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Fix: the struct name was previously emitted as the literal
        // "$service_name" — a leftover macro/template placeholder — so
        // Debug output read `$service_name { silent: .. }`. Print the
        // actual type name instead.
        fmt.debug_struct("MdnsService")
            .field("silent", &self.silent)
            .finish()
    }
}
/// A valid mDNS packet received by the service.
#[derive(Debug)]
pub enum MdnsPacket {
/// A query made by a remote.
Query(MdnsQuery),
/// A response sent by a remote in response to one of our queries.
Response(MdnsResponse),
/// A request for service discovery.
ServiceDiscovery(MdnsServiceDiscovery),
}
impl MdnsPacket {
fn new_from_bytes(buf: &[u8], from: SocketAddr) -> Option<MdnsPacket> {
match Packet::parse(buf) {
Ok(packet) => {
if packet.header.query {
if packet
.questions
.iter()
.any(|q| q.qname.to_string().as_bytes() == SERVICE_NAME)
{
let query = MdnsPacket::Query(MdnsQuery {
from,
query_id: packet.header.id,
});
return Some(query);
} else if packet
.questions
.iter()
.any(|q| q.qname.to_string().as_bytes() == META_QUERY_SERVICE)
{
// TODO: what if multiple questions, one with SERVICE_NAME and one with META_QUERY_SERVICE?
let discovery = MdnsPacket::ServiceDiscovery(
MdnsServiceDiscovery {
from,
query_id: packet.header.id,
},
);
return Some(discovery);
} else {
return None;
}
} else {
let resp = MdnsPacket::Response(MdnsResponse::new (
packet,
from,
));
return Some(resp);
}
}
Err(err) => {
warn!("Parsing mdns packet failed: {:?}", err);
return None;
}
}
}
}
/// A received mDNS query.
pub struct MdnsQuery {
/// Sender of the address.
from: SocketAddr,
/// Id of the received DNS query. We need to pass this ID back in the results.
query_id: u16,
}
impl MdnsQuery {
/// Source address of the packet.
pub fn remote_addr(&self) -> &SocketAddr {
&self.from
}
/// Query id of the packet.
pub fn query_id(&self) -> u16 {
self.query_id
}
}
impl fmt::Debug for MdnsQuery {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("MdnsQuery")
.field("from", self.remote_addr())
.field("query_id", &self.query_id)
.finish()
}
}
/// A received mDNS service discovery query.
pub struct MdnsServiceDiscovery {
/// Sender of the address.
from: SocketAddr,
/// Id of the received DNS query. We need to pass this ID back in the results.
query_id: u16,
}
impl MdnsServiceDiscovery {
/// Source address of the packet.
pub fn remote_addr(&self) -> &SocketAddr {
&self.from
}
/// Query id of the packet.
pub fn query_id(&self) -> u16 {
self.query_id
}
}
impl fmt::Debug for MdnsServiceDiscovery {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("MdnsServiceDiscovery")
.field("from", self.remote_addr())
.field("query_id", &self.query_id)
.finish()
}
}
/// A received mDNS response.
pub struct MdnsResponse {
peers: Vec<MdnsPeer>,
from: SocketAddr,
}
impl MdnsResponse {
    /// Creates a new `MdnsResponse` based on the provided `Packet`.
    ///
    /// Only PTR answer records whose name matches `SERVICE_NAME` and whose
    /// target decodes (base32-dnscurve) to a valid `PeerId` are retained;
    /// everything else is silently dropped.
    fn new(packet: Packet<'_>, from: SocketAddr) -> MdnsResponse {
        let peers = packet
            .answers
            .iter()
            .filter_map(|record| {
                // Skip answers that are not for the libp2p mDNS service.
                if record.name.to_string().as_bytes() != SERVICE_NAME {
                    return None;
                }

                // Only PTR records carry a peer name; ignore other record types.
                let record_value = match record.data {
                    RData::PTR(record) => record.0.to_string(),
                    _ => return None,
                };

                // Keep everything before the trailing three labels
                // (the service + domain suffix) as the encoded peer id.
                let mut peer_name = record_value.rsplitn(4, '.').last()?.to_owned();
                // If we have a segmented name, remove the '.'.
                peer_name.retain(|c| c != '.');

                // Decode the peer id; drop records that are not valid.
                let bytes = data_encoding::BASE32_DNSCURVE
                    .decode(peer_name.as_bytes())
                    .ok()?;
                let peer_id = PeerId::from_bytes(&bytes).ok()?;

                Some(MdnsPeer::new(&packet, record_value, peer_id, record.ttl))
            })
            .collect();

        MdnsResponse { peers, from }
    }

    /// Returns the list of peers that have been reported in this packet.
    ///
    /// > **Note**: Keep in mind that this will also contain the responses we sent ourselves.
    pub fn discovered_peers(&self) -> impl Iterator<Item = &MdnsPeer> {
        self.peers.iter()
    }

    /// Source address of the packet.
    #[inline]
    pub fn remote_addr(&self) -> &SocketAddr {
        &self.from
    }
}
impl fmt::Debug for MdnsResponse {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Only the source address is shown; the peer list is omitted
        // to keep the output short.
        let mut dbg = f.debug_struct("MdnsResponse");
        dbg.field("from", &self.from);
        dbg.finish()
    }
}
/// A peer discovered by the service.
pub struct MdnsPeer {
    /// Addresses of the peer, parsed from `dnsaddr=` TXT records in the packet.
    addrs: Vec<Multiaddr>,
    /// Id of the peer.
    peer_id: PeerId,
    /// TTL of the record in seconds.
    ttl: u32,
}
impl MdnsPeer {
    /// Creates a new `MdnsPeer` based on the provided `Packet`.
    ///
    /// Addresses are collected from the packet's additional TXT records whose
    /// name matches `record_value`: every `dnsaddr=<multiaddr>` entry that
    /// ends with `/p2p/<my_peer_id>` is kept, with that suffix stripped.
    pub fn new(packet: &Packet<'_>, record_value: String, my_peer_id: PeerId, ttl: u32) -> MdnsPeer {
        let addrs = packet
            .additional
            .iter()
            .filter_map(|add_record| {
                // Only TXT records attached to the PTR target are relevant.
                if add_record.name.to_string() != record_value {
                    return None;
                }
                if let RData::TXT(ref txt) = add_record.data {
                    Some(txt)
                } else {
                    None
                }
            })
            .flat_map(|txt| txt.iter())
            .filter_map(|txt| {
                // TODO: wrong, txt can be multiple character strings
                let addr = dns::decode_character_string(txt).ok()?;
                // Entries must be of the form `dnsaddr=<multiaddr>`.
                if !addr.starts_with(b"dnsaddr=") {
                    return None;
                }
                let addr = str::from_utf8(&addr[8..]).ok()?;
                let mut addr = addr.parse::<Multiaddr>().ok()?;
                // The address must end with `/p2p/<my_peer_id>`; strip that
                // component and discard addresses announced for other peers.
                match addr.pop() {
                    Some(Protocol::P2p(peer_id)) => match PeerId::try_from(peer_id) {
                        Ok(id) if id == my_peer_id => {}
                        _ => return None,
                    },
                    _ => return None,
                }
                Some(addr)
            })
            .collect();

        MdnsPeer {
            addrs,
            peer_id: my_peer_id,
            ttl,
        }
    }

    /// Returns the id of the peer.
    #[inline]
    pub fn id(&self) -> &PeerId {
        &self.peer_id
    }

    /// Returns the requested time-to-live for the record.
    #[inline]
    pub fn ttl(&self) -> Duration {
        Duration::from_secs(u64::from(self.ttl))
    }

    /// Returns the list of addresses the peer says it is listening on.
    ///
    /// Filters out invalid addresses.
    pub fn addresses(&self) -> &Vec<Multiaddr> {
        &self.addrs
    }
}
impl fmt::Debug for MdnsPeer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Addresses and TTL are intentionally left out of the Debug output.
        let mut dbg = f.debug_struct("MdnsPeer");
        dbg.field("peer_id", &self.peer_id);
        dbg.finish()
    }
}
#[cfg(test)]
mod tests {
    // Generates an identical test suite per async runtime: the caller supplies
    // the module name, the service type under test, and the runtime's
    // `block_on` function.
    macro_rules! testgen {
        ($runtime_name:ident, $service_name:ty, $block_on_fn:tt) => {
            mod $runtime_name {
                use libp2p_core::{PeerId, multihash::{Code, MultihashDigest}};
                use std::time::Duration;
                use crate::service::MdnsPacket;

                // Drives a service until it discovers `peer_id` via its own
                // traffic: every observed query is answered with a response
                // advertising `peer_id`, and the loop returns once that peer
                // shows up in a received response.
                fn discover(peer_id: PeerId) {
                    let fut = async {
                        let mut service = <$service_name>::new().await.unwrap();
                        loop {
                            // `next()` consumes and returns the service.
                            let next = service.next().await;
                            service = next.0;
                            match next.1 {
                                MdnsPacket::Query(query) => {
                                    let resp = crate::dns::build_query_response(
                                        query.query_id(),
                                        peer_id.clone(),
                                        vec![].into_iter(),
                                        Duration::from_secs(120),
                                    );
                                    for r in resp {
                                        service.enqueue_response(r);
                                    }
                                }
                                MdnsPacket::Response(response) => {
                                    for peer in response.discovered_peers() {
                                        if peer.id() == &peer_id {
                                            return;
                                        }
                                    }
                                }
                                MdnsPacket::ServiceDiscovery(_) => panic!(
                                    "did not expect a service discovery packet",
                                )
                            }
                        }
                    };
                    $block_on_fn(Box::pin(fut));
                }

                // As of today the underlying UDP socket is not stubbed out, so
                // tests running in parallel to this unit test interfere with it.
                // Tests need to be run in sequence to ensure the test properties
                // hold.
                #[test]
                fn respect_query_interval() {
                    // Collect our own interface addresses so we can tell our
                    // queries apart from other nodes' traffic.
                    let own_ips: Vec<std::net::IpAddr> = if_addrs::get_if_addrs().unwrap()
                        .into_iter()
                        .map(|i| i.addr.ip())
                        .collect();
                    let fut = async {
                        let mut service = <$service_name>::new().await.unwrap();
                        let mut sent_queries = vec![];
                        loop {
                            let next = service.next().await;
                            service = next.0;
                            match next.1 {
                                MdnsPacket::Query(query) => {
                                    // Ignore queries from other nodes.
                                    let source_ip = query.remote_addr().ip();
                                    if !own_ips.contains(&source_ip) {
                                        continue;
                                    }
                                    // Stop once a second own query was observed,
                                    // i.e. the query interval elapsed.
                                    sent_queries.push(query);
                                    if sent_queries.len() > 1 {
                                        return;
                                    }
                                }
                                // Ignore response packets. We don't stub out the UDP socket, thus this is
                                // either random noise from the network, or noise from other unit tests
                                // running in parallel.
                                MdnsPacket::Response(_) => {},
                                MdnsPacket::ServiceDiscovery(_) => {
                                    panic!("Did not expect a service discovery packet.");
                                },
                            }
                        }
                    };
                    $block_on_fn(Box::pin(fut));
                }

                #[test]
                fn discover_normal_peer_id() {
                    discover(PeerId::random())
                }

                // Exercises discovery with an identity-hashed (non-digest) peer
                // id, which produces a longer encoded name.
                #[test]
                fn discover_long_peer_id() {
                    let max_value = String::from_utf8(vec![b'f'; 42]).unwrap();
                    let hash = Code::Identity.digest(max_value.as_ref());
                    discover(PeerId::from_multihash(hash).unwrap())
                }
            }
        }
    }

    testgen!(
        async_std,
        crate::service::MdnsService,
        (|fut| async_std::task::block_on::<_, ()>(fut))
    );

    // NOTE(review): both runtimes instantiate `crate::service::MdnsService`;
    // confirm the tokio suite shouldn't use a tokio-specific service type.
    testgen!(
        tokio,
        crate::service::MdnsService,
        (|fut| tokio::runtime::Runtime::new().unwrap().block_on::<futures::future::BoxFuture<()>>(fut))
    );
}

View File

@ -1,3 +1,11 @@
# 0.28.0 [unreleased]
- Update `libp2p-swarm`.
# 0.27.0 [2021-01-12]
- Update dependencies.
# 0.26.0 [2020-12-17]
- Update `libp2p-swarm` and `libp2p-core`.

View File

@ -2,7 +2,7 @@
name = "fluence-fork-libp2p-ping"
edition = "2018"
description = "Ping protocol for libp2p"
version = "0.26.1"
version = "0.28.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -14,8 +14,8 @@ name = "libp2p_ping"
[dependencies]
futures = "0.3.1"
libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
log = "0.4.1"
rand = "0.7.2"
void = "1.0"
@ -23,8 +23,8 @@ wasm-timer = "0.2"
[dev-dependencies]
async-std = "1.6.2"
libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" }
libp2p-noise = { path = "../../protocols/noise", package = "fluence-fork-libp2p-noise" }
libp2p-tcp = { path = "../../transports/tcp", package = "fluence-fork-libp2p-tcp" }
libp2p-noise = { path = "../../transports/noise", package = "fluence-fork-libp2p-noise" }
libp2p-yamux = { path = "../../muxers/yamux", package = "fluence-fork-libp2p-yamux" }
libp2p-mplex = { path = "../../muxers/mplex", package = "fluence-fork-libp2p-mplex" }
quickcheck = "0.9.0"

View File

@ -62,21 +62,16 @@ fn ping_pong() {
let mut count2 = count.get();
let peer1 = async move {
while let Some(_) = swarm1.next().now_or_never() {}
for l in Swarm::listeners(&swarm1) {
tx.send(l.clone()).await.unwrap();
}
loop {
match swarm1.next().await {
PingEvent { peer, result: Ok(PingSuccess::Ping { rtt }) } => {
match swarm1.next_event().await {
SwarmEvent::NewListenAddr(listener) => tx.send(listener).await.unwrap(),
SwarmEvent::Behaviour(PingEvent { peer, result: Ok(PingSuccess::Ping { rtt }) }) => {
count1 -= 1;
if count1 == 0 {
return (pid1.clone(), peer, rtt)
}
},
PingEvent { result: Err(e), .. } => panic!("Ping failure: {:?}", e),
SwarmEvent::Behaviour(PingEvent { result: Err(e), .. }) => panic!("Ping failure: {:?}", e),
_ => {}
}
}
@ -132,16 +127,11 @@ fn max_failures() {
Swarm::listen_on(&mut swarm1, addr).unwrap();
let peer1 = async move {
while let Some(_) = swarm1.next().now_or_never() {}
for l in Swarm::listeners(&swarm1) {
tx.send(l.clone()).await.unwrap();
}
let mut count1: u8 = 0;
loop {
match swarm1.next_event().await {
SwarmEvent::NewListenAddr(listener) => tx.send(listener).await.unwrap(),
SwarmEvent::Behaviour(PingEvent {
result: Ok(PingSuccess::Ping { .. }), ..
}) => {

View File

@ -1,4 +1,17 @@
# 0.9.0 [unreleased]
# 0.10.0 [unreleased]
- Update `libp2p-swarm`.
# 0.9.1 [2021-02-15]
- Make `is_pending_outbound` return true on pending connection.
[PR 1928](https://github.com/libp2p/rust-libp2p/pull/1928).
- Update dependencies.
# 0.9.0 [2021-01-12]
- Update dependencies.
- Re-export `throttled`-specific response channel. [PR
1902](https://github.com/libp2p/rust-libp2p/pull/1902).
@ -59,4 +72,3 @@ https://github.com/libp2p/rust-libp2p/pull/1606).
# 0.1.0
- Initial release.

View File

@ -2,7 +2,7 @@
name = "fluence-fork-libp2p-request-response"
edition = "2018"
description = "Generic Request/Response Protocols"
version = "0.9.1"
version = "0.10.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
@ -14,22 +14,22 @@ name = "libp2p_request_response"
[dependencies]
async-trait = "0.1"
bytes = "0.5.6"
bytes = "1"
futures = "0.3.1"
libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-swarm = { version = "0.26.1", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
libp2p-core = { version = "0.27.1", path = "../../core", package = "fluence-fork-libp2p-core" }
libp2p-swarm = { version = "0.28.0", path = "../../swarm", package = "fluence-fork-libp2p-swarm" }
log = "0.4.11"
lru = "0.6"
minicbor = { version = "0.7", features = ["std", "derive"] }
rand = "0.7"
smallvec = "1.4"
unsigned-varint = { version = "0.5", features = ["std", "futures"] }
unsigned-varint = { version = "0.7", features = ["std", "futures"] }
wasm-timer = "0.2"
[dev-dependencies]
async-std = "1.6.2"
libp2p-noise = { path = "../noise", package = "fluence-fork-libp2p-noise" }
libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" }
libp2p-noise = { path = "../../transports/noise", package = "fluence-fork-libp2p-noise" }
libp2p-tcp = { path = "../../transports/tcp", package = "fluence-fork-libp2p-tcp" }
libp2p-yamux = { path = "../../muxers/yamux", package = "fluence-fork-libp2p-yamux" }
rand = "0.7"

View File

@ -230,6 +230,12 @@ impl<TResponse> ResponseChannel<TResponse> {
}
/// The ID of an inbound or outbound request.
///
/// Note: [`RequestId`]'s uniqueness is only guaranteed between two
/// inbound and likewise between two outbound requests. There is no
/// uniqueness guarantee in a set of both inbound and outbound
/// [`RequestId`]s nor in a set of inbound or outbound requests
/// originating from different [`RequestResponse`] behaviours.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct RequestId(u64);
@ -371,10 +377,10 @@ where
if let Some(request) = self.try_send_request(peer, request) {
self.pending_events.push_back(NetworkBehaviourAction::DialPeer {
peer_id: peer.clone(),
peer_id: *peer,
condition: DialPeerCondition::Disconnected,
});
self.pending_outbound_requests.entry(peer.clone()).or_default().push(request);
self.pending_outbound_requests.entry(*peer).or_default().push(request);
}
request_id
@ -403,7 +409,7 @@ where
///
/// Addresses added in this way are only removed by `remove_address`.
pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) {
self.addresses.entry(peer.clone()).or_default().push(address);
self.addresses.entry(*peer).or_default().push(address);
}
/// Removes an address of a peer previously added via `add_address`.
@ -431,9 +437,16 @@ where
/// [`PeerId`] initiated by [`RequestResponse::send_request`] is still
/// pending, i.e. waiting for a response.
pub fn is_pending_outbound(&self, peer: &PeerId, request_id: &RequestId) -> bool {
self.connected.get(peer)
// Check if request is already sent on established connection.
let est_conn = self.connected.get(peer)
.map(|cs| cs.iter().any(|c| c.pending_inbound_responses.contains(request_id)))
.unwrap_or(false)
.unwrap_or(false);
// Check if request is still pending to be sent.
let pen_conn = self.pending_outbound_requests.get(peer)
.map(|rps| rps.iter().any(|rp| {rp.request_id == *request_id}))
.unwrap_or(false);
est_conn || pen_conn
}
/// Checks whether an inbound request from the peer with the provided
@ -466,7 +479,7 @@ where
let conn = &mut connections[ix];
conn.pending_inbound_responses.insert(request.request_id);
self.pending_events.push_back(NetworkBehaviourAction::NotifyHandler {
peer_id: peer.clone(),
peer_id: *peer,
handler: NotifyHandler::One(conn.id),
event: request
});
@ -563,7 +576,7 @@ where
ConnectedPoint::Dialer { address } => Some(address.clone()),
ConnectedPoint::Listener { .. } => None
};
self.connected.entry(peer.clone())
self.connected.entry(*peer)
.or_default()
.push(Connection::new(*conn, address));
}
@ -584,7 +597,7 @@ where
for request_id in connection.pending_outbound_responses {
self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(
RequestResponseEvent::InboundFailure {
peer: peer_id.clone(),
peer: *peer_id,
request_id,
error: InboundFailure::ConnectionClosed
}
@ -595,7 +608,7 @@ where
for request_id in connection.pending_inbound_responses {
self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(
RequestResponseEvent::OutboundFailure {
peer: peer_id.clone(),
peer: *peer_id,
request_id,
error: OutboundFailure::ConnectionClosed
}
@ -618,7 +631,7 @@ where
for request in pending {
self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(
RequestResponseEvent::OutboundFailure {
peer: peer.clone(),
peer: *peer,
request_id: request.request_id,
error: OutboundFailure::DialFailure
}
@ -647,10 +660,10 @@ where
RequestResponseEvent::Message { peer, message }));
}
RequestResponseHandlerEvent::Request { request_id, request, sender } => {
let channel = ResponseChannel { request_id, peer: peer.clone(), sender };
let channel = ResponseChannel { request_id, peer, sender };
let message = RequestResponseMessage::Request { request_id, request, channel };
self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(
RequestResponseEvent::Message { peer: peer.clone(), message }
RequestResponseEvent::Message { peer, message }
));
match self.get_connection_mut(&peer, connection) {
@ -662,7 +675,7 @@ where
None => {
self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(
RequestResponseEvent::InboundFailure {
peer: peer.clone(),
peer,
request_id,
error: InboundFailure::ConnectionClosed
}

View File

@ -251,7 +251,7 @@ where
} else if let Some(info) = self.offline_peer_info.get_mut(p) {
info.recv_budget.limit.set(limit)
}
self.limit_overrides.insert(p.clone(), Limit::new(limit));
self.limit_overrides.insert(*p, Limit::new(limit));
}
/// Remove any limit overrides for the given peer.
@ -286,7 +286,7 @@ where
let mut info = PeerInfo::new(limit);
info.send_budget.remaining -= 1;
let remaining = info.send_budget.remaining;
self.offline_peer_info.put(p.clone(), info);
self.offline_peer_info.put(*p, info);
remaining
};
@ -428,13 +428,13 @@ where
if !self.peer_info.contains_key(p) {
if let Some(info) = self.offline_peer_info.pop(p) {
let recv_budget = info.recv_budget.remaining;
self.peer_info.insert(p.clone(), info);
self.peer_info.insert(*p, info);
if recv_budget > 1 {
self.send_credit(p, recv_budget - 1);
}
} else {
let limit = self.limit_overrides.get(p).copied().unwrap_or(self.default_limit);
self.peer_info.insert(p.clone(), PeerInfo::new(limit));
self.peer_info.insert(*p, PeerInfo::new(limit));
}
}
}
@ -442,7 +442,7 @@ where
fn inject_disconnected(&mut self, p: &PeerId) {
log::trace!("{:08x}: disconnected from {}", self.id, p);
if let Some(info) = self.peer_info.remove(p) {
self.offline_peer_info.put(p.clone(), info.into_disconnected());
self.offline_peer_info.put(*p, info.into_disconnected());
}
self.behaviour.inject_disconnected(p)
}
@ -528,7 +528,7 @@ where
if info.send_budget.grant < Some(id) {
if info.send_budget.remaining == 0 && credit > 0 {
log::trace!("{:08x}: sending to peer {} can resume", self.id, peer);
self.events.push_back(Event::ResumeSending(peer.clone()))
self.events.push_back(Event::ResumeSending(peer))
}
info.send_budget.remaining += credit;
info.send_budget.grant = Some(id);
@ -549,7 +549,7 @@ where
};
if info.recv_budget.remaining == 0 {
log::debug!("{:08x}: peer {} exceeds its budget", self.id, peer);
self.events.push_back(Event::TooManyInboundRequests(peer.clone()));
self.events.push_back(Event::TooManyInboundRequests(peer));
continue
}
info.recv_budget.remaining -= 1;

View File

@ -31,13 +31,41 @@ use libp2p_core::{
};
use libp2p_noise::{NoiseConfig, X25519Spec, Keypair};
use libp2p_request_response::*;
use libp2p_swarm::Swarm;
use libp2p_swarm::{Swarm, SwarmEvent};
use libp2p_tcp::TcpConfig;
use futures::{prelude::*, channel::mpsc, executor::LocalPool, task::SpawnExt};
use rand::{self, Rng};
use std::{io, iter};
use std::{collections::HashSet, num::NonZeroU16};
#[test]
fn is_response_outbound() {
let ping = Ping("ping".to_string().into_bytes());
let offline_peer = PeerId::random();
let protocols = iter::once((PingProtocol(), ProtocolSupport::Full));
let cfg = RequestResponseConfig::default();
let (peer1_id, trans) = mk_transport();
let ping_proto1 = RequestResponse::new(PingCodec(), protocols.clone(), cfg.clone());
let mut swarm1 = Swarm::new(trans, ping_proto1, peer1_id.clone());
let request_id1 = swarm1.send_request(&offline_peer, ping.clone());
match futures::executor::block_on(swarm1.next()) {
RequestResponseEvent::OutboundFailure{peer, request_id: req_id, error: _error} => {
assert_eq!(&offline_peer, &peer);
assert_eq!(req_id, request_id1);
},
e => panic!("Peer: Unexpected event: {:?}", e),
}
let request_id2 = swarm1.send_request(&offline_peer, ping.clone());
assert!(!swarm1.is_pending_outbound(&offline_peer, &request_id1));
assert!(swarm1.is_pending_outbound(&offline_peer, &request_id2));
}
/// Exercises a simple ping protocol.
#[test]
fn ping_protocol() {
@ -64,27 +92,24 @@ fn ping_protocol() {
let expected_pong = pong.clone();
let peer1 = async move {
while let Some(_) = swarm1.next().now_or_never() {}
let l = Swarm::listeners(&swarm1).next().unwrap();
tx.send(l.clone()).await.unwrap();
loop {
match swarm1.next().await {
RequestResponseEvent::Message {
match swarm1.next_event().await {
SwarmEvent::NewListenAddr(addr) => tx.send(addr).await.unwrap(),
SwarmEvent::Behaviour(RequestResponseEvent::Message {
peer,
message: RequestResponseMessage::Request { request, channel, .. }
} => {
}) => {
assert_eq!(&request, &expected_ping);
assert_eq!(&peer, &peer2_id);
swarm1.send_response(channel, pong.clone()).unwrap();
},
RequestResponseEvent::ResponseSent {
SwarmEvent::Behaviour(RequestResponseEvent::ResponseSent {
peer, ..
} => {
}) => {
assert_eq!(&peer, &peer2_id);
}
e => panic!("Peer1: Unexpected event: {:?}", e)
SwarmEvent::Behaviour(e) => panic!("Peer1: Unexpected event: {:?}", e),
_ => {}
}
}
};
@ -96,6 +121,7 @@ fn ping_protocol() {
let addr = rx.next().await.unwrap();
swarm2.add_address(&peer1_id, addr.clone());
let mut req_id = swarm2.send_request(&peer1_id, ping.clone());
assert!(swarm2.is_pending_outbound(&peer1_id, &req_id));
loop {
match swarm2.next().await {
@ -205,26 +231,24 @@ fn ping_protocol_throttled() {
swarm2.set_receive_limit(NonZeroU16::new(limit2).unwrap());
let peer1 = async move {
while let Some(_) = swarm1.next().now_or_never() {}
let l = Swarm::listeners(&swarm1).next().unwrap();
tx.send(l.clone()).await.unwrap();
for i in 1 .. {
match swarm1.next().await {
throttled::Event::Event(RequestResponseEvent::Message {
match swarm1.next_event().await {
SwarmEvent::NewListenAddr(addr) => tx.send(addr).await.unwrap(),
SwarmEvent::Behaviour(throttled::Event::Event(RequestResponseEvent::Message {
peer,
message: RequestResponseMessage::Request { request, channel, .. },
}) => {
})) => {
assert_eq!(&request, &expected_ping);
assert_eq!(&peer, &peer2_id);
swarm1.send_response(channel, pong.clone()).unwrap();
},
throttled::Event::Event(RequestResponseEvent::ResponseSent {
SwarmEvent::Behaviour(throttled::Event::Event(RequestResponseEvent::ResponseSent {
peer, ..
}) => {
})) => {
assert_eq!(&peer, &peer2_id);
}
e => panic!("Peer1: Unexpected event: {:?}", e)
SwarmEvent::Behaviour(e) => panic!("Peer1: Unexpected event: {:?}", e),
_ => {}
}
if i % 31 == 0 {
let lim = rand::thread_rng().gen_range(1, 17);

View File

@ -1,36 +0,0 @@
# 0.26.0 [2020-12-17]
- Update `libp2p-core`.
# 0.25.0 [2020-11-25]
- Update `libp2p-core`.
# 0.24.0 [2020-11-09]
- Update dependencies.
# 0.23.0 [2020-10-16]
- Update dependencies.
# 0.22.0 [2020-09-09]
- As of this release, SECIO is deprecated. Please use `libp2p-noise` instead.
For some more context, [see here](https://blog.ipfs.io/2020-08-07-deprecating-secio/).
- Bump `libp2p-core` dependency.
# 0.21.0 [2020-08-18]
- Bump `libp2p-core` dependency.
# 0.20.0 [2020-07-01]
- Updated dependencies.
- Conditional compilation fixes for the `wasm32-wasi` target
([PR 1633](https://github.com/libp2p/rust-libp2p/pull/1633)).
# 0.19.2 [2020-06-22]
- Updated dependencies.

View File

@ -1,61 +0,0 @@
[package]
name = "fluence-fork-libp2p-secio"
edition = "2018"
description = "Secio encryption protocol for libp2p"
version = "0.26.1"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
keywords = ["peer-to-peer", "libp2p", "networking"]
categories = ["network-programming", "asynchronous"]
[lib]
name = "libp2p_secio"
[badges]
maintenance = { status = "deprecated" }
[dependencies]
aes-ctr = "0.3"
aesni = { version = "0.6", features = ["nocheck"], optional = true }
ctr = "0.3"
futures = "0.3.1"
hmac = "0.9.0"
lazy_static = "1.2.0"
libp2p-core = { version = "0.26.1", path = "../../core", package = "fluence-fork-libp2p-core" }
log = "0.4.6"
prost = "0.6.1"
pin-project = "1.0.0"
quicksink = "0.1"
rand = "0.7"
rw-stream-sink = "0.2.0"
sha2 = "0.9.1"
static_assertions = "1"
twofish = "0.2.0"
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
ring = { version = "0.16.9", features = ["alloc"], default-features = false }
[target.'cfg(target_arch = "wasm32")'.dependencies]
js-sys = "0.3.10"
parity-send-wrapper = "0.1"
wasm-bindgen = "0.2.33"
wasm-bindgen-futures = "0.4.5"
web-sys = { version = "0.3.10", features = ["Crypto", "CryptoKey", "SubtleCrypto", "Window"] }
[build-dependencies]
prost-build = "0.6"
[features]
default = ["secp256k1"]
secp256k1 = []
aes-all = ["aesni"]
[dev-dependencies]
async-std = "1.6.2"
criterion = "0.3"
libp2p-mplex = { path = "../../muxers/mplex", package = "fluence-fork-libp2p-mplex" }
libp2p-tcp = { path = "../../transports/tcp", features = ["async-std"], package = "fluence-fork-libp2p-tcp" }
[package.metadata.workspaces]
independent = true

View File

@ -1,24 +0,0 @@
// Copyright 2020 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
// Build script: generates Rust types from the protobuf schema via prost.
fn main() {
    prost_build::compile_protos(&["src/structs.proto"], &["src"]).unwrap();
}

View File

@ -1,226 +0,0 @@
// Copyright 2017 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! This module contains some utilities for algorithm support exchange.
//!
//! One important part of the SECIO handshake is negotiating algorithms. This is what this module
//! helps you with.
use crate::error::SecioError;
#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))]
use ring::digest;
use std::cmp::Ordering;
use crate::stream_cipher::Cipher;
use crate::KeyAgreement;
const ECDH_P256: &str = "P-256";
const ECDH_P384: &str = "P-384";
const AES_128: &str = "AES-128";
const AES_256: &str = "AES-256";
const TWOFISH_CTR: &str = "TwofishCTR";
const NULL: &str = "NULL";
const SHA_256: &str = "SHA256";
const SHA_512: &str = "SHA512";
pub(crate) const DEFAULT_AGREEMENTS_PROPOSITION: &str = "P-256,P-384";
pub(crate) const DEFAULT_CIPHERS_PROPOSITION: &str = "AES-128,AES-256,TwofishCTR";
pub(crate) const DEFAULT_DIGESTS_PROPOSITION: &str = "SHA256,SHA512";
/// Return a proposition string from the given sequence of `KeyAgreement` values.
///
/// The result is a comma-separated list (no trailing comma); an empty input
/// yields an empty string.
pub fn key_agreements_proposition<'a, I>(xchgs: I) -> String
where
    I: IntoIterator<Item=&'a KeyAgreement>
{
    xchgs
        .into_iter()
        .map(|x| match x {
            KeyAgreement::EcdhP256 => ECDH_P256,
            KeyAgreement::EcdhP384 => ECDH_P384,
        })
        .collect::<Vec<_>>()
        .join(",")
}
/// Given two key agreement proposition strings try to figure out a match.
///
/// The `Ordering` parameter determines which argument is preferred. If `Less` or `Equal` we
/// try for each of `theirs` every one of `ours`, for `Greater` it's the other way around.
pub fn select_agreement(r: Ordering, ours: &str, theirs: &str) -> Result<KeyAgreement, SecioError> {
    let (a, b) = if r == Ordering::Greater {
        (ours, theirs)
    } else {
        (theirs, ours)
    };
    // First entry of the preferred list that the other side also supports
    // and that we recognize wins.
    a.split(',')
        .filter(|x| b.split(',').any(|y| *x == y))
        .find_map(|x| match x {
            ECDH_P256 => Some(KeyAgreement::EcdhP256),
            ECDH_P384 => Some(KeyAgreement::EcdhP384),
            _ => None,
        })
        .ok_or(SecioError::NoSupportIntersection)
}
/// Return a proposition string from the given sequence of `Cipher` values.
///
/// The result is a comma-separated list (no trailing comma); an empty input
/// yields an empty string.
pub fn ciphers_proposition<'a, I>(ciphers: I) -> String
where
    I: IntoIterator<Item=&'a Cipher>
{
    ciphers
        .into_iter()
        .map(|c| match c {
            Cipher::Aes128 => AES_128,
            Cipher::Aes256 => AES_256,
            Cipher::TwofishCtr => TWOFISH_CTR,
            Cipher::Null => NULL,
        })
        .collect::<Vec<_>>()
        .join(",")
}
/// Given two cipher proposition strings try to figure out a match.
///
/// The `Ordering` parameter determines which argument is preferred. If `Less` or `Equal` we
/// try for each of `theirs` every one of `ours`, for `Greater` it's the other way around.
pub fn select_cipher(r: Ordering, ours: &str, theirs: &str) -> Result<Cipher, SecioError> {
    let (a, b) = if r == Ordering::Greater {
        (ours, theirs)
    } else {
        (theirs, ours)
    };
    // First mutually-supported, recognized entry of the preferred list wins.
    a.split(',')
        .filter(|x| b.split(',').any(|y| *x == y))
        .find_map(|x| match x {
            AES_128 => Some(Cipher::Aes128),
            AES_256 => Some(Cipher::Aes256),
            TWOFISH_CTR => Some(Cipher::TwofishCtr),
            NULL => Some(Cipher::Null),
            _ => None,
        })
        .ok_or(SecioError::NoSupportIntersection)
}
/// Possible digest algorithms.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Digest {
    /// SHA-256 (32-byte digest).
    Sha256,
    /// SHA-512 (64-byte digest).
    Sha512
}
impl Digest {
    /// Returns the size in bytes of a digest of this kind.
    #[inline]
    pub fn num_bytes(&self) -> usize {
        match self {
            Digest::Sha256 => 32,
            Digest::Sha512 => 64,
        }
    }
}
/// Return a proposition string from the given sequence of `Digest` values.
///
/// The result is a comma-separated list (no trailing comma); an empty input
/// yields an empty string.
pub fn digests_proposition<'a, I>(digests: I) -> String
where
    I: IntoIterator<Item=&'a Digest>
{
    digests
        .into_iter()
        .map(|d| match d {
            Digest::Sha256 => SHA_256,
            Digest::Sha512 => SHA_512,
        })
        .collect::<Vec<_>>()
        .join(",")
}
/// Given two digest proposition strings try to figure out a match.
///
/// The `Ordering` parameter determines which argument is preferred. If `Less` or `Equal` we
/// try for each of `theirs` every one of `ours`, for `Greater` it's the other way around.
pub fn select_digest(r: Ordering, ours: &str, theirs: &str) -> Result<Digest, SecioError> {
    let (a, b) = if r == Ordering::Greater {
        (ours, theirs)
    } else {
        (theirs, ours)
    };
    // First mutually-supported, recognized entry of the preferred list wins.
    a.split(',')
        .filter(|x| b.split(',').any(|y| *x == y))
        .find_map(|x| match x {
            SHA_256 => Some(Digest::Sha256),
            SHA_512 => Some(Digest::Sha512),
            _ => None,
        })
        .ok_or(SecioError::NoSupportIntersection)
}
#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))]
impl From<Digest> for &'static digest::Algorithm {
    /// Maps each digest kind onto the corresponding `ring` algorithm.
    ///
    /// Implemented as `From` (rather than a hand-written `Into`) so the
    /// blanket impl provides `Into<&'static digest::Algorithm>` for free;
    /// existing `.into()` call sites keep working.
    #[inline]
    fn from(d: Digest) -> &'static digest::Algorithm {
        match d {
            Digest::Sha256 => &digest::SHA256,
            Digest::Sha512 => &digest::SHA512,
        }
    }
}
#[cfg(test)]
mod tests {
    // Guards the default cipher proposition against regressions.
    #[test]
    fn cipher_non_null() {
        // This test serves as a safe-guard against accidentally pushing to master a commit that
        // sets this constant to `NULL` (i.e. no encryption).
        assert!(!super::DEFAULT_CIPHERS_PROPOSITION.contains("NULL"));
    }
}

View File

@ -1,234 +0,0 @@
// Copyright 2017 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Individual messages encoding and decoding. Use this after the algorithms have been
//! successfully negotiated.
mod decode;
mod encode;
mod len_prefix;
use aes_ctr::stream_cipher;
use crate::algo_support::Digest;
use decode::DecoderMiddleware;
use encode::EncoderMiddleware;
use futures::prelude::*;
use hmac::{self, Mac, NewMac};
use sha2::{Sha256, Sha512};
pub use len_prefix::LenPrefixCodec;
/// Type returned by `full_codec`.
pub type FullCodec<S> = DecoderMiddleware<EncoderMiddleware<LenPrefixCodec<S>>>;

/// Boxed stream cipher used to encrypt/decrypt frames; boxed (`dyn`) so the
/// concrete cipher can be chosen at runtime from the handshake result.
pub type StreamCipher = Box<dyn stream_cipher::StreamCipher + Send>;
/// HMAC state, dispatching at runtime between the two digest algorithms
/// supported by the handshake (see [`Digest`]).
#[derive(Debug, Clone)]
pub enum Hmac {
    Sha256(hmac::Hmac<Sha256>),
    Sha512(hmac::Hmac<Sha512>),
}
impl Hmac {
    /// Returns the size of the hash in bytes.
    #[inline]
    pub fn num_bytes(&self) -> usize {
        match self {
            Hmac::Sha256(_) => 32,
            Hmac::Sha512(_) => 64,
        }
    }

    /// Builds a `Hmac` from an algorithm and key.
    pub fn from_key(algorithm: Digest, key: &[u8]) -> Self {
        // TODO: it would be nice to tweak the hmac crate to add an equivalent to new_varkey that
        // never errors
        match algorithm {
            Digest::Sha256 => {
                let mac = hmac::Hmac::new_varkey(key)
                    .expect("Hmac::new_varkey accepts any key length");
                Hmac::Sha256(mac)
            }
            Digest::Sha512 => {
                let mac = hmac::Hmac::new_varkey(key)
                    .expect("Hmac::new_varkey accepts any key length");
                Hmac::Sha512(mac)
            }
        }
    }

    /// Signs the data, returning the raw MAC bytes.
    ///
    /// The internal state is cloned per call so the same `Hmac` can sign any
    /// number of independent messages.
    // TODO: better return type?
    pub fn sign(&self, crypted_data: &[u8]) -> Vec<u8> {
        match self {
            Hmac::Sha256(hmac) => {
                let mut mac = hmac.clone();
                mac.update(crypted_data);
                mac.finalize().into_bytes().to_vec()
            }
            Hmac::Sha512(hmac) => {
                let mut mac = hmac.clone();
                mac.update(crypted_data);
                mac.finalize().into_bytes().to_vec()
            }
        }
    }

    /// Verifies that the data matches the expected hash.
    // TODO: better error?
    pub fn verify(&self, crypted_data: &[u8], expected_hash: &[u8]) -> Result<(), ()> {
        match self {
            Hmac::Sha256(hmac) => {
                let mut mac = hmac.clone();
                mac.update(crypted_data);
                mac.verify(expected_hash).map_err(|_| ())
            }
            Hmac::Sha512(hmac) => {
                let mut mac = hmac.clone();
                mac.update(crypted_data);
                mac.verify(expected_hash).map_err(|_| ())
            }
        }
    }
}
/// Wraps `socket` into an object implementing both `future::Stream` and
/// `future::Sink` of `Vec<u8>` frames.
///
/// Outgoing frames are encrypted with `cipher_encoding` and authenticated
/// with `encoding_hmac`; incoming frames are authenticated with
/// `decoding_hmac` and decrypted with `cipher_decoder` (all generally decided
/// during the handshake). `remote_nonce` is the byte sequence the remote is
/// expected to echo at the very start of the stream.
pub fn full_codec<S>(
    socket: LenPrefixCodec<S>,
    cipher_encoding: StreamCipher,
    encoding_hmac: Hmac,
    cipher_decoder: StreamCipher,
    decoding_hmac: Hmac,
    remote_nonce: Vec<u8>
) -> FullCodec<S>
where
    S: AsyncRead + AsyncWrite + Unpin + Send + 'static
{
    DecoderMiddleware::new(
        EncoderMiddleware::new(socket, cipher_encoding, encoding_hmac),
        cipher_decoder,
        decoding_hmac,
        remote_nonce,
    )
}
#[cfg(test)]
mod tests {
    use super::{full_codec, DecoderMiddleware, EncoderMiddleware, Hmac, LenPrefixCodec};
    use crate::algo_support::Digest;
    use crate::stream_cipher::{ctr, Cipher};
    use crate::error::SecioError;
    use async_std::net::{TcpListener, TcpStream};
    use futures::{prelude::*, channel::mpsc, channel::oneshot};

    // All-zero IV: fine for tests, never acceptable in production.
    const NULL_IV : [u8; 16] = [0; 16];

    /// Round-trips one frame through the encoder/decoder pair over an
    /// in-memory channel (no length prefix, no TCP).
    #[test]
    fn raw_encode_then_decode() {
        let (data_tx, data_rx) = mpsc::channel::<Vec<u8>>(256);
        let cipher_key: [u8; 32] = rand::random();
        let hmac_key: [u8; 32] = rand::random();

        let mut encoder = EncoderMiddleware::new(
            data_tx,
            ctr(Cipher::Aes256, &cipher_key, &NULL_IV[..]),
            Hmac::from_key(Digest::Sha256, &hmac_key),
        );
        // Decoder shares key/IV with the encoder; empty nonce disables the
        // nonce check.
        let mut decoder = DecoderMiddleware::new(
            data_rx.map(|v| Ok::<_, SecioError>(v)),
            ctr(Cipher::Aes256, &cipher_key, &NULL_IV[..]),
            Hmac::from_key(Digest::Sha256, &hmac_key),
            Vec::new()
        );

        let data = b"hello world";
        async_std::task::block_on(async move {
            encoder.send(data.to_vec()).await.unwrap();
            let rx = decoder.next().await.unwrap().unwrap();
            assert_eq!(rx, data);
        });
    }

    /// End-to-end: client sends nonce + payload over TCP through `full_codec`;
    /// server verifies the nonce and reassembles the payload.
    fn full_codec_encode_then_decode(cipher: Cipher) {
        let cipher_key: [u8; 32] = rand::random();
        let cipher_key_clone = cipher_key.clone();
        let key_size = cipher.key_size();
        let hmac_key: [u8; 16] = rand::random();
        let hmac_key_clone = hmac_key.clone();
        let data = b"hello world";
        let data_clone = data.clone();
        let nonce = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];

        // Oneshot used to hand the ephemeral listen address to the client.
        let (l_a_tx, l_a_rx) = oneshot::channel();
        let nonce2 = nonce.clone();

        let server = async {
            let listener = TcpListener::bind(&"127.0.0.1:0").await.unwrap();
            let listener_addr = listener.local_addr().unwrap();
            l_a_tx.send(listener_addr).unwrap();
            let (connec, _) = listener.accept().await.unwrap();
            let codec = full_codec(
                LenPrefixCodec::new(connec, 1024),
                ctr(cipher, &cipher_key[..key_size], &NULL_IV[..]),
                Hmac::from_key(Digest::Sha256, &hmac_key),
                ctr(cipher, &cipher_key[..key_size], &NULL_IV[..]),
                Hmac::from_key(Digest::Sha256, &hmac_key),
                nonce2.clone()
            );
            // The decoder strips the nonce, so only the payload remains.
            let outcome = codec.map(|v| v.unwrap()).concat().await;
            assert_eq!(outcome, data_clone);
        };

        let client = async {
            let listener_addr = l_a_rx.await.unwrap();
            let stream = TcpStream::connect(&listener_addr).await.unwrap();
            let mut codec = full_codec(
                LenPrefixCodec::new(stream, 1024),
                ctr(cipher, &cipher_key_clone[..key_size], &NULL_IV[..]),
                Hmac::from_key(Digest::Sha256, &hmac_key_clone),
                ctr(cipher, &cipher_key_clone[..key_size], &NULL_IV[..]),
                Hmac::from_key(Digest::Sha256, &hmac_key_clone),
                Vec::new()
            );
            codec.send(nonce.into()).await.unwrap();
            codec.send(data.to_vec().into()).await.unwrap();
        };

        async_std::task::block_on(future::join(client, server));
    }

    #[test]
    fn full_codec_encode_then_decode_aes128() {
        full_codec_encode_then_decode(Cipher::Aes128);
    }

    #[test]
    fn full_codec_encode_then_decode_aes256() {
        full_codec_encode_then_decode(Cipher::Aes256);
    }

    #[test]
    fn full_codec_encode_then_decode_twofish() {
        full_codec_encode_then_decode(Cipher::TwofishCtr);
    }

    #[test]
    fn full_codec_encode_then_decode_null() {
        full_codec_encode_then_decode(Cipher::Null);
    }
}

View File

@ -1,136 +0,0 @@
// Copyright 2017 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Individual messages decoding.
use super::{Hmac, StreamCipher};
use crate::error::SecioError;
use futures::prelude::*;
use log::debug;
use std::{cmp::min, pin::Pin, task::Context, task::Poll};
/// Wraps around a `Stream<Item = Vec<u8>>`. The buffers produced by the underlying stream
/// are decoded using the cipher and hmac.
///
/// This struct implements `Stream`, whose stream item are frames of data without the length
/// prefix. The mechanism for removing the length prefix and splitting the incoming data into
/// frames isn't handled by this module.
///
/// Also implements `Sink` for convenience.
#[pin_project::pin_project]
pub struct DecoderMiddleware<S> {
    // Cipher used to decrypt frame contents in place.
    cipher_state: StreamCipher,
    // HMAC used to authenticate each frame before decryption.
    hmac: Hmac,
    // Underlying frame stream; pinned because `Stream::poll_next` needs it.
    #[pin]
    raw_stream: S,
    // Remaining handshake-nonce bytes still expected at the head of the
    // stream; drained as they are matched.
    nonce: Vec<u8>
}
impl<S> DecoderMiddleware<S> {
    /// Wraps `raw_stream` in a decoder using the given cipher and HMAC.
    ///
    /// `nonce` is the byte sequence expected verbatim at the very beginning
    /// of the stream; it is checked for equality as data arrives.
    pub fn new(raw_stream: S, cipher: StreamCipher, hmac: Hmac, nonce: Vec<u8>) -> DecoderMiddleware<S> {
        DecoderMiddleware { nonce, raw_stream, hmac, cipher_state: cipher }
    }
}
impl<S> Stream for DecoderMiddleware<S>
where
    S: TryStream<Ok = Vec<u8>>,
    S::Error: Into<SecioError>,
{
    type Item = Result<Vec<u8>, SecioError>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.project();
        // Pull the next length-delimited frame from the underlying stream.
        let frame = match TryStream::try_poll_next(this.raw_stream, cx) {
            Poll::Ready(Some(Ok(t))) => t,
            Poll::Ready(None) => return Poll::Ready(None),
            Poll::Pending => return Poll::Pending,
            Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err.into()))),
        };

        // A valid frame is `ciphertext || hmac`; anything shorter cannot even
        // contain the HMAC.
        if frame.len() < this.hmac.num_bytes() {
            debug!("frame too short when decoding secio frame");
            return Poll::Ready(Some(Err(SecioError::FrameTooShort)));
        }
        let content_length = frame.len() - this.hmac.num_bytes();
        {
            // Verify the HMAC over the ciphertext before decrypting.
            let (crypted_data, expected_hash) = frame.split_at(content_length);
            debug_assert_eq!(expected_hash.len(), this.hmac.num_bytes());
            if this.hmac.verify(crypted_data, expected_hash).is_err() {
                debug!("hmac mismatch when decoding secio frame");
                return Poll::Ready(Some(Err(SecioError::HmacNotMatching)));
            }
        }

        // Strip the HMAC trailer and decrypt the remaining bytes in place.
        let mut data_buf = frame;
        data_buf.truncate(content_length);
        this.cipher_state.decrypt(&mut data_buf);

        // The handshake nonce must appear verbatim at the head of the stream;
        // match and consume it, possibly spread across several frames.
        if !this.nonce.is_empty() {
            let n = min(data_buf.len(), this.nonce.len());
            if data_buf[.. n] != this.nonce[.. n] {
                return Poll::Ready(Some(Err(SecioError::NonceVerificationFailed)))
            }
            this.nonce.drain(.. n);
            data_buf.drain(.. n);
        }
        Poll::Ready(Some(Ok(data_buf)))
    }
}
// Sink is plain pass-through: writing is the encoder's job, the decoder only
// forwards to whatever sink the wrapped stream happens to be.
impl<S, I> Sink<I> for DecoderMiddleware<S>
where
    S: Sink<I>,
{
    type Error = S::Error;

    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().raw_stream.poll_ready(cx)
    }

    fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> {
        self.project().raw_stream.start_send(item)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().raw_stream.poll_flush(cx)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().raw_stream.poll_close(cx)
    }
}

View File

@ -1,93 +0,0 @@
// Copyright 2017 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Individual messages encoding.
use super::{Hmac, StreamCipher};
use futures::prelude::*;
use std::{pin::Pin, task::Context, task::Poll};
/// Wraps around a `Sink`. Encodes the buffers passed to it and passes it to the underlying sink.
///
/// This struct implements `Sink`. It expects individual frames of data, and outputs individual
/// frames as well, most notably without the length prefix. The mechanism for adding the length
/// prefix is not covered by this module.
///
/// Also implements `Stream` for convenience.
#[pin_project::pin_project]
pub struct EncoderMiddleware<S> {
    // Cipher used to encrypt frame contents in place before sending.
    cipher_state: StreamCipher,
    // HMAC appended (over the ciphertext) to every outgoing frame.
    hmac: Hmac,
    // Underlying sink; pinned because the `Sink` methods need it.
    #[pin]
    raw_sink: S,
}
impl<S> EncoderMiddleware<S> {
    /// Wraps `raw` in an encoder using the given cipher and HMAC.
    pub fn new(raw: S, cipher: StreamCipher, hmac: Hmac) -> EncoderMiddleware<S> {
        EncoderMiddleware { raw_sink: raw, hmac, cipher_state: cipher }
    }
}
impl<S> Sink<Vec<u8>> for EncoderMiddleware<S>
where
    S: Sink<Vec<u8>>,
{
    type Error = S::Error;

    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().raw_sink.poll_ready(cx)
    }

    fn start_send(self: Pin<&mut Self>, mut data_buf: Vec<u8>) -> Result<(), Self::Error> {
        let this = self.project();
        // TODO if SinkError gets refactor to SecioError, then use try_apply_keystream
        // Encrypt in place, then append the HMAC computed over the ciphertext.
        this.cipher_state.encrypt(&mut data_buf[..]);
        let signature = this.hmac.sign(&data_buf[..]);
        data_buf.extend_from_slice(signature.as_ref());
        this.raw_sink.start_send(data_buf)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().raw_sink.poll_flush(cx)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().raw_sink.poll_close(cx)
    }
}
// Stream is plain pass-through: reading is the decoder's job.
impl<S> Stream for EncoderMiddleware<S>
where
    S: Stream,
{
    type Item = S::Item;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.project().raw_sink.poll_next(cx)
    }
}

View File

@ -1,127 +0,0 @@
// Copyright 2019 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use futures::{prelude::*, stream::BoxStream};
use quicksink::Action;
use std::{fmt, io, pin::Pin, task::{Context, Poll}};
/// `Stream` & `Sink` that reads and writes a length prefix in front of the actual data.
pub struct LenPrefixCodec<T> {
    // Read half, yielding one de-framed payload per item.
    stream: BoxStream<'static, io::Result<Vec<u8>>>,
    // Write half, prepending the length prefix to every payload.
    sink: Pin<Box<dyn Sink<Vec<u8>, Error = io::Error> + Send>>,
    // The socket type `T` is consumed by `new`; keep it in the signature so
    // the codec stays parameterised over it.
    _mark: std::marker::PhantomData<T>
}
impl<T> fmt::Debug for LenPrefixCodec<T> {
    // The boxed halves are opaque, so only the type name is printed.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "LenPrefixCodec")
    }
}
// Compile-time guarantee that a `u32` length prefix always fits in `usize`,
// so `u32 as usize` conversions on frame lengths cannot truncate.
static_assertions::const_assert! {
    std::mem::size_of::<u32>() <= std::mem::size_of::<usize>()
}
impl<T> LenPrefixCodec<T>
where
    T: AsyncRead + AsyncWrite + Unpin + Send + 'static
{
    /// Wraps `socket`, framing all traffic with a big-endian `u32` length
    /// prefix. Incoming frames longer than `max_len` are rejected.
    pub fn new(socket: T, max_len: usize) -> Self {
        let (r, w) = socket.split();

        // Read half: repeatedly read a 4-byte length, then that many bytes.
        let stream = futures::stream::unfold(r, move |mut r| async move {
            let mut len = [0; 4];
            if let Err(e) = r.read_exact(&mut len).await {
                // EOF between frames is a clean end of stream, not an error.
                if e.kind() == io::ErrorKind::UnexpectedEof {
                    return None
                }
                return Some((Err(e), r))
            }
            let n = u32::from_be_bytes(len) as usize;
            if n > max_len {
                let msg = format!("data length {} exceeds allowed maximum {}", n, max_len);
                return Some((Err(io::Error::new(io::ErrorKind::PermissionDenied, msg)), r))
            }
            let mut v = vec![0; n];
            if let Err(e) = r.read_exact(&mut v).await {
                return Some((Err(e), r))
            }
            Some((Ok(v), r))
        });

        // Write half: prepend the big-endian length to every payload.
        let sink = quicksink::make_sink(w, move |mut w, action: Action<Vec<u8>>| async move {
            match action {
                Action::Send(data) => {
                    // NOTE(review): an oversize payload is only logged and
                    // then sent anyway; the remote's read side rejects it.
                    if data.len() > max_len {
                        log::error!("data length {} exceeds allowed maximum {}", data.len(), max_len)
                    }
                    w.write_all(&(data.len() as u32).to_be_bytes()).await?;
                    w.write_all(&data).await?
                }
                Action::Flush => w.flush().await?,
                Action::Close => w.close().await?
            }
            Ok(w)
        });

        LenPrefixCodec {
            stream: stream.boxed(),
            sink: Box::pin(sink),
            _mark: std::marker::PhantomData
        }
    }
}
impl<T> Stream for LenPrefixCodec<T>
where
    T: AsyncRead + AsyncWrite + Send + 'static
{
    type Item = io::Result<Vec<u8>>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Delegate to the boxed read half.
        self.stream.as_mut().poll_next(cx)
    }
}
impl<T> Sink<Vec<u8>> for LenPrefixCodec<T>
where
    T: AsyncRead + AsyncWrite + Send + 'static
{
    type Error = io::Error;

    // All four methods delegate to the boxed write half.

    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.sink.as_mut().poll_ready(cx)
    }

    fn start_send(mut self: Pin<&mut Self>, item: Vec<u8>) -> Result<(), Self::Error> {
        self.sink.as_mut().start_send(item)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.sink.as_mut().poll_flush(cx)
    }

    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.sink.as_mut().poll_close(cx)
    }
}
// Both halves are heap-allocated (`BoxStream` / `Pin<Box<_>>`), so moving the
// codec itself never moves the pinned futures; `Unpin` is sound regardless
// of `T`.
impl<T> Unpin for LenPrefixCodec<T> {
}

View File

@ -1,144 +0,0 @@
// Copyright 2017 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Defines the `SecioError` enum that groups all possible errors in SECIO.
use aes_ctr::stream_cipher::LoopError;
use std::error;
use std::fmt;
use std::io::Error as IoError;
/// Error at the SECIO layer communication.
#[derive(Debug)]
pub enum SecioError {
    /// I/O error.
    IoError(IoError),
    /// Protocol buffer error.
    ProtobufError(prost::DecodeError),
    /// Failed to parse one of the handshake protobuf messages.
    HandshakeParsingFailure,
    /// There is no protocol supported by both the local and remote hosts.
    NoSupportIntersection,
    /// Failed to generate nonce.
    NonceGenerationFailed,
    /// Failed to generate ephemeral key.
    EphemeralKeyGenerationFailed,
    /// Failed to sign a message with our local private key.
    SigningFailure,
    /// The signature of the exchange packet doesn't verify the remote public key.
    SignatureVerificationFailed,
    /// Failed to generate the secret shared key from the ephemeral key.
    SecretGenerationFailed,
    /// The final check of the handshake failed.
    NonceVerificationFailed,
    /// Error with block cipher.
    CipherError(LoopError),
    /// The received frame was of invalid length.
    FrameTooShort,
    /// The hashes of the message didn't match.
    HmacNotMatching,
    /// We received an invalid proposition from remote.
    InvalidProposition(&'static str),
    // Pre-`#[non_exhaustive]` idiom: a hidden variant forcing downstream
    // `match`es to include a wildcard arm so new variants can be added.
    #[doc(hidden)]
    __Nonexhaustive
}
impl error::Error for SecioError {
    /// Returns the underlying error for the variants that wrap one.
    ///
    /// Implemented as `source` rather than the deprecated `cause`: the
    /// default `cause` delegates to `source`, so callers of either method
    /// see the same behavior.
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            SecioError::IoError(err) => Some(err),
            SecioError::ProtobufError(err) => Some(err),
            // TODO: The type doesn't implement `Error`
            /*SecioError::CipherError(ref err) => {
                Some(err)
            },*/
            _ => None,
        }
    }
}
impl fmt::Display for SecioError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        // Variants carrying data are formatted and returned immediately;
        // every other variant maps to a static description written once at
        // the end.
        let description = match self {
            SecioError::IoError(e) =>
                return write!(f, "I/O error: {}", e),
            SecioError::ProtobufError(e) =>
                return write!(f, "Protobuf error: {}", e),
            SecioError::CipherError(e) =>
                return write!(f, "Error while decoding/encoding data: {:?}", e),
            SecioError::InvalidProposition(msg) =>
                return write!(f, "invalid proposition: {}", msg),
            SecioError::HandshakeParsingFailure =>
                "Failed to parse one of the handshake protobuf messages",
            SecioError::NoSupportIntersection =>
                "There is no protocol supported by both the local and remote hosts",
            SecioError::NonceGenerationFailed =>
                "Failed to generate nonce",
            SecioError::EphemeralKeyGenerationFailed =>
                "Failed to generate ephemeral key",
            SecioError::SigningFailure =>
                "Failed to sign a message with our local private key",
            SecioError::SignatureVerificationFailed =>
                "The signature of the exchange packet doesn't verify the remote public key",
            SecioError::SecretGenerationFailed =>
                "Failed to generate the secret shared key from the ephemeral key",
            SecioError::NonceVerificationFailed =>
                "The final check of the handshake failed",
            SecioError::FrameTooShort =>
                "The received frame was of invalid length",
            SecioError::HmacNotMatching =>
                "The hashes of the message didn't match",
            SecioError::__Nonexhaustive =>
                "__Nonexhaustive",
        };
        f.write_str(description)
    }
}
impl From<LoopError> for SecioError {
    fn from(err: LoopError) -> Self {
        Self::CipherError(err)
    }
}

impl From<IoError> for SecioError {
    fn from(err: IoError) -> Self {
        Self::IoError(err)
    }
}

impl From<prost::DecodeError> for SecioError {
    fn from(err: prost::DecodeError) -> Self {
        Self::ProtobufError(err)
    }
}

View File

@ -1,58 +0,0 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! This module handles the key agreement process. Typically ECDH.
use futures::prelude::*;
use crate::SecioError;
// Native key-agreement backend using the `ring` library.
#[path = "exchange/impl_ring.rs"]
#[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))]
mod platform;
// Browser/WASM key-agreement backend using the WebCrypto API.
#[path = "exchange/impl_webcrypto.rs"]
#[cfg(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown"))]
mod platform;
/// Possible key agreement algorithms.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum KeyAgreement {
    EcdhP256,
    EcdhP384
}

/// Opaque private key type.
///
/// Newtype over the platform backend's key so the concrete type (ring or
/// WebCrypto) never leaks into the public API.
pub struct AgreementPrivateKey(platform::AgreementPrivateKey);
/// Generates a new key pair as part of the exchange.
///
/// Resolves to the opaque private key together with the corresponding public
/// key bytes.
#[inline]
pub fn generate_agreement(algorithm: KeyAgreement) -> impl Future<Output = Result<(AgreementPrivateKey, Vec<u8>), SecioError>> {
    platform::generate_agreement(algorithm)
        .map_ok(|(private, public)| (AgreementPrivateKey(private), public))
}
/// Finish the agreement. On success, returns the shared key that both remote agreed upon.
#[inline]
pub fn agree(algorithm: KeyAgreement, my_private_key: AgreementPrivateKey, other_public_key: &[u8], out_size: usize)
-> impl Future<Output = Result<Vec<u8>, SecioError>>
{
platform::agree(algorithm, my_private_key.0, other_public_key, out_size)
}

View File

@ -1,71 +0,0 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Implementation of the key agreement process using the `ring` library.
use crate::{KeyAgreement, SecioError};
use futures::{future, prelude::*};
use log::debug;
use ring::agreement as ring_agreement;
use ring::rand as ring_rand;
impl From<KeyAgreement> for &'static ring_agreement::Algorithm {
    /// Maps the key-agreement selection onto the corresponding `ring`
    /// algorithm.
    ///
    /// Implementing `From` instead of `Into` follows the standard-library
    /// guidance (clippy `from_over_into`): the blanket
    /// `impl<T, U: From<T>> Into<U> for T` keeps every existing `.into()`
    /// call site working unchanged.
    #[inline]
    fn from(a: KeyAgreement) -> Self {
        match a {
            KeyAgreement::EcdhP256 => &ring_agreement::ECDH_P256,
            KeyAgreement::EcdhP384 => &ring_agreement::ECDH_P384,
        }
    }
}
/// Opaque private key type.
pub type AgreementPrivateKey = ring_agreement::EphemeralPrivateKey;
/// Generates a new key pair as part of the exchange.
///
/// Resolves to the opaque private key together with the corresponding public
/// key bytes.
pub fn generate_agreement(algorithm: KeyAgreement) -> impl Future<Output = Result<(AgreementPrivateKey, Vec<u8>), SecioError>> {
    let rng = ring_rand::SystemRandom::new();
    let result = ring_agreement::EphemeralPrivateKey::generate(algorithm.into(), &rng)
        .map_err(|_| {
            debug!("failed to generate ECDH key");
            SecioError::EphemeralKeyGenerationFailed
        })
        .and_then(|private_key| {
            let public_key = private_key
                .compute_public_key()
                .map_err(|_| SecioError::EphemeralKeyGenerationFailed)?;
            Ok((private_key, public_key.as_ref().to_vec()))
        });
    future::ready(result)
}
/// Finish the agreement. On success, resolves to the shared key that both
/// parties agreed upon. `_out_size` is ignored by the ring backend.
pub fn agree(algorithm: KeyAgreement, my_private_key: AgreementPrivateKey, other_public_key: &[u8], _out_size: usize)
    -> impl Future<Output = Result<Vec<u8>, SecioError>>
{
    let peer_key = ring_agreement::UnparsedPublicKey::new(algorithm.into(), other_public_key);
    let result = ring_agreement::agree_ephemeral(
        my_private_key,
        &peer_key,
        SecioError::SecretGenerationFailed,
        |key_material| Ok(key_material.to_vec()),
    );
    future::ready(result)
}

View File

@ -1,170 +0,0 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Implementation of the key agreement process using the WebCrypto API.
use crate::{KeyAgreement, SecioError};
use futures::prelude::*;
use parity_send_wrapper::SendWrapper;
use std::{io, pin::Pin, task::Context, task::Poll};
use wasm_bindgen::prelude::*;
/// Opaque private key type. Contains the private key and the `SubtleCrypto` object.
pub type AgreementPrivateKey = SendSyncHack<(JsValue, web_sys::SubtleCrypto)>;

/// We use a `SendWrapper` from the `send_wrapper` crate around our JS data type. JavaScript data
/// types are not `Send`/`Sync`, but since WASM is single-threaded we know that we're only ever
/// going to access them from the same thread.
pub struct SendSyncHack<T>(SendWrapper<T>);

impl<T> Future for SendSyncHack<T>
where T: Future + Unpin {
    type Output = T::Output;
    // Forwards polling to the wrapped future; `SendWrapper` panics if this is
    // ever done from a different thread, which cannot happen on WASM.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        self.0.poll_unpin(cx)
    }
}
/// Generates a new key pair as part of the exchange.
///
/// Returns the opaque private key and the corresponding public key.
pub fn generate_agreement(algorithm: KeyAgreement)
    -> impl Future<Output = Result<(AgreementPrivateKey, Vec<u8>), SecioError>>
{
    let future = async move {
        // First step is to create the `SubtleCrypto` object.
        let crypto = build_crypto_future().await?;

        // We then generate the ephemeral key.
        let key_pair = {
            let obj = build_curve_obj(algorithm);

            let usages = js_sys::Array::new();
            usages.push(&JsValue::from_str("deriveKey"));
            usages.push(&JsValue::from_str("deriveBits"));

            // `true` marks the key pair as extractable so the public half can
            // be exported below.
            let promise = crypto.generate_key_with_object(&obj, true, usages.as_ref())?;
            wasm_bindgen_futures::JsFuture::from(promise).await?
        };

        // WebCrypto has generated a key-pair. Let's split this key pair into a private key and a
        // public key.
        let (private, public) = {
            let private = js_sys::Reflect::get(&key_pair, &JsValue::from_str("privateKey"));
            let public = js_sys::Reflect::get(&key_pair, &JsValue::from_str("publicKey"));
            match (private, public) {
                (Ok(pr), Ok(pu)) => (pr, pu),
                (Err(err), _) => return Err(err),
                (_, Err(err)) => return Err(err),
            }
        };

        // Then we turn the public key into an `ArrayBuffer`.
        let public = {
            let promise = crypto.export_key("raw", &public.into())?;
            wasm_bindgen_futures::JsFuture::from(promise).await?
        };

        // And finally we convert this `ArrayBuffer` into a `Vec<u8>`.
        let public = js_sys::Uint8Array::new(&public);
        let mut public_buf = vec![0; public.length() as usize];
        public.copy_to(&mut public_buf);
        Ok((SendSyncHack(SendWrapper::new((private, crypto))), public_buf))
    };

    // Any JS-side failure is flattened into an opaque I/O error.
    let future = future
        .map_err(|err| {
            SecioError::IoError(io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))
        });
    SendSyncHack(SendWrapper::new(Box::pin(future)))
}
/// Finish the agreement. On success, returns the shared key that both remote agreed upon.
///
/// `out_size` is the desired length of the derived shared secret in bytes.
pub fn agree(algorithm: KeyAgreement, key: AgreementPrivateKey, other_public_key: &[u8], out_size: usize)
    -> impl Future<Output = Result<Vec<u8>, SecioError>>
{
    let other_public_key = {
        // This unsafe is here because the lifetime of `other_public_key` must not outlive the
        // `tmp_view`. This is guaranteed by the fact that we clone this array right below.
        // See also https://github.com/rustwasm/wasm-bindgen/issues/1303
        let tmp_view = unsafe { js_sys::Uint8Array::view(other_public_key) };
        js_sys::Uint8Array::new(tmp_view.as_ref())
    };

    let future = async move {
        // `take` moves the JS values out of the `SendWrapper`.
        let (private_key, crypto) = key.0.take();

        // We start by importing the remote's public key into the WebCrypto world.
        let public_key = {
            // Note: contrary to what one might think, we shouldn't add the "deriveBits" usage.
            let promise = crypto
                .import_key_with_object(
                    "raw", &js_sys::Object::from(other_public_key.buffer()),
                    &build_curve_obj(algorithm), false, &js_sys::Array::new()
                )?;
            wasm_bindgen_futures::JsFuture::from(promise).await?
        };

        // We then derive the final private key.
        let bytes = {
            let derive_params = build_curve_obj(algorithm);
            let _ = js_sys::Reflect::set(derive_params.as_ref(), &JsValue::from_str("public"), &public_key);
            let promise = crypto
                .derive_bits_with_object(
                    &derive_params,
                    &web_sys::CryptoKey::from(private_key),
                    // WebCrypto counts in bits, `out_size` is in bytes.
                    8 * out_size as u32
                )?;
            wasm_bindgen_futures::JsFuture::from(promise).await?
        };

        let bytes = js_sys::Uint8Array::new(&bytes);
        let mut buf = vec![0; bytes.length() as usize];
        bytes.copy_to(&mut buf);
        Ok(buf)
    };

    // Any JS-side failure is flattened into an opaque I/O error.
    let future = future
        .map_err(|err: JsValue| {
            SecioError::IoError(io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))
        });
    SendSyncHack(SendWrapper::new(Box::pin(future)))
}
/// Builds a future that returns the `SubtleCrypto` object.
async fn build_crypto_future() -> Result<web_sys::SubtleCrypto, JsValue> {
    // `window()` returns `None` when no `Window` object exists (e.g. outside a browser
    // main thread); surface that as a JS error string.
    let window = web_sys::window()
        .ok_or_else(|| JsValue::from_str("Window object not available"))?;
    // `Window::crypto()` is itself fallible; propagate its error unchanged.
    let crypto = window.crypto()?;
    Ok(crypto.subtle())
}
/// Builds a `EcKeyGenParams` object.
/// See https://developer.mozilla.org/en-US/docs/Web/API/EcKeyGenParams
fn build_curve_obj(algorithm: KeyAgreement) -> js_sys::Object {
    // Resolve the WebCrypto name of the requested curve first.
    let curve = match algorithm {
        KeyAgreement::EcdhP256 => "P-256",
        KeyAgreement::EcdhP384 => "P-384",
    };
    // Assemble `{ name: "ECDH", namedCurve: <curve> }`. The `Reflect::set` results are
    // deliberately ignored: setting a property on a fresh plain object cannot fail.
    let obj = js_sys::Object::new();
    let _ = js_sys::Reflect::set(obj.as_ref(), &JsValue::from_str("name"), &JsValue::from_str("ECDH"));
    let _ = js_sys::Reflect::set(obj.as_ref(), &JsValue::from_str("namedCurve"), &JsValue::from_str(curve));
    obj
}

View File

@ -1,489 +0,0 @@
// Copyright 2017 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::SecioConfig;
use crate::algo_support;
use crate::codec::{full_codec, FullCodec, Hmac, LenPrefixCodec};
use crate::error::SecioError;
use crate::exchange;
use crate::stream_cipher::ctr;
use crate::structs_proto::{Exchange, Propose};
use futures::prelude::*;
use libp2p_core::PublicKey;
use log::{debug, trace};
use prost::Message;
use rand::{self, RngCore};
use sha2::{Digest as ShaDigestTrait, Sha256};
use std::{cmp::{self, Ordering}, io};
/// Performs a handshake on the given socket.
///
/// This function expects that the remote is identified with `remote_public_key`, and the remote
/// will expect that we are identified with `local_key`. Any mismatch somewhere will produce a
/// `SecioError`.
///
/// On success, returns an object that implements the `Sink` and `Stream` trait whose items are
/// buffers of data, plus the public key of the remote, plus the ephemeral public key used during
/// negotiation.
pub async fn handshake<S>(socket: S, config: SecioConfig)
    -> Result<(FullCodec<S>, PublicKey, Vec<u8>), SecioError>
where
    S: AsyncRead + AsyncWrite + Send + Unpin + 'static
{
    // All handshake frames are length-prefixed and bounded by the configured maximum.
    let mut socket = LenPrefixCodec::new(socket, config.max_frame_len);

    // 16-byte random nonce. The remote echoes it back over the encrypted channel at the very
    // end of the handshake, proving that both sides derived the same keys.
    let local_nonce = {
        let mut local_nonce = [0; 16];
        rand::thread_rng()
            .try_fill_bytes(&mut local_nonce)
            .map_err(|_| SecioError::NonceGenerationFailed)?;
        local_nonce
    };

    let local_public_key_encoded = config.key.public().into_protobuf_encoding();

    // Send our proposition with our nonce, public key and supported protocols.
    // Each algorithm list falls back to the crate-wide default proposition when not configured.
    let local_proposition = Propose {
        rand: Some(local_nonce.to_vec()),
        pubkey: Some(local_public_key_encoded.clone()),
        exchanges: if let Some(ref p) = config.agreements_prop {
            trace!("agreements proposition: {}", p);
            Some(p.clone())
        } else {
            trace!("agreements proposition: {}", algo_support::DEFAULT_AGREEMENTS_PROPOSITION);
            Some(algo_support::DEFAULT_AGREEMENTS_PROPOSITION.into())
        },
        ciphers: if let Some(ref p) = config.ciphers_prop {
            trace!("ciphers proposition: {}", p);
            Some(p.clone())
        } else {
            trace!("ciphers proposition: {}", algo_support::DEFAULT_CIPHERS_PROPOSITION);
            Some(algo_support::DEFAULT_CIPHERS_PROPOSITION.into())
        },
        hashes: if let Some(ref p) = config.digests_prop {
            trace!("digests proposition: {}", p);
            Some(p.clone())
        } else {
            Some(algo_support::DEFAULT_DIGESTS_PROPOSITION.into())
        }
    };

    let local_proposition_bytes = {
        let mut buf = Vec::with_capacity(local_proposition.encoded_len());
        local_proposition.encode(&mut buf).expect("Vec<u8> provides capacity as needed");
        buf
    };
    trace!("starting handshake; local nonce = {:?}", local_nonce);

    trace!("sending proposition to remote");
    socket.send(local_proposition_bytes.clone()).await?;

    // Receive the remote's proposition.
    let remote_proposition_bytes = match socket.next().await {
        Some(b) => b?,
        None => {
            debug!("unexpected eof while waiting for remote's proposition");
            return Err(SecioError::IoError(io::ErrorKind::UnexpectedEof.into()))
        },
    };

    let remote_proposition = match Propose::decode(&remote_proposition_bytes[..]) {
        Ok(prop) => prop,
        Err(_) => {
            debug!("failed to parse remote's proposition protobuf message");
            return Err(SecioError::HandshakeParsingFailure);
        }
    };

    let remote_public_key_encoded = remote_proposition.pubkey.unwrap_or_default();
    let remote_nonce = remote_proposition.rand.unwrap_or_default();
    let remote_public_key = match PublicKey::from_protobuf_encoding(&remote_public_key_encoded) {
        Ok(p) => p,
        Err(_) => {
            debug!("failed to parse remote's proposition's pubkey protobuf");
            return Err(SecioError::HandshakeParsingFailure);
        },
    };
    trace!("received proposition from remote; pubkey = {:?}; nonce = {:?}",
           remote_public_key, remote_nonce);

    // In order to determine which protocols to use, we compute two hashes and choose
    // based on which hash is larger.
    let hashes_ordering = {
        // oh1 mixes the *remote* key with the *local* nonce and oh2 the reverse, so both
        // peers compute the same pair of digests (in swapped order) and therefore agree on
        // a single, symmetric ordering.
        let oh1 = {
            let mut ctx = Sha256::new();
            ctx.update(&remote_public_key_encoded);
            ctx.update(&local_nonce);
            ctx.finalize()
        };

        let oh2 = {
            let mut ctx = Sha256::new();
            ctx.update(&local_public_key_encoded);
            ctx.update(&remote_nonce);
            ctx.finalize()
        };

        oh1.cmp(&oh2)
    };

    // Select one algorithm of each category (key agreement, cipher, digest) from the
    // intersection of both propositions, using `hashes_ordering` to break ties.
    let chosen_exchange = {
        let ours = config.agreements_prop.as_ref()
            .map(|s| s.as_ref())
            .unwrap_or(algo_support::DEFAULT_AGREEMENTS_PROPOSITION);
        let theirs = &remote_proposition.exchanges.unwrap_or_default();
        match algo_support::select_agreement(hashes_ordering, ours, theirs) {
            Ok(a) => a,
            Err(err) => {
                debug!("failed to select an exchange protocol");
                return Err(err);
            }
        }
    };

    let chosen_cipher = {
        let ours = config.ciphers_prop.as_ref()
            .map(|s| s.as_ref())
            .unwrap_or(algo_support::DEFAULT_CIPHERS_PROPOSITION);
        let theirs = &remote_proposition.ciphers.unwrap_or_default();
        match algo_support::select_cipher(hashes_ordering, ours, theirs) {
            Ok(a) => {
                debug!("selected cipher: {:?}", a);
                a
            }
            Err(err) => {
                debug!("failed to select a cipher protocol");
                return Err(err);
            }
        }
    };

    let chosen_hash = {
        let ours = config.digests_prop.as_ref()
            .map(|s| s.as_ref())
            .unwrap_or(algo_support::DEFAULT_DIGESTS_PROPOSITION);
        let theirs = &remote_proposition.hashes.unwrap_or_default();
        match algo_support::select_digest(hashes_ordering, ours, theirs) {
            Ok(a) => {
                debug!("selected hash: {:?}", a);
                a
            }
            Err(err) => {
                debug!("failed to select a hash protocol");
                return Err(err);
            }
        }
    };

    // Generate an ephemeral key for the negotiation.
    let (tmp_priv_key, tmp_pub_key) = exchange::generate_agreement(chosen_exchange).await?;

    // Send the ephemeral pub key to the remote in an `Exchange` struct. The `Exchange` also
    // contains a signature of the two propositions encoded with our static public key.
    let local_exchange = {
        // The signed payload is: local proposition || remote proposition || ephemeral pubkey.
        let mut data_to_sign = local_proposition_bytes.clone();
        data_to_sign.extend_from_slice(&remote_proposition_bytes);
        data_to_sign.extend_from_slice(&tmp_pub_key);

        Exchange {
            epubkey: Some(tmp_pub_key.clone()),
            signature: match config.key.sign(&data_to_sign) {
                Ok(sig) => Some(sig),
                Err(_) => return Err(SecioError::SigningFailure)
            }
        }
    };
    let local_exch = {
        let mut buf = Vec::with_capacity(local_exchange.encoded_len());
        local_exchange.encode(&mut buf).expect("Vec<u8> provides capacity as needed");
        buf
    };

    // Send our local `Exchange`.
    trace!("sending exchange to remote");
    socket.send(local_exch).await?;

    // Receive the remote's `Exchange`.
    let remote_exch = {
        let raw = match socket.next().await {
            Some(r) => r?,
            None => {
                debug!("unexpected eof while waiting for remote's exchange");
                return Err(SecioError::IoError(io::ErrorKind::UnexpectedEof.into()))
            },
        };

        match Exchange::decode(&raw[..]) {
            Ok(e) => {
                trace!("received and decoded the remote's exchange");
                e
            },
            Err(err) => {
                debug!("failed to parse remote's exchange protobuf; {:?}", err);
                return Err(SecioError::HandshakeParsingFailure);
            }
        }
    };

    // Check the validity of the remote's `Exchange`. This verifies that the remote was really
    // the sender of its proposition, and that it is the owner of both its global and ephemeral
    // keys.
    {
        // Mirror of the payload signed above, with the two propositions' roles swapped.
        let mut data_to_verify = remote_proposition_bytes.clone();
        data_to_verify.extend_from_slice(&local_proposition_bytes);
        data_to_verify.extend_from_slice(remote_exch.epubkey.as_deref().unwrap_or_default());

        if !remote_public_key.verify(&data_to_verify, &remote_exch.signature.unwrap_or_default()) {
            return Err(SecioError::SignatureVerificationFailed)
        }

        trace!("successfully verified the remote's signature");
    }

    // Generate a key from the local ephemeral private key and the remote ephemeral public key,
    // derive from it a cipher key, an iv, and a hmac key, and build the encoder/decoder.
    let key_material = exchange::agree(
        chosen_exchange,
        tmp_priv_key,
        &remote_exch.epubkey.unwrap_or_default(),
        chosen_hash.num_bytes()
    ).await?;

    // Generate a key from the local ephemeral private key and the remote ephemeral public key,
    // derive from it a cipher key, an iv, and a hmac key, and build the encoder/decoder.
    let mut codec = {
        let cipher_key_size = chosen_cipher.key_size();
        let iv_size = chosen_cipher.iv_size();

        let key = Hmac::from_key(chosen_hash, &key_material);
        // Stretch the shared secret into two halves of (iv || cipher key || 20-byte mac key),
        // one half per traffic direction. The 20-byte mac-key size is part of the protocol
        // (see `stretch_key`, which must match the other implementations).
        let mut longer_key = vec![0u8; 2 * (iv_size + cipher_key_size + 20)];
        stretch_key(key, &mut longer_key);

        let (local_infos, remote_infos) = {
            let (first_half, second_half) = longer_key.split_at(longer_key.len() / 2);
            // The same ordering that picked the algorithms decides which peer uses which half;
            // equality would make both directions identical and is rejected.
            match hashes_ordering {
                Ordering::Equal => {
                    let msg = "equal digest of public key and nonce for local and remote";
                    return Err(SecioError::InvalidProposition(msg))
                }
                Ordering::Less => (second_half, first_half),
                Ordering::Greater => (first_half, second_half),
            }
        };

        let (encoding_cipher, encoding_hmac) = {
            let (iv, rest) = local_infos.split_at(iv_size);
            let (cipher_key, mac_key) = rest.split_at(cipher_key_size);
            let hmac = Hmac::from_key(chosen_hash, mac_key);
            let cipher = ctr(chosen_cipher, cipher_key, iv);
            (cipher, hmac)
        };

        let (decoding_cipher, decoding_hmac) = {
            let (iv, rest) = remote_infos.split_at(iv_size);
            let (cipher_key, mac_key) = rest.split_at(cipher_key_size);
            let hmac = Hmac::from_key(chosen_hash, mac_key);
            let cipher = ctr(chosen_cipher, cipher_key, iv);
            (cipher, hmac)
        };

        full_codec(
            socket,
            encoding_cipher,
            encoding_hmac,
            decoding_cipher,
            decoding_hmac,
            local_nonce.to_vec()
        )
    };

    // We send back their nonce to check if the connection works.
    trace!("checking encryption by sending back remote's nonce");
    codec.send(remote_nonce).await?;

    Ok((codec, remote_public_key, tmp_pub_key))
}
/// Custom algorithm translated from reference implementations. Needs to be the same algorithm
/// amongst all implementations.
fn stretch_key(hmac: Hmac, result: &mut [u8]) {
    // Dispatch on the digest variant so the generic expansion routine receives a
    // concrete HMAC type.
    match hmac {
        Hmac::Sha256(ctx) => stretch_key_inner(ctx, result),
        Hmac::Sha512(ctx) => stretch_key_inner(ctx, result),
    }
}
/// Key-expansion feedback loop shared by both `Hmac` variants: repeatedly feeds the HMAC
/// output back into itself until `result` is filled. Byte-compatible with the reference
/// implementations.
fn stretch_key_inner<D>(hmac: ::hmac::Hmac<D>, result: &mut [u8])
where D: ::hmac::digest::Update + ::hmac::digest::BlockInput +
         ::hmac::digest::FixedOutput + ::hmac::digest::Reset + Default + Clone,
      ::hmac::Hmac<D>: Clone + ::hmac::crypto_mac::Mac
{
    use ::hmac::Mac;
    const SEED: &[u8] = b"key expansion";

    // a(0) = HMAC(seed)
    let mut feedback = {
        let mut mac = hmac.clone();
        mac.update(SEED);
        mac.finalize().into_bytes()
    };

    let mut offset = 0;
    while offset < result.len() {
        // b(i) = HMAC(a(i) || seed); copy as many of its bytes as still fit into `result`.
        let block = {
            let mut mac = hmac.clone();
            mac.update(feedback.as_ref());
            mac.update(SEED);
            mac.finalize().into_bytes()
        };
        let take = cmp::min(block.as_ref().len(), result.len() - offset);
        result[offset..offset + take].copy_from_slice(&block.as_ref()[..take]);
        offset += take;

        // a(i+1) = HMAC(a(i))
        let mut mac = hmac.clone();
        mac.update(feedback.as_ref());
        feedback = mac.finalize().into_bytes();
    }
}
#[cfg(test)]
mod tests {
    use super::{handshake, stretch_key};
    use crate::{algo_support::Digest, codec::Hmac, SecioConfig};
    use libp2p_core::identity;
    use futures::{prelude::*, channel::oneshot};

    // Full handshake between two RSA identities loaded from fixture files; disabled on the
    // targets listed in the cfg below.
    #[test]
    #[cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))]
    fn handshake_with_self_succeeds_rsa() {
        let key1 = {
            let mut private = include_bytes!("../tests/test-rsa-private-key.pk8").to_vec();
            identity::Keypair::rsa_from_pkcs8(&mut private).unwrap()
        };

        let key2 = {
            let mut private = include_bytes!("../tests/test-rsa-private-key-2.pk8").to_vec();
            identity::Keypair::rsa_from_pkcs8(&mut private).unwrap()
        };

        handshake_with_self_succeeds(SecioConfig::new(key1), SecioConfig::new(key2));
    }

    // Full handshake between two freshly generated Ed25519 identities.
    #[test]
    fn handshake_with_self_succeeds_ed25519() {
        let key1 = identity::Keypair::generate_ed25519();
        let key2 = identity::Keypair::generate_ed25519();
        handshake_with_self_succeeds(SecioConfig::new(key1), SecioConfig::new(key2));
    }

    // Full handshake between two secp256k1 identities loaded from fixture files.
    #[test]
    #[cfg(feature = "secp256k1")]
    fn handshake_with_self_succeeds_secp256k1() {
        let key1 = {
            let mut key = include_bytes!("../tests/test-secp256k1-private-key.der").to_vec();
            identity::Keypair::secp256k1_from_der(&mut key).unwrap()
        };

        let key2 = {
            let mut key = include_bytes!("../tests/test-secp256k1-private-key-2.der").to_vec();
            identity::Keypair::secp256k1_from_der(&mut key).unwrap()
        };

        handshake_with_self_succeeds(SecioConfig::new(key1), SecioConfig::new(key2));
    }

    // Drives a handshake over a real TCP loopback connection: the spawned server echoes every
    // non-empty packet, while the client sends "hello" and expects it echoed back over the
    // secured codec.
    fn handshake_with_self_succeeds(key1: SecioConfig, key2: SecioConfig) {
        let (l_a_tx, l_a_rx) = oneshot::channel();

        async_std::task::spawn(async move {
            let listener = async_std::net::TcpListener::bind(&"127.0.0.1:0").await.unwrap();
            // Report the OS-assigned listen address back to the client half of the test.
            l_a_tx.send(listener.local_addr().unwrap()).unwrap();

            let connec = listener.accept().await.unwrap().0;
            let mut codec = handshake(connec, key1).await.unwrap().0;
            while let Some(packet) = codec.next().await {
                let packet = packet.unwrap();
                if !packet.is_empty() {
                    codec.send(packet.into()).await.unwrap();
                }
            }
        });

        async_std::task::block_on(async move {
            let listen_addr = l_a_rx.await.unwrap();
            let connec = async_std::net::TcpStream::connect(&listen_addr).await.unwrap();
            let mut codec = handshake(connec, key2).await.unwrap().0;
            codec.send(b"hello".to_vec().into()).await.unwrap();
            // Ignore any empty frames before asserting on the echoed payload.
            let mut packets_stream = codec.filter(|p| future::ready(!p.as_ref().unwrap().is_empty()));
            let packet = packets_stream.next().await.unwrap();
            assert_eq!(packet.unwrap(), b"hello");
        });
    }

    // Known-answer vectors for `stretch_key`; these outputs must match the other secio
    // implementations byte-for-byte.
    #[test]
    fn stretch() {
        let mut output = [0u8; 32];

        // Empty key.
        let key1 = Hmac::from_key(Digest::Sha256, &[]);
        stretch_key(key1, &mut output);
        assert_eq!(
            &output,
            &[
                103, 144, 60, 199, 85, 145, 239, 71, 79, 198, 85, 164, 32, 53, 143, 205, 50, 48,
                153, 10, 37, 32, 85, 1, 226, 61, 193, 1, 154, 120, 207, 80,
            ]
        );

        // 64-byte random key #1.
        let key2 = Hmac::from_key(
            Digest::Sha256,
            &[
                157, 166, 80, 144, 77, 193, 198, 6, 23, 220, 87, 220, 191, 72, 168, 197, 54, 33,
                219, 225, 84, 156, 165, 37, 149, 224, 244, 32, 170, 79, 125, 35, 171, 26, 178, 176,
                92, 168, 22, 27, 205, 44, 229, 61, 152, 21, 222, 81, 241, 81, 116, 236, 74, 166,
                89, 145, 5, 162, 108, 230, 55, 54, 9, 17,
            ],
        );
        stretch_key(key2, &mut output);
        assert_eq!(
            &output,
            &[
                39, 151, 182, 63, 180, 175, 224, 139, 42, 131, 130, 116, 55, 146, 62, 31, 157, 95,
                217, 15, 73, 81, 10, 83, 243, 141, 64, 227, 103, 144, 99, 121,
            ]
        );

        // 64-byte random key #2.
        let key3 = Hmac::from_key(
            Digest::Sha256,
            &[
                98, 219, 94, 104, 97, 70, 139, 13, 185, 110, 56, 36, 66, 3, 80, 224, 32, 205, 102,
                170, 59, 32, 140, 245, 86, 102, 231, 68, 85, 249, 227, 243, 57, 53, 171, 36, 62,
                225, 178, 74, 89, 142, 151, 94, 183, 231, 208, 166, 244, 130, 130, 209, 248, 65,
                19, 48, 127, 127, 55, 82, 117, 154, 124, 108,
            ],
        );
        stretch_key(key3, &mut output);
        assert_eq!(
            &output,
            &[
                28, 39, 158, 206, 164, 16, 211, 194, 99, 43, 208, 36, 24, 141, 90, 93, 157, 236,
                238, 111, 170, 0, 60, 11, 49, 174, 177, 121, 30, 12, 182, 25,
            ]
        );
    }
}

Some files were not shown because too many files have changed in this diff Show More