diff --git a/.cargo/config.toml b/.cargo/config.toml index e55adcc9..8c1349f4 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,3 @@ [alias] # Temporary solution to have clippy config in a single place until https://github.com/rust-lang/rust-clippy/blob/master/doc/roadmap-2021.md#lintstoml-configuration is shipped. -custom-clippy = "clippy --workspace --all-features --all-targets -- -A clippy::type_complexity -A clippy::pedantic -W clippy::used_underscore_binding -W unreachable_pub -D warnings" +custom-clippy = "clippy --workspace --all-features --all-targets -- -A clippy::type_complexity -A clippy::pedantic -W clippy::used_underscore_binding -W unreachable_pub" diff --git a/.github/actions/cargo-semver-checks/action.yml b/.github/actions/cargo-semver-checks/action.yml deleted file mode 100644 index e9e6844f..00000000 --- a/.github/actions/cargo-semver-checks/action.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: "Run cargo semver-checks" -description: "Install and run the cargo semver-checks tool" -inputs: - crate: - required: true - description: "The crate to run `cargo semver-checks` on." 
-runs: - using: "composite" - steps: - - run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.22.1/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin - shell: bash - - - name: Get released version - shell: bash - id: get-released-version - run: | - MAX_STABLE_VERSION=$(curl https://crates.io/api/v1/crates/${{ inputs.crate }} --silent | jq '.crate.max_stable_version') - echo "version=${MAX_STABLE_VERSION}" >> $GITHUB_OUTPUT - - - shell: bash - run: | - rustc --version | tee .rustc-version - cargo semver-checks --version | tee .semver-checks-version - - - uses: actions/cache@v3 - with: - path: ${{ github.workspace }}/target/semver-checks/cache - key: semver-checks-cache-${{ hashFiles('.rustc-version') }}-${{ hashFiles('.semver-checks-version') }}-${{ inputs.crate }}-${{ steps.get-released-version.outputs.version }} - - - run: cargo semver-checks check-release --package ${{ inputs.crate }} --verbose - shell: bash - env: - CARGO_TERM_VERBOSE: "true" - # debugging https://github.com/libp2p/rust-libp2p/pull/3782#issuecomment-1523346255 - CARGO_HTTP_DEBUG: "true" - CARGO_LOG: "cargo::ops::registry=debug" diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 0bfe5069..59915a71 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -8,6 +8,11 @@ updates: commit-message: prefix: "deps" rebase-strategy: "disabled" + groups: + trust-dns: + patterns: + - "trust-dns-*" + - "async-std-resolver" - package-ecosystem: "github-actions" directory: "/" schedule: diff --git a/.github/workflows/cache-factory.yml b/.github/workflows/cache-factory.yml index 860f0a0d..f4ef3cc8 100644 --- a/.github/workflows/cache-factory.yml +++ b/.github/workflows/cache-factory.yml @@ -18,11 +18,11 @@ jobs: make_stable_rust_cache: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - - uses: 
Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1 + - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 with: shared-key: stable-cache diff --git a/.github/workflows/cargo-audit.yml b/.github/workflows/cargo-audit.yml index 2b5abe19..65c5de03 100644 --- a/.github/workflows/cargo-audit.yml +++ b/.github/workflows/cargo-audit.yml @@ -7,7 +7,7 @@ jobs: audit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions-rs/audit-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/cargo-deny-pr.yml b/.github/workflows/cargo-deny-pr.yml deleted file mode 100644 index c9c0e7d4..00000000 --- a/.github/workflows/cargo-deny-pr.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: cargo deny - -on: - push: - paths: - - '**/Cargo.toml' - pull_request: - paths: - - '**/Cargo.toml' - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - cargo-deny: - runs-on: ubuntu-latest - strategy: - matrix: - checks: - - advisories - - bans licenses sources - - steps: - - uses: actions/checkout@v3 - - uses: EmbarkStudios/cargo-deny-action@v1 - with: - command: check ${{ matrix.checks }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 470016d6..7b12596b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,6 +12,7 @@ concurrency: env: SEGMENT_DOWNLOAD_TIMEOUT_MINS: 2 # Fail cache download after 2 minutes. + RUSTFLAGS: '-Dwarnings' # Never tolerate warnings. 
jobs: test: @@ -31,13 +32,13 @@ jobs: env: CRATE: ${{ matrix.crate }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1 + - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 with: shared-key: stable-cache save-if: false @@ -48,21 +49,8 @@ jobs: - name: Check if we compile without any features activated run: cargo build --package "$CRATE" --no-default-features - - run: cargo clean - - - name: Check if crate has been released - id: check-released - run: | - RESPONSE_CODE=$(curl https://crates.io/api/v1/crates/"$CRATE" --silent --write-out "%{http_code}" --output /dev/null) - echo "code=${RESPONSE_CODE}" - echo "code=${RESPONSE_CODE}" >> $GITHUB_OUTPUT - - - uses: ./.github/actions/cargo-semver-checks - if: steps.check-released.outputs.code == 200 && !contains(fromJSON('["libp2p-swarm-derive"]'), env.CRATE) # Workaround until https://github.com/obi1kenobi/cargo-semver-check/issues/146 is shipped. 
- with: - crate: ${{ env.CRATE }} - - name: Enforce no dependency on meta crate + if: env.CRATE != 'libp2p-server' run: | cargo metadata --format-version=1 --no-deps | \ jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .dependencies | all(.name != "libp2p")' @@ -88,7 +76,7 @@ jobs: env: CHROMEDRIVER_VERSION: '114.0.5735.90' steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable with: @@ -128,7 +116,7 @@ jobs: os: windows-latest runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable with: @@ -136,7 +124,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1 + - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 with: key: ${{ matrix.target }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -147,7 +135,7 @@ jobs: name: Compile with MSRV runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Extract MSRV from workspace manifest shell: bash @@ -161,7 +149,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1 + - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -176,13 +164,13 @@ jobs: - features: "mdns tcp dns tokio" - features: "mdns tcp dns async-std" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1 + - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 with: key: ${{ matrix.features }} 
save-if: ${{ github.ref == 'refs/heads/master' }} @@ -193,13 +181,13 @@ jobs: name: Check rustdoc intra-doc links runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1 + - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -212,11 +200,11 @@ jobs: fail-fast: false matrix: rust-version: [ - 1.71.0, # current stable + 1.72.0, # current stable beta ] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@master with: @@ -225,7 +213,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1 + - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -236,13 +224,13 @@ jobs: name: IPFS Integration tests runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1 + - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -252,13 +240,13 @@ jobs: examples: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1 + - uses: 
Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 with: shared-key: stable-cache save-if: false @@ -271,10 +259,18 @@ jobs: cargo check --manifest-path "$toml"; done + semver: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.23.0/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin + shell: bash + - uses: obi1kenobi/cargo-semver-checks-action@v2 + rustfmt: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable with: @@ -288,7 +284,7 @@ jobs: manifest_lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable @@ -311,7 +307,7 @@ jobs: outputs: members: ${{ steps.cargo-metadata.outputs.members }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable @@ -324,9 +320,9 @@ jobs: name: Check for changes in proto files runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1 + - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 - run: cargo install --version 0.10.0 pb-rs --locked @@ -351,6 +347,14 @@ jobs: name: Ensure that `Cargo.lock` is up-to-date runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1 + - uses: actions/checkout@v4 + - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 - run: cargo metadata --locked --format-version=1 > /dev/null + + cargo-deny: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: EmbarkStudios/cargo-deny-action@v1 + with: + command: check advisories bans licenses sources diff --git a/.github/workflows/docker-image.yml 
b/.github/workflows/docker-image.yml new file mode 100644 index 00000000..3b39ef7e --- /dev/null +++ b/.github/workflows/docker-image.yml @@ -0,0 +1,39 @@ +name: Publish docker images + +on: + push: + branches: + - 'master' + tags: + - 'libp2p-server-**' + pull_request: + +jobs: + server: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ github.token }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ghcr.io/${{ github.repository }}-server + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: . + file: ./misc/server/Dockerfile + push: ${{ ! github.event.pull_request.head.repo.fork }} # Only push image if we have the required permissions, i.e. not running from a fork + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 8513493b..bffcc60d 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install nightly toolchain run: rustup toolchain install nightly - name: Build Documentation diff --git a/.github/workflows/interop-test.yml b/.github/workflows/interop-test.yml index 4d63a76c..e3485f25 100644 --- a/.github/workflows/interop-test.yml +++ b/.github/workflows/interop-test.yml @@ -17,8 +17,8 @@ jobs: matrix: flavour: [chromium, native] steps: - - uses: actions/checkout@v3 - - uses: docker/setup-buildx-action@v2 + - uses: actions/checkout@v4 + - uses: docker/setup-buildx-action@v3 - name: Build ${{ matrix.flavour }} image run: docker buildx build --load -t ${{ matrix.flavour }}-rust-libp2p-head . 
-f interop-tests/Dockerfile.${{ matrix.flavour }} - name: Run ${{ matrix.flavour }} tests diff --git a/CHANGELOG.md b/CHANGELOG.md index 1cf9f7c9..f6b32c35 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ - [`libp2p-relay` CHANGELOG](protocols/relay/CHANGELOG.md) - [`libp2p-request-response` CHANGELOG](protocols/request-response/CHANGELOG.md) - [`libp2p-rendezvous` CHANGELOG](protocols/rendezvous/CHANGELOG.md) +- [`libp2p-upnp` CHANGELOG](protocols/upnp/CHANGELOG.md) ## Transport Protocols & Upgrades diff --git a/Cargo.lock b/Cargo.lock index b5612343..4ff71d52 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,15 +17,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "aead" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" -dependencies = [ - "generic-array", -] - [[package]] name = "aead" version = "0.4.3" @@ -33,7 +24,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ "generic-array", - "rand_core 0.6.4", ] [[package]] @@ -46,24 +36,13 @@ dependencies = [ "generic-array", ] -[[package]] -name = "aes" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" -dependencies = [ - "aes-soft", - "aesni", - "cipher 0.2.5", -] - [[package]] name = "aes" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher 0.3.0", "cpufeatures", "opaque-debug", @@ -75,7 +54,7 @@ version = "0.8.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher 0.4.4", "cpufeatures", ] @@ -108,37 +87,26 @@ dependencies = [ "subtle", ] -[[package]] -name = "aes-soft" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" -dependencies = [ - "cipher 0.2.5", - "opaque-debug", -] - -[[package]] -name = "aesni" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" -dependencies = [ - "cipher 0.2.5", - "opaque-debug", -] - [[package]] name = "ahash" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", "version_check", ] +[[package]] +name = "aho-corasick" +version = "0.7.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +dependencies = [ + "memchr", +] + [[package]] name = "aho-corasick" version = "1.0.2" @@ -211,9 +179,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.72" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "arbitrary" @@ -239,29 +207,13 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" -[[package]] -name = "asn1-rs" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"30ff05a702273012438132f449575dbc804e27b2f3cbe3069aa237d26c98fa33" -dependencies = [ - "asn1-rs-derive 0.1.0", - "asn1-rs-impl", - "displaydoc", - "nom", - "num-traits", - "rusticata-macros", - "thiserror", - "time", -] - [[package]] name = "asn1-rs" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ - "asn1-rs-derive 0.4.0", + "asn1-rs-derive", "asn1-rs-impl", "displaydoc", "nom", @@ -271,18 +223,6 @@ dependencies = [ "time", ] -[[package]] -name = "asn1-rs-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - [[package]] name = "asn1-rs-derive" version = "0.4.0" @@ -382,7 +322,7 @@ checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ "async-lock", "autocfg", - "cfg-if 1.0.0", + "cfg-if", "concurrent-queue", "futures-lite", "log", @@ -425,7 +365,7 @@ dependencies = [ "async-lock", "autocfg", "blocking", - "cfg-if 1.0.0", + "cfg-if", "event-listener", "futures-lite", "rustix 0.37.23", @@ -463,16 +403,16 @@ dependencies = [ [[package]] name = "async-std-resolver" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba50e24d9ee0a8950d3d03fc6d0dd10aa14b5de3b101949b4e160f7fee7c723" +checksum = "0354a68a52265a3bde76005ddd2726624ef8624614f7f58871301de205a58a59" dependencies = [ "async-std", "async-trait", "futures-io", "futures-util", "pin-utils", - "socket2 0.4.9", + "socket2 0.5.4", "trust-dns-resolver", ] @@ -490,7 +430,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -512,6 +452,17 @@ version = "1.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" +[[package]] +name = "attohttpc" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" +dependencies = [ + "http", + "log", + "url", +] + [[package]] name = "autocfg" version = "1.1.0" @@ -522,11 +473,11 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" name = "autonat-example" version = "0.1.0" dependencies = [ - "async-std", "clap", "env_logger 0.10.0", "futures", "libp2p", + "tokio", ] [[package]] @@ -586,7 +537,7 @@ checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object", @@ -599,12 +550,6 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - [[package]] name = "base16ct" version = "0.2.0" @@ -619,9 +564,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.2" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "base64ct" @@ -692,21 +637,14 @@ dependencies = [ "generic-array", ] -[[package]] -name = "block-modes" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" -dependencies = [ 
- "block-padding", - "cipher 0.2.5", -] - [[package]] name = "block-padding" -version = "0.2.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" +checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" +dependencies = [ + "generic-array", +] [[package]] name = "blocking" @@ -723,6 +661,32 @@ dependencies = [ "log", ] +[[package]] +name = "browser-webrtc-example" +version = "0.1.0" +dependencies = [ + "anyhow", + "axum", + "env_logger 0.10.0", + "futures", + "js-sys", + "libp2p", + "libp2p-webrtc", + "libp2p-webrtc-websys", + "log", + "mime_guess", + "rand 0.8.5", + "rust-embed", + "tokio", + "tokio-util", + "tower", + "tower-http", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-logger", + "web-sys", +] + [[package]] name = "bs58" version = "0.5.0" @@ -732,6 +696,16 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "bstr" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "bumpalo" version = "3.13.0" @@ -746,9 +720,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" dependencies = [ "serde", ] @@ -759,6 +733,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "cbc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher 0.4.4", +] + [[package]] name = "cbor4ii" version = "0.3.1" @@ -776,21 +759,16 @@ checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "ccm" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca1a8fbc20b50ac9673ff014abfb2b5f4085ee1a850d408f14a159c5853ac7" +checksum = "9ae3c82e4355234767756212c570e29833699ab63e6ffd161887314cc5b43847" dependencies = [ - "aead 0.3.2", - "cipher 0.2.5", + "aead 0.5.2", + "cipher 0.4.4", + "ctr 0.9.2", "subtle", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -803,7 +781,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher 0.3.0", "cpufeatures", "zeroize", @@ -826,11 +804,11 @@ dependencies = [ name = "chat-example" version = "0.1.0" dependencies = [ - "async-std", "async-trait", "env_logger 0.10.0", "futures", "libp2p", + "tokio", ] [[package]] @@ -860,15 +838,6 @@ dependencies = [ "half", ] -[[package]] -name = "cipher" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" -dependencies = [ - "generic-array", -] - [[package]] name = "cipher" version = "0.3.0" @@ -890,9 +859,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.21" +version = "4.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd" +checksum = 
"03aef18ddf7d879c15ce20f04826ef8418101c7e528014c3eeea13321047dca3" dependencies = [ "clap_builder", "clap_derive", @@ -901,9 +870,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.21" +version = "4.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa" +checksum = "f8ce6fffb678c9b80a70b6b6de0aad31df727623a70fd9a842c30cd573e2fa98" dependencies = [ "anstream", "anstyle", @@ -920,7 +889,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -964,7 +933,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen", ] @@ -1040,7 +1009,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1085,7 +1054,7 @@ version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", ] @@ -1095,7 +1064,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] @@ -1107,7 +1076,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", "memoffset 0.9.0", "scopeguard", @@ -1119,7 +1088,7 @@ version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1128,18 +1097,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - [[package]] name = "crypto-bigint" version = "0.5.2" @@ -1182,15 +1139,6 @@ dependencies = [ "cipher 0.3.0", ] -[[package]] -name = "ctr" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" -dependencies = [ - "cipher 0.3.0", -] - [[package]] name = "ctr" version = "0.9.2" @@ -1226,11 +1174,11 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f711ade317dd348950a9910f81c5947e3d8907ebd2b83f76203ff1807e6a2bc2" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", @@ -1249,42 +1197,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", -] - -[[package]] -name = "darling" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" -dependencies = [ - "darling_core", - "quote", - "syn 1.0.109", + "syn 2.0.37", ] [[package]] @@ -1314,7 +1227,7 @@ dependencies = [ ] [[package]] -name = "dcutr" +name = "dcutr-example" version = "0.1.0" dependencies = [ "clap", @@ -1325,17 +1238,6 @@ dependencies = [ "log", ] -[[package]] -name = "der" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" -dependencies = [ - "const-oid", - "pem-rfc7468 0.6.0", - "zeroize", -] - [[package]] name = "der" version = "0.7.7" @@ -1343,31 +1245,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946" dependencies = [ "const-oid", - "pem-rfc7468 0.7.0", + "pem-rfc7468", "zeroize", ] -[[package]] -name = "der-parser" -version = "7.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" -dependencies = [ - "asn1-rs 0.3.1", - "displaydoc", - "nom", - "num-bigint", - "num-traits", - "rusticata-macros", -] - [[package]] name = "der-parser" version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ - "asn1-rs 0.5.2", + "asn1-rs", "displaydoc", "nom", "num-bigint", @@ -1375,37 +1263,6 @@ dependencies = [ "rusticata-macros", ] -[[package]] -name = "derive_builder" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d07adf7be193b71cc36b193d0f5fe60b918a3a9db4dad0449f57bcfd519704a3" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" -dependencies = [ - "darling", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "derive_builder_macro" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" -dependencies = [ - "derive_builder_core", - "syn 1.0.109", -] - [[package]] name = "digest" version = "0.9.0" @@ -1427,6 +1284,26 @@ dependencies = [ "subtle", ] +[[package]] +name = "dirs" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + [[package]] name = "displaydoc" version = "0.2.4" @@ -1435,11 +1312,11 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] -name = "distributed-key-value-store" +name = "distributed-key-value-store-example" version = "0.1.0" dependencies = [ "async-std", @@ -1455,30 +1332,18 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" -[[package]] -name = "ecdsa" -version = "0.14.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der 0.6.1", - "elliptic-curve 0.12.3", - "rfc6979 0.3.1", - "signature 1.6.4", -] - [[package]] name = "ecdsa" version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" dependencies = [ - "der 0.7.7", + "der", "digest 0.10.7", - "elliptic-curve 0.13.5", - "rfc6979 0.4.0", - "signature 2.1.0", - "spki 0.7.2", + "elliptic-curve", + "rfc6979", + "signature", + "spki", ] [[package]] @@ -1487,8 +1352,8 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" dependencies = [ - "pkcs8 0.10.2", - "signature 2.1.0", + "pkcs8", + "signature", ] [[package]] @@ -1497,7 +1362,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" dependencies = [ - "curve25519-dalek 4.0.0", + "curve25519-dalek 4.1.1", "ed25519", "rand_core 0.6.4", "serde", @@ -1511,44 +1376,23 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" -[[package]] -name = "elliptic-curve" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" -dependencies = [ - "base16ct 0.1.1", - "crypto-bigint 0.4.9", - "der 0.6.1", - "digest 0.10.7", - "ff 0.12.1", - "generic-array", - "group 0.12.1", - "hkdf", - "pem-rfc7468 0.6.0", - "pkcs8 0.9.0", - "rand_core 0.6.4", - "sec1 0.3.0", - "subtle", - "zeroize", -] - [[package]] name = "elliptic-curve" version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" 
dependencies = [ - "base16ct 0.2.0", - "crypto-bigint 0.5.2", + "base16ct", + "crypto-bigint", "digest 0.10.7", - "ff 0.13.0", + "ff", "generic-array", - "group 0.13.0", - "pem-rfc7468 0.7.0", - "pkcs8 0.10.2", + "group", + "hkdf", + "pem-rfc7468", + "pkcs8", "rand_core 0.6.4", - "sec1 0.7.3", + "sec1", "subtle", "zeroize", ] @@ -1559,19 +1403,19 @@ version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "enum-as-inner" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" +checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.37", ] [[package]] @@ -1667,16 +1511,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "ff" version = "0.13.0" @@ -1689,12 +1523,12 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.1.20" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" +checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" [[package]] -name = "file-sharing" +name = "file-sharing-example" version = "0.1.0" dependencies = [ "async-std", @@ -1709,9 +1543,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", "miniz_oxide", @@ -1762,6 +1596,15 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-bounded" +version = "0.1.0" +dependencies = [ + "futures-timer", + "futures-util", + "tokio", +] + [[package]] name = "futures-channel" version = "0.3.28" @@ -1819,18 +1662,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", -] - -[[package]] -name = "futures-rustls" -version = "0.22.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" -dependencies = [ - "futures-io", - "rustls 0.20.8", - "webpki 0.22.0", + "syn 2.0.37", ] [[package]] @@ -1840,7 +1672,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.21.6", + "rustls 0.21.7", ] [[package]] @@ -1923,7 +1755,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -1934,7 +1766,7 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -1973,6 +1805,19 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "globset" 
+version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" +dependencies = [ + "aho-corasick 0.7.20", + "bstr", + "fnv", + "log", + "regex", +] + [[package]] name = "gloo-timers" version = "0.2.6" @@ -1985,24 +1830,13 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.0", + "ff", "rand_core 0.6.4", "subtle", ] @@ -2210,7 +2044,7 @@ dependencies = [ "rustls 0.20.8", "rustls-native-certs", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.4", ] [[package]] @@ -2227,32 +2061,16 @@ dependencies = [ ] [[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - -[[package]] -name = "identify" +name = "identify-example" version = "0.1.0" dependencies = [ "async-std", "async-trait", + "env_logger 0.10.0", "futures", "libp2p", ] -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "0.4.0" @@ -2293,6 +2111,25 @@ dependencies = [ "windows", ] +[[package]] +name = "igd-next" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"57e065e90a518ab5fedf79aa1e4b784e10f8e484a834f6bda85c42633a2cb7af" +dependencies = [ + "async-trait", + "attohttpc", + "bytes", + "futures", + "http", + "hyper", + "log", + "rand 0.8.5", + "tokio", + "url", + "xmltree", +] + [[package]] name = "indexmap" version = "1.9.3" @@ -2319,6 +2156,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ + "block-padding", "generic-array", ] @@ -2328,7 +2166,7 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -2336,9 +2174,9 @@ dependencies = [ [[package]] name = "interceptor" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c142385498b53584546abbfa50188b2677af8e4f879da1ee5d905cb7de5b97a" +checksum = "5927883184e6a819b22d5e4f5f7bc7ca134fde9b2026fbddd8d95249746ba21e" dependencies = [ "async-trait", "bytes", @@ -2368,6 +2206,7 @@ dependencies = [ "libp2p", "libp2p-mplex", "libp2p-webrtc", + "libp2p-webrtc-websys", "log", "mime_guess", "rand 0.8.5", @@ -2403,14 +2242,14 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.3", + "socket2 0.5.4", "widestring", "windows-sys", - "winreg 0.50.0", + "winreg", ] [[package]] -name = "ipfs-kad" +name = "ipfs-kad-example" version = "0.1.0" dependencies = [ "async-std", @@ -2421,7 +2260,7 @@ dependencies = [ ] [[package]] -name = "ipfs-private" +name = "ipfs-private-example" version = "0.1.0" dependencies = [ "async-std", @@ -2486,7 +2325,7 @@ dependencies = [ name = "keygen" version = "0.1.0" dependencies = [ - "base64 0.21.2", + "base64 0.21.4", "clap", 
"libp2p-core", "libp2p-identity", @@ -2512,9 +2351,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "libp2p" @@ -2557,6 +2396,7 @@ dependencies = [ "libp2p-tcp", "libp2p-tls", "libp2p-uds", + "libp2p-upnp", "libp2p-wasm-ext", "libp2p-websocket", "libp2p-webtransport-websys", @@ -2618,7 +2458,7 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.40.0" +version = "0.40.1" dependencies = [ "async-std", "either", @@ -2681,7 +2521,7 @@ dependencies = [ [[package]] name = "libp2p-deflate" -version = "0.40.0" +version = "0.40.1" dependencies = [ "async-std", "flate2", @@ -2695,10 +2535,11 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.40.0" +version = "0.40.1" dependencies = [ "async-std", "async-std-resolver", + "async-trait", "env_logger 0.10.0", "futures", "libp2p-core", @@ -2735,7 +2576,7 @@ version = "0.45.1" dependencies = [ "async-std", "asynchronous-codec", - "base64 0.21.2", + "base64 0.21.4", "byteorder", "bytes", "either", @@ -2769,13 +2610,14 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.43.0" +version = "0.43.1" dependencies = [ "async-std", "asynchronous-codec", "either", "env_logger 0.10.0", "futures", + "futures-bounded", "futures-timer", "libp2p-core", "libp2p-identity", @@ -2795,21 +2637,22 @@ name = "libp2p-identity" version = "0.2.3" dependencies = [ "asn1_der", - "base64 0.21.2", + "base64 0.21.4", "bs58", "criterion", "ed25519-dalek", "hex-literal", + "hkdf", "libsecp256k1", "log", "multihash", - "p256 0.13.2", + "p256", "quick-protobuf", "quickcheck-ext", "rand 0.8.5", "ring", "rmp-serde", - "sec1 0.7.3", + "sec1", "serde", "serde_json", "sha2 0.10.7", 
@@ -2820,7 +2663,7 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.44.4" +version = "0.44.6" dependencies = [ "arrayvec", "async-std", @@ -2841,6 +2684,7 @@ dependencies = [ "libp2p-yamux", "log", "quick-protobuf", + "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", "serde", @@ -2872,7 +2716,7 @@ dependencies = [ "log", "rand 0.8.5", "smallvec", - "socket2 0.5.3", + "socket2 0.5.4", "tokio", "trust-dns-proto", "void", @@ -2951,10 +2795,10 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.43.0" +version = "0.43.1" dependencies = [ "bytes", - "curve25519-dalek 3.2.0", + "curve25519-dalek 4.1.1", "env_logger 0.10.0", "futures", "futures_ringbuf", @@ -3006,7 +2850,7 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.43.0" +version = "0.43.1" dependencies = [ "async-std", "either", @@ -3026,7 +2870,7 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.40.0" +version = "0.40.1" dependencies = [ "asynchronous-codec", "bytes", @@ -3085,8 +2929,9 @@ dependencies = [ "quickcheck", "quinn", "rand 0.8.5", - "rustls 0.21.6", - "socket2 0.5.3", + "ring", + "rustls 0.21.7", + "socket2 0.5.4", "thiserror", "tokio", ] @@ -3100,6 +2945,7 @@ dependencies = [ "either", "env_logger 0.10.0", "futures", + "futures-bounded", "futures-timer", "instant", "libp2p-core", @@ -3174,9 +3020,29 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-server" +version = "0.12.3" +dependencies = [ + "base64 0.21.4", + "clap", + "env_logger 0.10.0", + "futures", + "futures-timer", + "hyper", + "libp2p", + "log", + "prometheus-client", + "serde", + "serde_derive", + "serde_json", + "tokio", + "zeroize", +] + [[package]] name = "libp2p-swarm" -version = "0.43.3" +version = "0.43.5" dependencies = [ "async-std", "either", @@ -3215,7 +3081,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -3249,16 +3115,16 @@ dependencies = [ "libp2p-core", "libp2p-identity", 
"log", - "socket2 0.5.3", + "socket2 0.5.4", "tokio", ] [[package]] name = "libp2p-tls" -version = "0.2.0" +version = "0.2.1" dependencies = [ "futures", - "futures-rustls 0.24.0", + "futures-rustls", "hex", "hex-literal", "libp2p-core", @@ -3267,11 +3133,11 @@ dependencies = [ "libp2p-yamux", "rcgen 0.10.0", "ring", - "rustls 0.21.6", + "rustls 0.21.7", + "rustls-webpki", "thiserror", "tokio", - "webpki 0.22.0", - "x509-parser 0.15.1", + "x509-parser", "yasna", ] @@ -3287,6 +3153,20 @@ dependencies = [ "tokio", ] +[[package]] +name = "libp2p-upnp" +version = "0.1.1" +dependencies = [ + "futures", + "futures-timer", + "igd-next", + "libp2p-core", + "libp2p-swarm", + "log", + "tokio", + "void", +] + [[package]] name = "libp2p-wasm-ext" version = "0.40.0" @@ -3301,50 +3181,90 @@ dependencies = [ [[package]] name = "libp2p-webrtc" -version = "0.6.0-alpha" +version = "0.6.1-alpha" dependencies = [ - "anyhow", "async-trait", - "asynchronous-codec", "bytes", "env_logger 0.10.0", "futures", "futures-timer", "hex", - "hex-literal", "if-watch", "libp2p-core", "libp2p-identity", "libp2p-noise", - "libp2p-ping", - "libp2p-swarm", + "libp2p-webrtc-utils", "log", "multihash", - "quick-protobuf", - "quick-protobuf-codec", "quickcheck", "rand 0.8.5", - "rcgen 0.10.0", + "rcgen 0.11.1", "serde", - "sha2 0.10.7", "stun", "thiserror", "tinytemplate", "tokio", "tokio-util", - "unsigned-varint", - "void", "webrtc", ] +[[package]] +name = "libp2p-webrtc-utils" +version = "0.1.0" +dependencies = [ + "asynchronous-codec", + "bytes", + "futures", + "hex", + "hex-literal", + "libp2p-core", + "libp2p-identity", + "libp2p-noise", + "log", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "serde", + "sha2 0.10.7", + "thiserror", + "tinytemplate", + "unsigned-varint", +] + +[[package]] +name = "libp2p-webrtc-websys" +version = "0.1.0-alpha" +dependencies = [ + "bytes", + "futures", + "futures-timer", + "getrandom 0.2.10", + "hex", + "hex-literal", + "js-sys", + "libp2p-core", + 
"libp2p-identity", + "libp2p-noise", + "libp2p-ping", + "libp2p-swarm", + "libp2p-webrtc-utils", + "log", + "send_wrapper 0.6.0", + "serde", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "libp2p-websocket" -version = "0.42.0" +version = "0.42.1" dependencies = [ "async-std", "either", "futures", - "futures-rustls 0.22.2", + "futures-rustls", "libp2p-core", "libp2p-dns", "libp2p-identity", @@ -3356,7 +3276,7 @@ dependencies = [ "rw-stream-sink", "soketto", "url", - "webpki-roots 0.25.2", + "webpki-roots", ] [[package]] @@ -3479,9 +3399,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eedb2bdbad7e0634f83989bf596f497b070130daaa398ab22d84c39e266deec5" +checksum = "a4a83fb7698b3643a0e34f9ae6f2e8f0178c0fd42f8b59d493aa271ff3a5bf21" dependencies = [ "hashbrown 0.14.0", ] @@ -3510,12 +3430,6 @@ dependencies = [ "regex-automata 0.1.10", ] -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - [[package]] name = "matchit" version = "0.7.1" @@ -3533,15 +3447,15 @@ dependencies = [ [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "memoffset" -version = "0.6.5" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" dependencies = [ "autocfg", ] @@ -3652,9 +3566,9 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.0" 
+version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd59dcc2bbe70baabeac52cd22ae52c55eefe6c38ff11a9439f16a350a939f2" +checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" dependencies = [ "arbitrary", "core2", @@ -3774,9 +3688,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ "bitflags 1.3.2", - "cfg-if 1.0.0", + "cfg-if", "libc", - "memoffset 0.6.5", +] + +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", + "memoffset 0.7.1", + "pin-utils", ] [[package]] @@ -3863,22 +3789,13 @@ dependencies = [ "memchr", ] -[[package]] -name = "oid-registry" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38e20717fa0541f39bd146692035c37bedfa532b3e5071b35761082407546b2a" -dependencies = [ - "asn1-rs 0.3.1", -] - [[package]] name = "oid-registry" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" dependencies = [ - "asn1-rs 0.5.2", + "asn1-rs", ] [[package]] @@ -3906,7 +3823,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ "bitflags 1.3.2", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "libc", "once_cell", @@ -3922,7 +3839,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -3949,37 +3866,27 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "p256" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" -dependencies = [ - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.7", -] - [[package]] name = "p256" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "ecdsa 0.16.8", - "elliptic-curve 0.13.5", + "ecdsa", + "elliptic-curve", "primeorder", "sha2 0.10.7", ] [[package]] name = "p384" -version = "0.11.2" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" +checksum = "70786f51bcc69f6a4c0360e063a4cac5419ef7c5cd5b3c99ad70f3be5ba79209" dependencies = [ - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", + "ecdsa", + "elliptic-curve", + "primeorder", "sha2 0.10.7", ] @@ -4005,9 +3912,9 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.3.5", "smallvec", "windows-targets", ] @@ -4028,12 +3935,23 @@ dependencies = [ ] [[package]] -name = "pem-rfc7468" -version = "0.6.0" +name = "pem" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" +checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" dependencies = [ - "base64ct", + "base64 0.21.4", + "serde", +] + +[[package]] +name = "pem" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923" 
+dependencies = [ + "base64 0.21.4", + "serde", ] [[package]] @@ -4068,7 +3986,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -4095,28 +4013,19 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", + "env_logger 0.10.0", "futures", "libp2p", ] -[[package]] -name = "pkcs8" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der 0.6.1", - "spki 0.6.0", -] - [[package]] name = "pkcs8" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.7", - "spki 0.7.2", + "der", + "spki", ] [[package]] @@ -4167,7 +4076,7 @@ checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg", "bitflags 1.3.2", - "cfg-if 1.0.0", + "cfg-if", "concurrent-queue", "libc", "log", @@ -4192,7 +4101,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "opaque-debug", "universal-hash 0.4.0", @@ -4204,7 +4113,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "opaque-debug", "universal-hash 0.5.1", @@ -4222,7 +4131,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c2fcef82c0ec6eefcc179b978446c399b3cdf73c392c35604e399eee6df1ee3" dependencies = [ - "elliptic-curve 0.13.5", + "elliptic-curve", ] [[package]] @@ -4251,20 +4160,20 @@ dependencies = [ [[package]] name 
= "proc-macro-warning" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70550716265d1ec349c41f70dd4f964b4fd88394efe4405f0c1da679c4799a07" +checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "proc-macro2" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] @@ -4362,7 +4271,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.21.6", + "rustls 0.21.7", "thiserror", "tokio", "tracing", @@ -4370,15 +4279,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.2" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8c8bb234e70c863204303507d841e7fa2295e95c822b2bb4ca8ebf57f17b1cb" +checksum = "2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" dependencies = [ "bytes", "rand 0.8.5", "ring", "rustc-hash", - "rustls 0.21.6", + "rustls 0.21.7", "slab", "thiserror", "tinyvec", @@ -4393,16 +4302,16 @@ checksum = "6df19e284d93757a9fb91d63672f7741b129246a669db09d1c0063071debc0c0" dependencies = [ "bytes", "libc", - "socket2 0.5.3", + "socket2 0.5.4", "tracing", "windows-sys", ] [[package]] name = "quote" -version = "1.0.32" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -4502,11 +4411,11 @@ dependencies = [ [[package]] name = "rcgen" -version = "0.9.3" +version = "0.10.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" +checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ - "pem", + "pem 1.1.1", "ring", "time", "yasna", @@ -4514,22 +4423,22 @@ dependencies = [ [[package]] name = "rcgen" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" +checksum = "4954fbc00dcd4d8282c987710e50ba513d351400dbdd00e803a05172a90d8976" dependencies = [ - "pem", + "pem 2.0.1", "ring", "time", - "x509-parser 0.14.0", + "x509-parser", "yasna", ] [[package]] name = "redis" -version = "0.23.2" +version = "0.23.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffd6543a7bc6428396845f6854ccf3d1ae8823816592e2cbe74f20f50f209d02" +checksum = "4f49cdc0bb3f412bf8e7d1bd90fe1d9eb10bc5c399ba90973c14662a27b3f8ba" dependencies = [ "async-trait", "bytes", @@ -4544,6 +4453,15 @@ dependencies = [ "url", ] +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_syscall" version = "0.3.5" @@ -4554,15 +4472,26 @@ dependencies = [ ] [[package]] -name = "regex" -version = "1.9.3" +name = "redox_users" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "aho-corasick", + "getrandom 0.2.10", + "redox_syscall 0.2.16", + "thiserror", +] + +[[package]] +name = "regex" +version = "1.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +dependencies = [ + "aho-corasick 1.0.2", "memchr", - "regex-automata 0.3.6", - "regex-syntax 0.7.4", + "regex-automata 0.3.8", + "regex-syntax 0.7.5", ] [[package]] @@ -4576,13 +4505,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.6" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ - "aho-corasick", + "aho-corasick 1.0.2", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.7.5", ] [[package]] @@ -4593,9 +4522,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "relay-server-example" @@ -4624,11 +4553,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.18" +version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" +checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ - "base64 0.21.2", + "base64 0.21.4", "bytes", "encoding_rs", "futures-core", @@ -4656,7 +4585,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.10.1", + "winreg", ] [[package]] @@ -4669,17 +4598,6 @@ dependencies = [ "quick-error", ] -[[package]] -name = "rfc6979" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint 0.4.9", 
- "hmac 0.12.1", - "zeroize", -] - [[package]] name = "rfc6979" version = "0.4.0" @@ -4738,9 +4656,9 @@ dependencies = [ [[package]] name = "rtcp" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6423493804221c276d27f3cc383cd5cbe1a1f10f210909fd4951b579b01293cd" +checksum = "3677908cadfbecb4cc1da9a56a32524fae4ebdfa7c2ea93886e1b1e846488cb9" dependencies = [ "bytes", "thiserror", @@ -4758,16 +4676,16 @@ dependencies = [ "log", "netlink-packet-route", "netlink-proto", - "nix", + "nix 0.24.3", "thiserror", "tokio", ] [[package]] name = "rtp" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b728adb99b88d932f2f0622b540bf7ccb196f81e9823b5b0eeb166526c88138c" +checksum = "e60482acbe8afb31edf6b1413103b7bca7a65004c423b3c3993749a083994fbe" dependencies = [ "bytes", "rand 0.8.5", @@ -4778,9 +4696,9 @@ dependencies = [ [[package]] name = "rust-embed" -version = "6.8.1" +version = "8.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a36224c3276f8c4ebc8c20f158eca7ca4359c8db89991c4925132aaaf6702661" +checksum = "b1e7d90385b59f0a6bf3d3b757f3ca4ece2048265d70db20a2016043d4509a40" dependencies = [ "rust-embed-impl", "rust-embed-utils", @@ -4789,23 +4707,25 @@ dependencies = [ [[package]] name = "rust-embed-impl" -version = "6.8.1" +version = "8.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b94b81e5b2c284684141a2fb9e2a31be90638caf040bf9afbc5a0416afe1ac" +checksum = "3c3d8c6fd84090ae348e63a84336b112b5c3918b3bf0493a581f7bd8ee623c29" dependencies = [ "proc-macro2", "quote", "rust-embed-utils", - "syn 2.0.28", + "shellexpand", + "syn 2.0.37", "walkdir", ] [[package]] name = "rust-embed-utils" -version = "7.8.1" +version = "8.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d38ff6bf570dc3bb7100fce9f7b60c33fa71d80e88da3f2580df4ff2bdded74" +checksum = 
"873feff8cb7bf86fdf0a71bb21c95159f4e4a37dd7a4bd1855a940909b583ada" dependencies = [ + "globset", "sha2 0.10.7", "walkdir", ] @@ -4867,19 +4787,6 @@ dependencies = [ "windows-sys", ] -[[package]] -name = "rustls" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" -dependencies = [ - "base64 0.13.1", - "log", - "ring", - "sct 0.6.1", - "webpki 0.21.4", -] - [[package]] name = "rustls" version = "0.20.8" @@ -4888,20 +4795,20 @@ checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", - "sct 0.7.0", - "webpki 0.22.0", + "sct", + "webpki", ] [[package]] name = "rustls" -version = "0.21.6" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" +checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring", "rustls-webpki", - "sct 0.7.0", + "sct", ] [[package]] @@ -4922,14 +4829,14 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.2", + "base64 0.21.4", ] [[package]] name = "rustls-webpki" -version = "0.101.2" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "513722fd73ad80a71f72b61009ea1b584bcfa1483ca93949c8f290298837fa59" +checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ "ring", "untrusted", @@ -4996,16 +4903,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sct" version = "0.7.0" @@ -5018,9 +4915,9 @@ dependencies = [ [[package]] name = "sdp" -version = "0.5.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d22a5ef407871893fd72b4562ee15e4742269b173959db4b8df6f538c414e13" +checksum = "4653054c30ebce63658762eb0d64e27673868a95564474811ae6c220cf767640" dependencies = [ "rand 0.8.5", "substring", @@ -5028,30 +4925,16 @@ dependencies = [ "url", ] -[[package]] -name = "sec1" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" -dependencies = [ - "base16ct 0.1.1", - "der 0.6.1", - "generic-array", - "pkcs8 0.9.0", - "subtle", - "zeroize", -] - [[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct 0.2.0", - "der 0.7.7", + "base16ct", + "der", "generic-array", - "pkcs8 0.10.2", + "pkcs8", "subtle", "zeroize", ] @@ -5102,29 +4985,29 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.183" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.183" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] name = "serde_json" 
-version = "1.0.104" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "indexmap 2.0.0", "itoa", @@ -5150,7 +5033,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -5172,7 +5055,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -5184,7 +5067,7 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -5196,7 +5079,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -5208,7 +5091,7 @@ version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -5232,6 +5115,15 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shellexpand" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ccc8076840c4da029af4f87e4e8daeb0fca6b87bbb02e10cb60b791450e11e4" +dependencies = [ + "dirs", +] + [[package]] name = "signal-hook" version = "0.3.17" @@ -5251,16 +5143,6 @@ dependencies = [ "libc", ] -[[package]] -name = 
"signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", -] - [[package]] name = "signature" version = "2.1.0" @@ -5282,9 +5164,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "smol" @@ -5321,7 +5203,7 @@ dependencies = [ "aes-gcm 0.9.2", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0", + "curve25519-dalek 4.1.1", "rand_core 0.6.4", "ring", "rustc_version", @@ -5341,9 +5223,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" dependencies = [ "libc", "windows-sys", @@ -5370,16 +5252,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "spki" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der 0.6.1", -] - [[package]] name = "spki" version = "0.7.2" @@ -5387,7 +5259,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", - "der 0.7.7", + "der", ] [[package]] @@ -5413,11 +5285,11 @@ checksum = 
"73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "stun" -version = "0.4.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7e94b1ec00bad60e6410e058b52f1c66de3dc5fe4d62d09b3e52bb7d3b73e25" +checksum = "7beb1624a3ea34778d58d30e2b8606b4d29fe65e87c4d50b87ed30afd5c3830c" dependencies = [ - "base64 0.13.1", + "base64 0.21.4", "crc", "lazy_static", "md-5", @@ -5458,9 +5330,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.28" +version = "2.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" +checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" dependencies = [ "proc-macro2", "quote", @@ -5487,11 +5359,11 @@ dependencies = [ [[package]] name = "sysinfo" -version = "0.29.8" +version = "0.29.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d10ed79c22663a35a255d289a7fdcb43559fc77ff15df5ce6c341809e7867528" +checksum = "0a18d114d420ada3a891e6bc8e96a2023402203296a47cdd65083377dad18ba5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "core-foundation-sys", "libc", "ntapi", @@ -5523,13 +5395,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.7.1" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand 2.0.0", - "redox_syscall", + "redox_syscall 0.3.5", "rustix 0.38.4", "windows-sys", ] @@ -5550,7 +5422,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf0fe180d5f1f7dd32bb5f1a8d19231bb63dc9bbb1985e1dbb6f07163b6a8578" dependencies = [ "async-trait", - "base64 0.21.2", + "base64 0.21.4", "cookie", "fantoccini", "futures", @@ -5583,22 +5455,22 @@ 
dependencies = [ [[package]] name = "thiserror" -version = "1.0.44" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" +checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.44" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" +checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -5607,7 +5479,7 @@ version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] @@ -5665,9 +5537,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.31.0" +version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40de3a2ba249dcb097e01be5e67a5ff53cf250397715a071a81543e8a832a920" +checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" dependencies = [ "backtrace", "bytes", @@ -5677,7 +5549,7 @@ dependencies = [ "parking_lot", "pin-project-lite 0.2.12", "signal-hook-registry", - "socket2 0.5.3", + "socket2 0.5.4", "tokio-macros", "windows-sys", ] @@ -5690,7 +5562,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -5711,14 +5583,24 @@ checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls 0.20.8", "tokio", - "webpki 0.22.0", + "webpki", +] + +[[package]] +name = "tokio-rustls" +version = 
"0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.7", + "tokio", ] [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" dependencies = [ "bytes", "futures-core", @@ -5747,9 +5629,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" +checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "bitflags 2.3.3", "bytes", @@ -5788,7 +5670,7 @@ version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "log", "pin-project-lite 0.2.12", "tracing-attributes", @@ -5803,7 +5685,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] [[package]] @@ -5847,13 +5729,13 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" +checksum = "0dc775440033cb114085f6f2437682b194fa7546466024b1037e82a48a052a69" dependencies = [ "async-trait", "bytes", - "cfg-if 1.0.0", + "cfg-if", "data-encoding", "enum-as-inner", "futures-channel", @@ -5861,45 +5743,46 @@ dependencies = [ "futures-util", "h2", "http", - "idna 0.2.3", + "idna", "ipnet", - "lazy_static", + "once_cell", "rand 
0.8.5", - "rustls 0.20.8", + "rustls 0.21.7", "rustls-pemfile", + "rustls-webpki", "smallvec", - "socket2 0.4.9", + "socket2 0.5.4", "thiserror", "tinyvec", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", "tracing", "url", - "webpki 0.22.0", - "webpki-roots 0.22.6", + "webpki-roots", ] [[package]] name = "trust-dns-resolver" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" +checksum = "2dff7aed33ef3e8bf2c9966fccdfed93f93d46f432282ea875cd66faabc6ef2f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "futures-util", "ipconfig", - "lazy_static", "lru-cache", + "once_cell", "parking_lot", + "rand 0.8.5", "resolv-conf", - "rustls 0.20.8", + "rustls 0.21.7", "smallvec", "thiserror", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", "tracing", "trust-dns-proto", - "webpki-roots 0.22.6", + "webpki-roots", ] [[package]] @@ -5910,9 +5793,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "trybuild" -version = "1.0.83" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6df60d81823ed9c520ee897489573da4b1d79ffbe006b8134f46de1a1aa03555" +checksum = "196a58260a906cedb9bf6d8034b6379d0c11f552416960452f267402ceeddff1" dependencies = [ "basic-toml", "glob", @@ -5925,12 +5808,12 @@ dependencies = [ [[package]] name = "turn" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4712ee30d123ec7ae26d1e1b218395a16c87cdbaf4b3925d170d684af62ea5e8" +checksum = "58f4fcb97da0426e8146fe0e9b78cc13120161087256198701d12d9df77f7701" dependencies = [ "async-trait", - "base64 0.13.1", + "base64 0.21.4", "futures", "log", "md-5", @@ -6024,9 +5907,9 @@ dependencies = [ [[package]] name = "unsigned-varint" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" dependencies = [ "asynchronous-codec", "bytes", @@ -6038,14 +5921,23 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "upnp-example" +version = "0.1.0" +dependencies = [ + "futures", + "libp2p", + "tokio", +] + [[package]] name = "url" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna", "percent-encoding", ] @@ -6146,7 +6038,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] @@ -6161,7 +6053,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", "wasm-bindgen-shared", ] @@ -6171,7 +6063,7 @@ version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -6195,7 +6087,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6272,33 +6164,14 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.4" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +checksum = "07ecc0cd7cac091bf682ec5efa18b1cff79d617b84181f38b3951dbe135f607f" dependencies = [ "ring", "untrusted", ] -[[package]] -name = "webpki" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki-roots" -version = "0.22.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" -dependencies = [ - "webpki 0.22.0", -] - [[package]] name = "webpki-roots" version = "0.25.2" @@ -6307,26 +6180,26 @@ checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" [[package]] name = "webrtc" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f60dde9fd592872bc371b3842e4616bc4c6984242e3cd2a7d7cb771db278601b" +checksum = "d91e7cf018f7185552bf6a5dd839f4ed9827aea33b746763c9a215f84a0d0b34" dependencies = [ "arc-swap", "async-trait", "bytes", - "cfg-if 0.1.10", + "cfg-if", "hex", "interceptor", "lazy_static", "log", - "pem", + "pem 3.0.2", "rand 0.8.5", - "rcgen 0.10.0", + "rcgen 0.11.1", "regex", "ring", "rtcp", "rtp", - "rustls 0.19.1", + "rustls 0.21.7", "sdp", "serde", "serde_json", @@ -6351,12 +6224,11 @@ dependencies = [ [[package]] name = "webrtc-data" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c3c7ba7d11733e448d8d2d054814e97c558f52293f0e0a2eb05840f28b3be12" +checksum = "a45d2461d0e0bf93f181e30eb0b40df32b8bf3efb89c53cebb1990e603e2067d" dependencies = [ "bytes", - "derive_builder", "log", "thiserror", "tokio", @@ -6366,51 +6238,46 @@ dependencies = [ [[package]] name = "webrtc-dtls" -version = "0.7.1" +version = "0.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" +checksum = "32b140b953f986e97828aa33ec6318186b05d862bee689efbc57af04a243e832" dependencies = [ - "aes 0.6.0", + "aes 0.8.3", "aes-gcm 0.10.2", "async-trait", "bincode", - "block-modes", "byteorder", + "cbc", "ccm", - "curve25519-dalek 3.2.0", - "der-parser 8.2.0", - "elliptic-curve 0.12.3", + "der-parser", "hkdf", "hmac 0.12.1", "log", - "oid-registry 0.6.1", - "p256 0.11.1", + "p256", "p384", - "pem", + "pem 3.0.2", "rand 0.8.5", "rand_core 0.6.4", - "rcgen 0.9.3", + "rcgen 0.11.1", "ring", - "rustls 0.19.1", - "sec1 0.3.0", + "rustls 0.21.7", + "sec1", "serde", "sha1", "sha2 0.10.7", - "signature 1.6.4", "subtle", "thiserror", "tokio", - "webpki 0.21.4", "webrtc-util", - "x25519-dalek 2.0.0-pre.1", - "x509-parser 0.13.2", + "x25519-dalek 2.0.0", + "x509-parser", ] [[package]] name = "webrtc-ice" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80" +checksum = "66eb4b85646f1c52225779db3e1e7e873dede6db68cc9be080b648f1713083a3" dependencies = [ "arc-swap", "async-trait", @@ -6432,12 +6299,12 @@ dependencies = [ [[package]] name = "webrtc-mdns" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" +checksum = "62bebbd40e7f8b630a0f1a74783dbfff1edfc0ccaae891c4689891156a8c4d8c" dependencies = [ "log", - "socket2 0.4.9", + "socket2 0.5.4", "thiserror", "tokio", "webrtc-util", @@ -6445,9 +6312,9 @@ dependencies = [ [[package]] name = "webrtc-media" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8e3711a321f6a375973144f48065cf705316ab6709672954aace020c668eb6" +checksum = 
"1cfde3c7b9450b67d466bb2f02c6d9ff9514d33535eb9994942afd1f828839d1" dependencies = [ "byteorder", "bytes", @@ -6458,9 +6325,9 @@ dependencies = [ [[package]] name = "webrtc-sctp" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7df742d91cfbd982f6ab2bfd45a7c3ddfce5b2f55913b2f63877404d1b3259db" +checksum = "1af6116b7f9703560c3ad0b32f67220b171bb1b59633b03563db8404d0e482ea" dependencies = [ "arc-swap", "async-trait", @@ -6475,16 +6342,16 @@ dependencies = [ [[package]] name = "webrtc-srtp" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5683b597b3c6af47ff11e695697f881bc42acfd8feeb0d4eb20a5ae9caaee6ae" +checksum = "c1db1f36c1c81e4b1e531c0b9678ba0c93809e196ce62122d87259bb71c03b9f" dependencies = [ - "aead 0.4.3", - "aes 0.7.5", + "aead 0.5.2", + "aes 0.8.3", "aes-gcm 0.10.2", "byteorder", "bytes", - "ctr 0.8.0", + "ctr 0.9.2", "hmac 0.12.1", "log", "rtcp", @@ -6498,9 +6365,9 @@ dependencies = [ [[package]] name = "webrtc-util" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f1db1727772c05cf7a2cfece52c3aca8045ca1e176cd517d323489aa3c6d87" +checksum = "1adc96bee68417e1f4d19dd7698124a7f859db55ae2fd3eedbbb7e732f614735" dependencies = [ "async-trait", "bitflags 1.3.2", @@ -6510,7 +6377,7 @@ dependencies = [ "lazy_static", "libc", "log", - "nix", + "nix 0.26.4", "rand 0.8.5", "thiserror", "tokio", @@ -6681,22 +6548,13 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" -[[package]] -name = "winreg" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" -dependencies = [ - "winapi", -] - [[package]] name = "winreg" version = "0.50.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys", ] @@ -6713,69 +6571,49 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0-pre.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df" +checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 3.2.0", + "curve25519-dalek 4.1.1", "rand_core 0.6.4", + "serde", "zeroize", ] -[[package]] -name = "x509-parser" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb9bace5b5589ffead1afb76e43e34cff39cd0f3ce7e170ae0c29e53b88eb1c" -dependencies = [ - "asn1-rs 0.3.1", - "base64 0.13.1", - "data-encoding", - "der-parser 7.0.0", - "lazy_static", - "nom", - "oid-registry 0.4.0", - "rusticata-macros", - "thiserror", - "time", -] - -[[package]] -name = "x509-parser" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" -dependencies = [ - "asn1-rs 0.5.2", - "base64 0.13.1", - "data-encoding", - "der-parser 8.2.0", - "lazy_static", - "nom", - "oid-registry 0.6.1", - "ring", - "rusticata-macros", - "thiserror", - "time", -] - [[package]] name = "x509-parser" version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" dependencies = [ - "asn1-rs 0.5.2", + "asn1-rs", "data-encoding", - "der-parser 8.2.0", + "der-parser", "lazy_static", "nom", - "oid-registry 0.6.1", + "oid-registry", + "ring", "rusticata-macros", "thiserror", "time", ] +[[package]] +name = "xml-rs" +version = "0.8.17" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "1eee6bf5926be7cf998d7381a9a23d833fd493f6a8034658a9505a4dc4b20444" + +[[package]] +name = "xmltree" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7d8a75eaf6557bb84a65ace8609883db44a29951042ada9b393151532e41fcb" +dependencies = [ + "xml-rs", +] + [[package]] name = "yamux" version = "0.12.0" @@ -6817,5 +6655,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.28", + "syn 2.0.37", ] diff --git a/Cargo.toml b/Cargo.toml index e42a905b..73ae23d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,8 @@ members = [ "core", "examples/autonat", - "examples/chat-example", + "examples/browser-webrtc", + "examples/chat", "examples/dcutr", "examples/distributed-key-value-store", "examples/file-sharing", @@ -10,13 +11,15 @@ members = [ "examples/ipfs-kad", "examples/ipfs-private", "examples/metrics", - "examples/ping-example", + "examples/ping", "examples/relay-server", "examples/rendezvous", + "examples/upnp", "identity", "interop-tests", "misc/allow-block-list", "misc/connection-limits", + "misc/futures-bounded", "misc/keygen", "misc/memory-connection-limits", "misc/metrics", @@ -24,6 +27,8 @@ members = [ "misc/quick-protobuf-codec", "misc/quickcheck-ext", "misc/rw-stream-sink", + "misc/server", + "misc/webrtc-utils", "muxers/mplex", "muxers/test-harness", "muxers/yamux", @@ -39,6 +44,7 @@ members = [ "protocols/relay", "protocols/rendezvous", "protocols/request-response", + "protocols/upnp", "swarm", "swarm-derive", "swarm-test", @@ -53,6 +59,7 @@ members = [ "transports/uds", "transports/wasm-ext", "transports/webrtc", + "transports/webrtc-websys", "transports/websocket", "transports/webtransport-websys", "wasm-tests/webtransport-tests", @@ -63,41 +70,47 @@ resolver = "2" rust-version = "1.65.0" [workspace.dependencies] +futures-bounded = { version = "0.1.0", path = "misc/futures-bounded" } +libp2p = { version = "0.52.3", path 
= "libp2p" } libp2p-allow-block-list = { version = "0.2.0", path = "misc/allow-block-list" } libp2p-autonat = { version = "0.11.0", path = "protocols/autonat" } libp2p-connection-limits = { version = "0.2.1", path = "misc/connection-limits" } -libp2p-core = { version = "0.40.0", path = "core" } +libp2p-core = { version = "0.40.1", path = "core" } libp2p-dcutr = { version = "0.10.0", path = "protocols/dcutr" } -libp2p-deflate = { version = "0.40.0", path = "transports/deflate" } -libp2p-dns = { version = "0.40.0", path = "transports/dns" } +libp2p-deflate = { version = "0.40.1", path = "transports/deflate" } +libp2p-dns = { version = "0.40.1", path = "transports/dns" } libp2p-floodsub = { version = "0.43.0", path = "protocols/floodsub" } libp2p-gossipsub = { version = "0.45.1", path = "protocols/gossipsub" } -libp2p-identify = { version = "0.43.0", path = "protocols/identify" } +libp2p-identify = { version = "0.43.1", path = "protocols/identify" } libp2p-identity = { version = "0.2.3" } -libp2p-kad = { version = "0.44.4", path = "protocols/kad" } +libp2p-kad = { version = "0.44.6", path = "protocols/kad" } libp2p-mdns = { version = "0.44.0", path = "protocols/mdns" } libp2p-memory-connection-limits = { version = "0.1.0", path = "misc/memory-connection-limits" } libp2p-metrics = { version = "0.13.1", path = "misc/metrics" } libp2p-mplex = { version = "0.40.0", path = "muxers/mplex" } libp2p-muxer-test-harness = { path = "muxers/test-harness" } -libp2p-noise = { version = "0.43.0", path = "transports/noise" } +libp2p-noise = { version = "0.43.1", path = "transports/noise" } libp2p-perf = { version = "0.2.0", path = "protocols/perf" } -libp2p-ping = { version = "0.43.0", path = "protocols/ping" } -libp2p-plaintext = { version = "0.40.0", path = "transports/plaintext" } +libp2p-ping = { version = "0.43.1", path = "protocols/ping" } +libp2p-plaintext = { version = "0.40.1", path = "transports/plaintext" } libp2p-pnet = { version = "0.23.0", path = "transports/pnet" } 
libp2p-quic = { version = "0.9.2", path = "transports/quic" } libp2p-relay = { version = "0.16.1", path = "protocols/relay" } libp2p-rendezvous = { version = "0.13.0", path = "protocols/rendezvous" } +libp2p-upnp = { version = "0.1.1", path = "protocols/upnp" } libp2p-request-response = { version = "0.25.1", path = "protocols/request-response" } -libp2p-swarm = { version = "0.43.3", path = "swarm" } +libp2p-server = { version = "0.12.3", path = "misc/server" } +libp2p-swarm = { version = "0.43.5", path = "swarm" } libp2p-swarm-derive = { version = "0.33.0", path = "swarm-derive" } libp2p-swarm-test = { version = "0.2.0", path = "swarm-test" } libp2p-tcp = { version = "0.40.0", path = "transports/tcp" } -libp2p-tls = { version = "0.2.0", path = "transports/tls" } +libp2p-tls = { version = "0.2.1", path = "transports/tls" } libp2p-uds = { version = "0.39.0", path = "transports/uds" } libp2p-wasm-ext = { version = "0.40.0", path = "transports/wasm-ext" } -libp2p-webrtc = { version = "0.6.0-alpha", path = "transports/webrtc" } -libp2p-websocket = { version = "0.42.0", path = "transports/websocket" } +libp2p-webrtc = { version = "0.6.1-alpha", path = "transports/webrtc" } +libp2p-webrtc-utils = { version = "0.1.0", path = "misc/webrtc-utils" } +libp2p-webrtc-websys = { version = "0.1.0-alpha", path = "transports/webrtc-websys" } +libp2p-websocket = { version = "0.42.1", path = "transports/websocket" } libp2p-webtransport-websys = { version = "0.1.0", path = "transports/webtransport-websys" } libp2p-yamux = { version = "0.44.1", path = "muxers/yamux" } multistream-select = { version = "0.13.0", path = "misc/multistream-select" } @@ -105,8 +118,7 @@ quick-protobuf-codec = { version = "0.2.0", path = "misc/quick-protobuf-codec" } quickcheck = { package = "quickcheck-ext", path = "misc/quickcheck-ext" } rw-stream-sink = { version = "0.4.0", path = "misc/rw-stream-sink" } multiaddr = "0.18.0" -multihash = "0.19.0" - +multihash = "0.19.1" [patch.crates-io] diff --git 
a/ROADMAP.md b/ROADMAP.md index 0d5533f9..0d422a6d 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -80,15 +80,6 @@ This makes rust-libp2p a truly end-to-end solution, enabling users to use rust-l Reduce maintenance burden and reduce dependency footprint. -### Automate port-forwarding e.g. via UPnP - -| Category | Status | Target Completion | Tracking | Dependencies | Dependents | -|--------------|--------|-------------------|---------------------------------------------------|--------------|------------| -| Connectivity | todo | | https://github.com/libp2p/rust-libp2p/issues/3903 | | | - -Leverage protocols like UPnP to configure port-forwarding on ones router when behind NAT and/or firewall. -Another technique in addition to hole punching increasing the probability for a node to become publicly reachable when behind a firewall and/or NAT. - ## Done ### Alpha QUIC support @@ -176,3 +167,13 @@ Kademlia operations. We added alpha support for QUIC in Q4/2022 wrapping `quinn-proto`. Evaluate using `quinn` directly, replacing the wrapper. + +### Automate port-forwarding e.g. via UPnP + +| Category | Status | Target Completion | Tracking | Dependencies | Dependents | +|--------------|--------|-------------------|---------------------------------------------------|--------------|------------| +| Connectivity | done | | https://github.com/libp2p/rust-libp2p/pull/4156 | | | + +Leverage protocols like UPnP to configure port-forwarding on ones router when behind NAT and/or firewall. +Another technique in addition to hole punching increasing the probability for a node to become publicly reachable when behind a firewall and/or NAT. + diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index ecc1eff2..ea7bc2bd 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,4 +1,11 @@ -## 0.40.0 +## 0.40.1 + +- Implement `Debug` for `StreamMuxerEvent`. + See [PR 4426]. 
+ +[PR 4426]: https://github.com/libp2p/rust-libp2p/pull/4426 + +## 0.40.0 - Allow `ListenerId` to be user-controlled, i.e. to be provided on `Transport::listen_on`. See [PR 3567]. diff --git a/core/Cargo.toml b/core/Cargo.toml index e71c4d0f..ecf3c153 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-core" edition = "2021" rust-version = { workspace = true } description = "Core traits and structs of libp2p" -version = "0.40.0" +version = "0.40.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -28,7 +28,7 @@ quick-protobuf = "0.8" rand = "0.8" rw-stream-sink = { workspace = true } serde = { version = "1", optional = true, features = ["derive"] } -smallvec = "1.11.0" +smallvec = "1.11.1" thiserror = "1.0" unsigned-varint = "0.7" void = "1" diff --git a/core/src/muxing.rs b/core/src/muxing.rs index 13b826ba..477e1608 100644 --- a/core/src/muxing.rs +++ b/core/src/muxing.rs @@ -112,6 +112,7 @@ pub trait StreamMuxer { } /// An event produced by a [`StreamMuxer`]. +#[derive(Debug)] pub enum StreamMuxerEvent { /// The address of the remote has changed. 
AddressChange(Multiaddr), diff --git a/core/src/transport/upgrade.rs b/core/src/transport/upgrade.rs index 201918f2..8525ab74 100644 --- a/core/src/transport/upgrade.rs +++ b/core/src/transport/upgrade.rs @@ -30,8 +30,8 @@ use crate::{ TransportError, TransportEvent, }, upgrade::{ - self, apply_inbound, apply_outbound, InboundUpgrade, InboundUpgradeApply, OutboundUpgrade, - OutboundUpgradeApply, UpgradeError, + self, apply_inbound, apply_outbound, InboundConnectionUpgrade, InboundUpgradeApply, + OutboundConnectionUpgrade, OutboundUpgradeApply, UpgradeError, }, Negotiated, }; @@ -101,8 +101,8 @@ where T: Transport, C: AsyncRead + AsyncWrite + Unpin, D: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade, Output = (PeerId, D), Error = E>, - U: OutboundUpgrade, Output = (PeerId, D), Error = E> + Clone, + U: InboundConnectionUpgrade, Output = (PeerId, D), Error = E>, + U: OutboundConnectionUpgrade, Output = (PeerId, D), Error = E> + Clone, E: Error + 'static, { let version = self.version; @@ -123,7 +123,7 @@ where pub struct Authenticate where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade> + OutboundUpgrade>, + U: InboundConnectionUpgrade> + OutboundConnectionUpgrade>, { #[pin] inner: EitherUpgrade, @@ -132,11 +132,11 @@ where impl Future for Authenticate where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade> - + OutboundUpgrade< + U: InboundConnectionUpgrade> + + OutboundConnectionUpgrade< Negotiated, - Output = >>::Output, - Error = >>::Error, + Output = >>::Output, + Error = >>::Error, >, { type Output = as Future>::Output; @@ -155,7 +155,7 @@ where pub struct Multiplex where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade> + OutboundUpgrade>, + U: InboundConnectionUpgrade> + OutboundConnectionUpgrade>, { peer_id: Option, #[pin] @@ -165,8 +165,8 @@ where impl Future for Multiplex where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade, Output = M, Error = E>, - U: OutboundUpgrade, Output = M, Error = E>, + U: InboundConnectionUpgrade, 
Output = M, Error = E>, + U: OutboundConnectionUpgrade, Output = M, Error = E>, { type Output = Result<(PeerId, M), UpgradeError>; @@ -208,8 +208,8 @@ where T: Transport, C: AsyncRead + AsyncWrite + Unpin, D: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade, Output = D, Error = E>, - U: OutboundUpgrade, Output = D, Error = E> + Clone, + U: InboundConnectionUpgrade, Output = D, Error = E>, + U: OutboundConnectionUpgrade, Output = D, Error = E> + Clone, E: Error + 'static, { Authenticated(Builder::new( @@ -236,8 +236,8 @@ where T: Transport, C: AsyncRead + AsyncWrite + Unpin, M: StreamMuxer, - U: InboundUpgrade, Output = M, Error = E>, - U: OutboundUpgrade, Output = M, Error = E> + Clone, + U: InboundConnectionUpgrade, Output = M, Error = E>, + U: OutboundConnectionUpgrade, Output = M, Error = E> + Clone, E: Error + 'static, { let version = self.0.version; @@ -269,8 +269,8 @@ where T: Transport, C: AsyncRead + AsyncWrite + Unpin, M: StreamMuxer, - U: InboundUpgrade, Output = M, Error = E>, - U: OutboundUpgrade, Output = M, Error = E> + Clone, + U: InboundConnectionUpgrade, Output = M, Error = E>, + U: OutboundConnectionUpgrade, Output = M, Error = E> + Clone, E: Error + 'static, F: for<'a> FnOnce(&'a PeerId, &'a ConnectedPoint) -> U + Clone, { @@ -395,8 +395,8 @@ where T: Transport, T::Error: 'static, C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade, Output = D, Error = E>, - U: OutboundUpgrade, Output = D, Error = E> + Clone, + U: InboundConnectionUpgrade, Output = D, Error = E>, + U: OutboundConnectionUpgrade, Output = D, Error = E> + Clone, E: Error + 'static, { type Output = (PeerId, D); @@ -502,7 +502,7 @@ where /// The [`Transport::Dial`] future of an [`Upgrade`]d transport. 
pub struct DialUpgradeFuture where - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, C: AsyncRead + AsyncWrite + Unpin, { future: Pin>, @@ -513,7 +513,7 @@ impl Future for DialUpgradeFuture where F: TryFuture, C: AsyncRead + AsyncWrite + Unpin, - U: OutboundUpgrade, Output = D>, + U: OutboundConnectionUpgrade, Output = D>, U::Error: Error, { type Output = Result<(PeerId, D), TransportUpgradeError>; @@ -553,7 +553,7 @@ where impl Unpin for DialUpgradeFuture where - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, C: AsyncRead + AsyncWrite + Unpin, { } @@ -562,7 +562,7 @@ where pub struct ListenerUpgradeFuture where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { future: Pin>, upgrade: future::Either, (PeerId, InboundUpgradeApply)>, @@ -572,7 +572,7 @@ impl Future for ListenerUpgradeFuture where F: TryFuture, C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade, Output = D>, + U: InboundConnectionUpgrade, Output = D>, U::Error: Error, { type Output = Result<(PeerId, D), TransportUpgradeError>; @@ -613,6 +613,6 @@ where impl Unpin for ListenerUpgradeFuture where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { } diff --git a/core/src/upgrade.rs b/core/src/upgrade.rs index f6bf72d1..7db1853b 100644 --- a/core/src/upgrade.rs +++ b/core/src/upgrade.rs @@ -125,3 +125,63 @@ pub trait OutboundUpgrade: UpgradeInfo { /// The `info` is the identifier of the protocol, as produced by `protocol_info`. fn upgrade_outbound(self, socket: C, info: Self::Info) -> Self::Future; } + +/// Possible upgrade on an inbound connection +pub trait InboundConnectionUpgrade: UpgradeInfo { + /// Output after the upgrade has been successfully negotiated and the handshake performed. + type Output; + /// Possible error during the handshake. + type Error; + /// Future that performs the handshake with the remote. 
+ type Future: Future>; + + /// After we have determined that the remote supports one of the protocols we support, this + /// method is called to start the handshake. + /// + /// The `info` is the identifier of the protocol, as produced by `protocol_info`. + fn upgrade_inbound(self, socket: T, info: Self::Info) -> Self::Future; +} + +/// Possible upgrade on an outbound connection +pub trait OutboundConnectionUpgrade: UpgradeInfo { + /// Output after the upgrade has been successfully negotiated and the handshake performed. + type Output; + /// Possible error during the handshake. + type Error; + /// Future that performs the handshake with the remote. + type Future: Future>; + + /// After we have determined that the remote supports one of the protocols we support, this + /// method is called to start the handshake. + /// + /// The `info` is the identifier of the protocol, as produced by `protocol_info`. + fn upgrade_outbound(self, socket: T, info: Self::Info) -> Self::Future; +} + +// Blanket implementation for InboundConnectionUpgrade based on InboundUpgrade for backwards compatibility +impl InboundConnectionUpgrade for U +where + U: InboundUpgrade, +{ + type Output = >::Output; + type Error = >::Error; + type Future = >::Future; + + fn upgrade_inbound(self, socket: T, info: Self::Info) -> Self::Future { + self.upgrade_inbound(socket, info) + } +} + +// Blanket implementation for OutboundConnectionUpgrade based on OutboundUpgrade for backwards compatibility +impl OutboundConnectionUpgrade for U +where + U: OutboundUpgrade, +{ + type Output = >::Output; + type Error = >::Error; + type Future = >::Future; + + fn upgrade_outbound(self, socket: T, info: Self::Info) -> Self::Future { + self.upgrade_outbound(socket, info) + } +} diff --git a/core/src/upgrade/apply.rs b/core/src/upgrade/apply.rs index aa997435..aefce686 100644 --- a/core/src/upgrade/apply.rs +++ b/core/src/upgrade/apply.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 
OTHER // DEALINGS IN THE SOFTWARE. -use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeError}; +use crate::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeError}; use crate::{connection::ConnectedPoint, Negotiated}; use futures::{future::Either, prelude::*}; use log::debug; @@ -37,7 +37,7 @@ pub(crate) fn apply( ) -> Either, OutboundUpgradeApply> where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade> + OutboundUpgrade>, + U: InboundConnectionUpgrade> + OutboundConnectionUpgrade>, { match cp { ConnectedPoint::Dialer { role_override, .. } if role_override.is_dialer() => { @@ -51,7 +51,7 @@ where pub(crate) fn apply_inbound(conn: C, up: U) -> InboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { InboundUpgradeApply { inner: InboundUpgradeApplyState::Init { @@ -65,7 +65,7 @@ where pub(crate) fn apply_outbound(conn: C, up: U, v: Version) -> OutboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, { OutboundUpgradeApply { inner: OutboundUpgradeApplyState::Init { @@ -79,7 +79,7 @@ where pub struct InboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { inner: InboundUpgradeApplyState, } @@ -88,7 +88,7 @@ where enum InboundUpgradeApplyState where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { Init { future: ListenerSelectFuture, @@ -104,14 +104,14 @@ where impl Unpin for InboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { } impl Future for InboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { type Output = Result>; @@ -162,7 +162,7 @@ where pub struct OutboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, { inner: 
OutboundUpgradeApplyState, } @@ -170,7 +170,7 @@ where enum OutboundUpgradeApplyState where C: AsyncRead + AsyncWrite + Unpin, - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, { Init { future: DialerSelectFuture::IntoIter>, @@ -186,14 +186,14 @@ where impl Unpin for OutboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, { } impl Future for OutboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, { type Output = Result>; diff --git a/examples/README.md b/examples/README.md index a8a07be0..28e08558 100644 --- a/examples/README.md +++ b/examples/README.md @@ -7,7 +7,7 @@ A set of examples showcasing how to use rust-libp2p. ## Individual libp2p features -- [Chat](./chat-example) A basic chat application demonstrating libp2p and the mDNS and Gossipsub protocols. +- [Chat](./chat) A basic chat application demonstrating libp2p and the mDNS and Gossipsub protocols. - [Distributed key-value store](./distributed-key-value-store) A basic key value store demonstrating libp2p and the mDNS and Kademlia protocol. - [File sharing application](./file-sharing) Basic file sharing application with peers either providing or locating and getting files by name. @@ -20,6 +20,6 @@ A set of examples showcasing how to use rust-libp2p. - [IPFS Private](./ipfs-private) Implementation using the gossipsub, ping and identify protocols to implement the ipfs private swarms feature. -- [Ping](./ping-example) Small `ping` clone, sending a ping to a peer, expecting a pong as a response. See [tutorial](../src/tutorials/ping.rs) for a step-by-step guide building the example. +- [Ping](./ping) Small `ping` clone, sending a ping to a peer, expecting a pong as a response. See [tutorial](../src/tutorials/ping.rs) for a step-by-step guide building the example. - [Rendezvous](./rendezvous) Rendezvous Protocol. 
See [specs](https://github.com/libp2p/specs/blob/master/rendezvous/README.md). diff --git a/examples/autonat/Cargo.toml b/examples/autonat/Cargo.toml index 9ca936b6..4b9ab5aa 100644 --- a/examples/autonat/Cargo.toml +++ b/examples/autonat/Cargo.toml @@ -6,8 +6,8 @@ publish = false license = "MIT" [dependencies] -async-std = { version = "1.12", features = ["attributes"] } -clap = { version = "4.3.21", features = ["derive"] } +tokio = { version = "1.32", features = ["full"] } +clap = { version = "4.3.23", features = ["derive"] } env_logger = "0.10.0" futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["async-std", "tcp", "noise", "yamux", "autonat", "identify", "macros"] } +libp2p = { path = "../../libp2p", features = ["tokio", "tcp", "noise", "yamux", "autonat", "identify", "macros"] } diff --git a/examples/autonat/src/bin/autonat_client.rs b/examples/autonat/src/bin/autonat_client.rs index 40db305f..eeb39ec5 100644 --- a/examples/autonat/src/bin/autonat_client.rs +++ b/examples/autonat/src/bin/autonat_client.rs @@ -43,7 +43,7 @@ struct Opt { server_peer_id: PeerId, } -#[async_std::main] +#[tokio::main] async fn main() -> Result<(), Box> { env_logger::init(); @@ -51,9 +51,8 @@ async fn main() -> Result<(), Box> { let local_key = identity::Keypair::generate_ed25519(); let local_peer_id = PeerId::from(local_key.public()); - println!("Local peer id: {local_peer_id:?}"); - let transport = tcp::async_io::Transport::default() + let transport = tcp::tokio::Transport::default() .upgrade(Version::V1Lazy) .authenticate(noise::Config::new(&local_key)?) 
.multiplex(yamux::Config::default()) @@ -61,8 +60,7 @@ async fn main() -> Result<(), Box> { let behaviour = Behaviour::new(local_key.public()); - let mut swarm = - SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); + let mut swarm = SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build(); swarm.listen_on( Multiaddr::empty() .with(Protocol::Ip4(Ipv4Addr::UNSPECIFIED)) diff --git a/examples/autonat/src/bin/autonat_server.rs b/examples/autonat/src/bin/autonat_server.rs index e0d6ad2f..065708f7 100644 --- a/examples/autonat/src/bin/autonat_server.rs +++ b/examples/autonat/src/bin/autonat_server.rs @@ -35,7 +35,7 @@ struct Opt { listen_port: Option, } -#[async_std::main] +#[tokio::main] async fn main() -> Result<(), Box> { env_logger::init(); @@ -43,9 +43,8 @@ async fn main() -> Result<(), Box> { let local_key = identity::Keypair::generate_ed25519(); let local_peer_id = PeerId::from(local_key.public()); - println!("Local peer id: {local_peer_id:?}"); - let transport = tcp::async_io::Transport::default() + let transport = tcp::tokio::Transport::default() .upgrade(Version::V1Lazy) .authenticate(noise::Config::new(&local_key)?) 
.multiplex(yamux::Config::default()) @@ -53,8 +52,7 @@ async fn main() -> Result<(), Box> { let behaviour = Behaviour::new(local_key.public()); - let mut swarm = - SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); + let mut swarm = SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build(); swarm.listen_on( Multiaddr::empty() .with(Protocol::Ip4(Ipv4Addr::UNSPECIFIED)) diff --git a/examples/browser-webrtc/Cargo.toml b/examples/browser-webrtc/Cargo.toml new file mode 100644 index 00000000..99263dde --- /dev/null +++ b/examples/browser-webrtc/Cargo.toml @@ -0,0 +1,40 @@ +[package] +authors = ["Doug Anderson "] +description = "Example use of the WebRTC transport in a browser wasm environment" +edition = "2021" +license = "MIT" +name = "browser-webrtc-example" +publish = false +repository = "https://github.com/libp2p/rust-libp2p" +rust-version = { workspace = true } +version = "0.1.0" + +[lib] +crate-type = ["cdylib"] + +[dependencies] +anyhow = "1.0.72" +env_logger = "0.10" +futures = "0.3.28" +log = "0.4" +rand = "0.8" + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +axum = "0.6.19" +libp2p = { path = "../../libp2p", features = ["ed25519", "macros", "ping", "wasm-bindgen", "tokio"] } +libp2p-webrtc = { workspace = true, features = ["tokio"] } +rust-embed = { version = "8.0.0", features = ["include-exclude", "interpolate-folder-path"] } +tokio = { version = "1.29", features = ["macros", "net", "rt", "signal"] } +tokio-util = { version = "0.7", features = ["compat"] } +tower = "0.4" +tower-http = { version = "0.4.0", features = ["cors"] } +mime_guess = "2.0.4" + +[target.'cfg(target_arch = "wasm32")'.dependencies] +js-sys = "0.3.64" +libp2p = { path = "../../libp2p", features = ["ed25519", "macros", "ping", "wasm-bindgen"] } +libp2p-webrtc-websys = { workspace = true } +wasm-bindgen = "0.2.84" +wasm-bindgen-futures = "0.4.37" +wasm-logger = { version = "0.2.0" } +web-sys = { version = "0.3", features = 
['Document', 'Element', 'HtmlElement', 'Node', 'Response', 'Window'] } diff --git a/examples/browser-webrtc/README.md b/examples/browser-webrtc/README.md new file mode 100644 index 00000000..d44cf879 --- /dev/null +++ b/examples/browser-webrtc/README.md @@ -0,0 +1,18 @@ +# Rust-libp2p Browser-Server WebRTC Example + +This example demonstrates how to use the `libp2p-webrtc-websys` transport library in a browser to ping the WebRTC Server. +It uses [wasm-pack](https://rustwasm.github.io/docs/wasm-pack/) to build the project for use in the browser. + +## Running the example + +1. Build the client library: +```shell +wasm-pack build --target web --out-dir static +``` + +2. Start the server: +```shell +cargo run +``` + +3. Open the URL printed in the terminal diff --git a/examples/browser-webrtc/src/lib.rs b/examples/browser-webrtc/src/lib.rs new file mode 100644 index 00000000..1a9856da --- /dev/null +++ b/examples/browser-webrtc/src/lib.rs @@ -0,0 +1,104 @@ +#![cfg(target_arch = "wasm32")] + +use futures::StreamExt; +use js_sys::Date; +use libp2p::core::Multiaddr; +use libp2p::identity::{Keypair, PeerId}; +use libp2p::ping; +use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}; +use std::convert::From; +use std::io; +use wasm_bindgen::prelude::*; +use web_sys::{Document, HtmlElement}; + +#[wasm_bindgen] +pub async fn run(libp2p_endpoint: String) -> Result<(), JsError> { + wasm_logger::init(wasm_logger::Config::default()); + + let body = Body::from_current_window()?; + body.append_p("Let's ping the WebRTC Server!")?; + + let local_key = Keypair::generate_ed25519(); + let local_peer_id = PeerId::from(local_key.public()); + let mut swarm = SwarmBuilder::with_wasm_executor( + libp2p_webrtc_websys::Transport::new(libp2p_webrtc_websys::Config::new(&local_key)).boxed(), + Behaviour { + ping: ping::Behaviour::new(ping::Config::new()), + keep_alive: keep_alive::Behaviour, + }, + local_peer_id, + ) + .build(); + + log::info!("Initialize swarm with 
identity: {local_peer_id}"); + + let addr = libp2p_endpoint.parse::()?; + log::info!("Dialing {addr}"); + swarm.dial(addr)?; + + loop { + match swarm.next().await.unwrap() { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { result: Err(e), .. })) => { + log::error!("Ping failed: {:?}", e); + + break; + } + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + peer, + result: Ok(rtt), + .. + })) => { + log::info!("Ping successful: RTT: {rtt:?}, from {peer}"); + body.append_p(&format!("RTT: {rtt:?} at {}", Date::new_0().to_string()))?; + } + evt => log::info!("Swarm event: {:?}", evt), + } + } + + Ok(()) +} + +#[derive(NetworkBehaviour)] +struct Behaviour { + ping: ping::Behaviour, + keep_alive: keep_alive::Behaviour, +} + +/// Convenience wrapper around the current document body +struct Body { + body: HtmlElement, + document: Document, +} + +impl Body { + fn from_current_window() -> Result { + // Use `web_sys`'s global `window` function to get a handle on the global + // window object. + let document = web_sys::window() + .ok_or(js_error("no global `window` exists"))? + .document() + .ok_or(js_error("should have a document on window"))?; + let body = document + .body() + .ok_or(js_error("document should have a body"))?; + + Ok(Self { body, document }) + } + + fn append_p(&self, msg: &str) -> Result<(), JsError> { + let val = self + .document + .create_element("p") + .map_err(|_| js_error("failed to create

"))?; + val.set_text_content(Some(msg)); + self.body + .append_child(&val) + .map_err(|_| js_error("failed to append

"))?; + + Ok(()) + } +} + +fn js_error(msg: &str) -> JsError { + io::Error::new(io::ErrorKind::Other, msg).into() +} diff --git a/examples/browser-webrtc/src/main.rs b/examples/browser-webrtc/src/main.rs new file mode 100644 index 00000000..8a4034a4 --- /dev/null +++ b/examples/browser-webrtc/src/main.rs @@ -0,0 +1,150 @@ +#![allow(non_upper_case_globals)] + +use anyhow::Result; +use axum::extract::{Path, State}; +use axum::http::header::CONTENT_TYPE; +use axum::http::StatusCode; +use axum::response::{Html, IntoResponse}; +use axum::{http::Method, routing::get, Router}; +use futures::StreamExt; +use libp2p::{ + core::muxing::StreamMuxerBox, + core::Transport, + identity, + multiaddr::{Multiaddr, Protocol}, + ping, + swarm::{SwarmBuilder, SwarmEvent}, +}; +use libp2p_webrtc as webrtc; +use rand::thread_rng; +use std::net::{Ipv4Addr, SocketAddr}; +use std::time::Duration; +use tower_http::cors::{Any, CorsLayer}; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + env_logger::builder() + .parse_filters("browser_webrtc_example=debug,libp2p_webrtc=info,libp2p_ping=debug") + .parse_default_env() + .init(); + + let id_keys = identity::Keypair::generate_ed25519(); + let local_peer_id = id_keys.public().to_peer_id(); + let transport = webrtc::tokio::Transport::new( + id_keys, + webrtc::tokio::Certificate::generate(&mut thread_rng())?, + ) + .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn))) + .boxed(); + + let mut swarm = + SwarmBuilder::with_tokio_executor(transport, ping::Behaviour::default(), local_peer_id) + .idle_connection_timeout(Duration::from_secs(30)) // Allows us to observe the pings. + .build(); + + let address_webrtc = Multiaddr::from(Ipv4Addr::UNSPECIFIED) + .with(Protocol::Udp(0)) + .with(Protocol::WebRTCDirect); + + swarm.listen_on(address_webrtc.clone())?; + + let address = loop { + if let SwarmEvent::NewListenAddr { address, .. 
} = swarm.select_next_some().await { + if address + .iter() + .any(|e| e == Protocol::Ip4(Ipv4Addr::LOCALHOST)) + { + log::debug!("Ignoring localhost address to make sure the example works in Firefox"); + continue; + } + + log::info!("Listening on: {address}"); + + break address; + } + }; + + let addr = address.with(Protocol::P2p(*swarm.local_peer_id())); + + // Serve .wasm, .js and server multiaddress over HTTP on this address. + tokio::spawn(serve(addr)); + + loop { + tokio::select! { + swarm_event = swarm.next() => { + log::trace!("Swarm Event: {:?}", swarm_event) + }, + _ = tokio::signal::ctrl_c() => { + break; + } + } + } + + Ok(()) +} + +#[derive(rust_embed::RustEmbed)] +#[folder = "$CARGO_MANIFEST_DIR/static"] +struct StaticFiles; + +/// Serve the Multiaddr we are listening on and the host files. +pub(crate) async fn serve(libp2p_transport: Multiaddr) { + let listen_addr = match libp2p_transport.iter().next() { + Some(Protocol::Ip4(addr)) => addr, + _ => panic!("Expected 1st protocol to be IP4"), + }; + + let server = Router::new() + .route("/", get(get_index)) + .route("/index.html", get(get_index)) + .route("/:path", get(get_static_file)) + .with_state(Libp2pEndpoint(libp2p_transport)) + .layer( + // allow cors + CorsLayer::new() + .allow_origin(Any) + .allow_methods([Method::GET]), + ); + + let addr = SocketAddr::new(listen_addr.into(), 8080); + + log::info!("Serving client files at http://{addr}"); + + axum::Server::bind(&addr) + .serve(server.into_make_service()) + .await + .unwrap(); +} + +#[derive(Clone)] +struct Libp2pEndpoint(Multiaddr); + +/// Serves the index.html file for our client. +/// +/// Our server listens on a random UDP port for the WebRTC transport. +/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__` placeholder with the actual address. +async fn get_index( + State(Libp2pEndpoint(libp2p_endpoint)): State, +) -> Result, StatusCode> { + let content = StaticFiles::get("index.html") + .ok_or(StatusCode::NOT_FOUND)? 
+ .data; + + let html = std::str::from_utf8(&content) + .expect("index.html to be valid utf8") + .replace("__LIBP2P_ENDPOINT__", &libp2p_endpoint.to_string()); + + Ok(Html(html)) +} + +/// Serves the static files generated by `wasm-pack`. +async fn get_static_file(Path(path): Path) -> Result { + log::debug!("Serving static file: {path}"); + + let content = StaticFiles::get(&path).ok_or(StatusCode::NOT_FOUND)?.data; + let content_type = mime_guess::from_path(path) + .first_or_octet_stream() + .to_string(); + + Ok(([(CONTENT_TYPE, content_type)], content)) +} diff --git a/examples/browser-webrtc/static/index.html b/examples/browser-webrtc/static/index.html new file mode 100644 index 00000000..a5a26310 --- /dev/null +++ b/examples/browser-webrtc/static/index.html @@ -0,0 +1,23 @@ + + + + + + + +

+

Rust Libp2p Demo!

+
+ + + + diff --git a/examples/chat-example/Cargo.toml b/examples/chat-example/Cargo.toml deleted file mode 100644 index cfc417cf..00000000 --- a/examples/chat-example/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "chat-example" -version = "0.1.0" -edition = "2021" -publish = false -license = "MIT" - -[dependencies] -async-std = { version = "1.12", features = ["attributes"] } -async-trait = "0.1" -env_logger = "0.10.0" -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["async-std", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic"] } diff --git a/examples/chat/Cargo.toml b/examples/chat/Cargo.toml new file mode 100644 index 00000000..9c539c85 --- /dev/null +++ b/examples/chat/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "chat-example" +version = "0.1.0" +edition = "2021" +publish = false +license = "MIT" + +[dependencies] +tokio = { version = "1.32", features = ["full"] } +async-trait = "0.1" +env_logger = "0.10.0" +futures = "0.3.28" +libp2p = { path = "../../libp2p", features = ["tokio", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic"] } diff --git a/examples/chat-example/README.md b/examples/chat/README.md similarity index 100% rename from examples/chat-example/README.md rename to examples/chat/README.md diff --git a/examples/chat-example/src/main.rs b/examples/chat/src/main.rs similarity index 88% rename from examples/chat-example/src/main.rs rename to examples/chat/src/main.rs index 95a16f38..312ca2d3 100644 --- a/examples/chat-example/src/main.rs +++ b/examples/chat/src/main.rs @@ -20,8 +20,7 @@ #![doc = include_str!("../README.md")] -use async_std::io; -use futures::{future::Either, prelude::*, select}; +use futures::{future::Either, stream::StreamExt}; use libp2p::{ core::{muxing::StreamMuxerBox, transport::OrTransport, upgrade}, gossipsub, identity, mdns, noise, quic, @@ -33,29 +32,30 @@ use std::collections::hash_map::DefaultHasher; use std::error::Error; use std::hash::{Hash, Hasher}; use 
std::time::Duration; +use tokio::{io, io::AsyncBufReadExt, select}; // We create a custom network behaviour that combines Gossipsub and Mdns. #[derive(NetworkBehaviour)] struct MyBehaviour { gossipsub: gossipsub::Behaviour, - mdns: mdns::async_io::Behaviour, + mdns: mdns::tokio::Behaviour, } -#[async_std::main] +#[tokio::main] async fn main() -> Result<(), Box> { // Create a random PeerId + env_logger::init(); let id_keys = identity::Keypair::generate_ed25519(); let local_peer_id = PeerId::from(id_keys.public()); - println!("Local peer id: {local_peer_id}"); // Set up an encrypted DNS-enabled TCP Transport over the yamux protocol. - let tcp_transport = tcp::async_io::Transport::new(tcp::Config::default().nodelay(true)) + let tcp_transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true)) .upgrade(upgrade::Version::V1Lazy) .authenticate(noise::Config::new(&id_keys).expect("signing libp2p-noise static keypair")) .multiplex(yamux::Config::default()) .timeout(std::time::Duration::from_secs(20)) .boxed(); - let quic_transport = quic::async_std::Transport::new(quic::Config::new(&id_keys)); + let quic_transport = quic::tokio::Transport::new(quic::Config::new(&id_keys)); let transport = OrTransport::new(quic_transport, tcp_transport) .map(|either_output, _| match either_output { Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), @@ -91,13 +91,13 @@ async fn main() -> Result<(), Box> { // Create a Swarm to manage peers and events let mut swarm = { - let mdns = mdns::async_io::Behaviour::new(mdns::Config::default(), local_peer_id)?; + let mdns = mdns::tokio::Behaviour::new(mdns::Config::default(), local_peer_id)?; let behaviour = MyBehaviour { gossipsub, mdns }; - SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build() + SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build() }; // Read full lines from stdin - let mut stdin = io::BufReader::new(io::stdin()).lines().fuse(); + let mut 
stdin = io::BufReader::new(io::stdin()).lines(); // Listen on all interfaces and whatever port the OS assigns swarm.listen_on("/ip4/0.0.0.0/udp/0/quic-v1".parse()?)?; @@ -108,13 +108,13 @@ async fn main() -> Result<(), Box> { // Kick it off loop { select! { - line = stdin.select_next_some() => { + Ok(Some(line)) = stdin.next_line() => { if let Err(e) = swarm .behaviour_mut().gossipsub - .publish(topic.clone(), line.expect("Stdin not to close").as_bytes()) { + .publish(topic.clone(), line.as_bytes()) { println!("Publish error: {e:?}"); } - }, + } event = swarm.select_next_some() => match event { SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Discovered(list))) => { for (peer_id, _multiaddr) in list { diff --git a/examples/dcutr/Cargo.toml b/examples/dcutr/Cargo.toml index 7a13dd23..852083d0 100644 --- a/examples/dcutr/Cargo.toml +++ b/examples/dcutr/Cargo.toml @@ -1,12 +1,12 @@ [package] -name = "dcutr" +name = "dcutr-example" version = "0.1.0" edition = "2021" publish = false license = "MIT" [dependencies] -clap = { version = "4.3.21", features = ["derive"] } +clap = { version = "4.3.23", features = ["derive"] } env_logger = "0.10.0" futures = "0.3.28" futures-timer = "3.0" diff --git a/examples/dcutr/README.md b/examples/dcutr/README.md index 52ce991e..5c7a9c38 100644 --- a/examples/dcutr/README.md +++ b/examples/dcutr/README.md @@ -18,12 +18,12 @@ To run the example, follow these steps: - Example usage in client-listen mode: ```sh - cargo run -- --mode listen --secret-key-seed 42 --relay-address /ip4/127.0.0.1/tcp/12345 + cargo run -- --mode listen --secret-key-seed 42 --relay-address /ip4/$RELAY_IP/tcp/$PORT/p2p/$RELAY_PEERID ``` - Example usage in client-dial mode: ```sh - cargo run -- --mode dial --secret-key-seed 42 --relay-address /ip4/127.0.0.1/tcp/12345 --remote-peer-id + cargo run -- --mode dial --secret-key-seed 42 --relay-address /ip4/$RELAY_IP/tcp/$PORT/p2p/$RELAY_PEERID --remote-peer-id ``` For this example to work, it is also necessary to 
turn on a relay server (you will find the related instructions in the example in the `examples/relay-server` folder). diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs index 8837df78..ec308521 100644 --- a/examples/dcutr/src/main.rs +++ b/examples/dcutr/src/main.rs @@ -33,9 +33,7 @@ use libp2p::{ transport::Transport, upgrade, }, - dcutr, - dns::DnsConfig, - identify, identity, noise, ping, quic, relay, + dcutr, dns, identify, identity, noise, ping, quic, relay, swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, tcp, yamux, PeerId, }; @@ -87,7 +85,6 @@ fn main() -> Result<(), Box> { let local_key = generate_ed25519(opts.secret_key_seed); let local_peer_id = PeerId::from(local_key.public()); - info!("Local peer id: {:?}", local_peer_id); let (relay_transport, client) = relay::client::new(local_peer_id); @@ -103,7 +100,7 @@ fn main() -> Result<(), Box> { &local_key, ))); - block_on(DnsConfig::system(relay_tcp_quic_transport)) + block_on(dns::async_std::Transport::system(relay_tcp_quic_transport)) .unwrap() .map(|either_output, _| match either_output { Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), diff --git a/examples/distributed-key-value-store/Cargo.toml b/examples/distributed-key-value-store/Cargo.toml index f8e0fd23..d128d6bb 100644 --- a/examples/distributed-key-value-store/Cargo.toml +++ b/examples/distributed-key-value-store/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "distributed-key-value-store" +name = "distributed-key-value-store-example" version = "0.1.0" edition = "2021" publish = false diff --git a/examples/distributed-key-value-store/src/main.rs b/examples/distributed-key-value-store/src/main.rs index ce0998a9..d1eec922 100644 --- a/examples/distributed-key-value-store/src/main.rs +++ b/examples/distributed-key-value-store/src/main.rs @@ -23,12 +23,9 @@ use async_std::io; use futures::{prelude::*, select}; use libp2p::core::upgrade::Version; +use libp2p::kad; use libp2p::kad::record::store::MemoryStore; 
use libp2p::kad::Mode; -use libp2p::kad::{ - record::Key, AddProviderOk, GetProvidersOk, GetRecordOk, Kademlia, KademliaEvent, PeerRecord, - PutRecordOk, QueryResult, Quorum, Record, -}; use libp2p::{ identity, mdns, noise, swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, @@ -54,18 +51,18 @@ async fn main() -> Result<(), Box> { #[derive(NetworkBehaviour)] #[behaviour(to_swarm = "MyBehaviourEvent")] struct MyBehaviour { - kademlia: Kademlia, + kademlia: kad::Behaviour, mdns: mdns::async_io::Behaviour, } #[allow(clippy::large_enum_variant)] enum MyBehaviourEvent { - Kademlia(KademliaEvent), + Kademlia(kad::Event), Mdns(mdns::Event), } - impl From for MyBehaviourEvent { - fn from(event: KademliaEvent) -> Self { + impl From for MyBehaviourEvent { + fn from(event: kad::Event) -> Self { MyBehaviourEvent::Kademlia(event) } } @@ -80,7 +77,7 @@ async fn main() -> Result<(), Box> { let mut swarm = { // Create a Kademlia behaviour. let store = MemoryStore::new(local_peer_id); - let kademlia = Kademlia::new(local_peer_id, store); + let kademlia = kad::Behaviour::new(local_peer_id, store); let mdns = mdns::async_io::Behaviour::new(mdns::Config::default(), local_peer_id)?; let behaviour = MyBehaviour { kademlia, mdns }; SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build() @@ -107,9 +104,9 @@ async fn main() -> Result<(), Box> { swarm.behaviour_mut().kademlia.add_address(&peer_id, multiaddr); } } - SwarmEvent::Behaviour(MyBehaviourEvent::Kademlia(KademliaEvent::OutboundQueryProgressed { result, ..})) => { + SwarmEvent::Behaviour(MyBehaviourEvent::Kademlia(kad::Event::OutboundQueryProgressed { result, ..})) => { match result { - QueryResult::GetProviders(Ok(GetProvidersOk::FoundProviders { key, providers, .. })) => { + kad::QueryResult::GetProviders(Ok(kad::GetProvidersOk::FoundProviders { key, providers, .. 
})) => { for peer in providers { println!( "Peer {peer:?} provides key {:?}", @@ -117,12 +114,12 @@ async fn main() -> Result<(), Box> { ); } } - QueryResult::GetProviders(Err(err)) => { + kad::QueryResult::GetProviders(Err(err)) => { eprintln!("Failed to get providers: {err:?}"); } - QueryResult::GetRecord(Ok( - GetRecordOk::FoundRecord(PeerRecord { - record: Record { key, value, .. }, + kad::QueryResult::GetRecord(Ok( + kad::GetRecordOk::FoundRecord(kad::PeerRecord { + record: kad::Record { key, value, .. }, .. }) )) => { @@ -132,26 +129,26 @@ async fn main() -> Result<(), Box> { std::str::from_utf8(&value).unwrap(), ); } - QueryResult::GetRecord(Ok(_)) => {} - QueryResult::GetRecord(Err(err)) => { + kad::QueryResult::GetRecord(Ok(_)) => {} + kad::QueryResult::GetRecord(Err(err)) => { eprintln!("Failed to get record: {err:?}"); } - QueryResult::PutRecord(Ok(PutRecordOk { key })) => { + kad::QueryResult::PutRecord(Ok(kad::PutRecordOk { key })) => { println!( "Successfully put record {:?}", std::str::from_utf8(key.as_ref()).unwrap() ); } - QueryResult::PutRecord(Err(err)) => { + kad::QueryResult::PutRecord(Err(err)) => { eprintln!("Failed to put record: {err:?}"); } - QueryResult::StartProviding(Ok(AddProviderOk { key })) => { + kad::QueryResult::StartProviding(Ok(kad::AddProviderOk { key })) => { println!( "Successfully put provider record {:?}", std::str::from_utf8(key.as_ref()).unwrap() ); } - QueryResult::StartProviding(Err(err)) => { + kad::QueryResult::StartProviding(Err(err)) => { eprintln!("Failed to put provider record: {err:?}"); } _ => {} @@ -163,14 +160,14 @@ async fn main() -> Result<(), Box> { } } -fn handle_input_line(kademlia: &mut Kademlia, line: String) { +fn handle_input_line(kademlia: &mut kad::Behaviour, line: String) { let mut args = line.split(' '); match args.next() { Some("GET") => { let key = { match args.next() { - Some(key) => Key::new(&key), + Some(key) => kad::record::Key::new(&key), None => { eprintln!("Expected key"); return; @@ 
-182,7 +179,7 @@ fn handle_input_line(kademlia: &mut Kademlia, line: String) { Some("GET_PROVIDERS") => { let key = { match args.next() { - Some(key) => Key::new(&key), + Some(key) => kad::record::Key::new(&key), None => { eprintln!("Expected key"); return; @@ -194,7 +191,7 @@ fn handle_input_line(kademlia: &mut Kademlia, line: String) { Some("PUT") => { let key = { match args.next() { - Some(key) => Key::new(&key), + Some(key) => kad::record::Key::new(&key), None => { eprintln!("Expected key"); return; @@ -210,20 +207,20 @@ fn handle_input_line(kademlia: &mut Kademlia, line: String) { } } }; - let record = Record { + let record = kad::Record { key, value, publisher: None, expires: None, }; kademlia - .put_record(record, Quorum::One) + .put_record(record, kad::Quorum::One) .expect("Failed to store record locally."); } Some("PUT_PROVIDER") => { let key = { match args.next() { - Some(key) => Key::new(&key), + Some(key) => kad::record::Key::new(&key), None => { eprintln!("Expected key"); return; diff --git a/examples/file-sharing/Cargo.toml b/examples/file-sharing/Cargo.toml index a2aa47ce..03745aac 100644 --- a/examples/file-sharing/Cargo.toml +++ b/examples/file-sharing/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "file-sharing" +name = "file-sharing-example" version = "0.1.0" edition = "2021" publish = false @@ -8,7 +8,7 @@ license = "MIT" [dependencies] serde = { version = "1.0", features = ["derive"] } async-std = { version = "1.12", features = ["attributes"] } -clap = { version = "4.3.21", features = ["derive"] } +clap = { version = "4.3.23", features = ["derive"] } either = "1.9" env_logger = "0.10" futures = "0.3.28" diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs index 7ddd0afb..675f69a0 100644 --- a/examples/file-sharing/src/network.rs +++ b/examples/file-sharing/src/network.rs @@ -5,11 +5,7 @@ use futures::prelude::*; use libp2p::{ core::Multiaddr, - identity, - kad::{ - self, record::store::MemoryStore, 
GetProvidersOk, Kademlia, KademliaEvent, QueryId, - QueryResult, - }, + identity, kad, multiaddr::Protocol, noise, request_response::{self, ProtocolSupport, RequestId, ResponseChannel}, @@ -56,7 +52,7 @@ pub(crate) async fn new( let mut swarm = SwarmBuilder::with_async_std_executor( transport, ComposedBehaviour { - kademlia: Kademlia::new(peer_id, MemoryStore::new(peer_id)), + kademlia: kad::Behaviour::new(peer_id, kad::record::store::MemoryStore::new(peer_id)), request_response: request_response::cbor::Behaviour::new( [( StreamProtocol::new("/file-exchange/1"), @@ -179,8 +175,8 @@ pub(crate) struct EventLoop { command_receiver: mpsc::Receiver, event_sender: mpsc::Sender, pending_dial: HashMap>>>, - pending_start_providing: HashMap>, - pending_get_providers: HashMap>>, + pending_start_providing: HashMap>, + pending_get_providers: HashMap>>, pending_request_file: HashMap, Box>>>, } @@ -221,9 +217,9 @@ impl EventLoop { ) { match event { SwarmEvent::Behaviour(ComposedEvent::Kademlia( - KademliaEvent::OutboundQueryProgressed { + kad::Event::OutboundQueryProgressed { id, - result: QueryResult::StartProviding(_), + result: kad::QueryResult::StartProviding(_), .. }, )) => { @@ -234,11 +230,12 @@ impl EventLoop { let _ = sender.send(()); } SwarmEvent::Behaviour(ComposedEvent::Kademlia( - KademliaEvent::OutboundQueryProgressed { + kad::Event::OutboundQueryProgressed { id, result: - QueryResult::GetProviders(Ok(GetProvidersOk::FoundProviders { - providers, .. + kad::QueryResult::GetProviders(Ok(kad::GetProvidersOk::FoundProviders { + providers, + .. })), .. }, @@ -256,11 +253,11 @@ impl EventLoop { } } SwarmEvent::Behaviour(ComposedEvent::Kademlia( - KademliaEvent::OutboundQueryProgressed { + kad::Event::OutboundQueryProgressed { result: - QueryResult::GetProviders(Ok(GetProvidersOk::FinishedWithNoAdditionalRecord { - .. - })), + kad::QueryResult::GetProviders(Ok( + kad::GetProvidersOk::FinishedWithNoAdditionalRecord { .. }, + )), .. 
}, )) => {} @@ -412,13 +409,13 @@ impl EventLoop { #[behaviour(to_swarm = "ComposedEvent")] struct ComposedBehaviour { request_response: request_response::cbor::Behaviour, - kademlia: Kademlia, + kademlia: kad::Behaviour, } #[derive(Debug)] enum ComposedEvent { RequestResponse(request_response::Event), - Kademlia(KademliaEvent), + Kademlia(kad::Event), } impl From> for ComposedEvent { @@ -427,8 +424,8 @@ impl From> for ComposedEvent } } -impl From for ComposedEvent { - fn from(event: KademliaEvent) -> Self { +impl From for ComposedEvent { + fn from(event: kad::Event) -> Self { ComposedEvent::Kademlia(event) } } diff --git a/examples/identify/Cargo.toml b/examples/identify/Cargo.toml index b11ea227..fb14aeba 100644 --- a/examples/identify/Cargo.toml +++ b/examples/identify/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "identify" +name = "identify-example" version = "0.1.0" edition = "2021" publish = false @@ -8,5 +8,6 @@ license = "MIT" [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" +env_logger = "0.10" futures = "0.3.28" libp2p = { path = "../../libp2p", features = ["async-std", "dns", "dcutr", "identify", "macros", "noise", "ping", "relay", "rendezvous", "tcp", "tokio", "yamux"] } diff --git a/examples/identify/src/main.rs b/examples/identify/src/main.rs index 7abdd5e9..dc98fb58 100644 --- a/examples/identify/src/main.rs +++ b/examples/identify/src/main.rs @@ -31,9 +31,9 @@ use std::error::Error; #[async_std::main] async fn main() -> Result<(), Box> { + env_logger::init(); let local_key = identity::Keypair::generate_ed25519(); let local_peer_id = PeerId::from(local_key.public()); - println!("Local peer id: {local_peer_id:?}"); let transport = tcp::async_io::Transport::default() .upgrade(Version::V1Lazy) diff --git a/examples/ipfs-kad/Cargo.toml b/examples/ipfs-kad/Cargo.toml index 94587a90..0526060e 100644 --- a/examples/ipfs-kad/Cargo.toml +++ b/examples/ipfs-kad/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = 
"ipfs-kad" +name = "ipfs-kad-example" version = "0.1.0" edition = "2021" publish = false diff --git a/examples/ipfs-kad/src/main.rs b/examples/ipfs-kad/src/main.rs index 1d0ed0a3..6897cf63 100644 --- a/examples/ipfs-kad/src/main.rs +++ b/examples/ipfs-kad/src/main.rs @@ -21,8 +21,8 @@ #![doc = include_str!("../README.md")] use futures::StreamExt; +use libp2p::kad; use libp2p::kad::record::store::MemoryStore; -use libp2p::kad::{GetClosestPeersError, Kademlia, KademliaConfig, KademliaEvent, QueryResult}; use libp2p::{ development_transport, identity, swarm::{SwarmBuilder, SwarmEvent}, @@ -51,10 +51,10 @@ async fn main() -> Result<(), Box> { // Create a swarm to manage peers and events. let mut swarm = { // Create a Kademlia behaviour. - let mut cfg = KademliaConfig::default(); + let mut cfg = kad::Config::default(); cfg.set_query_timeout(Duration::from_secs(5 * 60)); let store = MemoryStore::new(local_peer_id); - let mut behaviour = Kademlia::with_config(local_peer_id, store, cfg); + let mut behaviour = kad::Behaviour::with_config(local_peer_id, store, cfg); // Add the bootnodes to the local routing table. `libp2p-dns` built // into the `transport` resolves the `dnsaddr` when Kademlia tries @@ -78,8 +78,8 @@ async fn main() -> Result<(), Box> { loop { let event = swarm.select_next_some().await; - if let SwarmEvent::Behaviour(KademliaEvent::OutboundQueryProgressed { - result: QueryResult::GetClosestPeers(result), + if let SwarmEvent::Behaviour(kad::Event::OutboundQueryProgressed { + result: kad::QueryResult::GetClosestPeers(result), .. }) = event { @@ -93,7 +93,7 @@ async fn main() -> Result<(), Box> { println!("Query finished with no closest peers.") } } - Err(GetClosestPeersError::Timeout { peers, .. }) => { + Err(kad::GetClosestPeersError::Timeout { peers, .. 
}) => { if !peers.is_empty() { println!("Query timed out with closest peers: {peers:#?}") } else { diff --git a/examples/ipfs-private/Cargo.toml b/examples/ipfs-private/Cargo.toml index 7871e5c4..278611e6 100644 --- a/examples/ipfs-private/Cargo.toml +++ b/examples/ipfs-private/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "ipfs-private" +name = "ipfs-private-example" version = "0.1.0" edition = "2021" publish = false diff --git a/examples/metrics/src/main.rs b/examples/metrics/src/main.rs index 177ff3af..b28abaee 100644 --- a/examples/metrics/src/main.rs +++ b/examples/metrics/src/main.rs @@ -26,12 +26,13 @@ use futures::stream::StreamExt; use libp2p::core::{upgrade::Version, Multiaddr, Transport}; use libp2p::identity::PeerId; use libp2p::metrics::{Metrics, Recorder}; -use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}; +use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}; use libp2p::{identify, identity, noise, ping, tcp, yamux}; use log::info; use prometheus_client::registry::Registry; use std::error::Error; use std::thread; +use std::time::Duration; mod http_service; @@ -41,7 +42,6 @@ fn main() -> Result<(), Box> { let local_key = identity::Keypair::generate_ed25519(); let local_peer_id = PeerId::from(local_key.public()); let local_pub_key = local_key.public(); - info!("Local peer id: {local_peer_id:?}"); let mut swarm = SwarmBuilder::without_executor( tcp::async_io::Transport::default() @@ -52,6 +52,7 @@ fn main() -> Result<(), Box> { Behaviour::new(local_pub_key), local_peer_id, ) + .idle_connection_timeout(Duration::from_secs(60)) .build(); swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; @@ -88,13 +89,9 @@ fn main() -> Result<(), Box> { } /// Our network behaviour. -/// -/// For illustrative purposes, this includes the [`keep_alive::Behaviour`]) behaviour so the ping actually happen -/// and can be observed via the metrics. 
#[derive(NetworkBehaviour)] struct Behaviour { identify: identify::Behaviour, - keep_alive: keep_alive::Behaviour, ping: ping::Behaviour, } @@ -106,7 +103,6 @@ impl Behaviour { "/ipfs/0.1.0".into(), local_pub_key, )), - keep_alive: keep_alive::Behaviour, } } } diff --git a/examples/ping-example/Cargo.toml b/examples/ping/Cargo.toml similarity index 93% rename from examples/ping-example/Cargo.toml rename to examples/ping/Cargo.toml index f1022b2d..33c9e56b 100644 --- a/examples/ping-example/Cargo.toml +++ b/examples/ping/Cargo.toml @@ -8,5 +8,6 @@ license = "MIT" [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" +env_logger = "0.10.0" futures = "0.3.28" libp2p = { path = "../../libp2p", features = ["async-std", "dns", "macros", "noise", "ping", "tcp", "websocket", "yamux"] } diff --git a/examples/ping-example/README.md b/examples/ping/README.md similarity index 100% rename from examples/ping-example/README.md rename to examples/ping/README.md diff --git a/examples/ping-example/src/main.rs b/examples/ping/src/main.rs similarity index 82% rename from examples/ping-example/src/main.rs rename to examples/ping/src/main.rs index 6b993bcb..898a2581 100644 --- a/examples/ping-example/src/main.rs +++ b/examples/ping/src/main.rs @@ -24,16 +24,17 @@ use futures::prelude::*; use libp2p::core::upgrade::Version; use libp2p::{ identity, noise, ping, - swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, + swarm::{SwarmBuilder, SwarmEvent}, tcp, yamux, Multiaddr, PeerId, Transport, }; use std::error::Error; +use std::time::Duration; #[async_std::main] async fn main() -> Result<(), Box> { + env_logger::init(); let local_key = identity::Keypair::generate_ed25519(); let local_peer_id = PeerId::from(local_key.public()); - println!("Local peer id: {local_peer_id:?}"); let transport = tcp::async_io::Transport::default() .upgrade(Version::V1Lazy) @@ -42,7 +43,8 @@ async fn main() -> Result<(), Box> { .boxed(); let mut swarm = - 
SwarmBuilder::with_async_std_executor(transport, Behaviour::default(), local_peer_id) + SwarmBuilder::with_async_std_executor(transport, ping::Behaviour::default(), local_peer_id) + .idle_connection_timeout(Duration::from_secs(60)) // For illustrative purposes, keep idle connections alive for a minute so we can observe a few pings. .build(); // Tell the swarm to listen on all interfaces and a random, OS-assigned @@ -65,13 +67,3 @@ async fn main() -> Result<(), Box> { } } } - -/// Our network behaviour. -/// -/// For illustrative purposes, this includes the [`KeepAlive`](keep_alive::Behaviour) behaviour so a continuous sequence of -/// pings can be observed. -#[derive(NetworkBehaviour, Default)] -struct Behaviour { - keep_alive: keep_alive::Behaviour, - ping: ping::Behaviour, -} diff --git a/examples/relay-server/Cargo.toml b/examples/relay-server/Cargo.toml index 62d3cc0a..49c32cd3 100644 --- a/examples/relay-server/Cargo.toml +++ b/examples/relay-server/Cargo.toml @@ -6,7 +6,7 @@ publish = false license = "MIT" [dependencies] -clap = { version = "4.3.21", features = ["derive"] } +clap = { version = "4.3.23", features = ["derive"] } async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" env_logger = "0.10.0" diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs index 7526a076..ab87615d 100644 --- a/examples/relay-server/src/main.rs +++ b/examples/relay-server/src/main.rs @@ -47,7 +47,6 @@ fn main() -> Result<(), Box> { // Create a static known PeerId based on given secret let local_key: identity::Keypair = generate_ed25519(opt.secret_key_seed); let local_peer_id = PeerId::from(local_key.public()); - println!("Local peer id: {local_peer_id:?}"); let tcp_transport = tcp::async_io::Transport::default(); diff --git a/examples/rendezvous/Cargo.toml b/examples/rendezvous/Cargo.toml index e0cf39ba..a66c758e 100644 --- a/examples/rendezvous/Cargo.toml +++ b/examples/rendezvous/Cargo.toml @@ -12,4 +12,4 @@ env_logger = 
"0.10.0" futures = "0.3.28" libp2p = { path = "../../libp2p", features = ["async-std", "identify", "macros", "noise", "ping", "rendezvous", "tcp", "tokio", "yamux"] } log = "0.4" -tokio = { version = "1.31", features = [ "rt-multi-thread", "macros", "time" ] } +tokio = { version = "1.32", features = [ "rt-multi-thread", "macros", "time" ] } diff --git a/examples/rendezvous/src/bin/rzv-discover.rs b/examples/rendezvous/src/bin/rzv-discover.rs index 710b491f..ac45afae 100644 --- a/examples/rendezvous/src/bin/rzv-discover.rs +++ b/examples/rendezvous/src/bin/rzv-discover.rs @@ -24,7 +24,7 @@ use libp2p::{ identity, multiaddr::Protocol, noise, ping, rendezvous, - swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, + swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, tcp, yamux, Multiaddr, PeerId, Transport, }; use std::time::Duration; @@ -50,14 +50,12 @@ async fn main() { MyBehaviour { rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - keep_alive: keep_alive::Behaviour, }, PeerId::from(key_pair.public()), ) + .idle_connection_timeout(Duration::from_secs(5)) .build(); - log::info!("Local peer id: {}", swarm.local_peer_id()); - swarm.dial(rendezvous_point_address.clone()).unwrap(); let mut discover_tick = tokio::time::interval(Duration::from_secs(30)); @@ -129,5 +127,4 @@ async fn main() { struct MyBehaviour { rendezvous: rendezvous::client::Behaviour, ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, } diff --git a/examples/rendezvous/src/bin/rzv-identify.rs b/examples/rendezvous/src/bin/rzv-identify.rs index 7c326688..95ed7a5c 100644 --- a/examples/rendezvous/src/bin/rzv-identify.rs +++ b/examples/rendezvous/src/bin/rzv-identify.rs @@ -22,7 +22,7 @@ use futures::StreamExt; use libp2p::{ core::transport::upgrade::Version, identify, identity, noise, ping, rendezvous, - swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, + 
swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, tcp, yamux, Multiaddr, PeerId, Transport, }; use std::time::Duration; @@ -50,14 +50,12 @@ async fn main() { )), rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - keep_alive: keep_alive::Behaviour, }, PeerId::from(key_pair.public()), ) + .idle_connection_timeout(Duration::from_secs(5)) .build(); - log::info!("Local peer id: {}", swarm.local_peer_id()); - let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()); swarm.dial(rendezvous_point_address.clone()).unwrap(); @@ -135,5 +133,4 @@ struct MyBehaviour { identify: identify::Behaviour, rendezvous: rendezvous::client::Behaviour, ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, } diff --git a/examples/rendezvous/src/bin/rzv-register.rs b/examples/rendezvous/src/bin/rzv-register.rs index f9fd12b1..51acfee2 100644 --- a/examples/rendezvous/src/bin/rzv-register.rs +++ b/examples/rendezvous/src/bin/rzv-register.rs @@ -22,7 +22,7 @@ use futures::StreamExt; use libp2p::{ core::transport::upgrade::Version, identity, noise, ping, rendezvous, - swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, + swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, tcp, yamux, Multiaddr, PeerId, Transport, }; use std::time::Duration; @@ -46,10 +46,10 @@ async fn main() { MyBehaviour { rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - keep_alive: keep_alive::Behaviour, }, PeerId::from(key_pair.public()), ) + .idle_connection_timeout(Duration::from_secs(5)) .build(); // In production the external address should be the publicly facing IP address of the rendezvous point. 
@@ -57,8 +57,6 @@ async fn main() { let external_address = "/ip4/127.0.0.1/tcp/0".parse::().unwrap(); swarm.add_external_address(external_address); - log::info!("Local peer id: {}", swarm.local_peer_id()); - swarm.dial(rendezvous_point_address.clone()).unwrap(); while let Some(event) = swarm.next().await { @@ -132,5 +130,4 @@ async fn main() { struct MyBehaviour { rendezvous: rendezvous::client::Behaviour, ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, } diff --git a/examples/rendezvous/src/main.rs b/examples/rendezvous/src/main.rs index 4f5aca75..a3ed3c0f 100644 --- a/examples/rendezvous/src/main.rs +++ b/examples/rendezvous/src/main.rs @@ -24,7 +24,7 @@ use futures::StreamExt; use libp2p::{ core::transport::upgrade::Version, identify, identity, noise, ping, rendezvous, - swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, + swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, tcp, yamux, PeerId, Transport, }; use std::time::Duration; @@ -48,14 +48,12 @@ async fn main() { )), rendezvous: rendezvous::server::Behaviour::new(rendezvous::server::Config::default()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - keep_alive: keep_alive::Behaviour, }, PeerId::from(key_pair.public()), ) + .idle_connection_timeout(Duration::from_secs(5)) .build(); - log::info!("Local peer id: {}", swarm.local_peer_id()); - let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/62649".parse().unwrap()); while let Some(event) = swarm.next().await { @@ -99,5 +97,4 @@ struct MyBehaviour { identify: identify::Behaviour, rendezvous: rendezvous::server::Behaviour, ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, } diff --git a/examples/upnp/Cargo.toml b/examples/upnp/Cargo.toml new file mode 100644 index 00000000..afb8f61d --- /dev/null +++ b/examples/upnp/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "upnp-example" +version = "0.1.0" +edition = "2021" +publish = false +license = "MIT" + +[dependencies] +tokio = { version = "1", 
features = [ "rt-multi-thread", "macros"] } +futures = "0.3.28" +libp2p = { path = "../../libp2p", features = ["tokio", "dns", "macros", "noise", "ping", "tcp", "websocket", "yamux", "upnp"] } diff --git a/examples/upnp/README.md b/examples/upnp/README.md new file mode 100644 index 00000000..48335bfa --- /dev/null +++ b/examples/upnp/README.md @@ -0,0 +1,23 @@ +## Description + +The upnp example showcases how to use the upnp network behaviour to externally open ports on the network gateway. + + +## Usage + +To run the example, follow these steps: + +1. In a terminal window, run the following command: + + ```sh + cargo run + ``` + +2. This command will start the swarm and print the `NewExternalAddr` if the gateway supports `UPnP` or + `GatewayNotFound` if it doesn't. + + +## Conclusion + +The upnp example demonstrates the usage of **libp2p** to externally open a port on the gateway if it +supports [`UPnP`](https://en.wikipedia.org/wiki/Universal_Plug_and_Play). diff --git a/examples/upnp/src/main.rs b/examples/upnp/src/main.rs new file mode 100644 index 00000000..b4350dc8 --- /dev/null +++ b/examples/upnp/src/main.rs @@ -0,0 +1,81 @@ +// Copyright 2023 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +#![doc = include_str!("../README.md")] + +use futures::prelude::*; +use libp2p::core::upgrade::Version; +use libp2p::{ + identity, noise, + swarm::{SwarmBuilder, SwarmEvent}, + tcp, upnp, yamux, Multiaddr, PeerId, Transport, +}; +use std::error::Error; + +#[tokio::main] +async fn main() -> Result<(), Box> { + let local_key = identity::Keypair::generate_ed25519(); + let local_peer_id = PeerId::from(local_key.public()); + println!("Local peer id: {local_peer_id:?}"); + + let transport = tcp::tokio::Transport::default() + .upgrade(Version::V1Lazy) + .authenticate(noise::Config::new(&local_key)?) + .multiplex(yamux::Config::default()) + .boxed(); + + let mut swarm = SwarmBuilder::with_tokio_executor( + transport, + upnp::tokio::Behaviour::default(), + local_peer_id, + ) + .build(); + + // Tell the swarm to listen on all interfaces and a random, OS-assigned + // port. + swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; + + // Dial the peer identified by the multi-address given as the second + // command-line argument, if any. + if let Some(addr) = std::env::args().nth(1) { + let remote: Multiaddr = addr.parse()?; + swarm.dial(remote)?; + println!("Dialed {addr}") + } + + loop { + match swarm.select_next_some().await { + SwarmEvent::NewListenAddr { address, .. 
} => println!("Listening on {address:?}"), + SwarmEvent::Behaviour(upnp::Event::NewExternalAddr(addr)) => { + println!("New external address: {addr}"); + } + SwarmEvent::Behaviour(upnp::Event::GatewayNotFound) => { + println!("Gateway does not support UPnP"); + break; + } + SwarmEvent::Behaviour(upnp::Event::NonRoutableGateway) => { + println!("Gateway is not exposed directly to the public Internet, i.e. it itself has a private IP address."); + break; + } + _ => {} + } + } + Ok(()) +} diff --git a/identity/CHANGELOG.md b/identity/CHANGELOG.md index b52f0018..89fd8dfa 100644 --- a/identity/CHANGELOG.md +++ b/identity/CHANGELOG.md @@ -1,3 +1,10 @@ +## 0.2.4 - unreleased + +- Implement `Keypair::derive_secret`, to deterministically derive a new secret from the embedded secret key. + See [PR 4554]. + +[PR 4554]: https://github.com/libp2p/rust-libp2p/pull/4554 + ## 0.2.3 - Fix [RUSTSEC-2022-0093] by updating `ed25519-dalek` to `2.0`. diff --git a/identity/Cargo.toml b/identity/Cargo.toml index b4cb8e1a..d8b313e6 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -15,9 +15,10 @@ categories = ["cryptography"] asn1_der = { version = "0.7.6", optional = true } bs58 = { version = "0.5.0", optional = true } ed25519-dalek = { version = "2.0", optional = true } +hkdf = { version = "0.12.3", optional = true } libsecp256k1 = { version = "0.7.0", optional = true } log = "0.4" -multihash = { version = "0.19.0", optional = true } +multihash = { version = "0.19.1", optional = true } p256 = { version = "0.13", default-features = false, features = ["ecdsa", "std", "pem"], optional = true } quick-protobuf = "0.8.1" rand = { version = "0.8", optional = true } @@ -32,16 +33,18 @@ zeroize = { version = "1.6", optional = true } ring = { version = "0.16.9", features = ["alloc", "std"], default-features = false, optional = true} [features] -secp256k1 = [ "dep:libsecp256k1", "dep:asn1_der", "dep:sha2", "dep:zeroize" ] -ecdsa = [ "dep:p256", "dep:void", "dep:zeroize", "dep:sec1" ] 
-rsa = [ "dep:ring", "dep:asn1_der", "rand", "dep:zeroize" ] -ed25519 = [ "dep:ed25519-dalek", "dep:zeroize" ] -peerid = [ "dep:multihash", "dep:bs58", "dep:thiserror", "dep:sha2" ] +secp256k1 = [ "dep:libsecp256k1", "dep:asn1_der", "dep:sha2", "dep:hkdf", "dep:zeroize" ] +ecdsa = [ "dep:p256", "dep:void", "dep:zeroize", "dep:sec1", "dep:sha2", "dep:hkdf" ] +rsa = [ "dep:ring", "dep:asn1_der", "dep:rand", "dep:zeroize" ] +ed25519 = [ "dep:ed25519-dalek", "dep:rand", "dep:zeroize", "dep:sha2", "dep:hkdf" ] +peerid = [ "dep:multihash", "dep:bs58", "dep:thiserror", "dep:sha2", "dep:hkdf" ] rand = ["dep:rand", "ed25519-dalek?/rand_core"] +default = ["rand"] + [dev-dependencies] quickcheck = { workspace = true } -base64 = "0.21.2" +base64 = "0.21.4" serde_json = "1.0" rmp-serde = "1.1" criterion = "0.5" diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index 4a96260d..529a4ddd 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -129,7 +129,7 @@ impl hash::Hash for PublicKey { impl cmp::PartialOrd for PublicKey { fn partial_cmp(&self, other: &Self) -> Option { - self.0.as_bytes().partial_cmp(other.0.as_bytes()) + Some(self.cmp(other)) } } @@ -199,6 +199,10 @@ impl SecretKey { sk_bytes.zeroize(); Ok(SecretKey(secret)) } + + pub(crate) fn to_bytes(&self) -> [u8; 32] { + self.0 + } } #[cfg(test)] diff --git a/identity/src/error.rs b/identity/src/error.rs index 4580c442..71cd78fe 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -77,11 +77,7 @@ impl DecodingError { } } - #[cfg(any( - all(feature = "rsa", not(target_arch = "wasm32")), - feature = "secp256k1", - feature = "ecdsa" - ))] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub(crate) fn encoding_unsupported(key_type: &'static str) -> Self { Self { msg: format!("encoding {key_type} key to Protobuf is unsupported"), @@ -111,7 +107,7 @@ pub struct SigningError { /// An error during encoding of key material. 
impl SigningError { - #[cfg(any(feature = "secp256k1", feature = "rsa"))] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub(crate) fn new(msg: S) -> Self { Self { msg: msg.to_string(), @@ -119,7 +115,7 @@ impl SigningError { } } - #[cfg(feature = "rsa")] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub(crate) fn source(self, source: impl Error + Send + Sync + 'static) -> Self { Self { source: Some(Box::new(source)), diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index cd61c5ba..143174df 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -342,6 +342,74 @@ impl Keypair { KeyPairInner::Ecdsa(_) => KeyType::Ecdsa, } } + + #[doc = r##" + Deterministically derive a new secret from this [`Keypair`], taking into account the provided domain. + + This works for all key types except RSA where it returns `None`. + + # Example + + "##] + #[cfg_attr( + feature = "rand", + doc = r##" + ``` + "## + )] + #[cfg_attr( + not(feature = "rand"), + doc = r##" + ```ignore + "## + )] + #[doc = r##" + # fn main() { + # use libp2p_identity as identity; + let key = identity::Keypair::generate_ed25519(); + + let new_key = key.derive_secret(b"my encryption key").expect("can derive secret for ed25519"); + # } + ``` + "## + ] + #[allow(unused_variables, unreachable_code)] + pub fn derive_secret(&self, domain: &[u8]) -> Option<[u8; 32]> { + #[cfg(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" + ))] + return Some( + hkdf::Hkdf::::extract(None, &[domain, &self.secret()?].concat()) + .0 + .into(), + ); + + None + } + + /// Return the secret key of the [`Keypair`]. 
+ #[allow(dead_code)] + pub(crate) fn secret(&self) -> Option<[u8; 32]> { + match self.keypair { + #[cfg(feature = "ed25519")] + KeyPairInner::Ed25519(ref inner) => Some(inner.secret().to_bytes()), + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + KeyPairInner::Rsa(_) => return None, + #[cfg(feature = "secp256k1")] + KeyPairInner::Secp256k1(ref inner) => Some(inner.secret().to_bytes()), + #[cfg(feature = "ecdsa")] + KeyPairInner::Ecdsa(ref inner) => Some( + inner + .secret() + .to_bytes() + .try_into() + .expect("Ecdsa's private key should be 32 bytes"), + ), + } + } } #[cfg(feature = "ecdsa")] @@ -901,4 +969,11 @@ mod tests { assert_eq!(converted_pubkey, pubkey); assert_eq!(converted_pubkey.key_type(), KeyType::Ecdsa) } + + #[test] + #[cfg(feature = "ecdsa")] + fn test_secret_from_ecdsa_private_key() { + let keypair = Keypair::generate_ecdsa(); + assert!(keypair.derive_secret(b"domain separator!").is_some()) + } } diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index a3996360..5e1fda29 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -180,7 +180,7 @@ impl hash::Hash for PublicKey { impl cmp::PartialOrd for PublicKey { fn partial_cmp(&self, other: &Self) -> Option { - self.to_bytes().partial_cmp(&other.to_bytes()) + Some(self.cmp(other)) } } diff --git a/interop-tests/Cargo.toml b/interop-tests/Cargo.toml index 861bf14e..1966aee4 100644 --- a/interop-tests/Cargo.toml +++ b/interop-tests/Cargo.toml @@ -23,17 +23,18 @@ libp2p = { path = "../libp2p", features = ["ping", "noise", "tls", "rsa", "macro libp2p-webrtc = { workspace = true, features = ["tokio"] } libp2p-mplex = { path = "../muxers/mplex" } mime_guess = "2.0" -redis = { version = "0.23.2", default-features = false, features = ["tokio-comp"] } -rust-embed = "6.8" +redis = { version = "0.23.3", default-features = false, features = ["tokio-comp"] } +rust-embed = "8.0" serde_json = "1" thirtyfour = "=0.32.0-rc.8" # 
https://github.com/stevepryde/thirtyfour/issues/169 -tokio = { version = "1.31.0", features = ["full"] } +tokio = { version = "1.32.0", features = ["full"] } tower-http = { version = "0.4", features = ["cors", "fs", "trace"] } tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } [target.'cfg(target_arch = "wasm32")'.dependencies] libp2p = { path = "../libp2p", features = ["ping", "macros", "webtransport-websys", "wasm-bindgen", "identify"] } +libp2p-webrtc-websys = { workspace = true } wasm-bindgen = { version = "0.2" } wasm-bindgen-futures = { version = "0.4" } wasm-logger = { version = "0.2.0" } diff --git a/interop-tests/Dockerfile.chromium b/interop-tests/Dockerfile.chromium index ebe66850..33e0a0bf 100644 --- a/interop-tests/Dockerfile.chromium +++ b/interop-tests/Dockerfile.chromium @@ -6,11 +6,8 @@ ADD . . RUN rustup target add wasm32-unknown-unknown -RUN --mount=type=cache,target=/usr/local/cargo/registry \ - cargo install wasm-pack@0.11.1 --locked - -RUN --mount=type=cache,target=/usr/local/cargo/registry \ - cargo install wasm-opt@0.113.0 --locked +RUN wget -q -O- https://github.com/rustwasm/wasm-pack/releases/download/v0.12.1/wasm-pack-v0.12.1-x86_64-unknown-linux-musl.tar.gz | tar -zx -C /usr/local/bin --strip-components 1 --wildcards "wasm-pack-*/wasm-pack" +RUN wget -q -O- https://github.com/WebAssembly/binaryen/releases/download/version_115/binaryen-version_115-x86_64-linux.tar.gz | tar -zx -C /usr/local/bin --strip-components 2 --wildcards "binaryen-version_*/bin/wasm-opt" RUN --mount=type=cache,target=./target \ --mount=type=cache,target=/usr/local/cargo/registry \ @@ -23,7 +20,7 @@ RUN --mount=type=cache,target=./target \ RUN --mount=type=cache,target=./target \ mv ./target/release/wasm_ping /usr/local/bin/testplan -FROM selenium/standalone-chrome:112.0 +FROM selenium/standalone-chrome:115.0 COPY --from=builder /usr/local/bin/testplan /usr/local/bin/testplan ENV RUST_BACKTRACE=1 diff --git a/interop-tests/README.md 
b/interop-tests/README.md index 88cd7518..bab98df7 100644 --- a/interop-tests/README.md +++ b/interop-tests/README.md @@ -8,13 +8,9 @@ You can run this test locally by having a local Redis instance and by having another peer that this test can dial or listen for. For example to test that we can dial/listen for ourselves we can do the following: -1. Start redis (needed by the tests): `docker run --rm -it -p 6379:6379 - redis/redis-stack`. -2. In one terminal run the dialer: `redis_addr=localhost:6379 ip="0.0.0.0" - transport=quic-v1 security=quic muxer=quic is_dialer="true" cargo run --bin ping` -3. In another terminal, run the listener: `redis_addr=localhost:6379 - ip="0.0.0.0" transport=quic-v1 security=quic muxer=quic is_dialer="false" cargo run --bin native_ping` - +1. Start redis (needed by the tests): `docker run --rm -p 6379:6379 redis:7-alpine`. +2. In one terminal run the dialer: `redis_addr=localhost:6379 ip="0.0.0.0" transport=quic-v1 security=quic muxer=quic is_dialer="true" cargo run --bin ping` +3. In another terminal, run the listener: `redis_addr=localhost:6379 ip="0.0.0.0" transport=quic-v1 security=quic muxer=quic is_dialer="false" cargo run --bin native_ping` To test the interop with other versions do something similar, except replace one of these nodes with the other version's interop test. @@ -29,6 +25,15 @@ Firefox is not yet supported as it doesn't support all required features yet 1. Build the wasm package: `wasm-pack build --target web` 2. Run the dialer: `redis_addr=127.0.0.1:6379 ip=0.0.0.0 transport=webtransport is_dialer=true cargo run --bin wasm_ping` +# Running this test with webrtc-direct + +To run the webrtc-direct test, you'll need the `chromedriver` in your `$PATH`, compatible with your Chrome browser. + +1. Start redis: `docker run --rm -p 6379:6379 redis:7-alpine`. +1. Build the wasm package: `wasm-pack build --target web` +1. 
With the webrtc-direct listener `RUST_LOG=debug,webrtc=off,webrtc_sctp=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer="false" cargo run --bin native_ping` +1. Run the webrtc-direct dialer: `RUST_LOG=debug,hyper=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer=true cargo run --bin wasm_ping` + # Running all interop tests locally with Compose To run this test against all released libp2p versions you'll need to have the diff --git a/interop-tests/chromium-ping-version.json b/interop-tests/chromium-ping-version.json index 9fb2cd22..ae5c6e10 100644 --- a/interop-tests/chromium-ping-version.json +++ b/interop-tests/chromium-ping-version.json @@ -1,7 +1,10 @@ { "id": "chromium-rust-libp2p-head", "containerImageID": "chromium-rust-libp2p-head", - "transports": [{ "name": "webtransport", "onlyDial": true }], + "transports": [ + { "name": "webtransport", "onlyDial": true }, + { "name": "webrtc-direct", "onlyDial": true } + ], "secureChannels": [], "muxers": [] } diff --git a/interop-tests/src/arch.rs b/interop-tests/src/arch.rs index 2b2181f2..b30c3ad1 100644 --- a/interop-tests/src/arch.rs +++ b/interop-tests/src/arch.rs @@ -159,6 +159,7 @@ pub(crate) mod wasm { use libp2p::identity::Keypair; use libp2p::swarm::{NetworkBehaviour, SwarmBuilder}; use libp2p::PeerId; + use libp2p_webrtc_websys as webrtc; use std::time::Duration; use crate::{BlpopRequest, Transport}; @@ -181,16 +182,19 @@ pub(crate) mod wasm { ip: &str, transport: Transport, ) -> Result<(BoxedTransport, String)> { - if let Transport::Webtransport = transport { - Ok(( + match transport { + Transport::Webtransport => Ok(( libp2p::webtransport_websys::Transport::new( libp2p::webtransport_websys::Config::new(&local_key), ) .boxed(), format!("/ip4/{ip}/udp/0/quic/webtransport"), - )) - } else { - bail!("Only webtransport supported with wasm") + )), + Transport::WebRtcDirect => Ok(( + webrtc::Transport::new(webrtc::Config::new(&local_key)).boxed(), + 
format!("/ip4/{ip}/udp/0/webrtc-direct"), + )), + _ => bail!("Only webtransport and webrtc-direct are supported with wasm"), } } diff --git a/interop-tests/src/bin/wasm_ping.rs b/interop-tests/src/bin/wasm_ping.rs index 20350170..b3a91819 100644 --- a/interop-tests/src/bin/wasm_ping.rs +++ b/interop-tests/src/bin/wasm_ping.rs @@ -1,3 +1,4 @@ +#![allow(non_upper_case_globals)] use std::process::Stdio; use std::time::Duration; @@ -103,7 +104,12 @@ async fn open_in_browser() -> Result<(Child, WebDriver)> { // start a webdriver process // currently only the chromedriver is supported as firefox doesn't // have support yet for the certhashes - let mut chrome = tokio::process::Command::new("chromedriver") + let chromedriver = if cfg!(windows) { + "chromedriver.cmd" + } else { + "chromedriver" + }; + let mut chrome = tokio::process::Command::new(chromedriver) .arg("--port=45782") .stdout(Stdio::piped()) .spawn()?; diff --git a/interop-tests/src/lib.rs b/interop-tests/src/lib.rs index 57ce6363..40c06b57 100644 --- a/interop-tests/src/lib.rs +++ b/interop-tests/src/lib.rs @@ -3,8 +3,8 @@ use std::time::Duration; use anyhow::{bail, Context, Result}; use futures::{FutureExt, StreamExt}; -use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmEvent}; -use libp2p::{identify, identity, ping, Multiaddr, PeerId}; +use libp2p::swarm::SwarmEvent; +use libp2p::{identify, identity, ping, swarm::NetworkBehaviour, Multiaddr, PeerId}; #[cfg(target_arch = "wasm32")] use wasm_bindgen::prelude::*; @@ -33,8 +33,7 @@ pub async fn run_test( let mut swarm = swarm_builder( boxed_transport, Behaviour { - ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - keep_alive: keep_alive::Behaviour, + ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(10))), // Need to include identify until https://github.com/status-im/nim-libp2p/issues/924 is resolved. 
identify: identify::Behaviour::new(identify::Config::new( "/interop-tests".to_owned(), @@ -43,6 +42,7 @@ pub async fn run_test( }, local_peer_id, ) + .idle_connection_timeout(Duration::from_secs(5)) .build(); log::info!("Running ping test: {}", swarm.local_peer_id()); @@ -50,6 +50,7 @@ pub async fn run_test( let mut maybe_id = None; // See https://github.com/libp2p/rust-libp2p/issues/4071. + #[cfg(not(target_arch = "wasm32"))] if transport == Transport::WebRtcDirect { maybe_id = Some(swarm.listen_on(local_addr.parse()?)?); } @@ -241,7 +242,6 @@ impl FromStr for SecProtocol { #[derive(NetworkBehaviour)] struct Behaviour { ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, identify: identify::Behaviour, } diff --git a/libp2p/CHANGELOG.md b/libp2p/CHANGELOG.md index 642d4103..9ae0d1c2 100644 --- a/libp2p/CHANGELOG.md +++ b/libp2p/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.52.3 - unreleased +## 0.52.3 - Add `libp2p-quic` stable release. @@ -10,10 +10,14 @@ - Add `json` feature which exposes `request_response::json`. See [PR 4188]. +- Add support for UPnP via the IGD protocol. + See [PR 4156]. + - Add `libp2p-memory-connection-limits` providing memory usage based connection limit configurations. See [PR 4281]. 
[PR 4188]: https://github.com/libp2p/rust-libp2p/pull/4188 +[PR 4156]: https://github.com/libp2p/rust-libp2p/pull/4156 [PR 4217]: https://github.com/libp2p/rust-libp2p/pull/4217 [PR 4281]: https://github.com/libp2p/rust-libp2p/pull/4281 diff --git a/libp2p/Cargo.toml b/libp2p/Cargo.toml index 5cb8741a..6b11d7de 100644 --- a/libp2p/Cargo.toml +++ b/libp2p/Cargo.toml @@ -50,6 +50,7 @@ full = [ "websocket", "webtransport-websys", "yamux", + "upnp" ] async-std = ["libp2p-swarm/async-std", "libp2p-mdns?/async-io", "libp2p-tcp?/async-io", "libp2p-dns?/async-std", "libp2p-quic?/async-std"] @@ -82,7 +83,7 @@ secp256k1 = ["libp2p-identity/secp256k1"] serde = ["libp2p-core/serde", "libp2p-kad?/serde", "libp2p-gossipsub?/serde"] tcp = ["dep:libp2p-tcp"] tls = ["dep:libp2p-tls"] -tokio = ["libp2p-swarm/tokio", "libp2p-mdns?/tokio", "libp2p-tcp?/tokio", "libp2p-dns?/tokio", "libp2p-quic?/tokio"] +tokio = ["libp2p-swarm/tokio", "libp2p-mdns?/tokio", "libp2p-tcp?/tokio", "libp2p-dns?/tokio", "libp2p-quic?/tokio", "libp2p-upnp?/tokio"] uds = ["dep:libp2p-uds"] wasm-bindgen = ["futures-timer/wasm-bindgen", "instant/wasm-bindgen", "getrandom/js", "libp2p-swarm/wasm-bindgen", "libp2p-gossipsub?/wasm-bindgen"] wasm-ext = ["dep:libp2p-wasm-ext"] @@ -90,6 +91,7 @@ wasm-ext-websocket = ["wasm-ext", "libp2p-wasm-ext?/websocket"] websocket = ["dep:libp2p-websocket"] webtransport-websys = ["dep:libp2p-webtransport-websys"] yamux = ["dep:libp2p-yamux"] +upnp = ["dep:libp2p-upnp"] [dependencies] bytes = "1" @@ -133,6 +135,7 @@ libp2p-quic = { workspace = true, optional = true } libp2p-tcp = { workspace = true, optional = true } libp2p-tls = { workspace = true, optional = true } libp2p-uds = { workspace = true, optional = true } +libp2p-upnp = { workspace = true, optional = true } libp2p-websocket = { workspace = true, optional = true } [dev-dependencies] diff --git a/libp2p/src/lib.rs b/libp2p/src/lib.rs index 349427ff..d1e151cc 100644 --- a/libp2p/src/lib.rs +++ b/libp2p/src/lib.rs @@ -52,10 
+52,15 @@ pub use libp2p_core as core; #[cfg(feature = "dcutr")] #[doc(inline)] pub use libp2p_dcutr as dcutr; + #[cfg(feature = "deflate")] #[cfg(not(target_arch = "wasm32"))] -#[doc(inline)] -pub use libp2p_deflate as deflate; +#[deprecated( + note = "Will be removed in the next release, see https://github.com/libp2p/rust-libp2p/issues/4522 for details." +)] +pub mod deflate { + pub use libp2p_deflate::*; +} #[cfg(feature = "dns")] #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] #[cfg(not(target_arch = "wasm32"))] @@ -127,6 +132,10 @@ pub use libp2p_tls as tls; #[cfg(not(target_arch = "wasm32"))] #[doc(inline)] pub use libp2p_uds as uds; +#[cfg(feature = "upnp")] +#[cfg(not(target_arch = "wasm32"))] +#[doc(inline)] +pub use libp2p_upnp as upnp; #[cfg(feature = "wasm-ext")] #[doc(inline)] pub use libp2p_wasm_ext as wasm_ext; @@ -185,12 +194,12 @@ pub async fn development_transport( keypair: identity::Keypair, ) -> std::io::Result> { let transport = { - let dns_tcp = dns::DnsConfig::system(tcp::async_io::Transport::new( + let dns_tcp = dns::async_std::Transport::system(tcp::async_io::Transport::new( tcp::Config::new().nodelay(true), )) .await?; let ws_dns_tcp = websocket::WsConfig::new( - dns::DnsConfig::system(tcp::async_io::Transport::new( + dns::async_std::Transport::system(tcp::async_io::Transport::new( tcp::Config::new().nodelay(true), )) .await?, @@ -230,10 +239,10 @@ pub fn tokio_development_transport( keypair: identity::Keypair, ) -> std::io::Result> { let transport = { - let dns_tcp = dns::TokioDnsConfig::system(tcp::tokio::Transport::new( + let dns_tcp = dns::tokio::Transport::system(tcp::tokio::Transport::new( tcp::Config::new().nodelay(true), ))?; - let ws_dns_tcp = websocket::WsConfig::new(dns::TokioDnsConfig::system( + let ws_dns_tcp = websocket::WsConfig::new(dns::tokio::Transport::system( tcp::tokio::Transport::new(tcp::Config::new().nodelay(true)), )?); dns_tcp.or_transport(ws_dns_tcp) diff --git a/libp2p/src/tutorials/hole_punching.rs 
b/libp2p/src/tutorials/hole_punching.rs index 161a2fee..5fd74fe7 100644 --- a/libp2p/src/tutorials/hole_punching.rs +++ b/libp2p/src/tutorials/hole_punching.rs @@ -54,16 +54,16 @@ //! //! ``` bash //! ## Inside the rust-libp2p repository. -//! cargo build --example relay_v2 -p libp2p-relay +//! cargo build --bin relay-server-example //! ``` //! -//! You can find the binary at `target/debug/examples/relay_v2`. In case you built it locally, copy +//! You can find the binary at `target/debug/relay-server-example`. In case you built it locally, copy //! it to your server. //! //! On your server, start the relay server binary: //! //! ``` bash -//! ./relay_v2 --port 4001 --secret-key-seed 0 +//! ./relay-server-example --port 4001 --secret-key-seed 0 //! ``` //! //! Now let's make sure that the server is public, in other words let's make sure one can reach it @@ -122,16 +122,16 @@ //! //! ``` bash //! ## Inside the rust-libp2p repository. -//! cargo build --example client -p libp2p-dcutr +//! cargo build --bin dcutr-example //! ``` //! -//! You can find the binary at `target/debug/examples/client`. In case you built it locally, copy +//! You can find the binary at `target/debug/dcutr-example`. In case you built it locally, copy //! it to your listening client machine. //! //! On the listening client machine: //! //! ``` bash -//! RUST_LOG=info ./client --secret-key-seed 1 --mode listen --relay-address /ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN +//! RUST_LOG=info ./dcutr-example --secret-key-seed 1 --mode listen --relay-address /ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN //! //! [2022-05-11T10:38:52Z INFO client] Local peer id: PeerId("XXX") //! [2022-05-11T10:38:52Z INFO client] Listening on "/ip4/127.0.0.1/tcp/44703" @@ -153,7 +153,7 @@ //! ## Connecting to the listening client from the dialing client //! //! ``` bash -//! 
RUST_LOG=info ./client --secret-key-seed 2 --mode dial --relay-address /ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN --remote-peer-id 12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X +//! RUST_LOG=info ./dcutr-example --secret-key-seed 2 --mode dial --relay-address /ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN --remote-peer-id 12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X //! ``` //! //! You should see the following logs appear: diff --git a/libp2p/src/tutorials/ping.rs b/libp2p/src/tutorials/ping.rs index 976b45e1..aedc1492 100644 --- a/libp2p/src/tutorials/ping.rs +++ b/libp2p/src/tutorials/ping.rs @@ -57,6 +57,7 @@ //! [dependencies] //! libp2p = { version = "0.50", features = ["tcp", "dns", "async-std", "noise", "yamux", "websocket", "ping", "macros"] } //! futures = "0.3.21" +//! env_logger = "0.10.0" //! async-std = { version = "1.12.0", features = ["attributes"] } //! ``` //! @@ -142,7 +143,7 @@ //! With the above in mind, let's extend our example, creating a [`ping::Behaviour`](crate::ping::Behaviour) at the end: //! //! ```rust -//! use libp2p::swarm::{keep_alive, NetworkBehaviour}; +//! use libp2p::swarm::NetworkBehaviour; //! use libp2p::{identity, ping, PeerId}; //! use std::error::Error; //! @@ -154,20 +155,10 @@ //! //! let transport = libp2p::development_transport(local_key).await?; //! -//! let behaviour = Behaviour::default(); +//! let behaviour = ping::Behaviour::default(); //! //! Ok(()) //! } -//! -//! /// Our network behaviour. -//! /// -//! /// For illustrative purposes, this includes the [`KeepAlive`](behaviour::KeepAlive) behaviour so a continuous sequence of -//! /// pings can be observed. -//! #[derive(NetworkBehaviour, Default)] -//! struct Behaviour { -//! keep_alive: keep_alive::Behaviour, -//! ping: ping::Behaviour, -//! } //! ``` //! //! ## Swarm @@ -177,36 +168,65 @@ //! carried out by a [`Swarm`]. Put simply, a [`Swarm`] drives both a //! 
[`Transport`] and a [`NetworkBehaviour`] forward, passing commands from the //! [`NetworkBehaviour`] to the [`Transport`] as well as events from the -//! [`Transport`] to the [`NetworkBehaviour`]. +//! [`Transport`] to the [`NetworkBehaviour`]. As you can see, after [`Swarm`] initialization, we +//! removed the print of the local [`PeerId`](crate::PeerId) because every time a [`Swarm`] is +//! created, it prints the local [`PeerId`](crate::PeerId) in the logs at the INFO level. In order +//! to continue to see the local [`PeerId`](crate::PeerId) you must initialize the logger +//! (In our example, `env_logger` is used) //! //! ```rust -//! use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmBuilder}; +//! use libp2p::swarm::{NetworkBehaviour, SwarmBuilder}; //! use libp2p::{identity, ping, PeerId}; //! use std::error::Error; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { +//! env_logger::init(); //! let local_key = identity::Keypair::generate_ed25519(); //! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); //! //! let transport = libp2p::development_transport(local_key).await?; //! -//! let behaviour = Behaviour::default(); +//! let behaviour = ping::Behaviour::default(); //! //! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); //! //! Ok(()) //! } +//! ``` //! -//! /// Our network behaviour. -//! /// -//! /// For illustrative purposes, this includes the [`KeepAlive`](behaviour:: -//! /// KeepAlive) behaviour so a continuous sequence of pings can be observed. -//! #[derive(NetworkBehaviour, Default)] -//! struct Behaviour { -//! keep_alive: keep_alive::Behaviour, -//! ping: ping::Behaviour, +//! ## Idle connection timeout +//! +//! Now, for this example in particular, we need set the idle connection timeout. +//! Otherwise, the connection will be closed immediately. +//! +//! 
Whether you need to set this in your application too depends on your usecase. +//! Typically, connections are kept alive if they are "in use" by a certain protocol. +//! The ping protocol however is only an "auxiliary" kind of protocol. +//! Thus, without any other behaviour in place, we would not be able to observe the pings. +//! +//! ```rust +//! use libp2p::swarm::{NetworkBehaviour, SwarmBuilder}; +//! use libp2p::{identity, ping, PeerId}; +//! use std::error::Error; +//! use std::time::Duration; +//! +//! #[async_std::main] +//! async fn main() -> Result<(), Box> { +//! use std::time::Duration; +//! let local_key = identity::Keypair::generate_ed25519(); +//! let local_peer_id = PeerId::from(local_key.public()); +//! println!("Local peer id: {local_peer_id:?}"); +//! +//! let transport = libp2p::development_transport(local_key).await?; +//! +//! let behaviour = ping::Behaviour::default(); +//! +//! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id) +//! .idle_connection_timeout(Duration::from_secs(30)) // Allows us to observe pings for 30 seconds. +//! .build(); +//! +//! Ok(()) //! } //! ``` //! @@ -237,21 +257,24 @@ //! remote peer. //! //! ```rust -//! use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmBuilder}; +//! use libp2p::swarm::{NetworkBehaviour, SwarmBuilder}; //! use libp2p::{identity, ping, Multiaddr, PeerId}; //! use std::error::Error; +//! use std::time::Duration; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { +//! env_logger::init(); //! let local_key = identity::Keypair::generate_ed25519(); //! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); //! //! let transport = libp2p::development_transport(local_key).await?; //! -//! let behaviour = Behaviour::default(); +//! let behaviour = ping::Behaviour::default(); //! -//! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); +//! 
let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id) +//! .idle_connection_timeout(Duration::from_secs(30)) // Allows us to observe pings for 30 seconds. +//! .build(); //! //! // Tell the swarm to listen on all interfaces and a random, OS-assigned //! // port. @@ -267,16 +290,6 @@ //! //! Ok(()) //! } -//! -//! /// Our network behaviour. -//! /// -//! /// For illustrative purposes, this includes the [`KeepAlive`](behaviour::KeepAlive) behaviour so a continuous sequence of -//! /// pings can be observed. -//! #[derive(NetworkBehaviour, Default)] -//! struct Behaviour { -//! keep_alive: keep_alive::Behaviour, -//! ping: ping::Behaviour, -//! } //! ``` //! //! ## Continuously polling the Swarm @@ -287,21 +300,24 @@ //! //! ```no_run //! use futures::prelude::*; -//! use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmEvent, SwarmBuilder}; +//! use libp2p::swarm::{NetworkBehaviour, SwarmEvent, SwarmBuilder}; //! use libp2p::{identity, ping, Multiaddr, PeerId}; //! use std::error::Error; +//! use std::time::Duration; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { +//! env_logger::init(); //! let local_key = identity::Keypair::generate_ed25519(); //! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); //! //! let transport = libp2p::development_transport(local_key).await?; //! -//! let behaviour = Behaviour::default(); +//! let behaviour = ping::Behaviour::default(); //! -//! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); +//! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id) +//! .idle_connection_timeout(Duration::from_secs(30)) // Allows us to observe pings for 30 seconds. +//! .build(); //! //! // Tell the swarm to listen on all interfaces and a random, OS-assigned //! // port. @@ -323,16 +339,6 @@ //! } //! } //! } -//! -//! /// Our network behaviour. -//! 
/// -//! /// For illustrative purposes, this includes the [`KeepAlive`](behaviour::KeepAlive) behaviour so a continuous sequence of -//! /// pings can be observed. -//! #[derive(NetworkBehaviour, Default)] -//! struct Behaviour { -//! keep_alive: keep_alive::Behaviour, -//! ping: ping::Behaviour, -//! } //! ``` //! //! ## Running two nodes @@ -349,9 +355,8 @@ //! cargo run --example ping //! ``` //! -//! It will print the PeerId and the new listening addresses, e.g. +//! It will print the new listening addresses, e.g. //! ```sh -//! Local peer id: PeerId("12D3KooWT1As4mwh3KYBnNTw9bSrRbYQGJTm9SSte82JSumqgCQG") //! Listening on "/ip4/127.0.0.1/tcp/24915" //! Listening on "/ip4/192.168.178.25/tcp/24915" //! Listening on "/ip4/172.17.0.1/tcp/24915" diff --git a/misc/allow-block-list/src/lib.rs b/misc/allow-block-list/src/lib.rs index eed79d74..1950c47f 100644 --- a/misc/allow-block-list/src/lib.rs +++ b/misc/allow-block-list/src/lib.rs @@ -283,14 +283,11 @@ mod tests { #[async_std::test] async fn cannot_dial_blocked_peer() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); listener.listen().await; - dialer - .behaviour_mut() - .list - .block_peer(*listener.local_peer_id()); + dialer.behaviour_mut().block_peer(*listener.local_peer_id()); let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else { panic!("unexpected dial error") @@ -300,17 +297,13 @@ mod tests { #[async_std::test] async fn can_dial_unblocked_peer() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); listener.listen().await; + 
dialer.behaviour_mut().block_peer(*listener.local_peer_id()); dialer .behaviour_mut() - .list - .block_peer(*listener.local_peer_id()); - dialer - .behaviour_mut() - .list .unblock_peer(*listener.local_peer_id()); dial(&mut dialer, &listener).unwrap(); @@ -318,14 +311,11 @@ mod tests { #[async_std::test] async fn blocked_peer_cannot_dial_us() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); listener.listen().await; - listener - .behaviour_mut() - .list - .block_peer(*dialer.local_peer_id()); + listener.behaviour_mut().block_peer(*dialer.local_peer_id()); dial(&mut dialer, &listener).unwrap(); async_std::task::spawn(dialer.loop_on_next()); @@ -343,15 +333,12 @@ mod tests { #[async_std::test] async fn connections_get_closed_upon_blocked() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); listener.listen().await; dialer.connect(&mut listener).await; - dialer - .behaviour_mut() - .list - .block_peer(*listener.local_peer_id()); + dialer.behaviour_mut().block_peer(*listener.local_peer_id()); let ( [SwarmEvent::ConnectionClosed { @@ -372,8 +359,8 @@ mod tests { #[async_std::test] async fn cannot_dial_peer_unless_allowed() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); listener.listen().await; let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else { @@ -381,26 +368,19 @@ mod tests { 
}; assert!(cause.downcast::().is_ok()); - dialer - .behaviour_mut() - .list - .allow_peer(*listener.local_peer_id()); + dialer.behaviour_mut().allow_peer(*listener.local_peer_id()); assert!(dial(&mut dialer, &listener).is_ok()); } #[async_std::test] async fn cannot_dial_disallowed_peer() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); listener.listen().await; + dialer.behaviour_mut().allow_peer(*listener.local_peer_id()); dialer .behaviour_mut() - .list - .allow_peer(*listener.local_peer_id()); - dialer - .behaviour_mut() - .list .disallow_peer(*listener.local_peer_id()); let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else { @@ -411,8 +391,8 @@ mod tests { #[async_std::test] async fn not_allowed_peer_cannot_dial_us() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); listener.listen().await; dialer @@ -448,23 +428,16 @@ mod tests { #[async_std::test] async fn connections_get_closed_upon_disallow() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); listener.listen().await; - dialer - .behaviour_mut() - .list - .allow_peer(*listener.local_peer_id()); - listener - .behaviour_mut() - .list - .allow_peer(*dialer.local_peer_id()); + dialer.behaviour_mut().allow_peer(*listener.local_peer_id()); + listener.behaviour_mut().allow_peer(*dialer.local_peer_id()); dialer.connect(&mut 
listener).await; dialer .behaviour_mut() - .list .disallow_peer(*listener.local_peer_id()); let ( [SwarmEvent::ConnectionClosed { @@ -496,27 +469,4 @@ mod tests { .build(), ) } - - #[derive(libp2p_swarm_derive::NetworkBehaviour)] - #[behaviour(prelude = "libp2p_swarm::derive_prelude")] - struct Behaviour { - list: super::Behaviour, - keep_alive: libp2p_swarm::keep_alive::Behaviour, - } - - impl Behaviour - where - S: Default, - { - fn new() -> Self { - Self { - list: super::Behaviour { - waker: None, - close_connections: VecDeque::new(), - state: S::default(), - }, - keep_alive: libp2p_swarm::keep_alive::Behaviour, - } - } - } } diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs index e4723dd9..7de96cc1 100644 --- a/misc/connection-limits/src/lib.rs +++ b/misc/connection-limits/src/lib.rs @@ -529,7 +529,6 @@ mod tests { #[behaviour(prelude = "libp2p_swarm::derive_prelude")] struct Behaviour { limits: super::Behaviour, - keep_alive: libp2p_swarm::keep_alive::Behaviour, connection_denier: Toggle, } @@ -537,14 +536,12 @@ mod tests { fn new(limits: ConnectionLimits) -> Self { Self { limits: super::Behaviour::new(limits), - keep_alive: libp2p_swarm::keep_alive::Behaviour, connection_denier: None.into(), } } fn new_with_connection_denier(limits: ConnectionLimits) -> Self { Self { limits: super::Behaviour::new(limits), - keep_alive: libp2p_swarm::keep_alive::Behaviour, connection_denier: Some(ConnectionDenier {}).into(), } } diff --git a/misc/futures-bounded/CHANGELOG.md b/misc/futures-bounded/CHANGELOG.md new file mode 100644 index 00000000..bd05a0f8 --- /dev/null +++ b/misc/futures-bounded/CHANGELOG.md @@ -0,0 +1,3 @@ +## 0.1.0 + +Initial release. 
diff --git a/misc/futures-bounded/Cargo.toml b/misc/futures-bounded/Cargo.toml new file mode 100644 index 00000000..fae7528a --- /dev/null +++ b/misc/futures-bounded/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "futures-bounded" +version = "0.1.0" +edition = "2021" +rust-version.workspace = true +license = "MIT" +repository = "https://github.com/libp2p/rust-libp2p" +keywords = ["futures", "async", "backpressure"] +categories = ["data-structures", "asynchronous"] +description = "Utilities for bounding futures in size and time." +publish = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +futures-util = { version = "0.3.28" } +futures-timer = "3.0.2" + +[dev-dependencies] +tokio = { version = "1.29.1", features = ["macros", "rt"] } diff --git a/misc/futures-bounded/src/lib.rs b/misc/futures-bounded/src/lib.rs new file mode 100644 index 00000000..e7b461dc --- /dev/null +++ b/misc/futures-bounded/src/lib.rs @@ -0,0 +1,28 @@ +mod map; +mod set; + +pub use map::{FuturesMap, PushError}; +pub use set::FuturesSet; +use std::fmt; +use std::fmt::Formatter; +use std::time::Duration; + +/// A future failed to complete within the given timeout. 
+#[derive(Debug)] +pub struct Timeout { + limit: Duration, +} + +impl Timeout { + fn new(duration: Duration) -> Self { + Self { limit: duration } + } +} + +impl fmt::Display for Timeout { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "future failed to complete within {:?}", self.limit) + } +} + +impl std::error::Error for Timeout {} diff --git a/misc/futures-bounded/src/map.rs b/misc/futures-bounded/src/map.rs new file mode 100644 index 00000000..cecf6070 --- /dev/null +++ b/misc/futures-bounded/src/map.rs @@ -0,0 +1,268 @@ +use std::future::Future; +use std::hash::Hash; +use std::mem; +use std::pin::Pin; +use std::task::{Context, Poll, Waker}; +use std::time::Duration; + +use futures_timer::Delay; +use futures_util::future::BoxFuture; +use futures_util::stream::FuturesUnordered; +use futures_util::{FutureExt, StreamExt}; + +use crate::Timeout; + +/// Represents a map of [`Future`]s. +/// +/// Each future must finish within the specified time and the map never outgrows its capacity. +pub struct FuturesMap { + timeout: Duration, + capacity: usize, + inner: FuturesUnordered>>>, + empty_waker: Option, + full_waker: Option, +} + +/// Error of a future pushing +#[derive(PartialEq, Debug)] +pub enum PushError { + /// The length of the set is equal to the capacity + BeyondCapacity(F), + /// The set already contains the given future's ID + ReplacedFuture(F), +} + +impl FuturesMap { + pub fn new(timeout: Duration, capacity: usize) -> Self { + Self { + timeout, + capacity, + inner: Default::default(), + empty_waker: None, + full_waker: None, + } + } +} + +impl FuturesMap +where + ID: Clone + Hash + Eq + Send + Unpin + 'static, +{ + /// Push a future into the map. + /// + /// This method inserts the given future with defined `future_id` to the set. + /// If the length of the map is equal to the capacity, this method returns [PushError::BeyondCapacity], + /// that contains the passed future. In that case, the future is not inserted to the map. 
+ /// If a future with the given `future_id` already exists, then the old future will be replaced by a new one. + /// In that case, the returned error [PushError::ReplacedFuture] contains the old future. + pub fn try_push(&mut self, future_id: ID, future: F) -> Result<(), PushError>> + where + F: Future + Send + 'static, + { + if self.inner.len() >= self.capacity { + return Err(PushError::BeyondCapacity(future.boxed())); + } + + if let Some(waker) = self.empty_waker.take() { + waker.wake(); + } + + match self.inner.iter_mut().find(|tagged| tagged.tag == future_id) { + None => { + self.inner.push(TaggedFuture { + tag: future_id, + inner: TimeoutFuture { + inner: future.boxed(), + timeout: Delay::new(self.timeout), + }, + }); + + Ok(()) + } + Some(existing) => { + let old_future = mem::replace( + &mut existing.inner, + TimeoutFuture { + inner: future.boxed(), + timeout: Delay::new(self.timeout), + }, + ); + + Err(PushError::ReplacedFuture(old_future.inner)) + } + } + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + #[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] // &mut Context is idiomatic. 
+ pub fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<()> { + if self.inner.len() < self.capacity { + return Poll::Ready(()); + } + + self.full_waker = Some(cx.waker().clone()); + + Poll::Pending + } + + pub fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll<(ID, Result)> { + let maybe_result = futures_util::ready!(self.inner.poll_next_unpin(cx)); + + match maybe_result { + None => { + self.empty_waker = Some(cx.waker().clone()); + Poll::Pending + } + Some((id, Ok(output))) => Poll::Ready((id, Ok(output))), + Some((id, Err(_timeout))) => Poll::Ready((id, Err(Timeout::new(self.timeout)))), + } + } +} + +struct TimeoutFuture { + inner: F, + timeout: Delay, +} + +impl Future for TimeoutFuture +where + F: Future + Unpin, +{ + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if self.timeout.poll_unpin(cx).is_ready() { + return Poll::Ready(Err(())); + } + + self.inner.poll_unpin(cx).map(Ok) + } +} + +struct TaggedFuture { + tag: T, + inner: F, +} + +impl Future for TaggedFuture +where + T: Clone + Unpin, + F: Future + Unpin, +{ + type Output = (T, F::Output); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let output = futures_util::ready!(self.inner.poll_unpin(cx)); + + Poll::Ready((self.tag.clone(), output)) + } +} + +#[cfg(test)] +mod tests { + use std::future::{pending, poll_fn, ready}; + use std::pin::Pin; + use std::time::Instant; + + use super::*; + + #[test] + fn cannot_push_more_than_capacity_tasks() { + let mut futures = FuturesMap::new(Duration::from_secs(10), 1); + + assert!(futures.try_push("ID_1", ready(())).is_ok()); + matches!( + futures.try_push("ID_2", ready(())), + Err(PushError::BeyondCapacity(_)) + ); + } + + #[test] + fn cannot_push_the_same_id_few_times() { + let mut futures = FuturesMap::new(Duration::from_secs(10), 5); + + assert!(futures.try_push("ID", ready(())).is_ok()); + matches!( + futures.try_push("ID", ready(())), + Err(PushError::ReplacedFuture(_)) + ); 
+ } + + #[tokio::test] + async fn futures_timeout() { + let mut futures = FuturesMap::new(Duration::from_millis(100), 1); + + let _ = futures.try_push("ID", pending::<()>()); + Delay::new(Duration::from_millis(150)).await; + let (_, result) = poll_fn(|cx| futures.poll_unpin(cx)).await; + + assert!(result.is_err()) + } + + // Each future causes a delay, `Task` only has a capacity of 1, meaning they must be processed in sequence. + // We stop after NUM_FUTURES tasks, meaning the overall execution must at least take DELAY * NUM_FUTURES. + #[tokio::test] + async fn backpressure() { + const DELAY: Duration = Duration::from_millis(100); + const NUM_FUTURES: u32 = 10; + + let start = Instant::now(); + Task::new(DELAY, NUM_FUTURES, 1).await; + let duration = start.elapsed(); + + assert!(duration >= DELAY * NUM_FUTURES); + } + + struct Task { + future: Duration, + num_futures: usize, + num_processed: usize, + inner: FuturesMap, + } + + impl Task { + fn new(future: Duration, num_futures: u32, capacity: usize) -> Self { + Self { + future, + num_futures: num_futures as usize, + num_processed: 0, + inner: FuturesMap::new(Duration::from_secs(60), capacity), + } + } + } + + impl Future for Task { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + while this.num_processed < this.num_futures { + if let Poll::Ready((_, result)) = this.inner.poll_unpin(cx) { + if result.is_err() { + panic!("Timeout is great than future delay") + } + + this.num_processed += 1; + continue; + } + + if let Poll::Ready(()) = this.inner.poll_ready_unpin(cx) { + // We push the constant future's ID to prove that user can use the same ID + // if the future was finished + let maybe_future = this.inner.try_push(1u8, Delay::new(this.future)); + assert!(maybe_future.is_ok(), "we polled for readiness"); + + continue; + } + + return Poll::Pending; + } + + Poll::Ready(()) + } + } +} diff --git a/misc/futures-bounded/src/set.rs 
b/misc/futures-bounded/src/set.rs new file mode 100644 index 00000000..96140d82 --- /dev/null +++ b/misc/futures-bounded/src/set.rs @@ -0,0 +1,58 @@ +use std::future::Future; +use std::task::{ready, Context, Poll}; +use std::time::Duration; + +use futures_util::future::BoxFuture; + +use crate::{FuturesMap, PushError, Timeout}; + +/// Represents a list of [Future]s. +/// +/// Each future must finish within the specified time and the list never outgrows its capacity. +pub struct FuturesSet { + id: u32, + inner: FuturesMap, +} + +impl FuturesSet { + pub fn new(timeout: Duration, capacity: usize) -> Self { + Self { + id: 0, + inner: FuturesMap::new(timeout, capacity), + } + } +} + +impl FuturesSet { + /// Push a future into the list. + /// + /// This method adds the given future to the list. + /// If the length of the list is equal to the capacity, this method returns a error that contains the passed future. + /// In that case, the future is not added to the set. + pub fn try_push(&mut self, future: F) -> Result<(), BoxFuture> + where + F: Future + Send + 'static, + { + self.id = self.id.wrapping_add(1); + + match self.inner.try_push(self.id, future) { + Ok(()) => Ok(()), + Err(PushError::BeyondCapacity(w)) => Err(w), + Err(PushError::ReplacedFuture(_)) => unreachable!("we never reuse IDs"), + } + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + pub fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<()> { + self.inner.poll_ready_unpin(cx) + } + + pub fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll> { + let (_, res) = ready!(self.inner.poll_unpin(cx)); + + Poll::Ready(res) + } +} diff --git a/misc/keygen/Cargo.toml b/misc/keygen/Cargo.toml index 6de32895..b6959150 100644 --- a/misc/keygen/Cargo.toml +++ b/misc/keygen/Cargo.toml @@ -10,10 +10,10 @@ categories = ["network-programming", "asynchronous"] publish = false [dependencies] -clap = { version = "4.3.21", features = ["derive"] } +clap = { version = "4.3.23", features = 
["derive"] } zeroize = "1" -serde = { version = "1.0.183", features = ["derive"] } -serde_json = "1.0.100" +serde = { version = "1.0.188", features = ["derive"] } +serde_json = "1.0.107" libp2p-core = { workspace = true } -base64 = "0.21.2" +base64 = "0.21.4" libp2p-identity = { workspace = true } diff --git a/misc/metrics/src/kad.rs b/misc/metrics/src/kad.rs index e4170dd9..bd5a6526 100644 --- a/misc/metrics/src/kad.rs +++ b/misc/metrics/src/kad.rs @@ -159,10 +159,10 @@ impl Metrics { } } -impl super::Recorder for Metrics { - fn record(&self, event: &libp2p_kad::KademliaEvent) { +impl super::Recorder for Metrics { + fn record(&self, event: &libp2p_kad::Event) { match event { - libp2p_kad::KademliaEvent::OutboundQueryProgressed { result, stats, .. } => { + libp2p_kad::Event::OutboundQueryProgressed { result, stats, .. } => { self.query_result_num_requests .get_or_create(&result.into()) .observe(stats.num_requests().into()); @@ -217,7 +217,7 @@ impl super::Recorder for Metrics { _ => {} } } - libp2p_kad::KademliaEvent::RoutingUpdated { + libp2p_kad::Event::RoutingUpdated { is_new_peer, old_peer, bucket_range: (low, _high), @@ -250,7 +250,7 @@ impl super::Recorder for Metrics { } } - libp2p_kad::KademliaEvent::InboundRequest { request } => { + libp2p_kad::Event::InboundRequest { request } => { self.inbound_requests.get_or_create(&request.into()).inc(); } _ => {} diff --git a/misc/metrics/src/lib.rs b/misc/metrics/src/lib.rs index fd977537..2132dd5d 100644 --- a/misc/metrics/src/lib.rs +++ b/misc/metrics/src/lib.rs @@ -118,8 +118,8 @@ impl Recorder for Metrics { } #[cfg(feature = "kad")] -impl Recorder for Metrics { - fn record(&self, event: &libp2p_kad::KademliaEvent) { +impl Recorder for Metrics { + fn record(&self, event: &libp2p_kad::Event) { self.kad.record(event) } } diff --git a/misc/multiaddr/README.md b/misc/multiaddr/README.md deleted file mode 100644 index e745a6f6..00000000 --- a/misc/multiaddr/README.md +++ /dev/null @@ -1 +0,0 @@ -Moved to 
https://github.com/multiformats/rust-multiaddr. \ No newline at end of file diff --git a/misc/multistream-select/Cargo.toml b/misc/multistream-select/Cargo.toml index 711a8014..8d19e3e5 100644 --- a/misc/multistream-select/Cargo.toml +++ b/misc/multistream-select/Cargo.toml @@ -15,7 +15,7 @@ bytes = "1" futures = "0.3" log = "0.4" pin-project = "1.1.3" -smallvec = "1.11.0" +smallvec = "1.11.1" unsigned-varint = "0.7" [dev-dependencies] diff --git a/misc/server/CHANGELOG.md b/misc/server/CHANGELOG.md new file mode 100644 index 00000000..5fd4313f --- /dev/null +++ b/misc/server/CHANGELOG.md @@ -0,0 +1,71 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [0.12.3] +### Changed +- Add libp2p-lookup to Dockerfile to enable healthchecks. + +### Fixed + +- Disable QUIC `draft-29` support. + Listening on `/quic` and `/quic-v1` addresses with the same port would otherwise result in an "Address already in use" error by the OS. + See [PR 4467]. + +[PR 4467]: https://github.com/libp2p/rust-libp2p/pull/4467 + +## [0.12.2] +### Fixed +- Adhere to `--metrics-path` flag and listen on `0.0.0.0:8888` (default IPFS metrics port). + [PR 4392] + +[PR 4392]: https://github.com/libp2p/rust-libp2p/pull/4392 + +## [0.12.1] +### Changed +- Move to tokio and hyper. + See [PR 4311]. +- Move to distroless Docker base image. + See [PR 4311]. + +[PR 4311]: https://github.com/libp2p/rust-libp2p/pull/4311 + +## [0.8.0] +### Changed +- Remove mplex support. + +## [0.7.0] +### Changed +- Update to libp2p v0.47.0. + +## [0.6.0] - [2022-05-05] +### Changed +- Update to libp2p v0.44.0. + +## [0.5.4] - [2022-01-11] +### Changed +- Pull latest autonat changes. + +## [0.5.3] - [2021-12-25] +### Changed +- Update dependencies. +- Pull in autonat fixes. 
+ +## [0.5.2] - [2021-12-20] +### Added +- Add support for libp2p autonat protocol via `--enable-autonat`. + +## [0.5.1] - [2021-12-20] +### Fixed +- Update dependencies. +- Fix typo in command line flag `--enable-kademlia`. + +## [0.5.0] - 2021-11-18 +### Changed +- Disable Kademlia protocol by default. + +## [0.4.0] - 2021-11-18 +### Fixed +- Update dependencies. diff --git a/misc/server/Cargo.toml b/misc/server/Cargo.toml new file mode 100644 index 00000000..aa259ce8 --- /dev/null +++ b/misc/server/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "libp2p-server" +version = "0.12.3" +authors = ["Max Inden "] +edition = "2021" +repository = "https://github.com/libp2p/rust-libp2p" +rust-version = { workspace = true } +description = "A rust-libp2p server binary." +license = "MIT" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +base64 = "0.21" +clap = { version = "4.3.12", features = ["derive"] } +env_logger = "0.10.0" +futures = "0.3" +futures-timer = "3" +hyper = { version = "0.14", features = ["server", "tcp", "http1"] } +libp2p = { workspace = true, features = ["autonat", "dns", "tokio", "noise", "tcp", "yamux", "identify", "kad", "ping", "relay", "metrics", "rsa", "macros", "quic"] } +log = "0.4" +prometheus-client = "0.21.2" +serde = "1.0.188" +serde_derive = "1.0.125" +serde_json = "1.0" +tokio = { version = "1", features = ["rt-multi-thread", "macros"] } +zeroize = "1" diff --git a/misc/server/Dockerfile b/misc/server/Dockerfile new file mode 100644 index 00000000..72641cc3 --- /dev/null +++ b/misc/server/Dockerfile @@ -0,0 +1,20 @@ +FROM rust:1.72-bullseye as builder +WORKDIR /usr/src/rust-libp2p-server + +# Run with access to the target cache to speed up builds +WORKDIR /workspace + +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + cargo install --locked --root /usr/local libp2p-lookup --version 0.6.4 + +ADD . . 
+RUN --mount=type=cache,target=./target \ + --mount=type=cache,target=/usr/local/cargo/registry \ + cargo build --release --package libp2p-server + +RUN --mount=type=cache,target=./target \ + mv ./target/release/libp2p-server /usr/local/bin/libp2p-server + +FROM gcr.io/distroless/cc +COPY --from=builder /usr/local/bin/libp2p-server /usr/local/bin/libp2p-lookup /usr/local/bin/ +CMD ["libp2p-server"] diff --git a/misc/server/README.md b/misc/server/README.md new file mode 100644 index 00000000..0da1bd8a --- /dev/null +++ b/misc/server/README.md @@ -0,0 +1,41 @@ +# Rust libp2p Server + +A rust-libp2p based server implementation running: + +- the [Kademlia protocol](https://github.com/libp2p/specs/tree/master/kad-dht) + +- the [Circuit Relay v2 protocol](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md) + +- the [AutoNAT protocol](https://github.com/libp2p/specs/blob/master/autonat/README.md) + +## Usage + +``` +cargo run -- --help + +A rust-libp2p server binary. + +Usage: libp2p-server [OPTIONS] --config + +Options: + --config Path to IPFS config file + --metrics-path Metric endpoint path [default: /metrics] + --enable-kademlia Whether to run the libp2p Kademlia protocol and join the IPFS DHT + --enable-autonat Whether to run the libp2p Autonat protocol + -h, --help Print help +``` + + +``` +cargo run -- --config ~/.ipfs/config + +Local peer id: PeerId("12D3KooWSa1YEeQVSwvoqAMhwjKQ6kqZQckhWPb3RWEGV3sZGU6Z") +Listening on "/ip4/127.0.0.1/udp/4001/quic" +[...] +``` + +The Docker container includes [libp2-lookup](https://github.com/mxinden/libp2p-lookup/) to enable adding a proper healthcheck for container startup, e.g. 
+ +``` shell +docker run --health-cmd 'libp2p-lookup direct --address /ip4/127.0.0.1/tcp/4001/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa' /home/ipfs/.ipfs:/ipfs ghcr.io/libp2p/rust-libp2p-server --config /ipfs/config +``` diff --git a/misc/server/src/behaviour.rs b/misc/server/src/behaviour.rs new file mode 100644 index 00000000..2f7741b9 --- /dev/null +++ b/misc/server/src/behaviour.rs @@ -0,0 +1,78 @@ +use libp2p::autonat; +use libp2p::identify; +use libp2p::kad; +use libp2p::ping; +use libp2p::relay; +use libp2p::swarm::behaviour::toggle::Toggle; +use libp2p::{identity, swarm::NetworkBehaviour, Multiaddr, PeerId}; +use std::str::FromStr; +use std::time::Duration; + +const BOOTNODES: [&str; 4] = [ + "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + "QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + "QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", +]; + +#[derive(NetworkBehaviour)] +pub(crate) struct Behaviour { + relay: relay::Behaviour, + ping: ping::Behaviour, + identify: identify::Behaviour, + pub(crate) kademlia: Toggle>, + autonat: Toggle, +} + +impl Behaviour { + pub(crate) fn new( + pub_key: identity::PublicKey, + enable_kademlia: bool, + enable_autonat: bool, + ) -> Self { + let kademlia = if enable_kademlia { + let mut kademlia_config = kad::Config::default(); + // Instantly remove records and provider records. + // + // TODO: Replace hack with option to disable both. 
+ kademlia_config.set_record_ttl(Some(Duration::from_secs(0))); + kademlia_config.set_provider_record_ttl(Some(Duration::from_secs(0))); + let mut kademlia = kad::Behaviour::with_config( + pub_key.to_peer_id(), + kad::record::store::MemoryStore::new(pub_key.to_peer_id()), + kademlia_config, + ); + let bootaddr = Multiaddr::from_str("/dnsaddr/bootstrap.libp2p.io").unwrap(); + for peer in &BOOTNODES { + kademlia.add_address(&PeerId::from_str(peer).unwrap(), bootaddr.clone()); + } + kademlia.bootstrap().unwrap(); + Some(kademlia) + } else { + None + } + .into(); + + let autonat = if enable_autonat { + Some(autonat::Behaviour::new( + PeerId::from(pub_key.clone()), + Default::default(), + )) + } else { + None + } + .into(); + + Self { + relay: relay::Behaviour::new(PeerId::from(pub_key.clone()), Default::default()), + ping: ping::Behaviour::new(ping::Config::new()), + identify: identify::Behaviour::new( + identify::Config::new("ipfs/0.1.0".to_string(), pub_key).with_agent_version( + format!("rust-libp2p-server/{}", env!("CARGO_PKG_VERSION")), + ), + ), + kademlia, + autonat, + } + } +} diff --git a/misc/server/src/config.rs b/misc/server/src/config.rs new file mode 100644 index 00000000..c3e3ec52 --- /dev/null +++ b/misc/server/src/config.rs @@ -0,0 +1,39 @@ +use libp2p::Multiaddr; +use serde_derive::Deserialize; +use std::error::Error; +use std::path::Path; + +#[derive(Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct Config { + pub(crate) identity: Identity, + pub(crate) addresses: Addresses, +} + +impl Config { + pub(crate) fn from_file(path: &Path) -> Result> { + Ok(serde_json::from_str(&std::fs::read_to_string(path)?)?) 
+ } +} + +#[derive(Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct Identity { + #[serde(rename = "PeerID")] + pub(crate) peer_id: String, + pub(crate) priv_key: String, +} + +#[derive(Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct Addresses { + pub(crate) swarm: Vec, + pub(crate) append_announce: Vec, +} + +impl zeroize::Zeroize for Config { + fn zeroize(&mut self) { + self.identity.peer_id.zeroize(); + self.identity.priv_key.zeroize(); + } +} diff --git a/misc/server/src/http_service.rs b/misc/server/src/http_service.rs new file mode 100644 index 00000000..1f5ebaff --- /dev/null +++ b/misc/server/src/http_service.rs @@ -0,0 +1,137 @@ +// Copyright 2022 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +use hyper::http::StatusCode; +use hyper::service::Service; +use hyper::{Body, Method, Request, Response, Server}; +use log::info; +use prometheus_client::encoding::text::encode; +use prometheus_client::registry::Registry; +use std::future::Future; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::{Context, Poll}; + +const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0"; +pub(crate) async fn metrics_server( + registry: Registry, + metrics_path: String, +) -> Result<(), hyper::Error> { + // Serve on localhost. + let addr = ([0, 0, 0, 0], 8888).into(); + + let server = Server::bind(&addr).serve(MakeMetricService::new(registry, metrics_path.clone())); + info!( + "Metrics server on http://{}{}", + server.local_addr(), + metrics_path + ); + server.await?; + Ok(()) +} +pub(crate) struct MetricService { + reg: Arc>, + metrics_path: String, +} + +type SharedRegistry = Arc>; + +impl MetricService { + fn get_reg(&mut self) -> SharedRegistry { + Arc::clone(&self.reg) + } + fn respond_with_metrics(&mut self) -> Response { + let mut response: Response = Response::default(); + + response.headers_mut().insert( + hyper::header::CONTENT_TYPE, + METRICS_CONTENT_TYPE.try_into().unwrap(), + ); + + let reg = self.get_reg(); + encode(&mut response.body_mut(), ®.lock().unwrap()).unwrap(); + + *response.status_mut() = StatusCode::OK; + + response + } + fn respond_with_404_not_found(&mut self) -> Response { + Response::builder() + .status(StatusCode::NOT_FOUND) + .body(format!( + "Not found try localhost:[port]/{}", + self.metrics_path + )) + .unwrap() + } +} + +impl Service> for MetricService { + type Response = Response; + type Error = hyper::Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + let req_path = req.uri().path(); + let req_method = req.method(); + let resp = if (req_method == Method::GET) 
&& (req_path == self.metrics_path) { + // Encode and serve metrics from registry. + self.respond_with_metrics() + } else { + self.respond_with_404_not_found() + }; + Box::pin(async { Ok(resp) }) + } +} + +pub(crate) struct MakeMetricService { + reg: SharedRegistry, + metrics_path: String, +} + +impl MakeMetricService { + pub(crate) fn new(registry: Registry, metrics_path: String) -> MakeMetricService { + MakeMetricService { + reg: Arc::new(Mutex::new(registry)), + metrics_path, + } + } +} + +impl Service for MakeMetricService { + type Response = MetricService; + type Error = hyper::Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _: T) -> Self::Future { + let reg = self.reg.clone(); + let metrics_path = self.metrics_path.clone(); + let fut = async move { Ok(MetricService { reg, metrics_path }) }; + Box::pin(fut) + } +} diff --git a/misc/server/src/main.rs b/misc/server/src/main.rs new file mode 100644 index 00000000..e885301d --- /dev/null +++ b/misc/server/src/main.rs @@ -0,0 +1,211 @@ +use base64::Engine; +use clap::Parser; +use futures::future::Either; +use futures::stream::StreamExt; +use futures_timer::Delay; +use libp2p::core::muxing::StreamMuxerBox; +use libp2p::core::upgrade; +use libp2p::dns; +use libp2p::identify; +use libp2p::identity; +use libp2p::identity::PeerId; +use libp2p::kad; +use libp2p::metrics::{Metrics, Recorder}; +use libp2p::noise; +use libp2p::quic; +use libp2p::swarm::{SwarmBuilder, SwarmEvent}; +use libp2p::tcp; +use libp2p::yamux; +use libp2p::Transport; +use log::{debug, info, warn}; +use prometheus_client::metrics::info::Info; +use prometheus_client::registry::Registry; +use std::error::Error; +use std::io; +use std::path::PathBuf; +use std::str::FromStr; +use std::task::Poll; +use std::time::Duration; +use zeroize::Zeroizing; + +mod behaviour; +mod config; +mod http_service; + +const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(5 * 
60); + +#[derive(Debug, Parser)] +#[clap(name = "libp2p server", about = "A rust-libp2p server binary.")] +struct Opts { + /// Path to IPFS config file. + #[clap(long)] + config: PathBuf, + + /// Metric endpoint path. + #[clap(long, default_value = "/metrics")] + metrics_path: String, + + /// Whether to run the libp2p Kademlia protocol and join the IPFS DHT. + #[clap(long)] + enable_kademlia: bool, + + /// Whether to run the libp2p Autonat protocol. + #[clap(long)] + enable_autonat: bool, +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + env_logger::init(); + + let opt = Opts::parse(); + + let config = Zeroizing::new(config::Config::from_file(opt.config.as_path())?); + + let (local_peer_id, local_keypair) = { + let keypair = identity::Keypair::from_protobuf_encoding(&Zeroizing::new( + base64::engine::general_purpose::STANDARD + .decode(config.identity.priv_key.as_bytes())?, + ))?; + + let peer_id = keypair.public().into(); + assert_eq!( + PeerId::from_str(&config.identity.peer_id)?, + peer_id, + "Expect peer id derived from private key and peer id retrieved from config to match." + ); + + (peer_id, keypair) + }; + + let transport = { + let tcp_transport = + tcp::tokio::Transport::new(tcp::Config::new().port_reuse(true).nodelay(true)) + .upgrade(upgrade::Version::V1) + .authenticate(noise::Config::new(&local_keypair)?) + .multiplex(yamux::Config::default()) + .timeout(Duration::from_secs(20)); + + let quic_transport = quic::tokio::Transport::new(quic::Config::new(&local_keypair)); + + dns::tokio::Transport::system(libp2p::core::transport::OrTransport::new( + quic_transport, + tcp_transport, + ))? 
+ .map(|either_output, _| match either_output { + Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), + Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), + }) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) + .boxed() + }; + + let behaviour = behaviour::Behaviour::new( + local_keypair.public(), + opt.enable_kademlia, + opt.enable_autonat, + ); + let mut swarm = SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build(); + + if config.addresses.swarm.is_empty() { + warn!("No listen addresses configured."); + } + for address in &config.addresses.swarm { + match swarm.listen_on(address.clone()) { + Ok(_) => {} + Err(e @ libp2p::TransportError::MultiaddrNotSupported(_)) => { + warn!("Failed to listen on {address}, continuing anyways, {e}") + } + Err(e) => return Err(e.into()), + } + } + + if config.addresses.append_announce.is_empty() { + warn!("No external addresses configured."); + } + for address in &config.addresses.append_announce { + swarm.add_external_address(address.clone()) + } + info!( + "External addresses: {:?}", + swarm.external_addresses().collect::>() + ); + + let mut metric_registry = Registry::default(); + let metrics = Metrics::new(&mut metric_registry); + let build_info = Info::new(vec![("version".to_string(), env!("CARGO_PKG_VERSION"))]); + metric_registry.register( + "build", + "A metric with a constant '1' value labeled by version", + build_info, + ); + tokio::spawn(async move { + if let Err(e) = http_service::metrics_server(metric_registry, opt.metrics_path).await { + log::error!("Metrics server failed: {e}"); + } + }); + + let mut bootstrap_timer = Delay::new(BOOTSTRAP_INTERVAL); + + loop { + if let Poll::Ready(()) = futures::poll!(&mut bootstrap_timer) { + bootstrap_timer.reset(BOOTSTRAP_INTERVAL); + let _ = swarm + .behaviour_mut() + .kademlia + .as_mut() + .map(|k| k.bootstrap()); + } + + let event = swarm.next().await.expect("Swarm not to terminate."); + 
metrics.record(&event); + match event { + SwarmEvent::Behaviour(behaviour::BehaviourEvent::Identify(e)) => { + info!("{:?}", e); + metrics.record(&e); + + if let identify::Event::Received { + peer_id, + info: + identify::Info { + listen_addrs, + protocols, + .. + }, + } = e + { + if protocols.iter().any(|p| *p == kad::PROTOCOL_NAME) { + for addr in listen_addrs { + swarm + .behaviour_mut() + .kademlia + .as_mut() + .map(|k| k.add_address(&peer_id, addr)); + } + } + } + } + SwarmEvent::Behaviour(behaviour::BehaviourEvent::Ping(e)) => { + debug!("{:?}", e); + metrics.record(&e); + } + SwarmEvent::Behaviour(behaviour::BehaviourEvent::Kademlia(e)) => { + debug!("{:?}", e); + metrics.record(&e); + } + SwarmEvent::Behaviour(behaviour::BehaviourEvent::Relay(e)) => { + info!("{:?}", e); + metrics.record(&e) + } + SwarmEvent::Behaviour(behaviour::BehaviourEvent::Autonat(e)) => { + info!("{:?}", e); + // TODO: Add metric recording for `NatStatus`. + // metrics.record(&e) + } + SwarmEvent::NewListenAddr { address, .. } => { + info!("Listening on {address:?}"); + } + _ => {} + } + } +} diff --git a/misc/webrtc-utils/CHANGELOG.md b/misc/webrtc-utils/CHANGELOG.md new file mode 100644 index 00000000..c3485aa1 --- /dev/null +++ b/misc/webrtc-utils/CHANGELOG.md @@ -0,0 +1,6 @@ +## 0.1.0 + +- Initial release. + See [PR 4248]. 
+ +[PR 4248]: https://github.com/libp2p/rust-libp2p/pull/4248 diff --git a/misc/webrtc-utils/Cargo.toml b/misc/webrtc-utils/Cargo.toml new file mode 100644 index 00000000..a3a5bef9 --- /dev/null +++ b/misc/webrtc-utils/Cargo.toml @@ -0,0 +1,32 @@ +[package] +authors = ["Doug Anderson "] +categories = ["network-programming"] +description = "Utilities for WebRTC in libp2p" +edition = "2021" +license = "MIT" +name = "libp2p-webrtc-utils" +repository = "https://github.com/libp2p/rust-libp2p" +rust-version = { workspace = true } +version = "0.1.0" +publish = true + +[dependencies] +bytes = "1" +futures = "0.3" +hex = "0.4" +libp2p-core = { workspace = true } +libp2p-identity = { workspace = true } +libp2p-noise = { workspace = true } +log = "0.4.19" +quick-protobuf = "0.8" +quick-protobuf-codec = { workspace = true } +rand = "0.8" +serde = { version = "1.0", features = ["derive"] } +sha2 = "0.10.7" +thiserror = "1" +tinytemplate = "1.2" +asynchronous-codec = "0.6" + +[dev-dependencies] +hex-literal = "0.4" +unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } diff --git a/misc/webrtc-utils/src/fingerprint.rs b/misc/webrtc-utils/src/fingerprint.rs new file mode 100644 index 00000000..a02c4d11 --- /dev/null +++ b/misc/webrtc-utils/src/fingerprint.rs @@ -0,0 +1,109 @@ +// Copyright 2023 Doug Anderson. +// Copyright 2022 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use libp2p_core::multihash; +use sha2::Digest as _; +use std::fmt; + +pub const SHA256: &str = "sha-256"; +const MULTIHASH_SHA256_CODE: u64 = 0x12; + +type Multihash = multihash::Multihash<64>; + +/// A certificate fingerprint that is assumed to be created using the SHA256 hash algorithm. +#[derive(Eq, PartialEq, Copy, Clone)] +pub struct Fingerprint([u8; 32]); + +impl Fingerprint { + pub const FF: Fingerprint = Fingerprint([0xFF; 32]); + + pub const fn raw(digest: [u8; 32]) -> Self { + Fingerprint(digest) + } + + /// Creates a new [Fingerprint] from a raw certificate by hashing the given bytes with SHA256. + pub fn from_certificate(bytes: &[u8]) -> Self { + Fingerprint(sha2::Sha256::digest(bytes).into()) + } + + /// Converts [`Multihash`](multihash::Multihash) to [`Fingerprint`]. + pub fn try_from_multihash(hash: Multihash) -> Option { + if hash.code() != MULTIHASH_SHA256_CODE { + // Only support SHA256 for now. + return None; + } + + let bytes = hash.digest().try_into().ok()?; + + Some(Self(bytes)) + } + + /// Converts this fingerprint to [`Multihash`](multihash::Multihash). + pub fn to_multihash(self) -> Multihash { + Multihash::wrap(MULTIHASH_SHA256_CODE, &self.0).expect("fingerprint's len to be 32 bytes") + } + + /// Formats this fingerprint as uppercase hex, separated by colons (`:`). + /// + /// This is the format described in . + pub fn to_sdp_format(self) -> String { + self.0.map(|byte| format!("{byte:02X}")).join(":") + } + + /// Returns the algorithm used (e.g. 
"sha-256"). + /// See + pub fn algorithm(&self) -> String { + SHA256.to_owned() + } +} + +impl fmt::Debug for Fingerprint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&hex::encode(self.0)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + const SDP_FORMAT: &str = "7D:E3:D8:3F:81:A6:80:59:2A:47:1E:6B:6A:BB:07:47:AB:D3:53:85:A8:09:3F:DF:E1:12:C1:EE:BB:6C:C6:AC"; + const REGULAR_FORMAT: [u8; 32] = + hex_literal::hex!("7DE3D83F81A680592A471E6B6ABB0747ABD35385A8093FDFE112C1EEBB6CC6AC"); + + #[test] + fn sdp_format() { + let fp = Fingerprint::raw(REGULAR_FORMAT); + + let formatted = fp.to_sdp_format(); + + assert_eq!(formatted, SDP_FORMAT) + } + + #[test] + fn from_sdp() { + let mut bytes = [0; 32]; + bytes.copy_from_slice(&hex::decode(SDP_FORMAT.replace(':', "")).unwrap()); + + let fp = Fingerprint::raw(bytes); + assert_eq!(fp, Fingerprint::raw(REGULAR_FORMAT)); + } +} diff --git a/transports/webrtc/src/generated/message.proto b/misc/webrtc-utils/src/generated/message.proto similarity index 100% rename from transports/webrtc/src/generated/message.proto rename to misc/webrtc-utils/src/generated/message.proto diff --git a/transports/webrtc/src/generated/mod.rs b/misc/webrtc-utils/src/generated/mod.rs similarity index 100% rename from transports/webrtc/src/generated/mod.rs rename to misc/webrtc-utils/src/generated/mod.rs diff --git a/transports/webrtc/src/generated/webrtc/mod.rs b/misc/webrtc-utils/src/generated/webrtc/mod.rs similarity index 100% rename from transports/webrtc/src/generated/webrtc/mod.rs rename to misc/webrtc-utils/src/generated/webrtc/mod.rs diff --git a/transports/webrtc/src/generated/webrtc/pb.rs b/misc/webrtc-utils/src/generated/webrtc/pb.rs similarity index 100% rename from transports/webrtc/src/generated/webrtc/pb.rs rename to misc/webrtc-utils/src/generated/webrtc/pb.rs diff --git a/misc/webrtc-utils/src/lib.rs b/misc/webrtc-utils/src/lib.rs new file mode 100644 index 00000000..c744634d --- /dev/null +++ 
b/misc/webrtc-utils/src/lib.rs @@ -0,0 +1,15 @@ +mod proto { + #![allow(unreachable_pub)] + include!("generated/mod.rs"); + pub use self::webrtc::pb::{mod_Message::Flag, Message}; +} + +mod fingerprint; +pub mod noise; +pub mod sdp; +mod stream; +mod transport; + +pub use fingerprint::{Fingerprint, SHA256}; +pub use stream::{DropListener, Stream, MAX_MSG_LEN}; +pub use transport::parse_webrtc_dial_addr; diff --git a/transports/webrtc/src/tokio/upgrade/noise.rs b/misc/webrtc-utils/src/noise.rs similarity index 95% rename from transports/webrtc/src/tokio/upgrade/noise.rs rename to misc/webrtc-utils/src/noise.rs index 34e3526a..023766bc 100644 --- a/transports/webrtc/src/tokio/upgrade/noise.rs +++ b/misc/webrtc-utils/src/noise.rs @@ -24,15 +24,14 @@ use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_noise as noise; -use crate::tokio::fingerprint::Fingerprint; -use crate::tokio::Error; +use crate::fingerprint::Fingerprint; -pub(crate) async fn inbound( +pub async fn inbound( id_keys: identity::Keypair, stream: T, client_fingerprint: Fingerprint, server_fingerprint: Fingerprint, -) -> Result +) -> Result where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { @@ -49,12 +48,12 @@ where Ok(peer_id) } -pub(crate) async fn outbound( +pub async fn outbound( id_keys: identity::Keypair, stream: T, server_fingerprint: Fingerprint, client_fingerprint: Fingerprint, -) -> Result +) -> Result where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { diff --git a/misc/webrtc-utils/src/sdp.rs b/misc/webrtc-utils/src/sdp.rs new file mode 100644 index 00000000..7c4facaf --- /dev/null +++ b/misc/webrtc-utils/src/sdp.rs @@ -0,0 +1,157 @@ +// Copyright 2023 Doug Anderson +// Copyright 2022 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +use crate::fingerprint::Fingerprint; +use serde::Serialize; +use std::net::{IpAddr, SocketAddr}; +use tinytemplate::TinyTemplate; + +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; + +pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: &str) -> String { + let answer = render_description( + SERVER_SESSION_DESCRIPTION, + addr, + server_fingerprint, + client_ufrag, + ); + + log::trace!("Created SDP answer: {answer}"); + + answer +} + +// See [`CLIENT_SESSION_DESCRIPTION`]. +// +// a=ice-lite +// +// A lite implementation is only appropriate for devices that will *always* be connected to +// the public Internet and have a public IP address at which it can receive packets from any +// correspondent. ICE will not function when a lite implementation is placed behind a NAT +// (RFC8445). 
+// +// a=tls-id: +// +// "TLS ID" uniquely identifies a TLS association. +// The ICE protocol uses a "TLS ID" system to indicate whether a fresh DTLS connection +// must be reopened in case of ICE renegotiation. Considering that ICE renegotiations +// never happen in our use case, we can simply put a random value and not care about +// it. Note however that the TLS ID in the answer must be present if and only if the +// offer contains one. (RFC8842) +// TODO: is it true that renegotiations never happen? what about a connection closing? +// "tls-id" attribute MUST be present in the initial offer and respective answer (RFC8839). +// XXX: but right now browsers don't send it. +// +// a=setup:passive +// +// "passive" indicates that the remote DTLS server will only listen for incoming +// connections. (RFC5763) +// The answerer (server) MUST not be located behind a NAT (RFC6135). +// +// The answerer MUST use either a setup attribute value of setup:active or setup:passive. +// Note that if the answerer uses setup:passive, then the DTLS handshake will not begin until +// the answerer is received, which adds additional latency. setup:active allows the answer and +// the DTLS handshake to occur in parallel. Thus, setup:active is RECOMMENDED. +// +// a=candidate: +// +// A transport address for a candidate that can be used for connectivity checks (RFC8839). +// +// a=end-of-candidates +const SERVER_SESSION_DESCRIPTION: &str = "v=0 +o=- 0 0 IN {ip_version} {target_ip} +s=- +t=0 0 +a=ice-lite +m=application {target_port} UDP/DTLS/SCTP webrtc-datachannel +c=IN {ip_version} {target_ip} +a=mid:0 +a=ice-options:ice2 +a=ice-ufrag:{ufrag} +a=ice-pwd:{pwd} +a=fingerprint:{fingerprint_algorithm} {fingerprint_value} +a=setup:passive +a=sctp-port:5000 +a=max-message-size:16384 +a=candidate:1467250027 1 UDP 1467250027 {target_ip} {target_port} typ host +a=end-of-candidates +"; + +/// Indicates the IP version used in WebRTC: `IP4` or `IP6`. 
+#[derive(Serialize)] +enum IpVersion { + IP4, + IP6, +} + +/// Context passed to the templating engine, which replaces the above placeholders (e.g. +/// `{IP_VERSION}`) with real values. +#[derive(Serialize)] +struct DescriptionContext { + pub(crate) ip_version: IpVersion, + pub(crate) target_ip: IpAddr, + pub(crate) target_port: u16, + pub(crate) fingerprint_algorithm: String, + pub(crate) fingerprint_value: String, + pub(crate) ufrag: String, + pub(crate) pwd: String, +} + +/// Renders a [`TinyTemplate`] description using the provided arguments. +pub fn render_description( + description: &str, + addr: SocketAddr, + fingerprint: Fingerprint, + ufrag: &str, +) -> String { + let mut tt = TinyTemplate::new(); + tt.add_template("description", description).unwrap(); + + let context = DescriptionContext { + ip_version: { + if addr.is_ipv4() { + IpVersion::IP4 + } else { + IpVersion::IP6 + } + }, + target_ip: addr.ip(), + target_port: addr.port(), + fingerprint_algorithm: fingerprint.algorithm(), + fingerprint_value: fingerprint.to_sdp_format(), + // NOTE: ufrag is equal to pwd. + ufrag: ufrag.to_owned(), + pwd: ufrag.to_owned(), + }; + tt.render("description", &context).unwrap() +} + +/// Generates a random ufrag and adds a prefix according to the spec. +pub fn random_ufrag() -> String { + format!( + "libp2p+webrtc+v1/{}", + thread_rng() + .sample_iter(&Alphanumeric) + .take(64) + .map(char::from) + .collect::() + ) +} diff --git a/transports/webrtc/src/tokio/substream.rs b/misc/webrtc-utils/src/stream.rs similarity index 87% rename from transports/webrtc/src/tokio/substream.rs rename to misc/webrtc-utils/src/stream.rs index 89e52376..a6de759a 100644 --- a/transports/webrtc/src/tokio/substream.rs +++ b/misc/webrtc-utils/src/stream.rs @@ -1,4 +1,5 @@ // Copyright 2022 Parity Technologies (UK) Ltd. +// Copyright 2023 Protocol Labs. 
// // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), @@ -18,24 +19,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use asynchronous_codec::Framed; use bytes::Bytes; use futures::{channel::oneshot, prelude::*, ready}; -use tokio_util::compat::Compat; -use webrtc::data::data_channel::{DataChannel, PollDataChannel}; use std::{ io, pin::Pin, - sync::Arc, task::{Context, Poll}, }; use crate::proto::{Flag, Message}; -use crate::tokio::{ - substream::drop_listener::GracefullyClosed, - substream::framed_dc::FramedDc, - substream::state::{Closing, State}, +use crate::{ + stream::drop_listener::GracefullyClosed, + stream::framed_dc::FramedDc, + stream::state::{Closing, State}, }; mod drop_listener; @@ -47,7 +44,7 @@ mod state; /// "As long as message interleaving is not supported, the sender SHOULD limit the maximum message /// size to 16 KB to avoid monopolization." /// Source: -const MAX_MSG_LEN: usize = 16384; // 16kiB +pub const MAX_MSG_LEN: usize = 16 * 1024; /// Length of varint, in bytes. const VARINT_LEN: usize = 2; /// Overhead of the protobuf encoding, in bytes. @@ -55,26 +52,28 @@ const PROTO_OVERHEAD: usize = 5; /// Maximum length of data, in bytes. const MAX_DATA_LEN: usize = MAX_MSG_LEN - VARINT_LEN - PROTO_OVERHEAD; -pub(crate) use drop_listener::DropListener; -/// A substream on top of a WebRTC data channel. +pub use drop_listener::DropListener; +/// A stream backed by a WebRTC data channel. /// -/// To be a proper libp2p substream, we need to implement [`AsyncRead`] and [`AsyncWrite`] as well +/// To be a proper libp2p stream, we need to implement [`AsyncRead`] and [`AsyncWrite`] as well /// as support a half-closed state which we do by framing messages in a protobuf envelope. 
-pub struct Substream { - io: FramedDc, +pub struct Stream { + io: FramedDc, state: State, read_buffer: Bytes, /// Dropping this will close the oneshot and notify the receiver by emitting `Canceled`. drop_notifier: Option>, } -impl Substream { - /// Returns a new `Substream` and a listener, which will notify the receiver when/if the substream - /// is dropped. - pub(crate) fn new(data_channel: Arc) -> (Self, DropListener) { +impl Stream +where + T: AsyncRead + AsyncWrite + Unpin + Clone, +{ + /// Returns a new [`Stream`] and a [`DropListener`], which will notify the receiver when/if the stream is dropped. + pub fn new(data_channel: T) -> (Self, DropListener) { let (sender, receiver) = oneshot::channel(); - let substream = Self { + let stream = Self { io: framed_dc::new(data_channel.clone()), state: State::Open, read_buffer: Bytes::default(), @@ -82,10 +81,10 @@ impl Substream { }; let listener = DropListener::new(framed_dc::new(data_channel), receiver); - (substream, listener) + (stream, listener) } - /// Gracefully closes the "read-half" of the substream. + /// Gracefully closes the "read-half" of the stream. pub fn poll_close_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { loop { match self.state.close_read_barrier()? 
{ @@ -113,7 +112,10 @@ impl Substream { } } -impl AsyncRead for Substream { +impl AsyncRead for Stream +where + T: AsyncRead + AsyncWrite + Unpin, +{ fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -157,7 +159,10 @@ impl AsyncRead for Substream { } } -impl AsyncWrite for Substream { +impl AsyncWrite for Stream +where + T: AsyncRead + AsyncWrite + Unpin, +{ fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -236,10 +241,13 @@ impl AsyncWrite for Substream { } } -fn io_poll_next( - io: &mut Framed, quick_protobuf_codec::Codec>, +fn io_poll_next( + io: &mut FramedDc, cx: &mut Context<'_>, -) -> Poll, Option>)>>> { +) -> Poll, Option>)>>> +where + T: AsyncRead + AsyncWrite + Unpin, +{ match ready!(io.poll_next_unpin(cx)) .transpose() .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))? @@ -262,8 +270,8 @@ mod tests { // Largest possible message. let message = [0; MAX_DATA_LEN]; - let protobuf = crate::proto::Message { - flag: Some(crate::proto::Flag::FIN), + let protobuf = Message { + flag: Some(Flag::FIN), message: Some(message.to_vec()), }; diff --git a/transports/webrtc/src/tokio/substream/drop_listener.rs b/misc/webrtc-utils/src/stream/drop_listener.rs similarity index 77% rename from transports/webrtc/src/tokio/substream/drop_listener.rs rename to misc/webrtc-utils/src/stream/drop_listener.rs index 73524045..b638ea84 100644 --- a/transports/webrtc/src/tokio/substream/drop_listener.rs +++ b/misc/webrtc-utils/src/stream/drop_listener.rs @@ -20,7 +20,7 @@ use futures::channel::oneshot; use futures::channel::oneshot::Canceled; -use futures::{FutureExt, SinkExt}; +use futures::{AsyncRead, AsyncWrite, FutureExt, SinkExt}; use std::future::Future; use std::io; @@ -28,46 +28,42 @@ use std::pin::Pin; use std::task::{Context, Poll}; use crate::proto::{Flag, Message}; -use crate::tokio::substream::framed_dc::FramedDc; +use crate::stream::framed_dc::FramedDc; #[must_use] -pub(crate) struct DropListener { - state: State, +pub struct 
DropListener { + state: State, } -impl DropListener { - pub(crate) fn new(stream: FramedDc, receiver: oneshot::Receiver) -> Self { - let substream_id = stream.get_ref().stream_identifier(); - +impl DropListener { + pub fn new(stream: FramedDc, receiver: oneshot::Receiver) -> Self { Self { - state: State::Idle { - stream, - receiver, - substream_id, - }, + state: State::Idle { stream, receiver }, } } } -enum State { +enum State { /// The [`DropListener`] is idle and waiting to be activated. Idle { - stream: FramedDc, + stream: FramedDc, receiver: oneshot::Receiver, - substream_id: u16, }, /// The stream got dropped and we are sending a reset flag. SendingReset { - stream: FramedDc, + stream: FramedDc, }, Flushing { - stream: FramedDc, + stream: FramedDc, }, /// Bad state transition. Poisoned, } -impl Future for DropListener { +impl Future for DropListener +where + T: AsyncRead + AsyncWrite + Unpin, +{ type Output = io::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { @@ -77,23 +73,18 @@ impl Future for DropListener { match std::mem::replace(state, State::Poisoned) { State::Idle { stream, - substream_id, mut receiver, } => match receiver.poll_unpin(cx) { Poll::Ready(Ok(GracefullyClosed {})) => { return Poll::Ready(Ok(())); } Poll::Ready(Err(Canceled)) => { - log::info!("Substream {substream_id} dropped without graceful close, sending Reset"); + log::info!("Stream dropped without graceful close, sending Reset"); *state = State::SendingReset { stream }; continue; } Poll::Pending => { - *state = State::Idle { - stream, - substream_id, - receiver, - }; + *state = State::Idle { stream, receiver }; return Poll::Pending; } }, @@ -126,5 +117,5 @@ impl Future for DropListener { } } -/// Indicates that our substream got gracefully closed. -pub(crate) struct GracefullyClosed {} +/// Indicates that our stream got gracefully closed. 
+pub struct GracefullyClosed {} diff --git a/transports/webrtc/src/tokio/substream/framed_dc.rs b/misc/webrtc-utils/src/stream/framed_dc.rs similarity index 75% rename from transports/webrtc/src/tokio/substream/framed_dc.rs rename to misc/webrtc-utils/src/stream/framed_dc.rs index 1b3860b6..4409b79a 100644 --- a/transports/webrtc/src/tokio/substream/framed_dc.rs +++ b/misc/webrtc-utils/src/stream/framed_dc.rs @@ -19,22 +19,18 @@ // DEALINGS IN THE SOFTWARE. use asynchronous_codec::Framed; -use tokio_util::compat::Compat; -use tokio_util::compat::TokioAsyncReadCompatExt; -use webrtc::data::data_channel::{DataChannel, PollDataChannel}; +use futures::{AsyncRead, AsyncWrite}; -use std::sync::Arc; - -use super::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN}; use crate::proto::Message; +use crate::stream::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN}; -pub(crate) type FramedDc = Framed, quick_protobuf_codec::Codec>; -pub(crate) fn new(data_channel: Arc) -> FramedDc { - let mut inner = PollDataChannel::new(data_channel); - inner.set_read_buf_capacity(MAX_MSG_LEN); - +pub(crate) type FramedDc = Framed>; +pub(crate) fn new(inner: T) -> FramedDc +where + T: AsyncRead + AsyncWrite, +{ let mut framed = Framed::new( - inner.compat(), + inner, quick_protobuf_codec::Codec::new(MAX_MSG_LEN - VARINT_LEN), ); // If not set, `Framed` buffers up to 131kB of data before sending, which leads to "outbound diff --git a/transports/webrtc/src/tokio/substream/state.rs b/misc/webrtc-utils/src/stream/state.rs similarity index 99% rename from transports/webrtc/src/tokio/substream/state.rs rename to misc/webrtc-utils/src/stream/state.rs index b1768aa2..082325e4 100644 --- a/transports/webrtc/src/tokio/substream/state.rs +++ b/misc/webrtc-utils/src/stream/state.rs @@ -277,7 +277,7 @@ impl State { } } - /// Acts as a "barrier" for [`Substream::poll_close_read`](super::Substream::poll_close_read). + /// Acts as a "barrier" for [`Stream::poll_close_read`](super::Stream::poll_close_read). 
pub(crate) fn close_read_barrier(&mut self) -> io::Result> { loop { match self { diff --git a/misc/webrtc-utils/src/transport.rs b/misc/webrtc-utils/src/transport.rs new file mode 100644 index 00000000..440ad73e --- /dev/null +++ b/misc/webrtc-utils/src/transport.rs @@ -0,0 +1,101 @@ +use crate::fingerprint::Fingerprint; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use std::net::{IpAddr, SocketAddr}; + +/// Parse the given [`Multiaddr`] into a [`SocketAddr`] and a [`Fingerprint`] for dialing. +pub fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerprint)> { + let mut iter = addr.iter(); + + let ip = match iter.next()? { + Protocol::Ip4(ip) => IpAddr::from(ip), + Protocol::Ip6(ip) => IpAddr::from(ip), + _ => return None, + }; + + let port = iter.next()?; + let webrtc = iter.next()?; + let certhash = iter.next()?; + + let (port, fingerprint) = match (port, webrtc, certhash) { + (Protocol::Udp(port), Protocol::WebRTCDirect, Protocol::Certhash(cert_hash)) => { + let fingerprint = Fingerprint::try_from_multihash(cert_hash)?; + + (port, fingerprint) + } + _ => return None, + }; + + match iter.next() { + Some(Protocol::P2p(_)) => {} + // peer ID is optional + None => {} + // unexpected protocol + Some(_) => return None, + } + + Some((SocketAddr::new(ip, port), fingerprint)) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::net::{Ipv4Addr, Ipv6Addr}; + + #[test] + fn parse_valid_address_with_certhash_and_p2p() { + let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" + .parse() + .unwrap(); + + let maybe_parsed = parse_webrtc_dial_addr(&addr); + + assert_eq!( + maybe_parsed, + Some(( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 39901), + Fingerprint::raw(hex_literal::hex!( + "e2929e4a5548242ed6b512350df8829b1e4f9d50183c5732a07f99d7c4b2b8eb" + )) + )) + ); + } + + #[test] + fn peer_id_is_not_required() { + let 
addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w" + .parse() + .unwrap(); + + let maybe_parsed = parse_webrtc_dial_addr(&addr); + + assert_eq!( + maybe_parsed, + Some(( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 39901), + Fingerprint::raw(hex_literal::hex!( + "e2929e4a5548242ed6b512350df8829b1e4f9d50183c5732a07f99d7c4b2b8eb" + )) + )) + ); + } + + #[test] + fn parse_ipv6() { + let addr = + "/ip6/::1/udp/12345/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" + .parse() + .unwrap(); + + let maybe_parsed = parse_webrtc_dial_addr(&addr); + + assert_eq!( + maybe_parsed, + Some(( + SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 12345), + Fingerprint::raw(hex_literal::hex!( + "e2929e4a5548242ed6b512350df8829b1e4f9d50183c5732a07f99d7c4b2b8eb" + )) + )) + ); + } +} diff --git a/muxers/mplex/Cargo.toml b/muxers/mplex/Cargo.toml index 7a98fd13..50a2d105 100644 --- a/muxers/mplex/Cargo.toml +++ b/muxers/mplex/Cargo.toml @@ -20,7 +20,7 @@ log = "0.4" nohash-hasher = "0.2" parking_lot = "0.12" rand = "0.8" -smallvec = "1.11.0" +smallvec = "1.11.1" unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } [dev-dependencies] diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs index dfdc619a..86f84cea 100644 --- a/muxers/mplex/benches/split_send_size.rs +++ b/muxers/mplex/benches/split_send_size.rs @@ -32,7 +32,7 @@ use libp2p_core::{multiaddr::multiaddr, muxing, transport, upgrade, Multiaddr, T use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_mplex as mplex; -use libp2p_plaintext::PlainText2Config; +use libp2p_plaintext as plaintext; use std::pin::Pin; use std::time::Duration; @@ -166,30 +166,28 @@ fn run( } fn tcp_transport(split_send_size: usize) -> BenchTransport { - let key = identity::Keypair::generate_ed25519(); - let local_public_key = 
key.public(); - let mut mplex = mplex::MplexConfig::default(); mplex.set_split_send_size(split_send_size); libp2p_tcp::async_io::Transport::new(libp2p_tcp::Config::default().nodelay(true)) .upgrade(upgrade::Version::V1) - .authenticate(PlainText2Config { local_public_key }) + .authenticate(plaintext::Config::new( + &identity::Keypair::generate_ed25519(), + )) .multiplex(mplex) .timeout(Duration::from_secs(5)) .boxed() } fn mem_transport(split_send_size: usize) -> BenchTransport { - let key = identity::Keypair::generate_ed25519(); - let local_public_key = key.public(); - let mut mplex = mplex::MplexConfig::default(); mplex.set_split_send_size(split_send_size); transport::MemoryTransport::default() .upgrade(upgrade::Version::V1) - .authenticate(PlainText2Config { local_public_key }) + .authenticate(plaintext::Config::new( + &identity::Keypair::generate_ed25519(), + )) .multiplex(mplex) .timeout(Duration::from_secs(5)) .boxed() diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs index 85b58820..8002ad38 100644 --- a/muxers/mplex/src/io.rs +++ b/muxers/mplex/src/io.rs @@ -143,7 +143,7 @@ where } /// Flushes the underlying I/O stream. - pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { + pub(crate) fn poll_flush(&mut self, cx: &Context<'_>) -> Poll> { match &self.status { Status::Closed => return Poll::Ready(Ok(())), Status::Err(e) => return Poll::Ready(Err(io::Error::new(e.kind(), e.to_string()))), @@ -169,7 +169,7 @@ where /// > **Note**: No `Close` or `Reset` frames are sent on open substreams /// > before closing the underlying connection. However, the connection /// > close implies a flush of any frames already sent. 
- pub(crate) fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + pub(crate) fn poll_close(&mut self, cx: &Context<'_>) -> Poll> { match &self.status { Status::Closed => return Poll::Ready(Ok(())), Status::Err(e) => return Poll::Ready(Err(io::Error::new(e.kind(), e.to_string()))), @@ -208,10 +208,7 @@ where /// [`MaxBufferBehaviour::Block`] is used, this method is blocked /// (i.e. `Pending`) on some task reading from the substream whose /// buffer is full. - pub(crate) fn poll_next_stream( - &mut self, - cx: &mut Context<'_>, - ) -> Poll> { + pub(crate) fn poll_next_stream(&mut self, cx: &Context<'_>) -> Poll> { self.guard_open()?; // Try to read from the buffer first. @@ -252,10 +249,7 @@ where } /// Creates a new (outbound) substream, returning the allocated stream ID. - pub(crate) fn poll_open_stream( - &mut self, - cx: &mut Context<'_>, - ) -> Poll> { + pub(crate) fn poll_open_stream(&mut self, cx: &Context<'_>) -> Poll> { self.guard_open()?; // Check the stream limits. @@ -374,7 +368,7 @@ where /// Writes data to a substream. pub(crate) fn poll_write_stream( &mut self, - cx: &mut Context<'_>, + cx: &Context<'_>, id: LocalStreamId, buf: &[u8], ) -> Poll> { @@ -424,7 +418,7 @@ where /// Inbound substreams received in excess of that limit are immediately reset. pub(crate) fn poll_read_stream( &mut self, - cx: &mut Context<'_>, + cx: &Context<'_>, id: LocalStreamId, ) -> Poll>> { self.guard_open()?; @@ -516,7 +510,7 @@ where /// > the underlying I/O stream is already closed. pub(crate) fn poll_flush_stream( &mut self, - cx: &mut Context<'_>, + cx: &Context<'_>, id: LocalStreamId, ) -> Poll> { self.guard_open()?; @@ -532,7 +526,7 @@ where /// > **Note**: As opposed to `poll_close()`, a flush it not implied. 
pub(crate) fn poll_close_stream( &mut self, - cx: &mut Context<'_>, + cx: &Context<'_>, id: LocalStreamId, ) -> Poll> { self.guard_open()?; @@ -587,7 +581,7 @@ where /// /// The frame is only constructed if the underlying sink is ready to /// send another frame. - fn poll_send_frame(&mut self, cx: &mut Context<'_>, frame: F) -> Poll> + fn poll_send_frame(&mut self, cx: &Context<'_>, frame: F) -> Poll> where F: FnOnce() -> Frame, { @@ -613,7 +607,7 @@ where /// frames for any substream. fn poll_read_frame( &mut self, - cx: &mut Context<'_>, + cx: &Context<'_>, stream_id: Option, ) -> Poll>> { // Try to send pending frames, if there are any, without blocking, @@ -822,7 +816,7 @@ where } /// Sends pending frames, without flushing. - fn send_pending_frames(&mut self, cx: &mut Context<'_>) -> Poll> { + fn send_pending_frames(&mut self, cx: &Context<'_>) -> Poll> { while let Some(frame) = self.pending_frames.pop_back() { if self.poll_send_frame(cx, || frame.clone())?.is_pending() { self.pending_frames.push_back(frame); diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index 2b9d3c3a..acade64c 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -27,7 +27,7 @@ void = "1" [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } -clap = { version = "4.3.21", features = ["derive"] } +clap = { version = "4.3.23", features = ["derive"] } env_logger = "0.10.0" libp2p-dns = { workspace = true, features = ["async-std"] } libp2p-identify = { workspace = true } diff --git a/protocols/dcutr/src/behaviour_impl.rs b/protocols/dcutr/src/behaviour_impl.rs index 4993da65..748e1634 100644 --- a/protocols/dcutr/src/behaviour_impl.rs +++ b/protocols/dcutr/src/behaviour_impl.rs @@ -103,8 +103,8 @@ impl Behaviour { fn observed_addresses(&self) -> Vec { self.external_addresses .iter() - .cloned() .filter(|a| !a.iter().any(|p| p == Protocol::P2pCircuit)) + .cloned() .map(|a| a.with(Protocol::P2p(self.local_peer_id))) .collect() } 
diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index 6888e591..a47f413f 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -24,7 +24,7 @@ use libp2p_core::transport::{MemoryTransport, Transport}; use libp2p_dcutr as dcutr; use libp2p_identity as identity; use libp2p_identity::PeerId; -use libp2p_plaintext::PlainText2Config; +use libp2p_plaintext as plaintext; use libp2p_relay as relay; use libp2p_swarm::{NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; @@ -111,8 +111,7 @@ fn build_relay() -> Swarm { fn build_client() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); - let local_public_key = local_key.public(); - let local_peer_id = local_public_key.to_peer_id(); + let local_peer_id = local_key.public().to_peer_id(); let (relay_transport, behaviour) = relay::client::new(local_peer_id); @@ -120,7 +119,7 @@ fn build_client() -> Swarm { .or_transport(MemoryTransport::default()) .or_transport(libp2p_tcp::async_io::Transport::default()) .upgrade(Version::V1) - .authenticate(PlainText2Config { local_public_key }) + .authenticate(plaintext::Config::new(&local_key)) .multiplex(libp2p_yamux::Config::default()) .boxed(); diff --git a/protocols/floodsub/Cargo.toml b/protocols/floodsub/Cargo.toml index d79ae5bc..2484624d 100644 --- a/protocols/floodsub/Cargo.toml +++ b/protocols/floodsub/Cargo.toml @@ -22,8 +22,8 @@ log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" -smallvec = "1.11.0" -thiserror = "1.0.44" +smallvec = "1.11.1" +thiserror = "1.0.48" # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index acbab0c8..e4ed4e56 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -15,9 +15,9 @@ wasm-bindgen = ["getrandom/js", "instant/wasm-bindgen"] [dependencies] asynchronous-codec = "0.6" -base64 = "0.21.2" +base64 = "0.21.4" byteorder = "1.3.4" -bytes = "1.4" +bytes = "1.5" either = "1.9" fnv = "1.0.7" futures = "0.3.28" @@ -32,11 +32,11 @@ log = "0.4.20" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" -regex = "1.9.3" +regex = "1.9.5" serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.7" -smallvec = "1.11.0" -unsigned-varint = { version = "0.7.0", features = ["asynchronous_codec"] } +smallvec = "1.11.1" +unsigned-varint = { version = "0.7.2", features = ["asynchronous_codec"] } void = "1.0.2" # Metrics dependencies diff --git a/protocols/gossipsub/src/backoff.rs b/protocols/gossipsub/src/backoff.rs index 3021e841..b4a40b91 100644 --- a/protocols/gossipsub/src/backoff.rs +++ b/protocols/gossipsub/src/backoff.rs @@ -86,12 +86,7 @@ impl BackoffStorage { backoffs_by_heartbeat[index].insert(pair); HeartbeatIndex(index) }; - match self - .backoffs - .entry(topic.clone()) - .or_insert_with(HashMap::new) - .entry(*peer) - { + match self.backoffs.entry(topic.clone()).or_default().entry(*peer) { Entry::Occupied(mut o) => { let (backoff, index) = o.get(); if backoff < &instant { diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 7ca9f6df..402420f3 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -1020,10 +1020,7 @@ where "JOIN: Inserting {:?} random peers into the mesh", new_peers.len() ); - let mesh_peers = self - .mesh - .entry(topic_hash.clone()) - .or_insert_with(Default::default); + let mesh_peers = self.mesh.entry(topic_hash.clone()).or_default(); 
mesh_peers.extend(new_peers); } @@ -1962,10 +1959,7 @@ where for subscription in filtered_topics { // get the peers from the mapping, or insert empty lists if the topic doesn't exist let topic_hash = &subscription.topic_hash; - let peer_list = self - .topic_peers - .entry(topic_hash.clone()) - .or_insert_with(Default::default); + let peer_list = self.topic_peers.entry(topic_hash.clone()).or_default(); match subscription.action { SubscriptionAction::Subscribe => { @@ -2874,10 +2868,7 @@ where peer: PeerId, control: ControlAction, ) { - control_pool - .entry(peer) - .or_insert_with(Vec::new) - .push(control); + control_pool.entry(peer).or_default().push(control); } /// Takes each control action mapping and turns it into a message diff --git a/protocols/gossipsub/src/gossip_promises.rs b/protocols/gossipsub/src/gossip_promises.rs index e1418032..827206af 100644 --- a/protocols/gossipsub/src/gossip_promises.rs +++ b/protocols/gossipsub/src/gossip_promises.rs @@ -48,7 +48,7 @@ impl GossipPromises { // If a promise for this message id and peer already exists we don't update the expiry! 
self.promises .entry(message_id.clone()) - .or_insert_with(HashMap::new) + .or_default() .entry(peer) .or_insert(expires); } diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index ab92d536..c6c918d6 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -454,10 +454,7 @@ impl PeerScore { // Insert the ip peer_stats.known_ips.insert(ip); - self.peer_ips - .entry(ip) - .or_insert_with(HashSet::new) - .insert(*peer_id); + self.peer_ips.entry(ip).or_default().insert(*peer_id); } /// Removes an ip from a peer @@ -570,9 +567,7 @@ impl PeerScore { topic_hash: &TopicHash, ) { // adds an empty record with the message id - self.deliveries - .entry(msg_id.clone()) - .or_insert_with(DeliveryRecord::default); + self.deliveries.entry(msg_id.clone()).or_default(); if let Some(callback) = self.message_delivery_time_callback { if self @@ -595,10 +590,7 @@ impl PeerScore { ) { self.mark_first_message_delivery(from, topic_hash); - let record = self - .deliveries - .entry(msg_id.clone()) - .or_insert_with(DeliveryRecord::default); + let record = self.deliveries.entry(msg_id.clone()).or_default(); // this should be the first delivery trace if record.status != DeliveryStatus::Unknown { @@ -649,10 +641,7 @@ impl PeerScore { } let peers: Vec<_> = { - let record = self - .deliveries - .entry(msg_id.clone()) - .or_insert_with(DeliveryRecord::default); + let record = self.deliveries.entry(msg_id.clone()).or_default(); // Multiple peers can now reject the same message as we track which peers send us the // message. If we have already updated the status, return. @@ -686,10 +675,7 @@ impl PeerScore { msg_id: &MessageId, topic_hash: &TopicHash, ) { - let record = self - .deliveries - .entry(msg_id.clone()) - .or_insert_with(DeliveryRecord::default); + let record = self.deliveries.entry(msg_id.clone()).or_default(); if record.peers.get(from).is_some() { // we have already seen this duplicate! 
diff --git a/protocols/gossipsub/src/time_cache.rs b/protocols/gossipsub/src/time_cache.rs index ed0956ca..ffc95a47 100644 --- a/protocols/gossipsub/src/time_cache.rs +++ b/protocols/gossipsub/src/time_cache.rs @@ -99,6 +99,15 @@ where Entry::Vacant(entry) => entry.insert(default()), } } + pub(crate) fn or_default(self) -> &'a mut V + where + V: Default, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(V::default()), + } + } } impl TimeCache diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 9c9cd3f9..f1865635 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -330,7 +330,7 @@ impl From for proto::RPC { .into_iter() .map(|info| proto::PeerInfo { peer_id: info.peer_id.map(|id| id.to_bytes()), - /// TODO, see https://github.com/libp2p/specs/pull/217 + // TODO, see https://github.com/libp2p/specs/pull/217 signed_peer_record: None, }) .collect(), diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index 79632954..0e8812b1 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -1,4 +1,12 @@ -## 0.43.0 +## 0.43.1 - unreleased + +- Handle partial push messages. + Previously, push messages with partial information were ignored. + See [PR 4495]. + +[PR 4495]: https://github.com/libp2p/rust-libp2p/pull/4495 + +## 0.43.0 - Observed addresses (aka. external address candidates) of the local node, reported by a remote node via `libp2p-identify`, are no longer automatically considered confirmed external addresses, in other words they are no longer trusted by default. Instead users need to confirm the reported observed address either manually, or by using `libp2p-autonat`. 
diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index fb207dd7..557f5a18 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-identify" edition = "2021" rust-version = { workspace = true } description = "Nodes identifcation protocol for libp2p" -version = "0.43.0" +version = "0.43.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,14 +14,15 @@ categories = ["network-programming", "asynchronous"] asynchronous-codec = "0.6" futures = "0.3.28" futures-timer = "3.0.2" +futures-bounded = { workspace = true } libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } log = "0.4.20" -lru = "0.11.0" +lru = "0.11.1" quick-protobuf-codec = { workspace = true } quick-protobuf = "0.8" -smallvec = "1.11.0" +smallvec = "1.11.1" thiserror = "1.0" void = "1.0" either = "1.9.0" diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index 5a1712e8..50b9882f 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -18,13 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::protocol::{Identify, InboundPush, Info, OutboundPush, Push, UpgradeError}; +use crate::protocol::{Info, PushInfo, UpgradeError}; +use crate::{protocol, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}; use either::Either; -use futures::future::BoxFuture; use futures::prelude::*; -use futures::stream::FuturesUnordered; +use futures_bounded::Timeout; use futures_timer::Delay; -use libp2p_core::upgrade::SelectUpgrade; +use libp2p_core::upgrade::{ReadyUpgrade, SelectUpgrade}; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_identity::PublicKey; @@ -41,6 +41,9 @@ use smallvec::SmallVec; use std::collections::HashSet; use std::{io, task::Context, task::Poll, time::Duration}; +const STREAM_TIMEOUT: Duration = Duration::from_secs(60); +const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; + /// Protocol handler for sending and receiving identification requests. /// /// Outbound requests are sent periodically. The handler performs expects @@ -48,14 +51,17 @@ use std::{io, task::Context, task::Poll, time::Duration}; /// permitting the underlying connection to be closed. pub struct Handler { remote_peer_id: PeerId, - inbound_identify_push: Option>>, /// Pending events to yield. events: SmallVec< - [ConnectionHandlerEvent>, (), Event, io::Error>; 4], + [ConnectionHandlerEvent< + Either, ReadyUpgrade>, + (), + Event, + io::Error, + >; 4], >, - /// Pending identification replies, awaiting being sent. - pending_replies: FuturesUnordered>>, + active_streams: futures_bounded::FuturesSet>, /// Future that fires when we need to identify the node again. trigger_next_identify: Delay, @@ -80,6 +86,9 @@ pub struct Handler { /// Address observed by or for the remote. observed_addr: Multiaddr, + /// Identify information about the remote peer. 
+ remote_info: Option, + local_supported_protocols: SupportedProtocols, remote_supported_protocols: HashSet, external_addresses: HashSet, @@ -121,9 +130,11 @@ impl Handler { ) -> Self { Self { remote_peer_id, - inbound_identify_push: Default::default(), events: SmallVec::new(), - pending_replies: FuturesUnordered::new(), + active_streams: futures_bounded::FuturesSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), trigger_next_identify: Delay::new(initial_delay), exchanged_one_periodic_identify: false, interval, @@ -133,6 +144,7 @@ impl Handler { observed_addr, local_supported_protocols: SupportedProtocols::default(), remote_supported_protocols: HashSet::default(), + remote_info: Default::default(), external_addresses, } } @@ -147,19 +159,28 @@ impl Handler { >, ) { match output { - future::Either::Left(substream) => { + future::Either::Left(stream) => { let info = self.build_info(); - self.pending_replies - .push(crate::protocol::send(substream, info).boxed()); + if self + .active_streams + .try_push( + protocol::send_identify(stream, info).map_ok(|_| Success::SentIdentify), + ) + .is_err() + { + warn!("Dropping inbound stream because we are at capacity"); + } else { + self.exchanged_one_periodic_identify = true; + } } - future::Either::Right(fut) => { - if self.inbound_identify_push.replace(fut).is_some() { - warn!( - "New inbound identify push stream from {} while still \ - upgrading previous one. 
Replacing previous with new.", - self.remote_peer_id, - ); + future::Either::Right(stream) => { + if self + .active_streams + .try_push(protocol::recv_push(stream).map_ok(Success::ReceivedIdentifyPush)) + .is_err() + { + warn!("Dropping inbound identify push stream because we are at capacity"); } } } @@ -175,31 +196,29 @@ impl Handler { >, ) { match output { - future::Either::Left(remote_info) => { - self.update_supported_protocols_for_remote(&remote_info); - self.events - .push(ConnectionHandlerEvent::NotifyBehaviour(Event::Identified( - remote_info, - ))); + future::Either::Left(stream) => { + if self + .active_streams + .try_push(protocol::recv_identify(stream).map_ok(Success::ReceivedIdentify)) + .is_err() + { + warn!("Dropping outbound identify stream because we are at capacity"); + } } - future::Either::Right(()) => self.events.push(ConnectionHandlerEvent::NotifyBehaviour( - Event::IdentificationPushed, - )), - } - } + future::Either::Right(stream) => { + let info = self.build_info(); - fn on_dial_upgrade_error( - &mut self, - DialUpgradeError { error: err, .. 
}: DialUpgradeError< - ::OutboundOpenInfo, - ::OutboundProtocol, - >, - ) { - let err = err.map_upgrade_err(|e| e.into_inner()); - self.events.push(ConnectionHandlerEvent::NotifyBehaviour( - Event::IdentificationError(err), - )); - self.trigger_next_identify.reset(self.interval); + if self + .active_streams + .try_push( + protocol::send_identify(stream, info).map_ok(|_| Success::SentIdentifyPush), + ) + .is_err() + { + warn!("Dropping outbound identify push stream because we are at capacity"); + } + } + } } fn build_info(&mut self) -> Info { @@ -213,6 +232,12 @@ impl Handler { } } + fn handle_incoming_info(&mut self, info: &Info) { + self.remote_info.replace(info.clone()); + + self.update_supported_protocols_for_remote(info); + } + fn update_supported_protocols_for_remote(&mut self, remote_info: &Info) { let new_remote_protocols = HashSet::from_iter(remote_info.protocols.clone()); @@ -256,13 +281,20 @@ impl ConnectionHandler for Handler { type FromBehaviour = InEvent; type ToBehaviour = Event; type Error = io::Error; - type InboundProtocol = SelectUpgrade>; - type OutboundProtocol = Either>; + type InboundProtocol = + SelectUpgrade, ReadyUpgrade>; + type OutboundProtocol = Either, ReadyUpgrade>; type OutboundOpenInfo = (); type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(SelectUpgrade::new(Identify, Push::inbound()), ()) + SubstreamProtocol::new( + SelectUpgrade::new( + ReadyUpgrade::new(PROTOCOL_NAME), + ReadyUpgrade::new(PUSH_PROTOCOL_NAME), + ), + (), + ) } fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { @@ -271,21 +303,19 @@ impl ConnectionHandler for Handler { self.external_addresses = addresses; } InEvent::Push => { - let info = self.build_info(); self.events .push(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(Either::Right(Push::outbound(info)), ()), + protocol: SubstreamProtocol::new( + Either::Right(ReadyUpgrade::new(PUSH_PROTOCOL_NAME)), + (), + ), 
}); } } } fn connection_keep_alive(&self) -> KeepAlive { - if self.inbound_identify_push.is_some() { - return KeepAlive::Yes; - } - - if !self.pending_replies.is_empty() { + if !self.active_streams.is_empty() { return KeepAlive::Yes; } @@ -305,35 +335,54 @@ impl ConnectionHandler for Handler { // Poll the future that fires when we need to identify the node again. if let Poll::Ready(()) = self.trigger_next_identify.poll_unpin(cx) { self.trigger_next_identify.reset(self.interval); - let ev = ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(Either::Left(Identify), ()), + let event = ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new( + Either::Left(ReadyUpgrade::new(PROTOCOL_NAME)), + (), + ), }; - return Poll::Ready(ev); + return Poll::Ready(event); } - if let Some(Poll::Ready(res)) = self - .inbound_identify_push - .as_mut() - .map(|f| f.poll_unpin(cx)) - { - self.inbound_identify_push.take(); + match self.active_streams.poll_unpin(cx) { + Poll::Ready(Ok(Ok(Success::ReceivedIdentify(remote_info)))) => { + self.handle_incoming_info(&remote_info); - if let Ok(info) = res { - self.update_supported_protocols_for_remote(&info); return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::Identified( - info, + remote_info, ))); } - } + Poll::Ready(Ok(Ok(Success::SentIdentifyPush))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::IdentificationPushed, + )); + } + Poll::Ready(Ok(Ok(Success::SentIdentify))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::Identification, + )); + } + Poll::Ready(Ok(Ok(Success::ReceivedIdentifyPush(remote_push_info)))) => { + if let Some(mut info) = self.remote_info.clone() { + info.merge(remote_push_info); + self.handle_incoming_info(&info); - // Check for pending replies to send. 
- if let Poll::Ready(Some(result)) = self.pending_replies.poll_next_unpin(cx) { - let event = result - .map(|()| Event::Identification) - .unwrap_or_else(|err| Event::IdentificationError(StreamUpgradeError::Apply(err))); - self.exchanged_one_periodic_identify = true; - - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::Identified(info), + )); + }; + } + Poll::Ready(Ok(Err(e))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::IdentificationError(StreamUpgradeError::Apply(e)), + )); + } + Poll::Ready(Err(Timeout { .. })) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::IdentificationError(StreamUpgradeError::Timeout), + )); + } + Poll::Pending => {} } Poll::Pending @@ -355,8 +404,13 @@ impl ConnectionHandler for Handler { ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { self.on_fully_negotiated_outbound(fully_negotiated_outbound) } - ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { - self.on_dial_upgrade_error(dial_upgrade_error) + ConnectionEvent::DialUpgradeError(DialUpgradeError { error, .. 
}) => { + self.events.push(ConnectionHandlerEvent::NotifyBehaviour( + Event::IdentificationError( + error.map_upgrade_err(|e| void::unreachable(e.into_inner())), + ), + )); + self.trigger_next_identify.reset(self.interval); } ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) @@ -376,11 +430,10 @@ impl ConnectionHandler for Handler { self.remote_peer_id ); - let info = self.build_info(); self.events .push(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol: SubstreamProtocol::new( - Either::Right(Push::outbound(info)), + Either::Right(ReadyUpgrade::new(PUSH_PROTOCOL_NAME)), (), ), }); @@ -389,3 +442,10 @@ impl ConnectionHandler for Handler { } } } + +enum Success { + SentIdentify, + ReceivedIdentify(Info), + SentIdentifyPush, + ReceivedIdentifyPush(PushInfo), +} diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index a508591b..5e2891e0 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -20,20 +20,15 @@ use crate::proto; use asynchronous_codec::{FramedRead, FramedWrite}; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::{ - multiaddr, - upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}, - Multiaddr, -}; +use futures::prelude::*; +use libp2p_core::{multiaddr, Multiaddr}; use libp2p_identity as identity; use libp2p_identity::PublicKey; use libp2p_swarm::StreamProtocol; use log::{debug, trace}; use std::convert::TryFrom; -use std::{io, iter, pin::Pin}; +use std::io; use thiserror::Error; -use void::Void; const MAX_MESSAGE_SIZE_BYTES: usize = 4096; @@ -41,29 +36,7 @@ pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/id/1.0.0"); pub const PUSH_PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/id/push/1.0.0"); -/// Substream upgrade protocol for `/ipfs/id/1.0.0`. -#[derive(Debug, Clone)] -pub struct Identify; - -/// Substream upgrade protocol for `/ipfs/id/push/1.0.0`. 
-#[derive(Debug, Clone)] -pub struct Push(T); -pub struct InboundPush(); -pub struct OutboundPush(Info); - -impl Push { - pub fn inbound() -> Self { - Push(InboundPush()) - } -} - -impl Push { - pub fn outbound(info: Info) -> Self { - Push(OutboundPush(info)) - } -} - -/// Information of a peer sent in protocol messages. +/// Identify information of a peer sent in protocol messages. #[derive(Debug, Clone)] pub struct Info { /// The public key of the local peer. @@ -82,75 +55,42 @@ pub struct Info { pub observed_addr: Multiaddr, } -impl UpgradeInfo for Identify { - type Info = StreamProtocol; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(PROTOCOL_NAME) +impl Info { + pub fn merge(&mut self, info: PushInfo) { + if let Some(public_key) = info.public_key { + self.public_key = public_key; + } + if let Some(protocol_version) = info.protocol_version { + self.protocol_version = protocol_version; + } + if let Some(agent_version) = info.agent_version { + self.agent_version = agent_version; + } + if !info.listen_addrs.is_empty() { + self.listen_addrs = info.listen_addrs; + } + if !info.protocols.is_empty() { + self.protocols = info.protocols; + } + if let Some(observed_addr) = info.observed_addr { + self.observed_addr = observed_addr; + } } } -impl InboundUpgrade for Identify { - type Output = C; - type Error = UpgradeError; - type Future = future::Ready>; - - fn upgrade_inbound(self, socket: C, _: Self::Info) -> Self::Future { - future::ok(socket) - } +/// Identify push information of a peer sent in protocol messages. +/// Note that missing fields should be ignored, as peers may choose to send partial updates containing only the fields whose values have changed. 
+#[derive(Debug, Clone)] +pub struct PushInfo { + pub public_key: Option, + pub protocol_version: Option, + pub agent_version: Option, + pub listen_addrs: Vec, + pub protocols: Vec, + pub observed_addr: Option, } -impl OutboundUpgrade for Identify -where - C: AsyncRead + AsyncWrite + Unpin + Send + 'static, -{ - type Output = Info; - type Error = UpgradeError; - type Future = Pin> + Send>>; - - fn upgrade_outbound(self, socket: C, _: Self::Info) -> Self::Future { - recv(socket).boxed() - } -} - -impl UpgradeInfo for Push { - type Info = StreamProtocol; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(PUSH_PROTOCOL_NAME) - } -} - -impl InboundUpgrade for Push -where - C: AsyncRead + AsyncWrite + Unpin + Send + 'static, -{ - type Output = BoxFuture<'static, Result>; - type Error = Void; - type Future = future::Ready>; - - fn upgrade_inbound(self, socket: C, _: Self::Info) -> Self::Future { - // Lazily upgrade stream, thus allowing upgrade to happen within identify's handler. 
- future::ok(recv(socket).boxed()) - } -} - -impl OutboundUpgrade for Push -where - C: AsyncWrite + Unpin + Send + 'static, -{ - type Output = (); - type Error = UpgradeError; - type Future = Pin> + Send>>; - - fn upgrade_outbound(self, socket: C, _: Self::Info) -> Self::Future { - send(socket, self.0 .0).boxed() - } -} - -pub(crate) async fn send(io: T, info: Info) -> Result<(), UpgradeError> +pub(crate) async fn send_identify(io: T, info: Info) -> Result<(), UpgradeError> where T: AsyncWrite + Unpin, { @@ -184,7 +124,29 @@ where Ok(()) } -async fn recv(socket: T) -> Result +pub(crate) async fn recv_push(socket: T) -> Result +where + T: AsyncRead + AsyncWrite + Unpin, +{ + let info = recv(socket).await?.try_into()?; + + trace!("Received {:?}", info); + + Ok(info) +} + +pub(crate) async fn recv_identify(socket: T) -> Result +where + T: AsyncRead + AsyncWrite + Unpin, +{ + let info = recv(socket).await?.try_into()?; + + trace!("Received {:?}", info); + + Ok(info) +} + +async fn recv(socket: T) -> Result where T: AsyncRead + AsyncWrite + Unpin, { @@ -199,61 +161,93 @@ where ) .next() .await - .ok_or(UpgradeError::StreamClosed)?? 
- .try_into()?; - - trace!("Received: {:?}", info); + .ok_or(UpgradeError::StreamClosed)??; Ok(info) } +fn parse_listen_addrs(listen_addrs: Vec>) -> Vec { + listen_addrs + .into_iter() + .filter_map(|bytes| match Multiaddr::try_from(bytes) { + Ok(a) => Some(a), + Err(e) => { + debug!("Unable to parse multiaddr: {e:?}"); + None + } + }) + .collect() +} + +fn parse_protocols(protocols: Vec) -> Vec { + protocols + .into_iter() + .filter_map(|p| match StreamProtocol::try_from_owned(p) { + Ok(p) => Some(p), + Err(e) => { + debug!("Received invalid protocol from peer: {e}"); + None + } + }) + .collect() +} + +fn parse_public_key(public_key: Option>) -> Option { + public_key.and_then(|key| match PublicKey::try_decode_protobuf(&key) { + Ok(k) => Some(k), + Err(e) => { + debug!("Unable to decode public key: {e:?}"); + None + } + }) +} + +fn parse_observed_addr(observed_addr: Option>) -> Option { + observed_addr.and_then(|bytes| match Multiaddr::try_from(bytes) { + Ok(a) => Some(a), + Err(e) => { + debug!("Unable to parse observed multiaddr: {e:?}"); + None + } + }) +} + impl TryFrom for Info { type Error = UpgradeError; fn try_from(msg: proto::Identify) -> Result { - fn parse_multiaddr(bytes: Vec) -> Result { - Multiaddr::try_from(bytes) - } - - let listen_addrs = { - let mut addrs = Vec::new(); - for addr in msg.listenAddrs.into_iter() { - match parse_multiaddr(addr) { - Ok(a) => addrs.push(a), - Err(e) => { - debug!("Unable to parse multiaddr: {e:?}"); - } - } - } - addrs - }; - - let public_key = PublicKey::try_decode_protobuf(&msg.publicKey.unwrap_or_default())?; - - let observed_addr = match parse_multiaddr(msg.observedAddr.unwrap_or_default()) { - Ok(a) => a, - Err(e) => { - debug!("Unable to parse multiaddr: {e:?}"); - Multiaddr::empty() + let public_key = { + match parse_public_key(msg.publicKey) { + Some(key) => key, + // This will always produce a DecodingError if the public key is missing. 
+ None => PublicKey::try_decode_protobuf(Default::default())?, } }; + let info = Info { public_key, protocol_version: msg.protocolVersion.unwrap_or_default(), agent_version: msg.agentVersion.unwrap_or_default(), - listen_addrs, - protocols: msg - .protocols - .into_iter() - .filter_map(|p| match StreamProtocol::try_from_owned(p) { - Ok(p) => Some(p), - Err(e) => { - debug!("Received invalid protocol from peer: {e}"); - None - } - }) - .collect(), - observed_addr, + listen_addrs: parse_listen_addrs(msg.listenAddrs), + protocols: parse_protocols(msg.protocols), + observed_addr: parse_observed_addr(msg.observedAddr).unwrap_or(Multiaddr::empty()), + }; + + Ok(info) + } +} + +impl TryFrom for PushInfo { + type Error = UpgradeError; + + fn try_from(msg: proto::Identify) -> Result { + let info = PushInfo { + public_key: parse_public_key(msg.publicKey), + protocol_version: msg.protocolVersion, + agent_version: msg.agentVersion, + listen_addrs: parse_listen_addrs(msg.listenAddrs), + protocols: parse_protocols(msg.protocols), + observed_addr: parse_observed_addr(msg.observedAddr), }; Ok(info) @@ -303,7 +297,7 @@ mod tests { ), }; - let info = Info::try_from(payload).expect("not to fail"); + let info = PushInfo::try_from(payload).expect("not to fail"); assert_eq!(info.listen_addrs, vec![valid_multiaddr]) } diff --git a/protocols/identify/tests/smoke.rs b/protocols/identify/tests/smoke.rs index c70ab318..c1926b41 100644 --- a/protocols/identify/tests/smoke.rs +++ b/protocols/identify/tests/smoke.rs @@ -1,6 +1,6 @@ use libp2p_core::multiaddr::Protocol; use libp2p_identify as identify; -use libp2p_swarm::{keep_alive, Swarm, SwarmEvent}; +use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use std::iter; @@ -9,7 +9,7 @@ async fn periodic_identify() { let _ = env_logger::try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { - Behaviour::new( + identify::Behaviour::new( identify::Config::new("a".to_string(), identity.public()) 
.with_agent_version("b".to_string()), ) @@ -17,7 +17,7 @@ async fn periodic_identify() { let swarm1_peer_id = *swarm1.local_peer_id(); let mut swarm2 = Swarm::new_ephemeral(|identity| { - Behaviour::new( + identify::Behaviour::new( identify::Config::new("c".to_string(), identity.public()) .with_agent_version("d".to_string()), ) @@ -33,20 +33,20 @@ async fn periodic_identify() { match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await { ( - [BehaviourEvent::Identify(Received { info: s1_info, .. }), BehaviourEvent::Identify(Sent { .. })], - [BehaviourEvent::Identify(Received { info: s2_info, .. }), BehaviourEvent::Identify(Sent { .. })], + [Received { info: s1_info, .. }, Sent { .. }], + [Received { info: s2_info, .. }, Sent { .. }], ) | ( - [BehaviourEvent::Identify(Sent { .. }), BehaviourEvent::Identify(Received { info: s1_info, .. })], - [BehaviourEvent::Identify(Sent { .. }), BehaviourEvent::Identify(Received { info: s2_info, .. })], + [Sent { .. }, Received { info: s1_info, .. }], + [Sent { .. }, Received { info: s2_info, .. }], ) | ( - [BehaviourEvent::Identify(Received { info: s1_info, .. }), BehaviourEvent::Identify(Sent { .. })], - [BehaviourEvent::Identify(Sent { .. }), BehaviourEvent::Identify(Received { info: s2_info, .. })], + [Received { info: s1_info, .. }, Sent { .. }], + [Sent { .. }, Received { info: s2_info, .. }], ) | ( - [BehaviourEvent::Identify(Sent { .. }), BehaviourEvent::Identify(Received { info: s1_info, .. })], - [BehaviourEvent::Identify(Received { info: s2_info, .. }), BehaviourEvent::Identify(Sent { .. })], + [Sent { .. }, Received { info: s1_info, .. }], + [Received { info: s2_info, .. }, Sent { .. 
}], ) => { assert_eq!(s1_info.public_key.to_peer_id(), swarm2_peer_id); assert_eq!(s1_info.protocol_version, "c"); @@ -83,10 +83,10 @@ async fn identify_push() { let _ = env_logger::try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { - Behaviour::new(identify::Config::new("a".to_string(), identity.public())) + identify::Behaviour::new(identify::Config::new("a".to_string(), identity.public())) }); let mut swarm2 = Swarm::new_ephemeral(|identity| { - Behaviour::new( + identify::Behaviour::new( identify::Config::new("a".to_string(), identity.public()) .with_agent_version("b".to_string()), ) @@ -96,33 +96,25 @@ async fn identify_push() { swarm2.connect(&mut swarm1).await; // First, let the periodic identify do its thing. - match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await { - ( - [BehaviourEvent::Identify(e1), BehaviourEvent::Identify(e2)], - [BehaviourEvent::Identify(e3), BehaviourEvent::Identify(e4)], - ) => { - use identify::Event::{Received, Sent}; + let ([e1, e2], [e3, e4]) = libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await; - // These can be received in any order, hence assert them here instead of the pattern above. - assert!(matches!(e1, Received { .. } | Sent { .. })); - assert!(matches!(e2, Received { .. } | Sent { .. })); - assert!(matches!(e3, Received { .. } | Sent { .. })); - assert!(matches!(e4, Received { .. } | Sent { .. })); - } - other => panic!("Unexpected events: {other:?}"), - }; + { + use identify::Event::{Received, Sent}; + + // These can be received in any order, hence assert them here. + assert!(matches!(e1, Received { .. } | Sent { .. })); + assert!(matches!(e2, Received { .. } | Sent { .. })); + assert!(matches!(e3, Received { .. } | Sent { .. })); + assert!(matches!(e4, Received { .. } | Sent { .. })); + } // Second, actively push. 
swarm2 .behaviour_mut() - .identify .push(iter::once(*swarm1.local_peer_id())); let swarm1_received_info = match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await { - ( - [BehaviourEvent::Identify(identify::Event::Received { info, .. })], - [BehaviourEvent::Identify(identify::Event::Pushed { .. })], - ) => info, + ([identify::Event::Received { info, .. }], [identify::Event::Pushed { .. }]) => info, other => panic!("Unexpected events: {other:?}"), }; @@ -141,10 +133,10 @@ async fn discover_peer_after_disconnect() { let _ = env_logger::try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { - Behaviour::new(identify::Config::new("a".to_string(), identity.public())) + identify::Behaviour::new(identify::Config::new("a".to_string(), identity.public())) }); let mut swarm2 = Swarm::new_ephemeral(|identity| { - Behaviour::new( + identify::Behaviour::new( identify::Config::new("a".to_string(), identity.public()) .with_agent_version("b".to_string()), ) @@ -161,7 +153,7 @@ async fn discover_peer_after_disconnect() { .wait(|event| { matches!( event, - SwarmEvent::Behaviour(BehaviourEvent::Identify(identify::Event::Received { .. })) + SwarmEvent::Behaviour(identify::Event::Received { .. }) ) .then_some(()) }) @@ -186,23 +178,3 @@ async fn discover_peer_after_disconnect() { assert_eq!(connected_peer, swarm1_peer_id); } - -/// Combined behaviour to keep the connection alive after the periodic identify. -/// -/// The identify implementation sets `keep_alive` to `No` once it has done its thing. -/// This can result in unexpected connection closures if one peer is faster than the other. 
-#[derive(libp2p_swarm::NetworkBehaviour)] -#[behaviour(prelude = "libp2p_swarm::derive_prelude")] -struct Behaviour { - identify: identify::Behaviour, - keep_alive: keep_alive::Behaviour, -} - -impl Behaviour { - fn new(config: identify::Config) -> Self { - Self { - identify: identify::Behaviour::new(config), - keep_alive: keep_alive::Behaviour, - } - } -} diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index a9cde76d..7e90c783 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -1,3 +1,15 @@ +## 0.44.6 - unreleased +- Rename `Kademlia` symbols to follow naming convention. + See [PR 4547]. + +[PR 4547]: https://github.com/libp2p/rust-libp2p/pull/4547 + +## 0.44.5 +- Migrate to `quick-protobuf-codec` crate for codec logic. + See [PR 4501]. + +[PR 4501]: https://github.com/libp2p/rust-libp2p/pull/4501 + ## 0.44.4 - Implement common traits on `RoutingUpdate`. diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index fa937033..5b2a7408 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-kad" edition = "2021" rust-version = { workspace = true } description = "Kademlia protocol for libp2p" -version = "0.44.4" +version = "0.44.6" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -21,10 +21,11 @@ log = "0.4" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } quick-protobuf = "0.8" +quick-protobuf-codec = { workspace = true } libp2p-identity = { workspace = true } rand = "0.8" sha2 = "0.10.7" -smallvec = "1.11.0" +smallvec = "1.11.1" uint = "0.9" unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } void = "1.0" diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 340fefb2..262962cb 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -23,10 +23,10 @@ mod test; use crate::addresses::Addresses; -use 
crate::handler::{KademliaHandler, KademliaHandlerEvent, KademliaHandlerIn, KademliaRequestId}; +use crate::handler::{Handler, HandlerEvent, HandlerIn, RequestId}; use crate::jobs::*; use crate::kbucket::{self, Distance, KBucketsTable, NodeStatus}; -use crate::protocol::{KadConnectionType, KadPeer, KademliaProtocolConfig}; +use crate::protocol::{ConnectionType, KadPeer, ProtocolConfig}; use crate::query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState}; use crate::record_priv::{ self, @@ -59,20 +59,20 @@ use thiserror::Error; pub use crate::query::QueryStats; -/// `Kademlia` is a `NetworkBehaviour` that implements the libp2p +/// `Behaviour` is a `NetworkBehaviour` that implements the libp2p /// Kademlia protocol. -pub struct Kademlia { +pub struct Behaviour { /// The Kademlia routing table. kbuckets: KBucketsTable, Addresses>, /// The k-bucket insertion strategy. - kbucket_inserts: KademliaBucketInserts, + kbucket_inserts: BucketInserts, /// Configuration of the wire protocol. - protocol_config: KademliaProtocolConfig, + protocol_config: ProtocolConfig, /// Configuration of [`RecordStore`] filtering. - record_filtering: KademliaStoreInserts, + record_filtering: StoreInserts, /// The currently active (i.e. in-progress) queries. queries: QueryPool, @@ -100,7 +100,7 @@ pub struct Kademlia { connection_idle_timeout: Duration, /// Queued events to return when the behaviour is being polled. - queued_events: VecDeque>, + queued_events: VecDeque>, listen_addresses: ListenAddresses, @@ -108,8 +108,8 @@ pub struct Kademlia { connections: HashMap, - /// See [`KademliaConfig::caching`]. - caching: KademliaCaching, + /// See [`Config::caching`]. + caching: Caching, local_peer_id: PeerId, @@ -125,7 +125,7 @@ pub struct Kademlia { /// and their addresses into the k-buckets of the Kademlia /// routing table. 
#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum KademliaBucketInserts { +pub enum BucketInserts { /// Whenever a connection to a peer is established as a /// result of a dialing attempt and that peer is not yet /// in the routing table, it is inserted as long as there @@ -135,10 +135,10 @@ pub enum KademliaBucketInserts { /// disconnected peer is evicted from the bucket. OnConnected, /// New peers and addresses are only added to the routing table via - /// explicit calls to [`Kademlia::add_address`]. + /// explicit calls to [`Behaviour::add_address`]. /// /// > **Note**: Even though peers can only get into the - /// > routing table as a result of [`Kademlia::add_address`], + /// > routing table as a result of [`Behaviour::add_address`], /// > routing table entries are still updated as peers /// > connect and disconnect (i.e. the order of the entries /// > as well as the network addresses). @@ -153,63 +153,63 @@ pub enum KademliaBucketInserts { /// /// [`Key`]: crate::record_priv::Key #[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum KademliaStoreInserts { +pub enum StoreInserts { /// Whenever a (provider) record is received, /// the record is forwarded immediately to the [`RecordStore`]. Unfiltered, /// Whenever a (provider) record is received, an event is emitted. - /// Provider records generate a [`InboundRequest::AddProvider`] under [`KademliaEvent::InboundRequest`], - /// normal records generate a [`InboundRequest::PutRecord`] under [`KademliaEvent::InboundRequest`]. + /// Provider records generate a [`InboundRequest::AddProvider`] under [`Event::InboundRequest`], + /// normal records generate a [`InboundRequest::PutRecord`] under [`Event::InboundRequest`]. /// /// When deemed valid, a (provider) record needs to be explicitly stored in /// the [`RecordStore`] via [`RecordStore::put`] or [`RecordStore::add_provider`], /// whichever is applicable. A mutable reference to the [`RecordStore`] can - /// be retrieved via [`Kademlia::store_mut`]. 
+ /// be retrieved via [`Behaviour::store_mut`]. FilterBoth, } /// The configuration for the `Kademlia` behaviour. /// -/// The configuration is consumed by [`Kademlia::new`]. +/// The configuration is consumed by [`Behaviour::new`]. #[derive(Debug, Clone)] -pub struct KademliaConfig { +pub struct Config { kbucket_pending_timeout: Duration, query_config: QueryConfig, - protocol_config: KademliaProtocolConfig, + protocol_config: ProtocolConfig, record_ttl: Option, record_replication_interval: Option, record_publication_interval: Option, - record_filtering: KademliaStoreInserts, + record_filtering: StoreInserts, provider_record_ttl: Option, provider_publication_interval: Option, connection_idle_timeout: Duration, - kbucket_inserts: KademliaBucketInserts, - caching: KademliaCaching, + kbucket_inserts: BucketInserts, + caching: Caching, } -impl Default for KademliaConfig { +impl Default for Config { fn default() -> Self { - KademliaConfig { + Config { kbucket_pending_timeout: Duration::from_secs(60), query_config: QueryConfig::default(), protocol_config: Default::default(), record_ttl: Some(Duration::from_secs(36 * 60 * 60)), record_replication_interval: Some(Duration::from_secs(60 * 60)), record_publication_interval: Some(Duration::from_secs(24 * 60 * 60)), - record_filtering: KademliaStoreInserts::Unfiltered, + record_filtering: StoreInserts::Unfiltered, provider_publication_interval: Some(Duration::from_secs(12 * 60 * 60)), provider_record_ttl: Some(Duration::from_secs(24 * 60 * 60)), connection_idle_timeout: Duration::from_secs(10), - kbucket_inserts: KademliaBucketInserts::OnConnected, - caching: KademliaCaching::Enabled { max_peers: 1 }, + kbucket_inserts: BucketInserts::OnConnected, + caching: Caching::Enabled { max_peers: 1 }, } } } /// The configuration for Kademlia "write-back" caching after successful -/// lookups via [`Kademlia::get_record`]. +/// lookups via [`Behaviour::get_record`]. 
#[derive(Debug, Clone)] -pub enum KademliaCaching { +pub enum Caching { /// Caching is disabled and the peers closest to records being looked up /// that do not return a record are not tracked, i.e. /// [`GetRecordOk::FinishedWithNoAdditionalRecord`] is always empty. @@ -217,11 +217,11 @@ pub enum KademliaCaching { /// Up to `max_peers` peers not returning a record that are closest to the key /// being looked up are tracked and returned in [`GetRecordOk::FinishedWithNoAdditionalRecord`]. /// The write-back operation must be performed explicitly, if - /// desired and after choosing a record from the results, via [`Kademlia::put_record_to`]. + /// desired and after choosing a record from the results, via [`Behaviour::put_record_to`]. Enabled { max_peers: u16 }, } -impl KademliaConfig { +impl Config { /// Sets custom protocol names. /// /// Kademlia nodes only communicate with other nodes using the same protocol @@ -266,7 +266,7 @@ impl KademliaConfig { /// This only controls the level of parallelism of an iterative query, not /// the level of parallelism of a query to a fixed set of peers. /// - /// When used with [`KademliaConfig::disjoint_query_paths`] it equals + /// When used with [`Config::disjoint_query_paths`] it equals /// the amount of disjoint paths used. pub fn set_parallelism(&mut self, parallelism: NonZeroUsize) -> &mut Self { self.query_config.parallelism = parallelism; @@ -302,9 +302,9 @@ impl KademliaConfig { /// Sets whether or not records should be filtered before being stored. /// - /// See [`KademliaStoreInserts`] for the different values. - /// Defaults to [`KademliaStoreInserts::Unfiltered`]. - pub fn set_record_filtering(&mut self, filtering: KademliaStoreInserts) -> &mut Self { + /// See [`StoreInserts`] for the different values. + /// Defaults to [`StoreInserts::Unfiltered`]. 
+ pub fn set_record_filtering(&mut self, filtering: StoreInserts) -> &mut Self { self.record_filtering = filtering; self } @@ -387,24 +387,24 @@ impl KademliaConfig { } /// Sets the k-bucket insertion strategy for the Kademlia routing table. - pub fn set_kbucket_inserts(&mut self, inserts: KademliaBucketInserts) -> &mut Self { + pub fn set_kbucket_inserts(&mut self, inserts: BucketInserts) -> &mut Self { self.kbucket_inserts = inserts; self } - /// Sets the [`KademliaCaching`] strategy to use for successful lookups. + /// Sets the [`Caching`] strategy to use for successful lookups. /// - /// The default is [`KademliaCaching::Enabled`] with a `max_peers` of 1. + /// The default is [`Caching::Enabled`] with a `max_peers` of 1. /// Hence, with default settings and a lookup quorum of 1, a successful lookup /// will result in the record being cached at the closest node to the key that /// did not return the record, i.e. the standard Kademlia behaviour. - pub fn set_caching(&mut self, c: KademliaCaching) -> &mut Self { + pub fn set_caching(&mut self, c: Caching) -> &mut Self { self.caching = c; self } } -impl Kademlia +impl Behaviour where TStore: RecordStore + Send + 'static, { @@ -419,7 +419,7 @@ where } /// Creates a new `Kademlia` network behaviour with the given configuration. - pub fn with_config(id: PeerId, store: TStore, config: KademliaConfig) -> Self { + pub fn with_config(id: PeerId, store: TStore, config: Config) -> Self { let local_key = kbucket::Key::from(id); let put_record_job = config @@ -438,7 +438,7 @@ where .provider_publication_interval .map(AddProviderJob::new); - Kademlia { + Behaviour { store, caching: config.caching, kbuckets: KBucketsTable::new(local_key, config.kbucket_pending_timeout), @@ -523,14 +523,14 @@ where /// in the DHT. /// /// If the routing table has been updated as a result of this operation, - /// a [`KademliaEvent::RoutingUpdated`] event is emitted. + /// a [`Event::RoutingUpdated`] event is emitted. 
pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) -> RoutingUpdate { let key = kbucket::Key::from(*peer); match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, _) => { if entry.value().insert(address) { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::RoutingUpdated { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::RoutingUpdated { peer: *peer, is_new_peer: false, addresses: entry.value().clone(), @@ -540,8 +540,7 @@ where .bucket(&key) .map(|b| b.range()) .expect("Not kbucket::Entry::SelfEntry."), - }, - )) + })) } RoutingUpdate::Success } @@ -559,7 +558,7 @@ where match entry.insert(addresses.clone(), status) { kbucket::InsertResult::Inserted => { self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::RoutingUpdated { + Event::RoutingUpdated { peer: *peer, is_new_peer: true, addresses, @@ -665,7 +664,7 @@ where /// Initiates an iterative query for the closest peers to the given key. /// /// The result of the query is delivered in a - /// [`KademliaEvent::OutboundQueryProgressed{QueryResult::GetClosestPeers}`]. + /// [`Event::OutboundQueryProgressed{QueryResult::GetClosestPeers}`]. pub fn get_closest_peers(&mut self, key: K) -> QueryId where K: Into> + Into> + Clone, @@ -692,7 +691,7 @@ where /// Performs a lookup for a record in the DHT. /// /// The result of this operation is delivered in a - /// [`KademliaEvent::OutboundQueryProgressed{QueryResult::GetRecord}`]. + /// [`Event::OutboundQueryProgressed{QueryResult::GetRecord}`]. 
pub fn get_record(&mut self, key: record_priv::Key) -> QueryId { let record = if let Some(record) = self.store.get(&key) { if record.is_expired(Instant::now()) { @@ -734,14 +733,13 @@ where let stats = QueryStats::empty(); if let Some(record) = record { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::OutboundQueryProgressed { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::OutboundQueryProgressed { id, result: QueryResult::GetRecord(Ok(GetRecordOk::FoundRecord(record))), step, stats, - }, - )); + })); } id @@ -753,12 +751,12 @@ where /// Returns `Ok` if a record has been stored locally, providing the /// `QueryId` of the initial query that replicates the record in the DHT. /// The result of the query is eventually reported as a - /// [`KademliaEvent::OutboundQueryProgressed{QueryResult::PutRecord}`]. + /// [`Event::OutboundQueryProgressed{QueryResult::PutRecord}`]. /// /// The record is always stored locally with the given expiration. If the record's /// expiration is `None`, the common case, it does not expire in local storage /// but is still replicated with the configured record TTL. To remove the record - /// locally and stop it from being re-published in the DHT, see [`Kademlia::remove_record`]. + /// locally and stop it from being re-published in the DHT, see [`Behaviour::remove_record`]. /// /// After the initial publication of the record, it is subject to (re-)replication /// and (re-)publication as per the configured intervals. Periodic (re-)publication @@ -869,13 +867,13 @@ where /// /// Returns `Ok` if bootstrapping has been initiated with a self-lookup, providing the /// `QueryId` for the entire bootstrapping process. The progress of bootstrapping is - /// reported via [`KademliaEvent::OutboundQueryProgressed{QueryResult::Bootstrap}`] events, + /// reported via [`Event::OutboundQueryProgressed{QueryResult::Bootstrap}`] events, /// with one such event per bootstrapping query. 
/// /// Returns `Err` if bootstrapping is impossible due an empty routing table. /// /// > **Note**: Bootstrapping requires at least one node of the DHT to be known. - /// > See [`Kademlia::add_address`]. + /// > See [`Behaviour::add_address`]. pub fn bootstrap(&mut self) -> Result { let local_key = self.kbuckets.local_key().clone(); let info = QueryInfo::Bootstrap { @@ -904,16 +902,16 @@ where /// The publication of the provider records is periodically repeated as per the /// configured interval, to renew the expiry and account for changes to the DHT /// topology. A provider record may be removed from local storage and - /// thus no longer re-published by calling [`Kademlia::stop_providing`]. + /// thus no longer re-published by calling [`Behaviour::stop_providing`]. /// /// In contrast to the standard Kademlia push-based model for content distribution - /// implemented by [`Kademlia::put_record`], the provider API implements a + /// implemented by [`Behaviour::put_record`], the provider API implements a /// pull-based model that may be used in addition or as an alternative. /// The means by which the actual value is obtained from a provider is out of scope /// of the libp2p Kademlia provider API. /// /// The results of the (repeated) provider announcements sent by this node are - /// reported via [`KademliaEvent::OutboundQueryProgressed{QueryResult::StartProviding}`]. + /// reported via [`Event::OutboundQueryProgressed{QueryResult::StartProviding}`]. pub fn start_providing(&mut self, key: record_priv::Key) -> Result { // Note: We store our own provider records locally without local addresses // to avoid redundant storage and outdated addresses. Instead these are @@ -950,7 +948,7 @@ where /// Performs a lookup for providers of a value to the given key. /// /// The result of this operation is delivered in a - /// reported via [`KademliaEvent::OutboundQueryProgressed{QueryResult::GetProviders}`]. 
+ /// reported via [`Event::OutboundQueryProgressed{QueryResult::GetProviders}`]. pub fn get_providers(&mut self, key: record_priv::Key) -> QueryId { let providers: HashSet<_> = self .store @@ -981,8 +979,8 @@ where let stats = QueryStats::empty(); if !providers.is_empty() { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::OutboundQueryProgressed { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::OutboundQueryProgressed { id, result: QueryResult::GetProviders(Ok(GetProvidersOk::FoundProviders { key, @@ -990,8 +988,7 @@ where })), step, stats, - }, - )); + })); } id } @@ -1040,7 +1037,7 @@ where .map(|(conn_id, peer_id)| ToSwarm::NotifyHandler { peer_id: *peer_id, handler: NotifyHandler::One(*conn_id), - event: KademliaHandlerIn::ReconfigureMode { + event: HandlerIn::ReconfigureMode { new_mode: self.mode, }, }), @@ -1142,9 +1139,9 @@ where let node_id = p.provider; let multiaddrs = p.addresses; let connection_ty = if connected.contains(&node_id) { - KadConnectionType::Connected + ConnectionType::Connected } else { - KadConnectionType::NotConnected + ConnectionType::NotConnected }; if multiaddrs.is_empty() { // The provider is either the local node and we fill in @@ -1228,7 +1225,7 @@ where if let Some(address) = address { if entry.value().insert(address) { self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::RoutingUpdated { + Event::RoutingUpdated { peer, is_new_peer: false, addresses: entry.value().clone(), @@ -1260,20 +1257,21 @@ where } match (address, self.kbucket_inserts) { (None, _) => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::UnroutablePeer { peer }, - )); + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::UnroutablePeer { peer })); } - (Some(a), KademliaBucketInserts::Manual) => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::RoutablePeer { peer, address: a }, - )); + (Some(a), BucketInserts::Manual) => { + self.queued_events + 
.push_back(ToSwarm::GenerateEvent(Event::RoutablePeer { + peer, + address: a, + })); } - (Some(a), KademliaBucketInserts::OnConnected) => { + (Some(a), BucketInserts::OnConnected) => { let addresses = Addresses::new(a); match entry.insert(addresses.clone(), new_status) { kbucket::InsertResult::Inserted => { - let event = KademliaEvent::RoutingUpdated { + let event = Event::RoutingUpdated { peer, is_new_peer: true, addresses, @@ -1290,19 +1288,19 @@ where debug!("Bucket full. Peer not added to routing table: {}", peer); let address = addresses.first().clone(); self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::RoutablePeer { peer, address }, + Event::RoutablePeer { peer, address }, )); } kbucket::InsertResult::Pending { disconnected } => { let address = addresses.first().clone(); self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::PendingRoutablePeer { peer, address }, + Event::PendingRoutablePeer { peer, address }, )); // `disconnected` might already be in the process of re-connecting. // In other words `disconnected` might have already re-connected but // is not yet confirmed to support the Kademlia protocol via - // [`KademliaHandlerEvent::ProtocolConfirmed`]. + // [`HandlerEvent::ProtocolConfirmed`]. // // Only try dialing peer if not currently connected. if !self.connected_peers.contains(disconnected.preimage()) { @@ -1322,7 +1320,7 @@ where } /// Handles a finished (i.e. successful) query. 
- fn query_finished(&mut self, q: Query) -> Option { + fn query_finished(&mut self, q: Query) -> Option { let query_id = q.id(); log::trace!("Query {:?} finished.", query_id); let result = q.into_result(); @@ -1387,7 +1385,7 @@ where step.last = true; }; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::Bootstrap(Ok(BootstrapOk { @@ -1401,7 +1399,7 @@ where QueryInfo::GetClosestPeers { key, mut step } => { step.last = true; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::GetClosestPeers(Ok(GetClosestPeersOk { @@ -1415,7 +1413,7 @@ where QueryInfo::GetProviders { mut step, .. } => { step.last = true; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::GetProviders(Ok( @@ -1456,13 +1454,13 @@ where .. }, } => match context { - AddProviderContext::Publish => Some(KademliaEvent::OutboundQueryProgressed { + AddProviderContext::Publish => Some(Event::OutboundQueryProgressed { id: query_id, stats: get_closest_peers_stats.merge(result.stats), result: QueryResult::StartProviding(Ok(AddProviderOk { key })), step: ProgressStep::first_and_last(), }), - AddProviderContext::Republish => Some(KademliaEvent::OutboundQueryProgressed { + AddProviderContext::Republish => Some(Event::OutboundQueryProgressed { id: query_id, stats: get_closest_peers_stats.merge(result.stats), result: QueryResult::RepublishProvider(Ok(AddProviderOk { key })), @@ -1486,7 +1484,7 @@ where closest_peers: result.peers.collect(), }) }; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::GetRecord(results), @@ -1537,14 +1535,14 @@ where }; match context { PutRecordContext::Publish | PutRecordContext::Custom => { - 
Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: get_closest_peers_stats.merge(result.stats), result: QueryResult::PutRecord(mk_result(record.key)), step: ProgressStep::first_and_last(), }) } - PutRecordContext::Republish => Some(KademliaEvent::OutboundQueryProgressed { + PutRecordContext::Republish => Some(Event::OutboundQueryProgressed { id: query_id, stats: get_closest_peers_stats.merge(result.stats), result: QueryResult::RepublishRecord(mk_result(record.key)), @@ -1560,7 +1558,7 @@ where } /// Handles a query that timed out. - fn query_timeout(&mut self, query: Query) -> Option { + fn query_timeout(&mut self, query: Query) -> Option { let query_id = query.id(); log::trace!("Query {:?} timed out.", query_id); let result = query.into_result(); @@ -1589,7 +1587,7 @@ where step.last = true; } - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::Bootstrap(Err(BootstrapError::Timeout { @@ -1601,13 +1599,13 @@ where } QueryInfo::AddProvider { context, key, .. 
} => Some(match context { - AddProviderContext::Publish => KademliaEvent::OutboundQueryProgressed { + AddProviderContext::Publish => Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::StartProviding(Err(AddProviderError::Timeout { key })), step: ProgressStep::first_and_last(), }, - AddProviderContext::Republish => KademliaEvent::OutboundQueryProgressed { + AddProviderContext::Republish => Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::RepublishProvider(Err(AddProviderError::Timeout { key })), @@ -1618,7 +1616,7 @@ where QueryInfo::GetClosestPeers { key, mut step } => { step.last = true; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::GetClosestPeers(Err(GetClosestPeersError::Timeout { @@ -1645,14 +1643,14 @@ where }); match context { PutRecordContext::Publish | PutRecordContext::Custom => { - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::PutRecord(err), step: ProgressStep::first_and_last(), }) } - PutRecordContext::Republish => Some(KademliaEvent::OutboundQueryProgressed { + PutRecordContext::Republish => Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::RepublishRecord(err), @@ -1674,7 +1672,7 @@ where QueryInfo::GetRecord { key, mut step, .. } => { step.last = true; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::GetRecord(Err(GetRecordError::Timeout { key })), @@ -1685,7 +1683,7 @@ where QueryInfo::GetProviders { key, mut step, .. 
} => { step.last = true; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::GetProviders(Err(GetProvidersError::Timeout { @@ -1703,7 +1701,7 @@ where &mut self, source: PeerId, connection: ConnectionId, - request_id: KademliaRequestId, + request_id: RequestId, mut record: Record, ) { if record.publisher.as_ref() == Some(self.kbuckets.local_key().preimage()) { @@ -1713,7 +1711,7 @@ where self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), - event: KademliaHandlerIn::PutRecordRes { + event: HandlerIn::PutRecordRes { key: record.key, value: record.value, request_id, @@ -1762,7 +1760,7 @@ where // requirement to send back the value in the response, although this // is a waste of resources. match self.record_filtering { - KademliaStoreInserts::Unfiltered => match self.store.put(record.clone()) { + StoreInserts::Unfiltered => match self.store.put(record.clone()) { Ok(()) => { debug!( "Record stored: {:?}; {} bytes", @@ -1770,7 +1768,7 @@ where record.value.len() ); self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + Event::InboundRequest { request: InboundRequest::PutRecord { source, connection, @@ -1784,37 +1782,36 @@ where self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), - event: KademliaHandlerIn::Reset(request_id), + event: HandlerIn::Reset(request_id), }); return; } }, - KademliaStoreInserts::FilterBoth => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + StoreInserts::FilterBoth => { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { request: InboundRequest::PutRecord { source, connection, record: Some(record.clone()), }, - }, - )); + })); } } } - // The remote receives a [`KademliaHandlerIn::PutRecordRes`] even in the + // The remote receives a 
[`HandlerIn::PutRecordRes`] even in the // case where the record is discarded due to being expired. Given that - // the remote sent the local node a [`KademliaHandlerEvent::PutRecord`] + // the remote sent the local node a [`HandlerEvent::PutRecord`] // request, the remote perceives the local node as one node among the k // closest nodes to the target. In addition returning - // [`KademliaHandlerIn::PutRecordRes`] does not reveal any internal + // [`HandlerIn::PutRecordRes`] does not reveal any internal // information to a possibly malicious remote node. self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), - event: KademliaHandlerIn::PutRecordRes { + event: HandlerIn::PutRecordRes { key: record.key, value: record.value, request_id, @@ -1832,26 +1829,24 @@ where addresses: provider.multiaddrs, }; match self.record_filtering { - KademliaStoreInserts::Unfiltered => { + StoreInserts::Unfiltered => { if let Err(e) = self.store.add_provider(record) { info!("Provider record not stored: {:?}", e); return; } - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { request: InboundRequest::AddProvider { record: None }, - }, - )); + })); } - KademliaStoreInserts::FilterBoth => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + StoreInserts::FilterBoth => { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { request: InboundRequest::AddProvider { record: Some(record), }, - }, - )); + })); } } } @@ -1880,7 +1875,7 @@ where // and is unreachable in the context of another peer pending insertion // into the same bucket. This is handled transparently by the // `KBucketsTable` and takes effect through `KBucketsTable::take_applied_pending` - // within `Kademlia::poll`. + // within `Behaviour::poll`. 
debug!( "Last remaining address '{}' of peer '{}' is unreachable.", address, peer_id, @@ -1910,7 +1905,7 @@ where // When a connection is established, we don't know yet whether the // remote supports the configured protocol name. Only once a connection - // handler reports [`KademliaHandlerEvent::ProtocolConfirmed`] do we + // handler reports [`HandlerEvent::ProtocolConfirmed`] do we // update the local routing table. // Peer's first connection. @@ -2055,12 +2050,12 @@ fn exp_decrease(ttl: Duration, exp: u32) -> Duration { Duration::from_secs(ttl.as_secs().checked_shr(exp).unwrap_or(0)) } -impl NetworkBehaviour for Kademlia +impl NetworkBehaviour for Behaviour where TStore: RecordStore + Send + 'static, { - type ConnectionHandler = KademliaHandler; - type ToSwarm = KademliaEvent; + type ConnectionHandler = Handler; + type ToSwarm = Event; fn handle_established_inbound_connection( &mut self, @@ -2075,7 +2070,7 @@ where }; self.connections.insert(connection_id, peer); - Ok(KademliaHandler::new( + Ok(Handler::new( self.protocol_config.clone(), self.connection_idle_timeout, connected_point, @@ -2098,7 +2093,7 @@ where }; self.connections.insert(connection_id, peer); - Ok(KademliaHandler::new( + Ok(Handler::new( self.protocol_config.clone(), self.connection_idle_timeout, connected_point, @@ -2149,7 +2144,7 @@ where event: THandlerOutEvent, ) { match event { - KademliaHandlerEvent::ProtocolConfirmed { endpoint } => { + HandlerEvent::ProtocolConfirmed { endpoint } => { debug_assert!(self.connected_peers.contains(&source)); // The remote's address can only be put into the routing table, // and thus shared with other nodes, if the local node is the dialer, @@ -2163,7 +2158,7 @@ where self.connection_updated(source, address, NodeStatus::Connected); } - KademliaHandlerEvent::ProtocolNotSupported { endpoint } => { + HandlerEvent::ProtocolNotSupported { endpoint } => { let address = match endpoint { ConnectedPoint::Dialer { address, .. 
} => Some(address), ConnectedPoint::Listener { .. } => None, @@ -2171,51 +2166,49 @@ where self.connection_updated(source, address, NodeStatus::Disconnected); } - KademliaHandlerEvent::FindNodeReq { key, request_id } => { + HandlerEvent::FindNodeReq { key, request_id } => { let closer_peers = self.find_closest(&kbucket::Key::new(key), &source); - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { request: InboundRequest::FindNode { num_closer_peers: closer_peers.len(), }, - }, - )); + })); self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), - event: KademliaHandlerIn::FindNodeRes { + event: HandlerIn::FindNodeRes { closer_peers, request_id, }, }); } - KademliaHandlerEvent::FindNodeRes { + HandlerEvent::FindNodeRes { closer_peers, query_id, } => { self.discovered(&query_id, &source, closer_peers.iter()); } - KademliaHandlerEvent::GetProvidersReq { key, request_id } => { + HandlerEvent::GetProvidersReq { key, request_id } => { let provider_peers = self.provider_peers(&key, &source); let closer_peers = self.find_closest(&kbucket::Key::new(key), &source); - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { request: InboundRequest::GetProvider { num_closer_peers: closer_peers.len(), num_provider_peers: provider_peers.len(), }, - }, - )); + })); self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), - event: KademliaHandlerIn::GetProvidersRes { + event: HandlerIn::GetProvidersRes { closer_peers, provider_peers, request_id, @@ -2223,7 +2216,7 @@ where }); } - KademliaHandlerEvent::GetProvidersRes { + HandlerEvent::GetProvidersRes { closer_peers, provider_peers, query_id, @@ -2243,7 +2236,7 @@ where let providers = 
provider_peers.iter().map(|p| p.node_id).collect(); self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id: query_id, result: QueryResult::GetProviders(Ok( GetProvidersOk::FoundProviders { @@ -2260,7 +2253,7 @@ where } } - KademliaHandlerEvent::QueryError { query_id, error } => { + HandlerEvent::QueryError { query_id, error } => { log::debug!( "Request to {:?} in query {:?} failed with {:?}", source, @@ -2274,7 +2267,7 @@ where } } - KademliaHandlerEvent::AddProvider { key, provider } => { + HandlerEvent::AddProvider { key, provider } => { // Only accept a provider record from a legitimate peer. if provider.node_id != source { return; @@ -2283,7 +2276,7 @@ where self.provider_received(key, provider); } - KademliaHandlerEvent::GetRecord { key, request_id } => { + HandlerEvent::GetRecord { key, request_id } => { // Lookup the record locally. let record = match self.store.get(&key) { Some(record) => { @@ -2299,19 +2292,18 @@ where let closer_peers = self.find_closest(&kbucket::Key::new(key), &source); - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { request: InboundRequest::GetRecord { num_closer_peers: closer_peers.len(), present_locally: record.is_some(), }, - }, - )); + })); self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), - event: KademliaHandlerIn::GetRecordRes { + event: HandlerIn::GetRecordRes { record, closer_peers, request_id, @@ -2319,7 +2311,7 @@ where }); } - KademliaHandlerEvent::GetRecordRes { + HandlerEvent::GetRecordRes { record, closer_peers, query_id, @@ -2341,7 +2333,7 @@ where }; self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id: query_id, result: QueryResult::GetRecord(Ok(GetRecordOk::FoundRecord( record, 
@@ -2354,7 +2346,7 @@ where *step = step.next(); } else { log::trace!("Record with key {:?} not found at {}", key, source); - if let KademliaCaching::Enabled { max_peers } = self.caching { + if let Caching::Enabled { max_peers } = self.caching { let source_key = kbucket::Key::from(source); let target_key = kbucket::Key::from(key.clone()); let distance = source_key.distance(&target_key); @@ -2374,11 +2366,11 @@ where self.discovered(&query_id, &source, closer_peers.iter()); } - KademliaHandlerEvent::PutRecord { record, request_id } => { + HandlerEvent::PutRecord { record, request_id } => { self.record_received(source, connection, request_id, record); } - KademliaHandlerEvent::PutRecordRes { query_id, .. } => { + HandlerEvent::PutRecordRes { query_id, .. } => { if let Some(query) = self.queries.get_mut(&query_id) { query.on_success(&source, vec![]); if let QueryInfo::PutRecord { @@ -2462,7 +2454,7 @@ where // Drain applied pending entries from the routing table. if let Some(entry) = self.kbuckets.take_applied_pending() { let kbucket::Node { key, value } = entry.inserted; - let event = KademliaEvent::RoutingUpdated { + let event = Event::RoutingUpdated { bucket_range: self .kbuckets .bucket(&key) @@ -2605,7 +2597,7 @@ pub struct PeerRecord { /// See [`NetworkBehaviour::poll`]. #[derive(Debug, Clone)] #[allow(clippy::large_enum_variant)] -pub enum KademliaEvent { +pub enum Event { /// An inbound request has been received and handled. // // Note on the difference between 'request' and 'query': A request is a @@ -2646,19 +2638,19 @@ pub enum KademliaEvent { /// A peer has connected for whom no listen address is known. /// /// If the peer is to be added to the routing table, a known - /// listen address for the peer must be provided via [`Kademlia::add_address`]. + /// listen address for the peer must be provided via [`Behaviour::add_address`]. 
UnroutablePeer { peer: PeerId }, /// A connection to a peer has been established for whom a listen address /// is known but the peer has not been added to the routing table either - /// because [`KademliaBucketInserts::Manual`] is configured or because + /// because [`BucketInserts::Manual`] is configured or because /// the corresponding bucket is full. /// /// If the peer is to be included in the routing table, it must - /// must be explicitly added via [`Kademlia::add_address`], possibly after + /// must be explicitly added via [`Behaviour::add_address`], possibly after /// removing another peer. /// - /// See [`Kademlia::kbucket`] for insight into the contents of + /// See [`Behaviour::kbucket`] for insight into the contents of /// the k-bucket of `peer`. RoutablePeer { peer: PeerId, address: Multiaddr }, @@ -2668,10 +2660,10 @@ pub enum KademliaEvent { /// may not make it into the routing table. /// /// If the peer is to be unconditionally included in the routing table, - /// it should be explicitly added via [`Kademlia::add_address`] after + /// it should be explicitly added via [`Behaviour::add_address`] after /// removing another peer. /// - /// See [`Kademlia::kbucket`] for insight into the contents of + /// See [`Behaviour::kbucket`] for insight into the contents of /// the k-bucket of `peer`. PendingRoutablePeer { peer: PeerId, address: Multiaddr }, } @@ -2719,10 +2711,10 @@ pub enum InboundRequest { num_provider_peers: usize, }, /// A peer sent an add provider request. - /// If filtering [`KademliaStoreInserts::FilterBoth`] is enabled, the [`ProviderRecord`] is + /// If filtering [`StoreInserts::FilterBoth`] is enabled, the [`ProviderRecord`] is /// included. /// - /// See [`KademliaStoreInserts`] and [`KademliaConfig::set_record_filtering`] for details.. + /// See [`StoreInserts`] and [`Config::set_record_filtering`] for details.. AddProvider { record: Option }, /// Request to retrieve a record. 
GetRecord { @@ -2730,9 +2722,9 @@ pub enum InboundRequest { present_locally: bool, }, /// A peer sent a put record request. - /// If filtering [`KademliaStoreInserts::FilterBoth`] is enabled, the [`Record`] is included. + /// If filtering [`StoreInserts::FilterBoth`] is enabled, the [`Record`] is included. /// - /// See [`KademliaStoreInserts`] and [`KademliaConfig::set_record_filtering`]. + /// See [`StoreInserts`] and [`Config::set_record_filtering`]. PutRecord { source: PeerId, connection: ConnectionId, @@ -2743,35 +2735,35 @@ pub enum InboundRequest { /// The results of Kademlia queries. #[derive(Debug, Clone)] pub enum QueryResult { - /// The result of [`Kademlia::bootstrap`]. + /// The result of [`Behaviour::bootstrap`]. Bootstrap(BootstrapResult), - /// The result of [`Kademlia::get_closest_peers`]. + /// The result of [`Behaviour::get_closest_peers`]. GetClosestPeers(GetClosestPeersResult), - /// The result of [`Kademlia::get_providers`]. + /// The result of [`Behaviour::get_providers`]. GetProviders(GetProvidersResult), - /// The result of [`Kademlia::start_providing`]. + /// The result of [`Behaviour::start_providing`]. StartProviding(AddProviderResult), /// The result of a (automatic) republishing of a provider record. RepublishProvider(AddProviderResult), - /// The result of [`Kademlia::get_record`]. + /// The result of [`Behaviour::get_record`]. GetRecord(GetRecordResult), - /// The result of [`Kademlia::put_record`]. + /// The result of [`Behaviour::put_record`]. PutRecord(PutRecordResult), /// The result of a (automatic) republishing of a (value-)record. RepublishRecord(PutRecordResult), } -/// The result of [`Kademlia::get_record`]. +/// The result of [`Behaviour::get_record`]. pub type GetRecordResult = Result; -/// The successful result of [`Kademlia::get_record`]. +/// The successful result of [`Behaviour::get_record`]. 
#[derive(Debug, Clone)] pub enum GetRecordOk { FoundRecord(PeerRecord), @@ -2780,16 +2772,16 @@ pub enum GetRecordOk { /// _to the record key_ (not the local node) that were queried but /// did not return the record, sorted by distance to the record key /// from closest to farthest. How many of these are tracked is configured - /// by [`KademliaConfig::set_caching`]. If the lookup used a quorum of - /// 1, these peers will be sent the record as a means of caching. - /// If the lookup used a quorum > 1, you may wish to use these - /// candidates with [`Kademlia::put_record_to`] after selecting - /// one of the returned records. + /// by [`Config::set_caching`]. + /// + /// Writing back the cache at these peers is a manual operation. + /// ie. you may wish to use these candidates with [`Behaviour::put_record_to`] + /// after selecting one of the returned records. cache_candidates: BTreeMap, }, } -/// The error result of [`Kademlia::get_record`]. +/// The error result of [`Behaviour::get_record`]. #[derive(Debug, Clone, Error)] pub enum GetRecordError { #[error("the record was not found")] @@ -2828,16 +2820,16 @@ impl GetRecordError { } } -/// The result of [`Kademlia::put_record`]. +/// The result of [`Behaviour::put_record`]. pub type PutRecordResult = Result; -/// The successful result of [`Kademlia::put_record`]. +/// The successful result of [`Behaviour::put_record`]. #[derive(Debug, Clone)] pub struct PutRecordOk { pub key: record_priv::Key, } -/// The error result of [`Kademlia::put_record`]. +/// The error result of [`Behaviour::put_record`]. #[derive(Debug, Clone, Error)] pub enum PutRecordError { #[error("the quorum failed; needed {quorum} peers")] @@ -2875,17 +2867,17 @@ impl PutRecordError { } } -/// The result of [`Kademlia::bootstrap`]. +/// The result of [`Behaviour::bootstrap`]. pub type BootstrapResult = Result; -/// The successful result of [`Kademlia::bootstrap`]. +/// The successful result of [`Behaviour::bootstrap`]. 
#[derive(Debug, Clone)] pub struct BootstrapOk { pub peer: PeerId, pub num_remaining: u32, } -/// The error result of [`Kademlia::bootstrap`]. +/// The error result of [`Behaviour::bootstrap`]. #[derive(Debug, Clone, Error)] pub enum BootstrapError { #[error("the request timed out")] @@ -2895,17 +2887,17 @@ pub enum BootstrapError { }, } -/// The result of [`Kademlia::get_closest_peers`]. +/// The result of [`Behaviour::get_closest_peers`]. pub type GetClosestPeersResult = Result; -/// The successful result of [`Kademlia::get_closest_peers`]. +/// The successful result of [`Behaviour::get_closest_peers`]. #[derive(Debug, Clone)] pub struct GetClosestPeersOk { pub key: Vec, pub peers: Vec, } -/// The error result of [`Kademlia::get_closest_peers`]. +/// The error result of [`Behaviour::get_closest_peers`]. #[derive(Debug, Clone, Error)] pub enum GetClosestPeersError { #[error("the request timed out")] @@ -2929,10 +2921,10 @@ impl GetClosestPeersError { } } -/// The result of [`Kademlia::get_providers`]. +/// The result of [`Behaviour::get_providers`]. pub type GetProvidersResult = Result; -/// The successful result of [`Kademlia::get_providers`]. +/// The successful result of [`Behaviour::get_providers`]. #[derive(Debug, Clone)] pub enum GetProvidersOk { FoundProviders { @@ -2945,7 +2937,7 @@ pub enum GetProvidersOk { }, } -/// The error result of [`Kademlia::get_providers`]. +/// The error result of [`Behaviour::get_providers`]. 
#[derive(Debug, Clone, Error)] pub enum GetProvidersError { #[error("the request timed out")] @@ -3010,8 +3002,8 @@ impl From, Addresses>> for KadPeer { node_id: e.node.key.into_preimage(), multiaddrs: e.node.value.into_vec(), connection_ty: match e.status { - NodeStatus::Connected => KadConnectionType::Connected, - NodeStatus::Disconnected => KadConnectionType::NotConnected, + NodeStatus::Connected => ConnectionType::Connected, + NodeStatus::Disconnected => ConnectionType::NotConnected, }, } } @@ -3029,7 +3021,7 @@ struct QueryInner { /// /// A request is pending if the targeted peer is not currently connected /// and these requests are sent as soon as a connection to the peer is established. - pending_rpcs: SmallVec<[(PeerId, KademliaHandlerIn); K_VALUE.get()]>, + pending_rpcs: SmallVec<[(PeerId, HandlerIn); K_VALUE.get()]>, } impl QueryInner { @@ -3045,33 +3037,33 @@ impl QueryInner { /// The context of a [`QueryInfo::AddProvider`] query. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum AddProviderContext { - /// The context is a [`Kademlia::start_providing`] operation. + /// The context is a [`Behaviour::start_providing`] operation. Publish, /// The context is periodic republishing of provider announcements - /// initiated earlier via [`Kademlia::start_providing`]. + /// initiated earlier via [`Behaviour::start_providing`]. Republish, } /// The context of a [`QueryInfo::PutRecord`] query. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum PutRecordContext { - /// The context is a [`Kademlia::put_record`] operation. + /// The context is a [`Behaviour::put_record`] operation. Publish, /// The context is periodic republishing of records stored - /// earlier via [`Kademlia::put_record`]. + /// earlier via [`Behaviour::put_record`]. Republish, /// The context is periodic replication (i.e. without extending /// the record TTL) of stored records received earlier from another peer. 
Replicate, /// The context is a custom store operation targeting specific - /// peers initiated by [`Kademlia::put_record_to`]. + /// peers initiated by [`Behaviour::put_record_to`]. Custom, } /// Information about a running query. #[derive(Debug, Clone)] pub enum QueryInfo { - /// A query initiated by [`Kademlia::bootstrap`]. + /// A query initiated by [`Behaviour::bootstrap`]. Bootstrap { /// The targeted peer ID. peer: PeerId, @@ -3085,7 +3077,7 @@ pub enum QueryInfo { step: ProgressStep, }, - /// A (repeated) query initiated by [`Kademlia::get_closest_peers`]. + /// A (repeated) query initiated by [`Behaviour::get_closest_peers`]. GetClosestPeers { /// The key being queried (the preimage). key: Vec, @@ -3093,7 +3085,7 @@ pub enum QueryInfo { step: ProgressStep, }, - /// A (repeated) query initiated by [`Kademlia::get_providers`]. + /// A (repeated) query initiated by [`Behaviour::get_providers`]. GetProviders { /// The key for which to search for providers. key: record_priv::Key, @@ -3103,7 +3095,7 @@ pub enum QueryInfo { step: ProgressStep, }, - /// A (repeated) query initiated by [`Kademlia::start_providing`]. + /// A (repeated) query initiated by [`Behaviour::start_providing`]. AddProvider { /// The record key. key: record_priv::Key, @@ -3113,7 +3105,7 @@ pub enum QueryInfo { context: AddProviderContext, }, - /// A (repeated) query initiated by [`Kademlia::put_record`]. + /// A (repeated) query initiated by [`Behaviour::put_record`]. PutRecord { record: Record, /// The expected quorum of responses w.r.t. the replication factor. @@ -3124,7 +3116,7 @@ pub enum QueryInfo { context: PutRecordContext, }, - /// A (repeated) query initiated by [`Kademlia::get_record`]. + /// A (repeated) query initiated by [`Behaviour::get_record`]. GetRecord { /// The key to look for. key: record_priv::Key, @@ -3141,22 +3133,22 @@ pub enum QueryInfo { impl QueryInfo { /// Creates an event for a handler to issue an outgoing request in the /// context of a query. 
- fn to_request(&self, query_id: QueryId) -> KademliaHandlerIn { + fn to_request(&self, query_id: QueryId) -> HandlerIn { match &self { - QueryInfo::Bootstrap { peer, .. } => KademliaHandlerIn::FindNodeReq { + QueryInfo::Bootstrap { peer, .. } => HandlerIn::FindNodeReq { key: peer.to_bytes(), query_id, }, - QueryInfo::GetClosestPeers { key, .. } => KademliaHandlerIn::FindNodeReq { + QueryInfo::GetClosestPeers { key, .. } => HandlerIn::FindNodeReq { key: key.clone(), query_id, }, - QueryInfo::GetProviders { key, .. } => KademliaHandlerIn::GetProvidersReq { + QueryInfo::GetProviders { key, .. } => HandlerIn::GetProvidersReq { key: key.clone(), query_id, }, QueryInfo::AddProvider { key, phase, .. } => match phase { - AddProviderPhase::GetClosestPeers => KademliaHandlerIn::FindNodeReq { + AddProviderPhase::GetClosestPeers => HandlerIn::FindNodeReq { key: key.to_vec(), query_id, }, @@ -3164,25 +3156,25 @@ impl QueryInfo { provider_id, external_addresses, .. - } => KademliaHandlerIn::AddProvider { + } => HandlerIn::AddProvider { key: key.clone(), provider: crate::protocol::KadPeer { node_id: *provider_id, multiaddrs: external_addresses.clone(), - connection_ty: crate::protocol::KadConnectionType::Connected, + connection_ty: crate::protocol::ConnectionType::Connected, }, }, }, - QueryInfo::GetRecord { key, .. } => KademliaHandlerIn::GetRecord { + QueryInfo::GetRecord { key, .. } => HandlerIn::GetRecord { key: key.clone(), query_id, }, QueryInfo::PutRecord { record, phase, .. } => match phase { - PutRecordPhase::GetClosestPeers => KademliaHandlerIn::FindNodeReq { + PutRecordPhase::GetClosestPeers => HandlerIn::FindNodeReq { key: record.key.to_vec(), query_id, }, - PutRecordPhase::PutRecord { .. } => KademliaHandlerIn::PutRecord { + PutRecordPhase::PutRecord { .. 
} => HandlerIn::PutRecord { record: record.clone(), query_id, }, @@ -3290,7 +3282,7 @@ impl fmt::Display for NoKnownPeers { impl std::error::Error for NoKnownPeers {} -/// The possible outcomes of [`Kademlia::add_address`]. +/// The possible outcomes of [`Behaviour::add_address`]. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum RoutingUpdate { /// The given peer and address has been added to the routing @@ -3299,7 +3291,7 @@ pub enum RoutingUpdate { /// The peer and address is pending insertion into /// the routing table, if a disconnected peer fails /// to respond. If the given peer and address ends up - /// in the routing table, [`KademliaEvent::RoutingUpdated`] + /// in the routing table, [`Event::RoutingUpdated`] /// is eventually emitted. Pending, /// The routing table update failed, either because the diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index cd4337e9..f85208ee 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -48,13 +48,13 @@ use std::{ u64, }; -type TestSwarm = Swarm>; +type TestSwarm = Swarm>; fn build_node() -> (Multiaddr, TestSwarm) { build_node_with_config(Default::default()) } -fn build_node_with_config(cfg: KademliaConfig) -> (Multiaddr, TestSwarm) { +fn build_node_with_config(cfg: Config) -> (Multiaddr, TestSwarm) { let local_key = identity::Keypair::generate_ed25519(); let local_public_key = local_key.public(); let transport = MemoryTransport::default() @@ -65,7 +65,7 @@ fn build_node_with_config(cfg: KademliaConfig) -> (Multiaddr, TestSwarm) { let local_id = local_public_key.to_peer_id(); let store = MemoryStore::new(local_id); - let behaviour = Kademlia::with_config(local_id, store, cfg); + let behaviour = Behaviour::with_config(local_id, store, cfg); let mut swarm = SwarmBuilder::without_executor(transport, behaviour, local_id).build(); @@ -82,7 +82,7 @@ fn build_nodes(num: usize) -> Vec<(Multiaddr, TestSwarm)> { } /// Builds swarms, each 
listening on a port. Does *not* connect the nodes together. -fn build_nodes_with_config(num: usize, cfg: KademliaConfig) -> Vec<(Multiaddr, TestSwarm)> { +fn build_nodes_with_config(num: usize, cfg: Config) -> Vec<(Multiaddr, TestSwarm)> { (0..num) .map(|_| build_node_with_config(cfg.clone())) .collect() @@ -95,7 +95,7 @@ fn build_connected_nodes(total: usize, step: usize) -> Vec<(Multiaddr, TestSwarm fn build_connected_nodes_with_config( total: usize, step: usize, - cfg: KademliaConfig, + cfg: Config, ) -> Vec<(Multiaddr, TestSwarm)> { let mut swarms = build_nodes_with_config(total, cfg); let swarm_ids: Vec<_> = swarms @@ -121,7 +121,7 @@ fn build_connected_nodes_with_config( fn build_fully_connected_nodes_with_config( total: usize, - cfg: KademliaConfig, + cfg: Config, ) -> Vec<(Multiaddr, TestSwarm)> { let mut swarms = build_nodes_with_config(total, cfg); let swarm_addr_and_peer_id: Vec<_> = swarms @@ -166,7 +166,7 @@ fn bootstrap() { // or smaller than K_VALUE. let num_group = rng.gen_range(1..(num_total % K_VALUE.get()) + 2); - let mut cfg = KademliaConfig::default(); + let mut cfg = Config::default(); if rng.gen() { cfg.disjoint_query_paths(true); } @@ -190,7 +190,7 @@ fn bootstrap() { loop { match swarm.poll_next_unpin(ctx) { Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::Bootstrap(Ok(ok)), .. @@ -280,7 +280,7 @@ fn query_iter() { loop { match swarm.poll_next_unpin(ctx) { Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::GetClosestPeers(Ok(ok)), .. @@ -338,12 +338,10 @@ fn unresponsive_not_returned_direct() { for swarm in &mut swarms { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - result: QueryResult::GetClosestPeers(Ok(ok)), - .. 
- }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + result: QueryResult::GetClosestPeers(Ok(ok)), + .. + }))) => { assert_eq!(&ok.key[..], search_target.to_bytes().as_slice()); assert_eq!(ok.peers.len(), 0); return Poll::Ready(()); @@ -398,12 +396,10 @@ fn unresponsive_not_returned_indirect() { for swarm in &mut swarms { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - result: QueryResult::GetClosestPeers(Ok(ok)), - .. - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + result: QueryResult::GetClosestPeers(Ok(ok)), + .. + }))) => { assert_eq!(&ok.key[..], search_target.to_bytes().as_slice()); assert_eq!(ok.peers.len(), 1); assert_eq!(ok.peers[0], first_peer_id); @@ -453,13 +449,11 @@ fn get_record_not_found() { for swarm in &mut swarms { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - id, - result: QueryResult::GetRecord(Err(e)), - .. - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + id, + result: QueryResult::GetRecord(Err(e)), + .. + }))) => { assert_eq!(id, qid); if let GetRecordError::NotFound { key, closest_peers } = e { assert_eq!(key, target_key); @@ -495,14 +489,14 @@ fn put_record() { // At least 4 nodes, 1 under test + 3 bootnodes. 
let num_total = usize::max(4, replication_factor.get() * 2); - let mut config = KademliaConfig::default(); + let mut config = Config::default(); config.set_replication_factor(replication_factor); if rng.gen() { config.disjoint_query_paths(true); } if filter_records { - config.set_record_filtering(KademliaStoreInserts::FilterBoth); + config.set_record_filtering(StoreInserts::FilterBoth); } let mut swarms = { @@ -574,7 +568,7 @@ fn put_record() { loop { match swarm.poll_next_unpin(ctx) { Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::PutRecord(res), stats, @@ -582,7 +576,7 @@ fn put_record() { }, ))) | Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::RepublishRecord(res), stats, @@ -605,16 +599,14 @@ fn put_record() { } } } - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::InboundRequest { - request: InboundRequest::PutRecord { record, .. }, - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::InboundRequest { + request: InboundRequest::PutRecord { record, .. 
}, + }))) => { if !drop_records { if let Some(record) = record { assert_eq!( swarm.behaviour().record_filtering, - KademliaStoreInserts::FilterBoth + StoreInserts::FilterBoth ); // Accept the record swarm @@ -625,7 +617,7 @@ fn put_record() { } else { assert_eq!( swarm.behaviour().record_filtering, - KademliaStoreInserts::Unfiltered + StoreInserts::Unfiltered ); } } @@ -684,7 +676,7 @@ fn put_record() { }) .collect::>(); - if swarms[0].behaviour().record_filtering != KademliaStoreInserts::Unfiltered + if swarms[0].behaviour().record_filtering != StoreInserts::Unfiltered && drop_records { assert_eq!(actual.len(), 0); @@ -765,14 +757,12 @@ fn get_record() { for swarm in &mut swarms { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - id, - result: QueryResult::GetRecord(Ok(r)), - step: ProgressStep { count, last }, - .. - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + id, + result: QueryResult::GetRecord(Ok(r)), + step: ProgressStep { count, last }, + .. + }))) => { assert_eq!(id, qid); if usize::from(count) == 1 { assert!(!last); @@ -829,14 +819,12 @@ fn get_record_many() { swarm.behaviour_mut().query_mut(&qid).unwrap().finish(); } match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - id, - result: QueryResult::GetRecord(Ok(r)), - step: ProgressStep { count: _, last }, - .. - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + id, + result: QueryResult::GetRecord(Ok(r)), + step: ProgressStep { count: _, last }, + .. + }))) => { assert_eq!(id, qid); if let GetRecordOk::FoundRecord(r) = r { assert_eq!(r.record, record); @@ -870,7 +858,7 @@ fn add_provider() { // At least 4 nodes, 1 under test + 3 bootnodes. 
let num_total = usize::max(4, replication_factor.get() * 2); - let mut config = KademliaConfig::default(); + let mut config = Config::default(); config.set_replication_factor(replication_factor); if rng.gen() { config.disjoint_query_paths(true); @@ -924,14 +912,14 @@ fn add_provider() { loop { match swarm.poll_next_unpin(ctx) { Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::StartProviding(res), .. }, ))) | Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::RepublishProvider(res), .. @@ -1062,7 +1050,7 @@ fn exceed_jobs_max_queries() { loop { if let Poll::Ready(Some(e)) = swarm.poll_next_unpin(ctx) { match e { - SwarmEvent::Behaviour(KademliaEvent::OutboundQueryProgressed { + SwarmEvent::Behaviour(Event::OutboundQueryProgressed { result: QueryResult::GetClosestPeers(Ok(r)), .. }) => break assert!(r.peers.is_empty()), @@ -1085,14 +1073,14 @@ fn exp_decr_expiration_overflow() { } // Right shifting a u64 by >63 results in a panic. - prop_no_panic(KademliaConfig::default().record_ttl.unwrap(), 64); + prop_no_panic(Config::default().record_ttl.unwrap(), 64); quickcheck(prop_no_panic as fn(_, _)) } #[test] fn disjoint_query_does_not_finish_before_all_paths_did() { - let mut config = KademliaConfig::default(); + let mut config = Config::default(); config.disjoint_query_paths(true); // I.e. setting the amount disjoint paths to be explored to 2. config.set_parallelism(NonZeroUsize::new(2).unwrap()); @@ -1140,13 +1128,11 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { for (i, swarm) in [&mut alice, &mut trudy].iter_mut().enumerate() { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - result: QueryResult::GetRecord(result), - step, - .. 
- }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + result: QueryResult::GetRecord(result), + step, + .. + }))) => { if i != 0 { panic!("Expected `QueryResult` from Alice.") } @@ -1197,13 +1183,11 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { for (i, swarm) in [&mut alice, &mut bob].iter_mut().enumerate() { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - result: QueryResult::GetRecord(result), - step, - .. - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + result: QueryResult::GetRecord(result), + step, + .. + }))) => { if i != 0 { panic!("Expected `QueryResult` from Alice.") } @@ -1241,11 +1225,11 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { } /// Tests that peers are not automatically inserted into -/// the routing table with `KademliaBucketInserts::Manual`. +/// the routing table with `BucketInserts::Manual`. #[test] fn manual_bucket_inserts() { - let mut cfg = KademliaConfig::default(); - cfg.set_kbucket_inserts(KademliaBucketInserts::Manual); + let mut cfg = Config::default(); + cfg.set_kbucket_inserts(BucketInserts::Manual); // 1 -> 2 -> [3 -> ...] let mut swarms = build_connected_nodes_with_config(3, 1, cfg); // The peers and their addresses for which we expect `RoutablePeer` events. 
@@ -1271,7 +1255,7 @@ fn manual_bucket_inserts() { for (_, swarm) in swarms.iter_mut() { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour(KademliaEvent::RoutablePeer { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::RoutablePeer { peer, address, }))) => { @@ -1303,7 +1287,7 @@ fn network_behaviour_on_address_change() { let old_address: Multiaddr = Protocol::Memory(1).into(); let new_address: Multiaddr = Protocol::Memory(2).into(); - let mut kademlia = Kademlia::new(local_peer_id, MemoryStore::new(local_peer_id)); + let mut kademlia = Behaviour::new(local_peer_id, MemoryStore::new(local_peer_id)); let endpoint = ConnectedPoint::Dialer { address: old_address.clone(), @@ -1337,7 +1321,7 @@ fn network_behaviour_on_address_change() { kademlia.on_connection_handler_event( remote_peer_id, connection_id, - KademliaHandlerEvent::ProtocolConfirmed { endpoint }, + HandlerEvent::ProtocolConfirmed { endpoint }, ); assert_eq!( @@ -1389,7 +1373,7 @@ fn get_providers_single() { block_on(async { match single_swarm.next().await.unwrap() { - SwarmEvent::Behaviour(KademliaEvent::OutboundQueryProgressed { + SwarmEvent::Behaviour(Event::OutboundQueryProgressed { result: QueryResult::StartProviding(Ok(_)), .. 
}) => {} @@ -1403,7 +1387,7 @@ fn get_providers_single() { block_on(async { loop { match single_swarm.next().await.unwrap() { - SwarmEvent::Behaviour(KademliaEvent::OutboundQueryProgressed { + SwarmEvent::Behaviour(Event::OutboundQueryProgressed { id, result: QueryResult::GetProviders(Ok(ok)), step: index, @@ -1469,7 +1453,7 @@ fn get_providers_limit() { loop { match swarm.poll_next_unpin(ctx) { Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::GetProviders(Ok(ok)), step: index, diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index d695420e..0df4da6b 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -20,8 +20,7 @@ use crate::behaviour::Mode; use crate::protocol::{ - KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, - KademliaProtocolConfig, + KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, ProtocolConfig, }; use crate::record_priv::{self, Record}; use crate::QueryId; @@ -54,9 +53,9 @@ const MAX_NUM_SUBSTREAMS: usize = 32; /// make. /// /// It also handles requests made by the remote. -pub struct KademliaHandler { +pub struct Handler { /// Configuration of the wire protocol. - protocol_config: KademliaProtocolConfig, + protocol_config: ProtocolConfig, /// In client mode, we don't accept inbound substreams. mode: Mode, @@ -126,7 +125,7 @@ enum OutboundSubstreamState { // TODO: add timeout WaitingAnswer(KadOutStreamSink, QueryId), /// An error happened on the substream and we should report the error to the user. - ReportError(KademliaHandlerQueryErr, QueryId), + ReportError(HandlerQueryErr, QueryId), /// The substream is being closed. Closing(KadOutStreamSink), /// The substream is complete and will not perform any more work. 
@@ -143,7 +142,7 @@ enum InboundSubstreamState { connection_id: UniqueConnecId, substream: KadInStreamSink, }, - /// Waiting for the behaviour to send a [`KademliaHandlerIn`] event containing the response. + /// Waiting for the behaviour to send a [`HandlerIn`] event containing the response. WaitingBehaviour(UniqueConnecId, KadInStreamSink, Option), /// Waiting to send an answer back to the remote. PendingSend(UniqueConnecId, KadInStreamSink, KadResponseMsg), @@ -162,7 +161,7 @@ enum InboundSubstreamState { impl InboundSubstreamState { fn try_answer_with( &mut self, - id: KademliaRequestId, + id: RequestId, msg: KadResponseMsg, ) -> Result<(), KadResponseMsg> { match std::mem::replace( @@ -214,7 +213,7 @@ impl InboundSubstreamState { /// Event produced by the Kademlia handler. #[derive(Debug)] -pub enum KademliaHandlerEvent { +pub enum HandlerEvent { /// The configured protocol name has been confirmed by the peer through /// a successfully negotiated substream or by learning the supported protocols of the remote. ProtocolConfirmed { endpoint: ConnectedPoint }, @@ -228,10 +227,10 @@ pub enum KademliaHandlerEvent { /// The key for which to locate the closest nodes. key: Vec, /// Identifier of the request. Needs to be passed back when answering. - request_id: KademliaRequestId, + request_id: RequestId, }, - /// Response to an `KademliaHandlerIn::FindNodeReq`. + /// Response to an `HandlerIn::FindNodeReq`. FindNodeRes { /// Results of the request. closer_peers: Vec, @@ -245,10 +244,10 @@ pub enum KademliaHandlerEvent { /// The key for which providers are requested. key: record_priv::Key, /// Identifier of the request. Needs to be passed back when answering. - request_id: KademliaRequestId, + request_id: RequestId, }, - /// Response to an `KademliaHandlerIn::GetProvidersReq`. + /// Response to an `HandlerIn::GetProvidersReq`. GetProvidersRes { /// Nodes closest to the key. 
closer_peers: Vec, @@ -261,7 +260,7 @@ pub enum KademliaHandlerEvent { /// An error happened when performing a query. QueryError { /// The error that happened. - error: KademliaHandlerQueryErr, + error: HandlerQueryErr, /// The user data passed to the query. query_id: QueryId, }, @@ -279,10 +278,10 @@ pub enum KademliaHandlerEvent { /// Key for which we should look in the dht key: record_priv::Key, /// Identifier of the request. Needs to be passed back when answering. - request_id: KademliaRequestId, + request_id: RequestId, }, - /// Response to a `KademliaHandlerIn::GetRecord`. + /// Response to a `HandlerIn::GetRecord`. GetRecordRes { /// The result is present if the key has been found record: Option, @@ -296,7 +295,7 @@ pub enum KademliaHandlerEvent { PutRecord { record: Record, /// Identifier of the request. Needs to be passed back when answering. - request_id: KademliaRequestId, + request_id: RequestId, }, /// Response to a request to store a record. @@ -312,7 +311,7 @@ pub enum KademliaHandlerEvent { /// Error that can happen when requesting an RPC query. #[derive(Debug)] -pub enum KademliaHandlerQueryErr { +pub enum HandlerQueryErr { /// Error while trying to perform the query. Upgrade(StreamUpgradeError), /// Received an answer that doesn't correspond to the request. 
@@ -321,44 +320,44 @@ pub enum KademliaHandlerQueryErr { Io(io::Error), } -impl fmt::Display for KademliaHandlerQueryErr { +impl fmt::Display for HandlerQueryErr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - KademliaHandlerQueryErr::Upgrade(err) => { + HandlerQueryErr::Upgrade(err) => { write!(f, "Error while performing Kademlia query: {err}") } - KademliaHandlerQueryErr::UnexpectedMessage => { + HandlerQueryErr::UnexpectedMessage => { write!( f, "Remote answered our Kademlia RPC query with the wrong message type" ) } - KademliaHandlerQueryErr::Io(err) => { + HandlerQueryErr::Io(err) => { write!(f, "I/O error during a Kademlia RPC query: {err}") } } } } -impl error::Error for KademliaHandlerQueryErr { +impl error::Error for HandlerQueryErr { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { - KademliaHandlerQueryErr::Upgrade(err) => Some(err), - KademliaHandlerQueryErr::UnexpectedMessage => None, - KademliaHandlerQueryErr::Io(err) => Some(err), + HandlerQueryErr::Upgrade(err) => Some(err), + HandlerQueryErr::UnexpectedMessage => None, + HandlerQueryErr::Io(err) => Some(err), } } } -impl From> for KademliaHandlerQueryErr { +impl From> for HandlerQueryErr { fn from(err: StreamUpgradeError) -> Self { - KademliaHandlerQueryErr::Upgrade(err) + HandlerQueryErr::Upgrade(err) } } /// Event to send to the handler. #[derive(Debug)] -pub enum KademliaHandlerIn { +pub enum HandlerIn { /// Resets the (sub)stream associated with the given request ID, /// thus signaling an error to the remote. /// @@ -366,7 +365,7 @@ pub enum KademliaHandlerIn { /// can be used as an alternative to letting requests simply time /// out on the remote peer, thus potentially avoiding some delay /// for the query on the remote. - Reset(KademliaRequestId), + Reset(RequestId), /// Change the connection to the specified mode. 
ReconfigureMode { new_mode: Mode }, @@ -387,7 +386,7 @@ pub enum KademliaHandlerIn { /// Identifier of the request that was made by the remote. /// /// It is a logic error to use an id of the handler of a different node. - request_id: KademliaRequestId, + request_id: RequestId, }, /// Same as `FindNodeReq`, but should also return the entries of the local providers list for @@ -408,7 +407,7 @@ pub enum KademliaHandlerIn { /// Identifier of the request that was made by the remote. /// /// It is a logic error to use an id of the handler of a different node. - request_id: KademliaRequestId, + request_id: RequestId, }, /// Indicates that this provider is known for this key. @@ -437,7 +436,7 @@ pub enum KademliaHandlerIn { /// Nodes that are closer to the key we were searching for. closer_peers: Vec, /// Identifier of the request that was made by the remote. - request_id: KademliaRequestId, + request_id: RequestId, }, /// Put a value into the dht records. @@ -454,14 +453,14 @@ pub enum KademliaHandlerIn { /// Value that was put. value: Vec, /// Identifier of the request that was made by the remote. - request_id: KademliaRequestId, + request_id: RequestId, }, } /// Unique identifier for a request. Must be passed back in order to answer a request from /// the remote. #[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct KademliaRequestId { +pub struct RequestId { /// Unique identifier for an incoming connection. 
connec_unique_id: UniqueConnecId, } @@ -470,9 +469,9 @@ pub struct KademliaRequestId { #[derive(Debug, Copy, Clone, PartialEq, Eq)] struct UniqueConnecId(u64); -impl KademliaHandler { +impl Handler { pub fn new( - protocol_config: KademliaProtocolConfig, + protocol_config: ProtocolConfig, idle_timeout: Duration, endpoint: ConnectedPoint, remote_peer_id: PeerId, @@ -494,7 +493,7 @@ impl KademliaHandler { let keep_alive = KeepAlive::Until(Instant::now() + idle_timeout); - KademliaHandler { + Handler { protocol_config, mode, idle_timeout, @@ -612,12 +611,12 @@ impl KademliaHandler { } } -impl ConnectionHandler for KademliaHandler { - type FromBehaviour = KademliaHandlerIn; - type ToBehaviour = KademliaHandlerEvent; +impl ConnectionHandler for Handler { + type FromBehaviour = HandlerIn; + type ToBehaviour = HandlerEvent; type Error = io::Error; // TODO: better error type? - type InboundProtocol = Either; - type OutboundProtocol = KademliaProtocolConfig; + type InboundProtocol = Either; + type OutboundProtocol = ProtocolConfig; type OutboundOpenInfo = (); type InboundOpenInfo = (); @@ -628,9 +627,9 @@ impl ConnectionHandler for KademliaHandler { } } - fn on_behaviour_event(&mut self, message: KademliaHandlerIn) { + fn on_behaviour_event(&mut self, message: HandlerIn) { match message { - KademliaHandlerIn::Reset(request_id) => { + HandlerIn::Reset(request_id) => { if let Some(state) = self .inbound_substreams .iter_mut() @@ -644,19 +643,19 @@ impl ConnectionHandler for KademliaHandler { state.close(); } } - KademliaHandlerIn::FindNodeReq { key, query_id } => { + HandlerIn::FindNodeReq { key, query_id } => { let msg = KadRequestMsg::FindNode { key }; self.pending_messages.push_back((msg, Some(query_id))); } - KademliaHandlerIn::FindNodeRes { + HandlerIn::FindNodeRes { closer_peers, request_id, } => self.answer_pending_request(request_id, KadResponseMsg::FindNode { closer_peers }), - KademliaHandlerIn::GetProvidersReq { key, query_id } => { + HandlerIn::GetProvidersReq { 
key, query_id } => { let msg = KadRequestMsg::GetProviders { key }; self.pending_messages.push_back((msg, Some(query_id))); } - KademliaHandlerIn::GetProvidersRes { + HandlerIn::GetProvidersRes { closer_peers, provider_peers, request_id, @@ -667,19 +666,19 @@ impl ConnectionHandler for KademliaHandler { provider_peers, }, ), - KademliaHandlerIn::AddProvider { key, provider } => { + HandlerIn::AddProvider { key, provider } => { let msg = KadRequestMsg::AddProvider { key, provider }; self.pending_messages.push_back((msg, None)); } - KademliaHandlerIn::GetRecord { key, query_id } => { + HandlerIn::GetRecord { key, query_id } => { let msg = KadRequestMsg::GetValue { key }; self.pending_messages.push_back((msg, Some(query_id))); } - KademliaHandlerIn::PutRecord { record, query_id } => { + HandlerIn::PutRecord { record, query_id } => { let msg = KadRequestMsg::PutValue { record }; self.pending_messages.push_back((msg, Some(query_id))); } - KademliaHandlerIn::GetRecordRes { + HandlerIn::GetRecordRes { record, closer_peers, request_id, @@ -692,14 +691,14 @@ impl ConnectionHandler for KademliaHandler { }, ); } - KademliaHandlerIn::PutRecordRes { + HandlerIn::PutRecordRes { key, request_id, value, } => { self.answer_pending_request(request_id, KadResponseMsg::PutValue { key, value }); } - KademliaHandlerIn::ReconfigureMode { new_mode } => { + HandlerIn::ReconfigureMode { new_mode } => { let peer = self.remote_peer_id; match &self.endpoint { @@ -736,7 +735,7 @@ impl ConnectionHandler for KademliaHandler { if let ProtocolStatus::Confirmed = self.protocol_status { self.protocol_status = ProtocolStatus::Reported; return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - KademliaHandlerEvent::ProtocolConfirmed { + HandlerEvent::ProtocolConfirmed { endpoint: self.endpoint.clone(), }, )); @@ -833,8 +832,8 @@ impl ConnectionHandler for KademliaHandler { } } -impl KademliaHandler { - fn answer_pending_request(&mut self, request_id: KademliaRequestId, mut msg: KadResponseMsg) { 
+impl Handler { + fn answer_pending_request(&mut self, request_id: RequestId, mut msg: KadResponseMsg) { for state in self.inbound_substreams.iter_mut() { match state.try_answer_with(request_id, msg) { Ok(()) => return, @@ -849,7 +848,7 @@ impl KademliaHandler { } impl futures::Stream for OutboundSubstreamState { - type Item = ConnectionHandlerEvent; + type Item = ConnectionHandlerEvent; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -866,8 +865,8 @@ impl futures::Stream for OutboundSubstreamState { *this = OutboundSubstreamState::Done; let event = query_id.map(|query_id| { ConnectionHandlerEvent::NotifyBehaviour( - KademliaHandlerEvent::QueryError { - error: KademliaHandlerQueryErr::Io(error), + HandlerEvent::QueryError { + error: HandlerQueryErr::Io(error), query_id, }, ) @@ -883,12 +882,10 @@ impl futures::Stream for OutboundSubstreamState { Poll::Ready(Err(error)) => { *this = OutboundSubstreamState::Done; let event = query_id.map(|query_id| { - ConnectionHandlerEvent::NotifyBehaviour( - KademliaHandlerEvent::QueryError { - error: KademliaHandlerQueryErr::Io(error), - query_id, - }, - ) + ConnectionHandlerEvent::NotifyBehaviour(HandlerEvent::QueryError { + error: HandlerQueryErr::Io(error), + query_id, + }) }); return Poll::Ready(event); @@ -911,12 +908,10 @@ impl futures::Stream for OutboundSubstreamState { Poll::Ready(Err(error)) => { *this = OutboundSubstreamState::Done; let event = query_id.map(|query_id| { - ConnectionHandlerEvent::NotifyBehaviour( - KademliaHandlerEvent::QueryError { - error: KademliaHandlerQueryErr::Io(error), - query_id, - }, - ) + ConnectionHandlerEvent::NotifyBehaviour(HandlerEvent::QueryError { + error: HandlerQueryErr::Io(error), + query_id, + }) }); return Poll::Ready(event); @@ -939,8 +934,8 @@ impl futures::Stream for OutboundSubstreamState { } Poll::Ready(Some(Err(error))) => { *this = OutboundSubstreamState::Done; - let event = KademliaHandlerEvent::QueryError { - error: 
KademliaHandlerQueryErr::Io(error), + let event = HandlerEvent::QueryError { + error: HandlerQueryErr::Io(error), query_id, }; @@ -950,10 +945,8 @@ impl futures::Stream for OutboundSubstreamState { } Poll::Ready(None) => { *this = OutboundSubstreamState::Done; - let event = KademliaHandlerEvent::QueryError { - error: KademliaHandlerQueryErr::Io( - io::ErrorKind::UnexpectedEof.into(), - ), + let event = HandlerEvent::QueryError { + error: HandlerQueryErr::Io(io::ErrorKind::UnexpectedEof.into()), query_id, }; @@ -965,7 +958,7 @@ impl futures::Stream for OutboundSubstreamState { } OutboundSubstreamState::ReportError(error, query_id) => { *this = OutboundSubstreamState::Done; - let event = KademliaHandlerEvent::QueryError { error, query_id }; + let event = HandlerEvent::QueryError { error, query_id }; return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour(event))); } @@ -987,7 +980,7 @@ impl futures::Stream for OutboundSubstreamState { } impl futures::Stream for InboundSubstreamState { - type Item = ConnectionHandlerEvent; + type Item = ConnectionHandlerEvent; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -1013,9 +1006,9 @@ impl futures::Stream for InboundSubstreamState { *this = InboundSubstreamState::WaitingBehaviour(connection_id, substream, None); return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( - KademliaHandlerEvent::FindNodeReq { + HandlerEvent::FindNodeReq { key, - request_id: KademliaRequestId { + request_id: RequestId { connec_unique_id: connection_id, }, }, @@ -1025,9 +1018,9 @@ impl futures::Stream for InboundSubstreamState { *this = InboundSubstreamState::WaitingBehaviour(connection_id, substream, None); return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( - KademliaHandlerEvent::GetProvidersReq { + HandlerEvent::GetProvidersReq { key, - request_id: KademliaRequestId { + request_id: RequestId { connec_unique_id: connection_id, }, }, @@ -1040,16 +1033,16 @@ impl 
futures::Stream for InboundSubstreamState { substream, }; return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( - KademliaHandlerEvent::AddProvider { key, provider }, + HandlerEvent::AddProvider { key, provider }, ))); } Poll::Ready(Some(Ok(KadRequestMsg::GetValue { key }))) => { *this = InboundSubstreamState::WaitingBehaviour(connection_id, substream, None); return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( - KademliaHandlerEvent::GetRecord { + HandlerEvent::GetRecord { key, - request_id: KademliaRequestId { + request_id: RequestId { connec_unique_id: connection_id, }, }, @@ -1059,9 +1052,9 @@ impl futures::Stream for InboundSubstreamState { *this = InboundSubstreamState::WaitingBehaviour(connection_id, substream, None); return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( - KademliaHandlerEvent::PutRecord { + HandlerEvent::PutRecord { record, - request_id: KademliaRequestId { + request_id: RequestId { connec_unique_id: connection_id, }, }, @@ -1138,24 +1131,24 @@ impl futures::Stream for InboundSubstreamState { } /// Process a Kademlia message that's supposed to be a response to one of our requests. -fn process_kad_response(event: KadResponseMsg, query_id: QueryId) -> KademliaHandlerEvent { +fn process_kad_response(event: KadResponseMsg, query_id: QueryId) -> HandlerEvent { // TODO: must check that the response corresponds to the request match event { KadResponseMsg::Pong => { // We never send out pings. 
- KademliaHandlerEvent::QueryError { - error: KademliaHandlerQueryErr::UnexpectedMessage, + HandlerEvent::QueryError { + error: HandlerQueryErr::UnexpectedMessage, query_id, } } - KadResponseMsg::FindNode { closer_peers } => KademliaHandlerEvent::FindNodeRes { + KadResponseMsg::FindNode { closer_peers } => HandlerEvent::FindNodeRes { closer_peers, query_id, }, KadResponseMsg::GetProviders { closer_peers, provider_peers, - } => KademliaHandlerEvent::GetProvidersRes { + } => HandlerEvent::GetProvidersRes { closer_peers, provider_peers, query_id, @@ -1163,12 +1156,12 @@ fn process_kad_response(event: KadResponseMsg, query_id: QueryId) -> KademliaHan KadResponseMsg::GetValue { record, closer_peers, - } => KademliaHandlerEvent::GetRecordRes { + } => HandlerEvent::GetRecordRes { record, closer_peers, query_id, }, - KadResponseMsg::PutValue { key, value, .. } => KademliaHandlerEvent::PutRecordRes { + KadResponseMsg::PutValue { key, value, .. } => HandlerEvent::PutRecordRes { key, value, query_id, diff --git a/protocols/kad/src/jobs.rs b/protocols/kad/src/jobs.rs index cfc4f929..af070760 100644 --- a/protocols/kad/src/jobs.rs +++ b/protocols/kad/src/jobs.rs @@ -74,10 +74,10 @@ use std::vec; /// The maximum number of queries towards which background jobs /// are allowed to start new queries on an invocation of -/// `Kademlia::poll`. +/// `Behaviour::poll`. pub(crate) const JOBS_MAX_QUERIES: usize = 100; /// The maximum number of new queries started by a background job -/// per invocation of `Kademlia::poll`. +/// per invocation of `Behaviour::poll`. pub(crate) const JOBS_MAX_NEW_QUERIES: usize = 10; /// A background job run periodically. #[derive(Debug)] diff --git a/protocols/kad/src/lib.rs b/protocols/kad/src/lib.rs index ccdb06b8..dd9f7f56 100644 --- a/protocols/kad/src/lib.rs +++ b/protocols/kad/src/lib.rs @@ -26,7 +26,7 @@ //! [Identify](https://github.com/libp2p/specs/tree/master/identify) protocol might be seen as a core protocol. Rust-libp2p //! 
tries to stay as generic as possible, and does not make this assumption. //! This means that the Identify protocol must be manually hooked up to Kademlia through calls -//! to [`Kademlia::add_address`]. +//! to [`Behaviour::add_address`]. //! If you choose not to use the Identify protocol, and do not provide an alternative peer //! discovery mechanism, a Kademlia node will not discover nodes beyond the network's //! [boot nodes](https://docs.libp2p.io/concepts/glossary/#boot-node). Without the Identify protocol, @@ -73,11 +73,10 @@ pub use behaviour::{ QueryResult, QueryStats, RoutingUpdate, }; pub use behaviour::{ - Kademlia, KademliaBucketInserts, KademliaCaching, KademliaConfig, KademliaEvent, - KademliaStoreInserts, ProgressStep, Quorum, + Behaviour, BucketInserts, Caching, Config, Event, ProgressStep, Quorum, StoreInserts, }; pub use kbucket::{Distance as KBucketDistance, EntryView, KBucketRef, Key as KBucketKey}; -pub use protocol::KadConnectionType; +pub use protocol::ConnectionType; pub use query::QueryId; pub use record_priv::{store, Key as RecordKey, ProviderRecord, Record}; @@ -115,3 +114,30 @@ pub const PROTOCOL_NAME: StreamProtocol = protocol::DEFAULT_PROTO_NAME; /// Constant shared across tests for the [`Multihash`](libp2p_core::multihash::Multihash) type. #[cfg(test)] const SHA_256_MH: u64 = 0x12; + +#[deprecated(note = "Import the `kad` module instead and refer to this type as `kad::Behaviour`.")] +pub type Kademlia = Behaviour; + +#[deprecated( + note = "Import the `kad` module instead and refer to this type as `kad::BucketInserts`." +)] +pub type KademliaBucketInserts = BucketInserts; + +#[deprecated( + note = "Import the `kad` module instead and refer to this type as `kad::StoreInserts`." 
+)] +pub type KademliaStoreInserts = StoreInserts; + +#[deprecated(note = "Import the `kad` module instead and refer to this type as `kad::Config`.")] +pub type KademliaConfig = Config; + +#[deprecated(note = "Import the `kad` module instead and refer to this type as `kad::Caching`.")] +pub type KademliaCaching = Caching; + +#[deprecated(note = "Import the `kad` module instead and refer to this type as `kad::Event`.")] +pub type KademliaEvent = Event; + +#[deprecated( + note = "Import the `kad` module instead and refer to this type as `kad::ConnectionType`." +)] +pub type KadConnectionType = ConnectionType; diff --git a/protocols/kad/src/protocol.rs b/protocols/kad/src/protocol.rs index d960e650..e6341ee4 100644 --- a/protocols/kad/src/protocol.rs +++ b/protocols/kad/src/protocol.rs @@ -20,7 +20,7 @@ //! The Kademlia connection protocol upgrade and associated message types. //! -//! The connection protocol upgrade is provided by [`KademliaProtocolConfig`], with the +//! The connection protocol upgrade is provided by [`ProtocolConfig`], with the //! request and response types [`KadRequestMsg`] and [`KadResponseMsg`], respectively. //! The upgrade's output is a `Sink + Stream` of messages. The `Stream` component is used //! to poll the underlying transport for incoming messages, and the `Sink` component @@ -28,19 +28,17 @@ use crate::proto; use crate::record_priv::{self, Record}; -use asynchronous_codec::Framed; +use asynchronous_codec::{Decoder, Encoder, Framed}; use bytes::BytesMut; -use codec::UviBytes; use futures::prelude::*; use instant::Instant; use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use quick_protobuf::{BytesReader, Writer}; +use std::marker::PhantomData; use std::{convert::TryFrom, time::Duration}; use std::{io, iter}; -use unsigned_varint::codec; /// The protocol name used for negotiating with multistream-select. 
pub(crate) const DEFAULT_PROTO_NAME: StreamProtocol = StreamProtocol::new("/ipfs/kad/1.0.0"); @@ -48,7 +46,7 @@ pub(crate) const DEFAULT_PROTO_NAME: StreamProtocol = StreamProtocol::new("/ipfs pub(crate) const DEFAULT_MAX_PACKET_SIZE: usize = 16 * 1024; /// Status of our connection to a node reported by the Kademlia protocol. #[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -pub enum KadConnectionType { +pub enum ConnectionType { /// Sender hasn't tried to connect to peer. NotConnected = 0, /// Sender is currently connected to peer. @@ -59,26 +57,26 @@ pub enum KadConnectionType { CannotConnect = 3, } -impl From for KadConnectionType { - fn from(raw: proto::ConnectionType) -> KadConnectionType { +impl From for ConnectionType { + fn from(raw: proto::ConnectionType) -> ConnectionType { use proto::ConnectionType::*; match raw { - NOT_CONNECTED => KadConnectionType::NotConnected, - CONNECTED => KadConnectionType::Connected, - CAN_CONNECT => KadConnectionType::CanConnect, - CANNOT_CONNECT => KadConnectionType::CannotConnect, + NOT_CONNECTED => ConnectionType::NotConnected, + CONNECTED => ConnectionType::Connected, + CAN_CONNECT => ConnectionType::CanConnect, + CANNOT_CONNECT => ConnectionType::CannotConnect, } } } -impl From for proto::ConnectionType { - fn from(val: KadConnectionType) -> Self { +impl From for proto::ConnectionType { + fn from(val: ConnectionType) -> Self { use proto::ConnectionType::*; match val { - KadConnectionType::NotConnected => NOT_CONNECTED, - KadConnectionType::Connected => CONNECTED, - KadConnectionType::CanConnect => CAN_CONNECT, - KadConnectionType::CannotConnect => CANNOT_CONNECT, + ConnectionType::NotConnected => NOT_CONNECTED, + ConnectionType::Connected => CONNECTED, + ConnectionType::CanConnect => CAN_CONNECT, + ConnectionType::CannotConnect => CANNOT_CONNECT, } } } @@ -91,7 +89,7 @@ pub struct KadPeer { /// The multiaddresses that the sender think can be used in order to reach the peer. 
pub multiaddrs: Vec, /// How the sender is connected to that remote. - pub connection_ty: KadConnectionType, + pub connection_ty: ConnectionType, } // Builds a `KadPeer` from a corresponding protobuf message. @@ -137,13 +135,13 @@ impl From for proto::Peer { // only one request, then we can change the output of the `InboundUpgrade` and // `OutboundUpgrade` to be just a single message #[derive(Debug, Clone)] -pub struct KademliaProtocolConfig { +pub struct ProtocolConfig { protocol_names: Vec, /// Maximum allowed size of a packet. max_packet_size: usize, } -impl KademliaProtocolConfig { +impl ProtocolConfig { /// Returns the configured protocol name. pub fn protocol_names(&self) -> &[StreamProtocol] { &self.protocol_names @@ -161,16 +159,16 @@ impl KademliaProtocolConfig { } } -impl Default for KademliaProtocolConfig { +impl Default for ProtocolConfig { fn default() -> Self { - KademliaProtocolConfig { + ProtocolConfig { protocol_names: iter::once(DEFAULT_PROTO_NAME).collect(), max_packet_size: DEFAULT_MAX_PACKET_SIZE, } } } -impl UpgradeInfo for KademliaProtocolConfig { +impl UpgradeInfo for ProtocolConfig { type Info = StreamProtocol; type InfoIter = std::vec::IntoIter; @@ -179,7 +177,43 @@ impl UpgradeInfo for KademliaProtocolConfig { } } -impl InboundUpgrade for KademliaProtocolConfig +/// Codec for Kademlia inbound and outbound message framing. +pub struct Codec { + codec: quick_protobuf_codec::Codec, + __phantom: PhantomData<(A, B)>, +} +impl Codec { + fn new(max_packet_size: usize) -> Self { + Codec { + codec: quick_protobuf_codec::Codec::new(max_packet_size), + __phantom: PhantomData, + } + } +} + +impl, B> Encoder for Codec { + type Error = io::Error; + type Item = A; + + fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + Ok(self.codec.encode(item.into(), dst)?) 
+ } +} +impl> Decoder for Codec { + type Error = io::Error; + type Item = B; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + self.codec.decode(src)?.map(B::try_from).transpose() + } +} + +/// Sink of responses and stream of requests. +pub(crate) type KadInStreamSink = Framed>; +/// Sink of requests and stream of responses. +pub(crate) type KadOutStreamSink = Framed>; + +impl InboundUpgrade for ProtocolConfig where C: AsyncRead + AsyncWrite + Unpin, { @@ -188,36 +222,13 @@ where type Error = io::Error; fn upgrade_inbound(self, incoming: C, _: Self::Info) -> Self::Future { - use quick_protobuf::{MessageRead, MessageWrite}; + let codec = Codec::new(self.max_packet_size); - let mut codec = UviBytes::default(); - codec.set_max_len(self.max_packet_size); - - future::ok( - Framed::new(incoming, codec) - .err_into() - .with::<_, _, fn(_) -> _, _>(|response| { - let proto_struct = resp_msg_to_proto(response); - let mut buf = Vec::with_capacity(proto_struct.get_size()); - let mut writer = Writer::new(&mut buf); - proto_struct - .write_message(&mut writer) - .expect("Encoding to succeed"); - future::ready(Ok(io::Cursor::new(buf))) - }) - .and_then::<_, fn(_) -> _>(|bytes| { - let mut reader = BytesReader::from_bytes(&bytes); - let request = match proto::Message::from_reader(&mut reader, &bytes) { - Ok(r) => r, - Err(err) => return future::ready(Err(err.into())), - }; - future::ready(proto_to_req_msg(request)) - }), - ) + future::ok(Framed::new(incoming, codec)) } } -impl OutboundUpgrade for KademliaProtocolConfig +impl OutboundUpgrade for ProtocolConfig where C: AsyncRead + AsyncWrite + Unpin, { @@ -226,51 +237,12 @@ where type Error = io::Error; fn upgrade_outbound(self, incoming: C, _: Self::Info) -> Self::Future { - use quick_protobuf::{MessageRead, MessageWrite}; + let codec = Codec::new(self.max_packet_size); - let mut codec = UviBytes::default(); - codec.set_max_len(self.max_packet_size); - - future::ok( - Framed::new(incoming, codec) - 
.err_into() - .with::<_, _, fn(_) -> _, _>(|request| { - let proto_struct = req_msg_to_proto(request); - let mut buf = Vec::with_capacity(proto_struct.get_size()); - let mut writer = Writer::new(&mut buf); - proto_struct - .write_message(&mut writer) - .expect("Encoding to succeed"); - future::ready(Ok(io::Cursor::new(buf))) - }) - .and_then::<_, fn(_) -> _>(|bytes| { - let mut reader = BytesReader::from_bytes(&bytes); - let response = match proto::Message::from_reader(&mut reader, &bytes) { - Ok(r) => r, - Err(err) => return future::ready(Err(err.into())), - }; - future::ready(proto_to_resp_msg(response)) - }), - ) + future::ok(Framed::new(incoming, codec)) } } -/// Sink of responses and stream of requests. -pub(crate) type KadInStreamSink = KadStreamSink; -/// Sink of requests and stream of responses. -pub(crate) type KadOutStreamSink = KadStreamSink; -pub(crate) type KadStreamSink = stream::AndThen< - sink::With< - stream::ErrInto>>>, io::Error>, - io::Cursor>, - A, - future::Ready>, io::Error>>, - fn(A) -> future::Ready>, io::Error>>, - >, - future::Ready>, - fn(BytesMut) -> future::Ready>, ->; - /// Request that we can send to a peer or that we received from a peer. #[derive(Debug, Clone, PartialEq, Eq)] pub enum KadRequestMsg { @@ -346,6 +318,31 @@ pub enum KadResponseMsg { }, } +impl From for proto::Message { + fn from(kad_msg: KadRequestMsg) -> Self { + req_msg_to_proto(kad_msg) + } +} +impl From for proto::Message { + fn from(kad_msg: KadResponseMsg) -> Self { + resp_msg_to_proto(kad_msg) + } +} +impl TryFrom for KadRequestMsg { + type Error = io::Error; + + fn try_from(message: proto::Message) -> Result { + proto_to_req_msg(message) + } +} +impl TryFrom for KadResponseMsg { + type Error = io::Error; + + fn try_from(message: proto::Message) -> Result { + proto_to_resp_msg(message) + } +} + /// Converts a `KadRequestMsg` into the corresponding protobuf message for sending. 
fn req_msg_to_proto(kad_msg: KadRequestMsg) -> proto::Message { match kad_msg { @@ -627,7 +624,7 @@ mod tests { use futures::{Future, Sink, Stream}; use libp2p_core::{PeerId, PublicKey, Transport}; use multihash::{encode, Hash}; - use protocol::{KadConnectionType, KadPeer, KademliaProtocolConfig}; + use protocol::{ConnectionType, KadPeer, ProtocolConfig}; use std::sync::mpsc; use std::thread; @@ -644,7 +641,7 @@ mod tests { closer_peers: vec![KadPeer { node_id: PeerId::random(), multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], - connection_ty: KadConnectionType::Connected, + connection_ty: ConnectionType::Connected, }], }); test_one(KadMsg::GetProvidersReq { @@ -654,12 +651,12 @@ mod tests { closer_peers: vec![KadPeer { node_id: PeerId::random(), multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], - connection_ty: KadConnectionType::Connected, + connection_ty: ConnectionType::Connected, }], provider_peers: vec![KadPeer { node_id: PeerId::random(), multiaddrs: vec!["/ip4/200.201.202.203/tcp/1999".parse().unwrap()], - connection_ty: KadConnectionType::NotConnected, + connection_ty: ConnectionType::NotConnected, }], }); test_one(KadMsg::AddProvider { @@ -667,7 +664,7 @@ mod tests { provider_peer: KadPeer { node_id: PeerId::random(), multiaddrs: vec!["/ip4/9.1.2.3/udp/23".parse().unwrap()], - connection_ty: KadConnectionType::Connected, + connection_ty: ConnectionType::Connected, }, }); // TODO: all messages @@ -677,7 +674,7 @@ mod tests { let (tx, rx) = mpsc::channel(); let bg_thread = thread::spawn(move || { - let transport = TcpTransport::default().with_upgrade(KademliaProtocolConfig); + let transport = TcpTransport::default().with_upgrade(ProtocolConfig); let (listener, addr) = transport .listen_on( "/ip4/127.0.0.1/tcp/0".parse().unwrap()) @@ -697,7 +694,7 @@ mod tests { let _ = rt.block_on(future).unwrap(); }); - let transport = TcpTransport::default().with_upgrade(KademliaProtocolConfig); + let transport = 
TcpTransport::default().with_upgrade(ProtocolConfig); let future = transport .dial(rx.recv().unwrap()) diff --git a/protocols/kad/src/query.rs b/protocols/kad/src/query.rs index 6b0a42a0..6cc15861 100644 --- a/protocols/kad/src/query.rs +++ b/protocols/kad/src/query.rs @@ -230,19 +230,19 @@ pub struct QueryId(usize); pub(crate) struct QueryConfig { /// Timeout of a single query. /// - /// See [`crate::behaviour::KademliaConfig::set_query_timeout`] for details. + /// See [`crate::behaviour::Config::set_query_timeout`] for details. pub(crate) timeout: Duration, /// The replication factor to use. /// - /// See [`crate::behaviour::KademliaConfig::set_replication_factor`] for details. + /// See [`crate::behaviour::Config::set_replication_factor`] for details. pub(crate) replication_factor: NonZeroUsize, /// Allowed level of parallelism for iterative queries. /// - /// See [`crate::behaviour::KademliaConfig::set_parallelism`] for details. + /// See [`crate::behaviour::Config::set_parallelism`] for details. pub(crate) parallelism: NonZeroUsize, /// Whether to use disjoint paths on iterative lookups. /// - /// See [`crate::behaviour::KademliaConfig::disjoint_query_paths`] for details. + /// See [`crate::behaviour::Config::disjoint_query_paths`] for details. 
pub(crate) disjoint_query_paths: bool, } diff --git a/protocols/kad/tests/client_mode.rs b/protocols/kad/tests/client_mode.rs index 30fd4d97..bc162ff6 100644 --- a/protocols/kad/tests/client_mode.rs +++ b/protocols/kad/tests/client_mode.rs @@ -1,7 +1,7 @@ use libp2p_identify as identify; use libp2p_identity as identity; use libp2p_kad::store::MemoryStore; -use libp2p_kad::{Kademlia, KademliaConfig, KademliaEvent, Mode}; +use libp2p_kad::{Behaviour, Config, Event, Mode}; use libp2p_swarm::Swarm; use libp2p_swarm_test::SwarmExt; @@ -19,7 +19,7 @@ async fn server_gets_added_to_routing_table_by_client() { match libp2p_swarm_test::drive(&mut client, &mut server).await { ( - [MyBehaviourEvent::Identify(_), MyBehaviourEvent::Identify(_), MyBehaviourEvent::Kad(KademliaEvent::RoutingUpdated { peer, .. })], + [MyBehaviourEvent::Identify(_), MyBehaviourEvent::Identify(_), MyBehaviourEvent::Kad(Event::RoutingUpdated { peer, .. })], [MyBehaviourEvent::Identify(_), MyBehaviourEvent::Identify(_)], ) => { assert_eq!(peer, server_peer_id) @@ -41,7 +41,7 @@ async fn two_servers_add_each_other_to_routing_table() { let server1_peer_id = *server1.local_peer_id(); let server2_peer_id = *server2.local_peer_id(); - use KademliaEvent::*; + use Event::*; use MyBehaviourEvent::*; match libp2p_swarm_test::drive(&mut server1, &mut server2).await { @@ -94,7 +94,7 @@ async fn adding_an_external_addresses_activates_server_mode_on_existing_connecti other => panic!("Unexpected events: {other:?}"), } - use KademliaEvent::*; + use Event::*; // Server learns its external address (this could be through AutoNAT or some other mechanism). server.add_external_address(memory_addr); @@ -127,7 +127,7 @@ async fn set_client_to_server_mode() { match libp2p_swarm_test::drive(&mut client, &mut server).await { ( - [MyBehaviourEvent::Identify(_), MyBehaviourEvent::Identify(_), MyBehaviourEvent::Kad(KademliaEvent::RoutingUpdated { peer, .. 
})], + [MyBehaviourEvent::Identify(_), MyBehaviourEvent::Identify(_), MyBehaviourEvent::Kad(Event::RoutingUpdated { peer, .. })], [MyBehaviourEvent::Identify(_), MyBehaviourEvent::Identify(identify::Event::Received { info, .. })], ) => { assert_eq!(peer, server_peer_id); @@ -159,7 +159,7 @@ async fn set_client_to_server_mode() { #[behaviour(prelude = "libp2p_swarm::derive_prelude")] struct MyBehaviour { identify: identify::Behaviour, - kad: Kademlia, + kad: Behaviour, } impl MyBehaviour { @@ -171,10 +171,10 @@ impl MyBehaviour { "/test/1.0.0".to_owned(), k.public(), )), - kad: Kademlia::with_config( + kad: Behaviour::with_config( local_peer_id, MemoryStore::new(local_peer_id), - KademliaConfig::default(), + Config::default(), ), } } diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index 14a30e6b..c200c478 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -20,10 +20,10 @@ libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } log = "0.4.20" rand = "0.8.3" -smallvec = "1.11.0" -socket2 = { version = "0.5.3", features = ["all"] } -tokio = { version = "1.31", default-features = false, features = ["net", "time"], optional = true} -trust-dns-proto = { version = "0.22.0", default-features = false, features = ["mdns", "tokio-runtime"] } +smallvec = "1.11.1" +socket2 = { version = "0.5.4", features = ["all"] } +tokio = { version = "1.32", default-features = false, features = ["net", "time"], optional = true} +trust-dns-proto = { version = "0.23.0", default-features = false, features = ["mdns"] } void = "1.0.2" [features] @@ -37,7 +37,7 @@ libp2p-noise = { workspace = true } libp2p-swarm = { workspace = true, features = ["tokio", "async-std"] } libp2p-tcp = { workspace = true, features = ["tokio", "async-io"] } libp2p-yamux = { workspace = true } -tokio = { version = "1.31", default-features = false, features = ["macros", "rt", "rt-multi-thread", "time"] } +tokio = { version = "1.32", default-features = false, 
features = ["macros", "rt", "rt-multi-thread", "time"] } libp2p-swarm-test = { path = "../../swarm-test" } [[test]] diff --git a/protocols/perf/Cargo.toml b/protocols/perf/Cargo.toml index 499cfd82..41b91ea1 100644 --- a/protocols/perf/Cargo.toml +++ b/protocols/perf/Cargo.toml @@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] anyhow = "1" async-trait = "0.1" -clap = { version = "4.3.21", features = ["derive"] } +clap = { version = "4.3.23", features = ["derive"] } env_logger = "0.10.0" futures = "0.3.28" instant = "0.1.12" @@ -30,7 +30,7 @@ log = "0.4" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" -tokio = { version = "1.31.0", features = ["full"] } +tokio = { version = "1.32.0", features = ["full"] } void = "1" [dev-dependencies] diff --git a/protocols/perf/src/bin/perf.rs b/protocols/perf/src/bin/perf.rs index b6b09060..4205cc38 100644 --- a/protocols/perf/src/bin/perf.rs +++ b/protocols/perf/src/bin/perf.rs @@ -405,7 +405,7 @@ async fn swarm() -> Result> { libp2p_quic::tokio::Transport::new(config) }; - let dns = libp2p_dns::TokioDnsConfig::system(OrTransport::new(quic, tcp))?; + let dns = libp2p_dns::tokio::Transport::system(OrTransport::new(quic, tcp))?; dns.map(|either_output, _| match either_output { Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), @@ -428,14 +428,12 @@ async fn connect( let start = Instant::now(); swarm.dial(server_address.clone()).unwrap(); - let server_peer_id = loop { - match swarm.next().await.unwrap() { - SwarmEvent::ConnectionEstablished { peer_id, .. } => break peer_id, - SwarmEvent::OutgoingConnectionError { peer_id, error, .. } => { - bail!("Outgoing connection error to {:?}: {:?}", peer_id, error); - } - e => panic!("{e:?}"), + let server_peer_id = match swarm.next().await.unwrap() { + SwarmEvent::ConnectionEstablished { peer_id, .. } => peer_id, + SwarmEvent::OutgoingConnectionError { peer_id, error, .. 
} => { + bail!("Outgoing connection error to {:?}: {:?}", peer_id, error); } + e => panic!("{e:?}"), }; let duration = start.elapsed(); diff --git a/protocols/ping/CHANGELOG.md b/protocols/ping/CHANGELOG.md index fd0c0824..db68d375 100644 --- a/protocols/ping/CHANGELOG.md +++ b/protocols/ping/CHANGELOG.md @@ -1,4 +1,12 @@ -## 0.43.0 +## 0.43.1 + +- Honor ping interval in case of errors. + Previously, we would immediately open another ping stream if the current one failed. + See [PR 4423]. + +[PR 4423]: https://github.com/libp2p/rust-libp2p/pull/4423 + +## 0.43.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index f66222ef..29cf5985 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-ping" edition = "2021" rust-version = { workspace = true } description = "Ping protocol for libp2p" -version = "0.43.0" +version = "0.43.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 0cf5c6e5..52266319 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -303,6 +303,7 @@ impl ConnectionHandler for Handler { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok(rtt))); } Poll::Ready(Err(e)) => { + self.interval.reset(self.config.interval); self.pending_errors.push_front(e); } }, @@ -321,13 +322,16 @@ impl ConnectionHandler for Handler { self.outbound = Some(OutboundState::OpenStream); break; } - None => { - self.outbound = Some(OutboundState::OpenStream); - let protocol = SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()); - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol, - }); - } + None => match self.interval.poll_unpin(cx) { + Poll::Pending => break, + Poll::Ready(()) => { + self.outbound = Some(OutboundState::OpenStream); + let protocol = 
SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()); + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol, + }); + } + }, } } diff --git a/protocols/ping/src/protocol.rs b/protocols/ping/src/protocol.rs index 59e583a8..28549e1c 100644 --- a/protocols/ping/src/protocol.rs +++ b/protocols/ping/src/protocol.rs @@ -35,8 +35,7 @@ pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/ping/1.0.0" /// /// At most a single inbound and outbound substream is kept open at /// any time. In case of a ping timeout or another error on a substream, the -/// substream is dropped. If a configurable number of consecutive -/// outbound pings fail, the connection is closed. +/// substream is dropped. /// /// Successful pings report the round-trip time. /// diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index 63836a15..946a2daa 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -21,8 +21,8 @@ //! Integration tests for the `Ping` network behaviour. 
use libp2p_ping as ping; -use libp2p_swarm::keep_alive; -use libp2p_swarm::{NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p_swarm::dummy; +use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use quickcheck::*; use std::{num::NonZeroU8, time::Duration}; @@ -32,18 +32,16 @@ fn ping_pong() { fn prop(count: NonZeroU8) { let cfg = ping::Config::new().with_interval(Duration::from_millis(10)); - let mut swarm1 = Swarm::new_ephemeral(|_| Behaviour::new(cfg.clone())); - let mut swarm2 = Swarm::new_ephemeral(|_| Behaviour::new(cfg.clone())); + let mut swarm1 = Swarm::new_ephemeral(|_| ping::Behaviour::new(cfg.clone())); + let mut swarm2 = Swarm::new_ephemeral(|_| ping::Behaviour::new(cfg.clone())); async_std::task::block_on(async { swarm1.listen().await; swarm2.connect(&mut swarm1).await; for _ in 0..count.get() { - let (e1, e2) = match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await { - ([BehaviourEvent::Ping(e1)], [BehaviourEvent::Ping(e2)]) => (e1, e2), - events => panic!("Unexpected events: {events:?}"), - }; + let ([e1], [e2]): ([ping::Event; 1], [ping::Event; 1]) = + libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await; assert_eq!(&e1.peer, swarm2.local_peer_id()); assert_eq!(&e2.peer, swarm1.local_peer_id()); @@ -65,8 +63,8 @@ fn assert_ping_rtt_less_than_50ms(e: ping::Event) { #[test] fn unsupported_doesnt_fail() { - let mut swarm1 = Swarm::new_ephemeral(|_| keep_alive::Behaviour); - let mut swarm2 = Swarm::new_ephemeral(|_| Behaviour::new(ping::Config::new())); + let mut swarm1 = Swarm::new_ephemeral(|_| dummy::Behaviour); + let mut swarm2 = Swarm::new_ephemeral(|_| ping::Behaviour::new(ping::Config::new())); let result = async_std::task::block_on(async { swarm1.listen().await; @@ -76,10 +74,10 @@ fn unsupported_doesnt_fail() { loop { match swarm2.next_swarm_event().await { - SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + SwarmEvent::Behaviour(ping::Event { result: Err(ping::Failure::Unsupported), .. 
- })) => { + }) => { swarm2.disconnect_peer_id(swarm1_peer_id).unwrap(); } SwarmEvent::ConnectionClosed { cause: Some(e), .. } => { @@ -95,19 +93,3 @@ fn unsupported_doesnt_fail() { result.expect("node with ping should not fail connection due to unsupported protocol"); } - -#[derive(NetworkBehaviour, Default)] -#[behaviour(prelude = "libp2p_swarm::derive_prelude")] -struct Behaviour { - keep_alive: keep_alive::Behaviour, - ping: ping::Behaviour, -} - -impl Behaviour { - fn new(config: ping::Config) -> Self { - Self { - keep_alive: keep_alive::Behaviour, - ping: ping::Behaviour::new(config), - } - } -} diff --git a/protocols/relay/Cargo.toml b/protocols/relay/Cargo.toml index 31f6cc16..a13ba2bb 100644 --- a/protocols/relay/Cargo.toml +++ b/protocols/relay/Cargo.toml @@ -16,6 +16,7 @@ bytes = "1" either = "1.9.0" futures = "0.3.28" futures-timer = "3" +futures-bounded = { workspace = true } instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs index 8fcfa103..9e49b9ce 100644 --- a/protocols/relay/src/behaviour.rs +++ b/protocols/relay/src/behaviour.rs @@ -20,7 +20,7 @@ //! [`NetworkBehaviour`] to act as a circuit relay v2 **relay**. -mod handler; +pub(crate) mod handler; pub(crate) mod rate_limiter; use crate::behaviour::handler::Handler; use crate::multiaddr_ext::MultiaddrExt; @@ -41,7 +41,6 @@ use std::num::NonZeroU32; use std::ops::Add; use std::task::{Context, Poll}; use std::time::Duration; -use void::Void; /// Configuration for the relay [`Behaviour`]. /// @@ -230,7 +229,7 @@ pub struct Behaviour { circuits: CircuitsTracker, /// Queue of actions to return when polled. - queued_actions: VecDeque, + queued_actions: VecDeque>>, external_addresses: ExternalAddresses, } @@ -269,14 +268,12 @@ impl Behaviour { // Only emit [`CircuitClosed`] for accepted requests. 
.filter(|c| matches!(c.status, CircuitStatus::Accepted)) { - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitClosed { + self.queued_actions + .push_back(ToSwarm::GenerateEvent(Event::CircuitClosed { src_peer_id: circuit.src_peer_id, dst_peer_id: circuit.dst_peer_id, error: Some(std::io::ErrorKind::ConnectionAborted.into()), - }) - .into(), - ); + })); } } } @@ -414,7 +411,6 @@ impl NetworkBehaviour for Behaviour { status: proto::Status::RESOURCE_LIMIT_EXCEEDED, }), } - .into() } else { // Accept reservation. self.reservations @@ -422,10 +418,22 @@ impl NetworkBehaviour for Behaviour { .or_default() .insert(connection); - Action::AcceptReservationPrototype { + ToSwarm::NotifyHandler { handler: NotifyHandler::One(connection), peer_id: event_source, - inbound_reservation_req, + event: Either::Left(handler::In::AcceptReservationReq { + inbound_reservation_req, + addrs: self + .external_addresses + .iter() + .cloned() + // Add local peer ID in case it isn't present yet. + .filter_map(|a| match a.iter().last()? 
{ + Protocol::P2p(_) => Some(a), + _ => Some(a.with(Protocol::P2p(self.local_peer_id))), + }) + .collect(), + }), } }; @@ -439,39 +447,35 @@ impl NetworkBehaviour for Behaviour { .or_default() .insert(connection); - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::ReservationReqAccepted { + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::ReservationReqAccepted { src_peer_id: event_source, renewed, - }) - .into(), - ); + }, + )); } handler::Event::ReservationReqAcceptFailed { error } => { - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::ReservationReqAcceptFailed { + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::ReservationReqAcceptFailed { src_peer_id: event_source, error, - }) - .into(), - ); + }, + )); } handler::Event::ReservationReqDenied {} => { - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::ReservationReqDenied { + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::ReservationReqDenied { src_peer_id: event_source, - }) - .into(), - ); + }, + )); } handler::Event::ReservationReqDenyFailed { error } => { - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::ReservationReqDenyFailed { + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::ReservationReqDenyFailed { src_peer_id: event_source, error, - }) - .into(), - ); + }, + )); } handler::Event::ReservationTimedOut {} => { match self.reservations.entry(event_source) { @@ -490,12 +494,10 @@ impl NetworkBehaviour for Behaviour { } } - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::ReservationTimedOut { + self.queued_actions + .push_back(ToSwarm::GenerateEvent(Event::ReservationTimedOut { src_peer_id: event_source, - }) - .into(), - ); + })); } handler::Event::CircuitReqReceived { inbound_circuit_req, @@ -565,7 +567,7 @@ impl NetworkBehaviour for Behaviour { }), } }; - self.queued_actions.push_back(action.into()); + self.queued_actions.push_back(action); } 
handler::Event::CircuitReqDenied { circuit_id, @@ -575,13 +577,11 @@ impl NetworkBehaviour for Behaviour { self.circuits.remove(circuit_id); } - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitReqDenied { + self.queued_actions + .push_back(ToSwarm::GenerateEvent(Event::CircuitReqDenied { src_peer_id: event_source, dst_peer_id, - }) - .into(), - ); + })); } handler::Event::CircuitReqDenyFailed { circuit_id, @@ -592,14 +592,13 @@ impl NetworkBehaviour for Behaviour { self.circuits.remove(circuit_id); } - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitReqDenyFailed { + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::CircuitReqDenyFailed { src_peer_id: event_source, dst_peer_id, error, - }) - .into(), - ); + }, + )); } handler::Event::OutboundConnectNegotiated { circuit_id, @@ -610,21 +609,18 @@ impl NetworkBehaviour for Behaviour { dst_stream, dst_pending_data, } => { - self.queued_actions.push_back( - ToSwarm::NotifyHandler { - handler: NotifyHandler::One(src_connection_id), - peer_id: src_peer_id, - event: Either::Left(handler::In::AcceptAndDriveCircuit { - circuit_id, - dst_peer_id: event_source, - inbound_circuit_req, - dst_handler_notifier, - dst_stream, - dst_pending_data, - }), - } - .into(), - ); + self.queued_actions.push_back(ToSwarm::NotifyHandler { + handler: NotifyHandler::One(src_connection_id), + peer_id: src_peer_id, + event: Either::Left(handler::In::AcceptAndDriveCircuit { + circuit_id, + dst_peer_id: event_source, + inbound_circuit_req, + dst_handler_notifier, + dst_stream, + dst_pending_data, + }), + }); } handler::Event::OutboundConnectNegotiationFailed { circuit_id, @@ -634,39 +630,33 @@ impl NetworkBehaviour for Behaviour { status, error, } => { - self.queued_actions.push_back( - ToSwarm::NotifyHandler { - handler: NotifyHandler::One(src_connection_id), - peer_id: src_peer_id, - event: Either::Left(handler::In::DenyCircuitReq { - circuit_id: Some(circuit_id), - inbound_circuit_req, - 
status, - }), - } - .into(), - ); - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitReqOutboundConnectFailed { + self.queued_actions.push_back(ToSwarm::NotifyHandler { + handler: NotifyHandler::One(src_connection_id), + peer_id: src_peer_id, + event: Either::Left(handler::In::DenyCircuitReq { + circuit_id: Some(circuit_id), + inbound_circuit_req, + status, + }), + }); + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::CircuitReqOutboundConnectFailed { src_peer_id, dst_peer_id: event_source, error, - }) - .into(), - ); + }, + )); } handler::Event::CircuitReqAccepted { dst_peer_id, circuit_id, } => { self.circuits.accepted(circuit_id); - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitReqAccepted { + self.queued_actions + .push_back(ToSwarm::GenerateEvent(Event::CircuitReqAccepted { src_peer_id: event_source, dst_peer_id, - }) - .into(), - ); + })); } handler::Event::CircuitReqAcceptFailed { dst_peer_id, @@ -674,14 +664,13 @@ impl NetworkBehaviour for Behaviour { error, } => { self.circuits.remove(circuit_id); - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitReqAcceptFailed { + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::CircuitReqAcceptFailed { src_peer_id: event_source, dst_peer_id, error, - }) - .into(), - ); + }, + )); } handler::Event::CircuitClosed { dst_peer_id, @@ -690,14 +679,12 @@ impl NetworkBehaviour for Behaviour { } => { self.circuits.remove(circuit_id); - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitClosed { + self.queued_actions + .push_back(ToSwarm::GenerateEvent(Event::CircuitClosed { src_peer_id: event_source, dst_peer_id, error, - }) - .into(), - ); + })); } } } @@ -707,8 +694,8 @@ impl NetworkBehaviour for Behaviour { _cx: &mut Context<'_>, _: &mut impl PollParameters, ) -> Poll>> { - if let Some(action) = self.queued_actions.pop_front() { - return Poll::Ready(action.build(self.local_peer_id, &self.external_addresses)); + 
if let Some(to_swarm) = self.queued_actions.pop_front() { + return Poll::Ready(to_swarm); } Poll::Pending @@ -804,53 +791,3 @@ impl Add for CircuitId { CircuitId(self.0 + rhs) } } - -/// A [`ToSwarm`], either complete, or still requiring data from [`PollParameters`] -/// before being returned in [`Behaviour::poll`]. -#[allow(clippy::large_enum_variant)] -enum Action { - Done(ToSwarm>), - AcceptReservationPrototype { - inbound_reservation_req: inbound_hop::ReservationReq, - handler: NotifyHandler, - peer_id: PeerId, - }, -} - -impl From>> for Action { - fn from(action: ToSwarm>) -> Self { - Self::Done(action) - } -} - -impl Action { - fn build( - self, - local_peer_id: PeerId, - external_addresses: &ExternalAddresses, - ) -> ToSwarm> { - match self { - Action::Done(action) => action, - Action::AcceptReservationPrototype { - inbound_reservation_req, - handler, - peer_id, - } => ToSwarm::NotifyHandler { - handler, - peer_id, - event: Either::Left(handler::In::AcceptReservationReq { - inbound_reservation_req, - addrs: external_addresses - .iter() - .cloned() - // Add local peer ID in case it isn't present yet. - .filter_map(|a| match a.iter().last()? 
{ - Protocol::P2p(_) => Some(a), - _ => Some(a.with(Protocol::P2p(local_peer_id))), - }) - .collect(), - }), - }, - } - } -} diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index 9c1b8524..895228e8 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -20,8 +20,8 @@ use crate::behaviour::CircuitId; use crate::copy_future::CopyFuture; -use crate::proto; use crate::protocol::{inbound_hop, outbound_stop}; +use crate::{proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; use bytes::Bytes; use either::Either; use futures::channel::oneshot::{self, Canceled}; @@ -30,21 +30,24 @@ use futures::io::AsyncWriteExt; use futures::stream::{FuturesUnordered, StreamExt}; use futures_timer::Delay; use instant::Instant; +use libp2p_core::upgrade::ReadyUpgrade; use libp2p_core::{ConnectedPoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - ListenUpgradeError, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionId, KeepAlive, Stream, StreamUpgradeError, - SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, ConnectionId, KeepAlive, Stream, StreamProtocol, + StreamUpgradeError, SubstreamProtocol, }; use std::collections::VecDeque; use std::fmt; use std::task::{Context, Poll}; use std::time::Duration; +const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; +const STREAM_TIMEOUT: Duration = Duration::from_secs(60); + #[derive(Debug, Clone)] pub struct Config { pub reservation_duration: Duration, @@ -174,7 +177,7 @@ pub enum Event { dst_peer_id: PeerId, error: inbound_hop::UpgradeError, }, - /// An inbound cirucit request has been accepted. + /// An inbound circuit request has been accepted. CircuitReqAccepted { circuit_id: CircuitId, dst_peer_id: PeerId, @@ -363,7 +366,7 @@ pub struct Handler { /// Futures accepting an inbound circuit request. 
circuit_accept_futures: Futures>, - /// Futures deying an inbound circuit request. + /// Futures denying an inbound circuit request. circuit_deny_futures: Futures<( Option, PeerId, @@ -380,11 +383,30 @@ pub struct Handler { alive_lend_out_substreams: FuturesUnordered>, /// Futures relaying data for circuit between two peers. circuits: Futures<(CircuitId, PeerId, Result<(), std::io::Error>)>, + + pending_connect_requests: VecDeque, + + workers: futures_bounded::FuturesSet< + Either< + Result< + Either, + inbound_hop::FatalUpgradeError, + >, + Result< + Result, + outbound_stop::FatalUpgradeError, + >, + >, + >, } impl Handler { pub fn new(config: Config, endpoint: ConnectedPoint) -> Handler { Handler { + workers: futures_bounded::FuturesSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), endpoint, config, queued_events: Default::default(), @@ -396,93 +418,49 @@ impl Handler { circuits: Default::default(), active_reservation: Default::default(), keep_alive: KeepAlive::Yes, + pending_connect_requests: Default::default(), } } - fn on_fully_negotiated_inbound( - &mut self, - FullyNegotiatedInbound { - protocol: request, .. 
- }: FullyNegotiatedInbound< - ::InboundProtocol, - ::InboundOpenInfo, - >, - ) { - match request { - inbound_hop::Req::Reserve(inbound_reservation_req) => { - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::ReservationReqReceived { - inbound_reservation_req, - endpoint: self.endpoint.clone(), - renewed: self.active_reservation.is_some(), - }, - )); - } - inbound_hop::Req::Connect(inbound_circuit_req) => { - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::CircuitReqReceived { - inbound_circuit_req, - endpoint: self.endpoint.clone(), - }, - )); - } + fn on_fully_negotiated_inbound(&mut self, stream: Stream) { + if self + .workers + .try_push( + inbound_hop::handle_inbound_request( + stream, + self.config.reservation_duration, + self.config.max_circuit_duration, + self.config.max_circuit_bytes, + ) + .map(Either::Left), + ) + .is_err() + { + log::warn!("Dropping inbound stream because we are at capacity") } } - fn on_fully_negotiated_outbound( - &mut self, - FullyNegotiatedOutbound { - protocol: (dst_stream, dst_pending_data), - info: outbound_open_info, - }: FullyNegotiatedOutbound< - ::OutboundProtocol, - ::OutboundOpenInfo, - >, - ) { - let OutboundOpenInfo { - circuit_id, - inbound_circuit_req, - src_peer_id, - src_connection_id, - } = outbound_open_info; + fn on_fully_negotiated_outbound(&mut self, stream: Stream) { + let stop_command = self + .pending_connect_requests + .pop_front() + .expect("opened a stream without a pending stop command"); + let (tx, rx) = oneshot::channel(); self.alive_lend_out_substreams.push(rx); - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundConnectNegotiated { - circuit_id, - src_peer_id, - src_connection_id, - inbound_circuit_req, - dst_handler_notifier: tx, - dst_stream, - dst_pending_data, - }, - )); - } - - fn on_listen_upgrade_error( - &mut self, - ListenUpgradeError { - error: inbound_hop::UpgradeError::Fatal(error), - 
.. - }: ListenUpgradeError< - ::InboundOpenInfo, - ::InboundProtocol, - >, - ) { - self.pending_error = Some(StreamUpgradeError::Apply(Either::Left(error))); + if self + .workers + .try_push(outbound_stop::connect(stream, stop_command, tx).map(Either::Right)) + .is_err() + { + log::warn!("Dropping outbound stream because we are at capacity") + } } fn on_dial_upgrade_error( &mut self, - DialUpgradeError { - info: open_info, - error, - }: DialUpgradeError< + DialUpgradeError { error, .. }: DialUpgradeError< ::OutboundOpenInfo, ::OutboundProtocol, >, @@ -502,39 +480,21 @@ impl Handler { self.pending_error = Some(StreamUpgradeError::Io(e)); return; } - StreamUpgradeError::Apply(error) => match error { - outbound_stop::UpgradeError::Fatal(error) => { - self.pending_error = Some(StreamUpgradeError::Apply(Either::Right(error))); - return; - } - outbound_stop::UpgradeError::CircuitFailed(error) => { - let status = match error { - outbound_stop::CircuitFailedReason::ResourceLimitExceeded => { - proto::Status::RESOURCE_LIMIT_EXCEEDED - } - outbound_stop::CircuitFailedReason::PermissionDenied => { - proto::Status::PERMISSION_DENIED - } - }; - (StreamUpgradeError::Apply(error), status) - } - }, + StreamUpgradeError::Apply(v) => void::unreachable(v), }; - let OutboundOpenInfo { - circuit_id, - inbound_circuit_req, - src_peer_id, - src_connection_id, - } = open_info; + let stop_command = self + .pending_connect_requests + .pop_front() + .expect("failed to open a stream without a pending stop command"); self.queued_events .push_back(ConnectionHandlerEvent::NotifyBehaviour( Event::OutboundConnectNegotiationFailed { - circuit_id, - src_peer_id, - src_connection_id, - inbound_circuit_req, + circuit_id: stop_command.circuit_id, + src_peer_id: stop_command.src_peer_id, + src_connection_id: stop_command.src_connection_id, + inbound_circuit_req: stop_command.inbound_circuit_req, status, error: non_fatal_error, }, @@ -555,20 +515,13 @@ impl ConnectionHandler for Handler { type Error = 
StreamUpgradeError< Either, >; - type InboundProtocol = inbound_hop::Upgrade; - type OutboundProtocol = outbound_stop::Upgrade; - type OutboundOpenInfo = OutboundOpenInfo; + type InboundProtocol = ReadyUpgrade; type InboundOpenInfo = (); + type OutboundProtocol = ReadyUpgrade; + type OutboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new( - inbound_hop::Upgrade { - reservation_duration: self.config.reservation_duration, - max_circuit_duration: self.config.max_circuit_duration, - max_circuit_bytes: self.config.max_circuit_bytes, - }, - (), - ) + SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()) } fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { @@ -580,7 +533,7 @@ impl ConnectionHandler for Handler { if self .reservation_request_future .replace(ReservationRequestFuture::Accepting( - inbound_reservation_req.accept(addrs).boxed(), + inbound_reservation_req.accept(addrs).err_into().boxed(), )) .is_some() { @@ -594,7 +547,7 @@ impl ConnectionHandler for Handler { if self .reservation_request_future .replace(ReservationRequestFuture::Denying( - inbound_reservation_req.deny(status).boxed(), + inbound_reservation_req.deny(status).err_into().boxed(), )) .is_some() { @@ -607,21 +560,17 @@ impl ConnectionHandler for Handler { src_peer_id, src_connection_id, } => { + self.pending_connect_requests + .push_back(outbound_stop::PendingConnect::new( + circuit_id, + inbound_circuit_req, + src_peer_id, + src_connection_id, + &self.config, + )); self.queued_events .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new( - outbound_stop::Upgrade { - src_peer_id, - max_circuit_duration: self.config.max_circuit_duration, - max_circuit_bytes: self.config.max_circuit_bytes, - }, - OutboundOpenInfo { - circuit_id, - inbound_circuit_req, - src_peer_id, - src_connection_id, - }, - ), + protocol: SubstreamProtocol::new(ReadyUpgrade::new(STOP_PROTOCOL_NAME), ()), }); } 
In::DenyCircuitReq { @@ -633,6 +582,7 @@ impl ConnectionHandler for Handler { self.circuit_deny_futures.push( inbound_circuit_req .deny(status) + .err_into() .map(move |result| (circuit_id, dst_peer_id, result)) .boxed(), ); @@ -648,6 +598,7 @@ impl ConnectionHandler for Handler { self.circuit_accept_futures.push( inbound_circuit_req .accept() + .err_into() .map_ok(move |(src_stream, src_pending_data)| CircuitParts { circuit_id, src_stream, @@ -716,6 +667,66 @@ impl ConnectionHandler for Handler { } } + // Process protocol requests + match self.workers.poll_unpin(cx) { + Poll::Ready(Ok(Either::Left(Ok(Either::Left(inbound_reservation_req))))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::ReservationReqReceived { + inbound_reservation_req, + endpoint: self.endpoint.clone(), + renewed: self.active_reservation.is_some(), + }, + )); + } + Poll::Ready(Ok(Either::Left(Ok(Either::Right(inbound_circuit_req))))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::CircuitReqReceived { + inbound_circuit_req, + endpoint: self.endpoint.clone(), + }, + )); + } + Poll::Ready(Ok(Either::Right(Ok(Ok(circuit))))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectNegotiated { + circuit_id: circuit.circuit_id, + src_peer_id: circuit.src_peer_id, + src_connection_id: circuit.src_connection_id, + inbound_circuit_req: circuit.inbound_circuit_req, + dst_handler_notifier: circuit.dst_handler_notifier, + dst_stream: circuit.dst_stream, + dst_pending_data: circuit.dst_pending_data, + }, + )); + } + Poll::Ready(Ok(Either::Right(Ok(Err(circuit_failed))))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectNegotiationFailed { + circuit_id: circuit_failed.circuit_id, + src_peer_id: circuit_failed.src_peer_id, + src_connection_id: circuit_failed.src_connection_id, + inbound_circuit_req: circuit_failed.inbound_circuit_req, + status: circuit_failed.status, + error: 
circuit_failed.error, + }, + )); + } + Poll::Ready(Err(futures_bounded::Timeout { .. })) => { + return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Timeout)); + } + Poll::Ready(Ok(Either::Left(Err(e)))) => { + return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( + Either::Left(e), + ))); + } + Poll::Ready(Ok(Either::Right(Err(e)))) => { + return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( + Either::Right(e), + ))); + } + Poll::Pending => {} + } + // Deny new circuits. if let Poll::Ready(Some((circuit_id, dst_peer_id, result))) = self.circuit_deny_futures.poll_next_unpin(cx) @@ -896,33 +907,30 @@ impl ConnectionHandler for Handler { >, ) { match event { - ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { - self.on_fully_negotiated_inbound(fully_negotiated_inbound) + ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { + protocol: stream, + .. + }) => { + self.on_fully_negotiated_inbound(stream); } - ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { - self.on_fully_negotiated_outbound(fully_negotiated_outbound) - } - ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { - self.on_listen_upgrade_error(listen_upgrade_error) + ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { + protocol: stream, + .. 
+ }) => { + self.on_fully_negotiated_outbound(stream); } ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { - self.on_dial_upgrade_error(dial_upgrade_error) + self.on_dial_upgrade_error(dial_upgrade_error); } ConnectionEvent::AddressChange(_) + | ConnectionEvent::ListenUpgradeError(_) | ConnectionEvent::LocalProtocolsChange(_) | ConnectionEvent::RemoteProtocolsChange(_) => {} } } } -pub struct OutboundOpenInfo { - circuit_id: CircuitId, - inbound_circuit_req: inbound_hop::CircuitReq, - src_peer_id: PeerId, - src_connection_id: ConnectionId, -} - -pub(crate) struct CircuitParts { +struct CircuitParts { circuit_id: CircuitId, src_stream: Stream, src_pending_data: Bytes, diff --git a/protocols/relay/src/priv_client.rs b/protocols/relay/src/priv_client.rs index c3c80c5b..d4f0c07c 100644 --- a/protocols/relay/src/priv_client.rs +++ b/protocols/relay/src/priv_client.rs @@ -20,7 +20,7 @@ //! [`NetworkBehaviour`] to act as a circuit relay v2 **client**. -mod handler; +pub(crate) mod handler; pub(crate) mod transport; use crate::multiaddr_ext::MultiaddrExt; @@ -163,7 +163,6 @@ impl NetworkBehaviour for Behaviour { if local_addr.is_relayed() { return Ok(Either::Right(dummy::ConnectionHandler)); } - let mut handler = Handler::new(self.local_peer_id, peer, remote_addr.clone()); if let Some(event) = self.pending_handler_commands.remove(&connection_id) { @@ -378,10 +377,10 @@ impl NetworkBehaviour for Behaviour { /// /// Internally, this uses a stream to the relay. pub struct Connection { - state: ConnectionState, + pub(crate) state: ConnectionState, } -enum ConnectionState { +pub(crate) enum ConnectionState { InboundAccepting { accept: BoxFuture<'static, Result>, }, diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index 9613d7d6..25488ac3 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -19,27 +19,30 @@ // DEALINGS IN THE SOFTWARE. 
use crate::priv_client::transport; -use crate::proto; use crate::protocol::{self, inbound_stop, outbound_hop}; +use crate::{proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; use either::Either; use futures::channel::{mpsc, oneshot}; use futures::future::{BoxFuture, FutureExt}; use futures::sink::SinkExt; use futures::stream::{FuturesUnordered, StreamExt}; +use futures::TryFutureExt; +use futures_bounded::{PushError, Timeout}; use futures_timer::Delay; use instant::Instant; use libp2p_core::multiaddr::Protocol; +use libp2p_core::upgrade::ReadyUpgrade; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - ListenUpgradeError, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, KeepAlive, StreamUpgradeError, SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, KeepAlive, StreamProtocol, StreamUpgradeError, + SubstreamProtocol, }; use log::debug; -use std::collections::{HashMap, VecDeque}; +use std::collections::VecDeque; use std::fmt; use std::task::{Context, Poll}; use std::time::Duration; @@ -48,6 +51,10 @@ use std::time::Duration; /// /// Circuits to be denied exceeding the limit are dropped. const MAX_NUMBER_DENYING_CIRCUIT: usize = 8; +const DENYING_CIRCUIT_TIMEOUT: Duration = Duration::from_secs(60); + +const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; +const STREAM_TIMEOUT: Duration = Duration::from_secs(60); pub enum In { Reserve { @@ -121,10 +128,21 @@ pub struct Handler { /// Queue of events to return when polled. 
queued_events: VecDeque< ConnectionHandlerEvent< - ::OutboundProtocol, - ::OutboundOpenInfo, - ::ToBehaviour, - ::Error, + ::OutboundProtocol, + ::OutboundOpenInfo, + ::ToBehaviour, + ::Error, + >, + >, + + wait_for_outbound_stream: VecDeque, + outbound_circuits: futures_bounded::FuturesSet< + Result< + Either< + Result, + Result, outbound_hop::CircuitFailedReason>, + >, + outbound_hop::FatalUpgradeError, >, >, @@ -140,8 +158,10 @@ pub struct Handler { /// eventually. alive_lend_out_substreams: FuturesUnordered>, - circuit_deny_futs: - HashMap>>, + open_circuit_futs: + futures_bounded::FuturesSet>, + + circuit_deny_futs: futures_bounded::FuturesMap>, /// Futures that try to send errors to the transport. /// @@ -158,163 +178,38 @@ impl Handler { remote_addr, queued_events: Default::default(), pending_error: Default::default(), + wait_for_outbound_stream: Default::default(), + outbound_circuits: futures_bounded::FuturesSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), reservation: Reservation::None, alive_lend_out_substreams: Default::default(), - circuit_deny_futs: Default::default(), + open_circuit_futs: futures_bounded::FuturesSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), + circuit_deny_futs: futures_bounded::FuturesMap::new( + DENYING_CIRCUIT_TIMEOUT, + MAX_NUMBER_DENYING_CIRCUIT, + ), send_error_futs: Default::default(), keep_alive: KeepAlive::Yes, } } - fn on_fully_negotiated_inbound( - &mut self, - FullyNegotiatedInbound { - protocol: inbound_circuit, - .. - }: FullyNegotiatedInbound< - ::InboundProtocol, - ::InboundOpenInfo, - >, - ) { - match &mut self.reservation { - Reservation::Accepted { pending_msgs, .. } - | Reservation::Renewing { pending_msgs, .. 
} => { - let src_peer_id = inbound_circuit.src_peer_id(); - let limit = inbound_circuit.limit(); - - let (tx, rx) = oneshot::channel(); - self.alive_lend_out_substreams.push(rx); - let connection = super::ConnectionState::new_inbound(inbound_circuit, tx); - - pending_msgs.push_back(transport::ToListenerMsg::IncomingRelayedConnection { - // stream: connection, - stream: super::Connection { state: connection }, - src_peer_id, - relay_peer_id: self.remote_peer_id, - relay_addr: self.remote_addr.clone(), - }); - - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::InboundCircuitEstablished { src_peer_id, limit }, - )); - } - Reservation::None => { - let src_peer_id = inbound_circuit.src_peer_id(); - - if self.circuit_deny_futs.len() == MAX_NUMBER_DENYING_CIRCUIT - && !self.circuit_deny_futs.contains_key(&src_peer_id) - { - log::warn!( - "Dropping inbound circuit request to be denied from {:?} due to exceeding limit.", - src_peer_id, - ); - } else if self - .circuit_deny_futs - .insert( - src_peer_id, - inbound_circuit.deny(proto::Status::NO_RESERVATION).boxed(), - ) - .is_some() - { - log::warn!( - "Dropping existing inbound circuit request to be denied from {:?} in favor of new one.", - src_peer_id - ) - } - } - } - } - - fn on_fully_negotiated_outbound( - &mut self, - FullyNegotiatedOutbound { - protocol: output, - info, - }: FullyNegotiatedOutbound< - ::OutboundProtocol, - ::OutboundOpenInfo, - >, - ) { - match (output, info) { - // Outbound reservation - ( - outbound_hop::Output::Reservation { - renewal_timeout, - addrs, - limit, - }, - OutboundOpenInfo::Reserve { to_listener }, - ) => { - let event = self.reservation.accepted( - renewal_timeout, - addrs, - to_listener, - self.local_peer_id, - limit, - ); - - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour(event)); - } - - // Outbound circuit - ( - outbound_hop::Output::Circuit { - substream, - read_buffer, - limit, - }, - OutboundOpenInfo::Connect { send_back 
}, - ) => { - let (tx, rx) = oneshot::channel(); - match send_back.send(Ok(super::Connection { - state: super::ConnectionState::new_outbound(substream, read_buffer, tx), - })) { - Ok(()) => { - self.alive_lend_out_substreams.push(rx); - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundCircuitEstablished { limit }, - )); - } - Err(_) => debug!( - "Oneshot to `client::transport::Dial` future dropped. \ - Dropping established relayed connection to {:?}.", - self.remote_peer_id, - ), - } - } - - _ => unreachable!(), - } - } - - fn on_listen_upgrade_error( - &mut self, - ListenUpgradeError { - error: inbound_stop::UpgradeError::Fatal(error), - .. - }: ListenUpgradeError< - ::InboundOpenInfo, - ::InboundProtocol, - >, - ) { - self.pending_error = Some(StreamUpgradeError::Apply(Either::Left(error))); - } - fn on_dial_upgrade_error( &mut self, - DialUpgradeError { - info: open_info, - error, - }: DialUpgradeError< + DialUpgradeError { error, .. }: DialUpgradeError< ::OutboundOpenInfo, ::OutboundProtocol, >, ) { - match open_info { - OutboundOpenInfo::Reserve { mut to_listener } => { + let outbound_info = self.wait_for_outbound_stream.pop_front().expect( + "got a stream error without a pending connection command or a reserve listener", + ); + match outbound_info { + outbound_hop::OutboundStreamInfo::Reserve(mut to_listener) => { let non_fatal_error = match error { StreamUpgradeError::Timeout => StreamUpgradeError::Timeout, StreamUpgradeError::NegotiationFailed => StreamUpgradeError::NegotiationFailed, @@ -322,19 +217,7 @@ impl Handler { self.pending_error = Some(StreamUpgradeError::Io(e)); return; } - StreamUpgradeError::Apply(error) => match error { - outbound_hop::UpgradeError::Fatal(error) => { - self.pending_error = - Some(StreamUpgradeError::Apply(Either::Right(error))); - return; - } - outbound_hop::UpgradeError::ReservationFailed(error) => { - StreamUpgradeError::Apply(error) - } - outbound_hop::UpgradeError::CircuitFailed(_) 
=> { - unreachable!("Do not emitt `CircuitFailed` for outgoing reservation.") - } - }, + StreamUpgradeError::Apply(v) => void::unreachable(v), }; if self.pending_error.is_none() { @@ -347,11 +230,12 @@ impl Handler { .boxed(), ); } else { - // Fatal error occured, thus handler is closing as quickly as possible. + // Fatal error occurred, thus handler is closing as quickly as possible. // Transport is notified through dropping `to_listener`. } let renewal = self.reservation.failed(); + self.queued_events .push_back(ConnectionHandlerEvent::NotifyBehaviour( Event::ReservationReqFailed { @@ -360,7 +244,7 @@ impl Handler { }, )); } - OutboundOpenInfo::Connect { send_back } => { + outbound_hop::OutboundStreamInfo::CircuitConnection(cmd) => { let non_fatal_error = match error { StreamUpgradeError::Timeout => StreamUpgradeError::Timeout, StreamUpgradeError::NegotiationFailed => StreamUpgradeError::NegotiationFailed, @@ -368,22 +252,10 @@ impl Handler { self.pending_error = Some(StreamUpgradeError::Io(e)); return; } - StreamUpgradeError::Apply(error) => match error { - outbound_hop::UpgradeError::Fatal(error) => { - self.pending_error = - Some(StreamUpgradeError::Apply(Either::Right(error))); - return; - } - outbound_hop::UpgradeError::CircuitFailed(error) => { - StreamUpgradeError::Apply(error) - } - outbound_hop::UpgradeError::ReservationFailed(_) => { - unreachable!("Do not emitt `ReservationFailed` for outgoing circuit.") - } - }, + StreamUpgradeError::Apply(v) => void::unreachable(v), }; - let _ = send_back.send(Err(())); + let _ = cmd.send_back.send(Err(())); self.queued_events .push_back(ConnectionHandlerEvent::NotifyBehaviour( @@ -394,6 +266,23 @@ impl Handler { } } } + + fn insert_to_deny_futs(&mut self, circuit: inbound_stop::Circuit) { + let src_peer_id = circuit.src_peer_id(); + + match self.circuit_deny_futs.try_push( + src_peer_id, + circuit.deny(proto::Status::NO_RESERVATION), + ) { + Err(PushError::BeyondCapacity(_)) => log::warn!( + "Dropping inbound 
circuit request to be denied from {src_peer_id} due to exceeding limit." + ), + Err(PushError::ReplacedFuture(_)) => log::warn!( + "Dropping existing inbound circuit request to be denied from {src_peer_id} in favor of new one." + ), + Ok(()) => {} + } + } } impl ConnectionHandler for Handler { @@ -402,36 +291,37 @@ impl ConnectionHandler for Handler { type Error = StreamUpgradeError< Either, >; - type InboundProtocol = inbound_stop::Upgrade; - type OutboundProtocol = outbound_hop::Upgrade; - type OutboundOpenInfo = OutboundOpenInfo; + type InboundProtocol = ReadyUpgrade; type InboundOpenInfo = (); + type OutboundProtocol = ReadyUpgrade; + type OutboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(inbound_stop::Upgrade {}, ()) + SubstreamProtocol::new(ReadyUpgrade::new(STOP_PROTOCOL_NAME), ()) } fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { match event { In::Reserve { to_listener } => { + self.wait_for_outbound_stream + .push_back(outbound_hop::OutboundStreamInfo::Reserve(to_listener)); self.queued_events .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new( - outbound_hop::Upgrade::Reserve, - OutboundOpenInfo::Reserve { to_listener }, - ), + protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), }); } In::EstablishCircuit { send_back, dst_peer_id, } => { + self.wait_for_outbound_stream.push_back( + outbound_hop::OutboundStreamInfo::CircuitConnection( + outbound_hop::Command::new(dst_peer_id, send_back), + ), + ); self.queued_events .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new( - outbound_hop::Upgrade::Connect { dst_peer_id }, - OutboundOpenInfo::Connect { send_back }, - ), + protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), }); } } @@ -458,38 +348,132 @@ impl ConnectionHandler for Handler { return Poll::Ready(ConnectionHandlerEvent::Close(err)); } + // Outbound 
circuits + loop { + match self.outbound_circuits.poll_unpin(cx) { + Poll::Ready(Ok(Ok(Either::Left(Ok(outbound_hop::Reservation { + renewal_timeout, + addrs, + limit, + to_listener, + }))))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + self.reservation.accepted( + renewal_timeout, + addrs, + to_listener, + self.local_peer_id, + limit, + ), + )) + } + Poll::Ready(Ok(Ok(Either::Right(Ok(Some(outbound_hop::Circuit { limit })))))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundCircuitEstablished { limit }, + )); + } + Poll::Ready(Ok(Ok(Either::Right(Ok(None))))) => continue, + Poll::Ready(Ok(Ok(Either::Right(Err(e))))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundCircuitReqFailed { + error: StreamUpgradeError::Apply(e), + }, + )); + } + Poll::Ready(Ok(Ok(Either::Left(Err(e))))) => { + let renewal = self.reservation.failed(); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::ReservationReqFailed { + renewal, + error: StreamUpgradeError::Apply(e), + }, + )); + } + Poll::Ready(Ok(Err(e))) => { + return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( + Either::Right(e), + ))) + } + Poll::Ready(Err(Timeout { .. })) => { + return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Timeout)); + } + Poll::Pending => break, + } + } + // Return queued events. if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } - if let Poll::Ready(Some(protocol)) = self.reservation.poll(cx) { - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol }); + if let Poll::Ready(worker_res) = self.open_circuit_futs.poll_unpin(cx) { + let res = match worker_res { + Ok(r) => r, + Err(Timeout { .. }) => { + return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Timeout)); + } + }; + + match res { + Ok(circuit) => match &mut self.reservation { + Reservation::Accepted { pending_msgs, .. 
} + | Reservation::Renewing { pending_msgs, .. } => { + let src_peer_id = circuit.src_peer_id(); + let limit = circuit.limit(); + + let (tx, rx) = oneshot::channel(); + self.alive_lend_out_substreams.push(rx); + let connection = super::ConnectionState::new_inbound(circuit, tx); + + pending_msgs.push_back( + transport::ToListenerMsg::IncomingRelayedConnection { + stream: super::Connection { state: connection }, + src_peer_id, + relay_peer_id: self.remote_peer_id, + relay_addr: self.remote_addr.clone(), + }, + ); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundCircuitEstablished { src_peer_id, limit }, + )); + } + Reservation::None => { + self.insert_to_deny_futs(circuit); + } + }, + Err(e) => { + return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( + Either::Left(e), + ))); + } + } + } + + if let Poll::Ready(Some(to_listener)) = self.reservation.poll(cx) { + self.wait_for_outbound_stream + .push_back(outbound_hop::OutboundStreamInfo::Reserve(to_listener)); + + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), + }); } // Deny incoming circuit requests. 
- let maybe_event = - self.circuit_deny_futs - .iter_mut() - .find_map(|(src_peer_id, fut)| match fut.poll_unpin(cx) { - Poll::Ready(Ok(())) => Some(( - *src_peer_id, - Event::InboundCircuitReqDenied { - src_peer_id: *src_peer_id, - }, - )), - Poll::Ready(Err(error)) => Some(( - *src_peer_id, - Event::InboundCircuitReqDenyFailed { - src_peer_id: *src_peer_id, - error, - }, - )), - Poll::Pending => None, - }); - if let Some((src_peer_id, event)) = maybe_event { - self.circuit_deny_futs.remove(&src_peer_id); - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + match self.circuit_deny_futs.poll_unpin(cx) { + Poll::Ready((src_peer_id, Ok(Ok(())))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundCircuitReqDenied { src_peer_id }, + )); + } + Poll::Ready((src_peer_id, Ok(Err(error)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundCircuitReqDenyFailed { src_peer_id, error }, + )); + } + Poll::Ready((src_peer_id, Err(Timeout { .. }))) => { + log::warn!("Dropping inbound circuit request to be denied from {:?} due to exceeding limit.", src_peer_id); + } + Poll::Pending => {} } // Send errors to transport. @@ -533,14 +517,62 @@ impl ConnectionHandler for Handler { >, ) { match event { - ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { - self.on_fully_negotiated_inbound(fully_negotiated_inbound) + ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { + protocol: stream, + .. + }) => { + if self + .open_circuit_futs + .try_push(inbound_stop::handle_open_circuit(stream)) + .is_err() + { + log::warn!("Dropping inbound stream because we are at capacity") + } } - ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { - self.on_fully_negotiated_outbound(fully_negotiated_outbound) + ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { + protocol: stream, + .. 
+ }) => { + let outbound_info = self.wait_for_outbound_stream.pop_front().expect( + "opened a stream without a pending connection command or a reserve listener", + ); + match outbound_info { + outbound_hop::OutboundStreamInfo::Reserve(to_listener) => { + if self + .outbound_circuits + .try_push( + outbound_hop::handle_reserve_message_response(stream, to_listener) + .map_ok(Either::Left), + ) + .is_err() + { + log::warn!("Dropping outbound stream because we are at capacity") + } + } + outbound_hop::OutboundStreamInfo::CircuitConnection(cmd) => { + let (tx, rx) = oneshot::channel(); + self.alive_lend_out_substreams.push(rx); + + if self + .outbound_circuits + .try_push( + outbound_hop::handle_connection_message_response( + stream, + self.remote_peer_id, + cmd, + tx, + ) + .map_ok(Either::Right), + ) + .is_err() + { + log::warn!("Dropping outbound stream because we are at capacity") + } + } + } } ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { - self.on_listen_upgrade_error(listen_upgrade_error) + void::unreachable(listen_upgrade_error.error) } ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error) @@ -648,7 +680,7 @@ impl Reservation { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll>> { self.forward_messages_to_transport_listener(cx); // Check renewal timeout if any. 
@@ -660,10 +692,7 @@ impl Reservation { } => match renewal_timeout.poll_unpin(cx) { Poll::Ready(()) => ( Reservation::Renewing { pending_msgs }, - Poll::Ready(Some(SubstreamProtocol::new( - outbound_hop::Upgrade::Reserve, - OutboundOpenInfo::Reserve { to_listener }, - ))), + Poll::Ready(Some(to_listener)), ), Poll::Pending => ( Reservation::Accepted { @@ -681,12 +710,3 @@ impl Reservation { poll_val } } - -pub enum OutboundOpenInfo { - Reserve { - to_listener: mpsc::Sender, - }, - Connect { - send_back: oneshot::Sender>, - }, -} diff --git a/protocols/relay/src/priv_client/transport.rs b/protocols/relay/src/priv_client/transport.rs index 45cc685a..41114d0c 100644 --- a/protocols/relay/src/priv_client/transport.rs +++ b/protocols/relay/src/priv_client/transport.rs @@ -55,7 +55,7 @@ use thiserror::Error; /// # use libp2p_identity::PeerId; /// let actual_transport = MemoryTransport::default(); /// let (relay_transport, behaviour) = relay::client::new( -/// PeerId::random(), +/// PeerId::random() /// ); /// let mut transport = OrTransport::new(relay_transport, actual_transport); /// # let relay_id = PeerId::random(); @@ -80,7 +80,7 @@ use thiserror::Error; /// # let local_peer_id = PeerId::random(); /// let actual_transport = MemoryTransport::default(); /// let (relay_transport, behaviour) = relay::client::new( -/// local_peer_id, +/// local_peer_id /// ); /// let mut transport = OrTransport::new(relay_transport, actual_transport); /// let relay_addr = Multiaddr::empty() diff --git a/protocols/relay/src/protocol.rs b/protocols/relay/src/protocol.rs index f9b1e1ac..b9415125 100644 --- a/protocols/relay/src/protocol.rs +++ b/protocols/relay/src/protocol.rs @@ -31,7 +31,7 @@ pub const HOP_PROTOCOL_NAME: StreamProtocol = pub const STOP_PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/libp2p/circuit/relay/0.2.0/stop"); -const MAX_MESSAGE_SIZE: usize = 4096; +pub(crate) const MAX_MESSAGE_SIZE: usize = 4096; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Limit { 
diff --git a/protocols/relay/src/protocol/inbound_hop.rs b/protocols/relay/src/protocol/inbound_hop.rs index 27f2572a..b44d29e4 100644 --- a/protocols/relay/src/protocol/inbound_hop.rs +++ b/protocols/relay/src/protocol/inbound_hop.rs @@ -18,79 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; -use crate::protocol::{HOP_PROTOCOL_NAME, MAX_MESSAGE_SIZE}; +use std::time::{Duration, SystemTime}; + use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; -use futures::{future::BoxFuture, prelude::*}; -use instant::{Duration, SystemTime}; -use libp2p_core::{upgrade, Multiaddr}; -use libp2p_identity::PeerId; -use libp2p_swarm::{Stream, StreamProtocol}; -use std::convert::TryInto; -use std::iter; +use either::Either; +use futures::prelude::*; use thiserror::Error; -pub struct Upgrade { - pub reservation_duration: Duration, - pub max_circuit_duration: Duration, - pub max_circuit_bytes: u64, -} +use libp2p_core::Multiaddr; +use libp2p_identity::PeerId; +use libp2p_swarm::Stream; -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(HOP_PROTOCOL_NAME) - } -} - -impl upgrade::InboundUpgrade for Upgrade { - type Output = Req; - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, substream: Stream, _: Self::Info) -> Self::Future { - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE), - ); - - async move { - let proto::HopMessage { - type_pb, - peer, - reservation: _, - limit: _, - status: _, - } = substream - .next() - .await - .ok_or(FatalUpgradeError::StreamClosed)??; - - let req = match type_pb { - proto::HopMessageType::RESERVE => Req::Reserve(ReservationReq { - substream, - reservation_duration: self.reservation_duration, - max_circuit_duration: self.max_circuit_duration, - 
max_circuit_bytes: self.max_circuit_bytes, - }), - proto::HopMessageType::CONNECT => { - let dst = PeerId::from_bytes(&peer.ok_or(FatalUpgradeError::MissingPeer)?.id) - .map_err(|_| FatalUpgradeError::ParsePeerId)?; - Req::Connect(CircuitReq { dst, substream }) - } - proto::HopMessageType::STATUS => { - return Err(FatalUpgradeError::UnexpectedTypeStatus.into()) - } - }; - - Ok(req) - } - .boxed() - } -} +use crate::proto; +use crate::proto::message_v2::pb::mod_HopMessage::Type; +use crate::protocol::MAX_MESSAGE_SIZE; #[derive(Debug, Error)] pub enum UpgradeError { @@ -120,11 +62,6 @@ pub enum FatalUpgradeError { UnexpectedTypeStatus, } -pub enum Req { - Reserve(ReservationReq), - Connect(CircuitReq), -} - pub struct ReservationReq { substream: Framed>, reservation_duration: Duration, @@ -133,7 +70,7 @@ pub struct ReservationReq { } impl ReservationReq { - pub async fn accept(self, addrs: Vec) -> Result<(), UpgradeError> { + pub async fn accept(self, addrs: Vec) -> Result<(), FatalUpgradeError> { if addrs.is_empty() { log::debug!( "Accepting relay reservation without providing external addresses of local node. 
\ @@ -167,7 +104,7 @@ impl ReservationReq { self.send(msg).await } - pub async fn deny(self, status: proto::Status) -> Result<(), UpgradeError> { + pub async fn deny(self, status: proto::Status) -> Result<(), FatalUpgradeError> { let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -179,7 +116,7 @@ impl ReservationReq { self.send(msg).await } - async fn send(mut self, msg: proto::HopMessage) -> Result<(), UpgradeError> { + async fn send(mut self, msg: proto::HopMessage) -> Result<(), FatalUpgradeError> { self.substream.send(msg).await?; self.substream.flush().await?; self.substream.close().await?; @@ -198,7 +135,7 @@ impl CircuitReq { self.dst } - pub async fn accept(mut self) -> Result<(Stream, Bytes), UpgradeError> { + pub async fn accept(mut self) -> Result<(Stream, Bytes), FatalUpgradeError> { let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -223,7 +160,7 @@ impl CircuitReq { Ok((io, read_buffer.freeze())) } - pub async fn deny(mut self, status: proto::Status) -> Result<(), UpgradeError> { + pub async fn deny(mut self, status: proto::Status) -> Result<(), FatalUpgradeError> { let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -242,3 +179,51 @@ impl CircuitReq { Ok(()) } } + +pub(crate) async fn handle_inbound_request( + io: Stream, + reservation_duration: Duration, + max_circuit_duration: Duration, + max_circuit_bytes: u64, +) -> Result, FatalUpgradeError> { + let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); + + let res = substream.next().await; + + if let None | Some(Err(_)) = res { + return Err(FatalUpgradeError::StreamClosed); + } + + let proto::HopMessage { + type_pb, + peer, + reservation: _, + limit: _, + status: _, + } = res.unwrap().expect("should be ok"); + + let req = match type_pb { + Type::RESERVE => Either::Left(ReservationReq { + substream, + reservation_duration, + max_circuit_duration, + max_circuit_bytes, 
+ }), + Type::CONNECT => { + let peer_id_res = match peer { + Some(r) => PeerId::from_bytes(&r.id), + None => return Err(FatalUpgradeError::MissingPeer), + }; + + let dst = match peer_id_res { + Ok(res) => res, + Err(_) => return Err(FatalUpgradeError::ParsePeerId), + }; + + Either::Right(CircuitReq { dst, substream }) + } + Type::STATUS => return Err(FatalUpgradeError::UnexpectedTypeStatus), + }; + + Ok(req) +} diff --git a/protocols/relay/src/protocol/inbound_stop.rs b/protocols/relay/src/protocol/inbound_stop.rs index c279c8ee..caaeee9c 100644 --- a/protocols/relay/src/protocol/inbound_stop.rs +++ b/protocols/relay/src/protocol/inbound_stop.rs @@ -19,66 +19,38 @@ // DEALINGS IN THE SOFTWARE. use crate::proto; -use crate::protocol::{self, MAX_MESSAGE_SIZE, STOP_PROTOCOL_NAME}; +use crate::protocol::{self, MAX_MESSAGE_SIZE}; use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::upgrade; +use futures::prelude::*; use libp2p_identity::PeerId; -use libp2p_swarm::{Stream, StreamProtocol}; -use std::iter; +use libp2p_swarm::Stream; use thiserror::Error; -pub struct Upgrade {} +pub(crate) async fn handle_open_circuit(io: Stream) -> Result { + let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; + let proto::StopMessage { + type_pb, + peer, + limit, + status: _, + } = substream + .next() + .await + .ok_or(FatalUpgradeError::StreamClosed)??; - fn protocol_info(&self) -> Self::InfoIter { - iter::once(STOP_PROTOCOL_NAME) - } -} - -impl upgrade::InboundUpgrade for Upgrade { - type Output = Circuit; - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, substream: Stream, _: Self::Info) -> Self::Future { - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE), - ); - - async 
move { - let proto::StopMessage { - type_pb, - peer, - limit, - status: _, - } = substream - .next() - .await - .ok_or(FatalUpgradeError::StreamClosed)??; - - match type_pb { - proto::StopMessageType::CONNECT => { - let src_peer_id = - PeerId::from_bytes(&peer.ok_or(FatalUpgradeError::MissingPeer)?.id) - .map_err(|_| FatalUpgradeError::ParsePeerId)?; - Ok(Circuit { - substream, - src_peer_id, - limit: limit.map(Into::into), - }) - } - proto::StopMessageType::STATUS => { - Err(FatalUpgradeError::UnexpectedTypeStatus.into()) - } - } + match type_pb { + proto::StopMessageType::CONNECT => { + let src_peer_id = PeerId::from_bytes(&peer.ok_or(FatalUpgradeError::MissingPeer)?.id) + .map_err(|_| FatalUpgradeError::ParsePeerId)?; + Ok(Circuit { + substream, + src_peer_id, + limit: limit.map(Into::into), + }) } - .boxed() + proto::StopMessageType::STATUS => Err(FatalUpgradeError::UnexpectedTypeStatus), } } @@ -110,22 +82,22 @@ pub enum FatalUpgradeError { UnexpectedTypeStatus, } -pub struct Circuit { +pub(crate) struct Circuit { substream: Framed>, src_peer_id: PeerId, limit: Option, } impl Circuit { - pub fn src_peer_id(&self) -> PeerId { + pub(crate) fn src_peer_id(&self) -> PeerId { self.src_peer_id } - pub fn limit(&self) -> Option { + pub(crate) fn limit(&self) -> Option { self.limit } - pub async fn accept(mut self) -> Result<(Stream, Bytes), UpgradeError> { + pub(crate) async fn accept(mut self) -> Result<(Stream, Bytes), UpgradeError> { let msg = proto::StopMessage { type_pb: proto::StopMessageType::STATUS, peer: None, @@ -149,7 +121,7 @@ impl Circuit { Ok((io, read_buffer.freeze())) } - pub async fn deny(mut self, status: proto::Status) -> Result<(), UpgradeError> { + pub(crate) async fn deny(mut self, status: proto::Status) -> Result<(), UpgradeError> { let msg = proto::StopMessage { type_pb: proto::StopMessageType::STATUS, peer: None, diff --git a/protocols/relay/src/protocol/outbound_hop.rs b/protocols/relay/src/protocol/outbound_hop.rs index bec348e8..adad0e23 
100644 --- a/protocols/relay/src/protocol/outbound_hop.rs +++ b/protocols/relay/src/protocol/outbound_hop.rs @@ -18,201 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; -use crate::protocol::{Limit, HOP_PROTOCOL_NAME, MAX_MESSAGE_SIZE}; +use std::time::{Duration, SystemTime}; + use asynchronous_codec::{Framed, FramedParts}; -use bytes::Bytes; -use futures::{future::BoxFuture, prelude::*}; +use futures::channel::{mpsc, oneshot}; +use futures::prelude::*; use futures_timer::Delay; -use instant::{Duration, SystemTime}; -use libp2p_core::{upgrade, Multiaddr}; -use libp2p_identity::PeerId; -use libp2p_swarm::{Stream, StreamProtocol}; -use std::convert::TryFrom; -use std::iter; +use log::debug; use thiserror::Error; +use void::Void; -pub enum Upgrade { - Reserve, - Connect { dst_peer_id: PeerId }, -} +use libp2p_core::Multiaddr; +use libp2p_identity::PeerId; +use libp2p_swarm::Stream; -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(HOP_PROTOCOL_NAME) - } -} - -impl upgrade::OutboundUpgrade for Upgrade { - type Output = Output; - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, substream: Stream, _: Self::Info) -> Self::Future { - let msg = match self { - Upgrade::Reserve => proto::HopMessage { - type_pb: proto::HopMessageType::RESERVE, - peer: None, - reservation: None, - limit: None, - status: None, - }, - Upgrade::Connect { dst_peer_id } => proto::HopMessage { - type_pb: proto::HopMessageType::CONNECT, - peer: Some(proto::Peer { - id: dst_peer_id.to_bytes(), - addrs: vec![], - }), - reservation: None, - limit: None, - status: None, - }, - }; - - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE), - ); - - async move { - substream.send(msg).await?; - let proto::HopMessage { - 
type_pb, - peer: _, - reservation, - limit, - status, - } = substream - .next() - .await - .ok_or(FatalUpgradeError::StreamClosed)??; - - match type_pb { - proto::HopMessageType::CONNECT => { - return Err(FatalUpgradeError::UnexpectedTypeConnect.into()) - } - proto::HopMessageType::RESERVE => { - return Err(FatalUpgradeError::UnexpectedTypeReserve.into()) - } - proto::HopMessageType::STATUS => {} - } - - let limit = limit.map(Into::into); - - let output = match self { - Upgrade::Reserve => { - match status - .ok_or(UpgradeError::Fatal(FatalUpgradeError::MissingStatusField))? - { - proto::Status::OK => {} - proto::Status::RESERVATION_REFUSED => { - return Err(ReservationFailedReason::Refused.into()) - } - proto::Status::RESOURCE_LIMIT_EXCEEDED => { - return Err(ReservationFailedReason::ResourceLimitExceeded.into()) - } - s => return Err(FatalUpgradeError::UnexpectedStatus(s).into()), - } - - let reservation = - reservation.ok_or(FatalUpgradeError::MissingReservationField)?; - - if reservation.addrs.is_empty() { - return Err(FatalUpgradeError::NoAddressesInReservation.into()); - } - - let addrs = reservation - .addrs - .into_iter() - .map(|b| Multiaddr::try_from(b.to_vec())) - .collect::, _>>() - .map_err(|_| FatalUpgradeError::InvalidReservationAddrs)?; - - let renewal_timeout = reservation - .expire - .checked_sub( - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs(), - ) - // Renew the reservation after 3/4 of the reservation expiration timestamp. - .and_then(|duration| duration.checked_sub(duration / 4)) - .map(Duration::from_secs) - .map(Delay::new) - .ok_or(FatalUpgradeError::InvalidReservationExpiration)?; - - substream.close().await?; - - Output::Reservation { - renewal_timeout, - addrs, - limit, - } - } - Upgrade::Connect { .. } => { - match status - .ok_or(UpgradeError::Fatal(FatalUpgradeError::MissingStatusField))? 
- { - proto::Status::OK => {} - proto::Status::RESOURCE_LIMIT_EXCEEDED => { - return Err(CircuitFailedReason::ResourceLimitExceeded.into()) - } - proto::Status::CONNECTION_FAILED => { - return Err(CircuitFailedReason::ConnectionFailed.into()) - } - proto::Status::NO_RESERVATION => { - return Err(CircuitFailedReason::NoReservation.into()) - } - proto::Status::PERMISSION_DENIED => { - return Err(CircuitFailedReason::PermissionDenied.into()) - } - s => return Err(FatalUpgradeError::UnexpectedStatus(s).into()), - } - - let FramedParts { - io, - read_buffer, - write_buffer, - .. - } = substream.into_parts(); - assert!( - write_buffer.is_empty(), - "Expect a flushed Framed to have empty write buffer." - ); - - Output::Circuit { - substream: io, - read_buffer: read_buffer.freeze(), - limit, - } - } - }; - - Ok(output) - } - .boxed() - } -} - -#[derive(Debug, Error)] -pub enum UpgradeError { - #[error("Reservation failed")] - ReservationFailed(#[from] ReservationFailedReason), - #[error("Circuit failed")] - CircuitFailed(#[from] CircuitFailedReason), - #[error("Fatal")] - Fatal(#[from] FatalUpgradeError), -} - -impl From for UpgradeError { - fn from(error: quick_protobuf_codec::Error) -> Self { - Self::Fatal(error.into()) - } -} +use crate::priv_client::transport; +use crate::protocol::{Limit, MAX_MESSAGE_SIZE}; +use crate::{priv_client, proto}; #[derive(Debug, Error)] pub enum CircuitFailedReason { @@ -262,15 +84,216 @@ pub enum FatalUpgradeError { UnexpectedStatus(proto::Status), } -pub enum Output { - Reservation { - renewal_timeout: Delay, - addrs: Vec, - limit: Option, - }, - Circuit { - substream: Stream, - read_buffer: Bytes, - limit: Option, - }, +pub(crate) struct Reservation { + pub(crate) renewal_timeout: Delay, + pub(crate) addrs: Vec, + pub(crate) limit: Option, + pub(crate) to_listener: mpsc::Sender, +} + +pub(crate) struct Circuit { + pub(crate) limit: Option, +} + +pub(crate) async fn handle_reserve_message_response( + protocol: Stream, + to_listener: 
mpsc::Sender, +) -> Result, FatalUpgradeError> { + let msg = proto::HopMessage { + type_pb: proto::HopMessageType::RESERVE, + peer: None, + reservation: None, + limit: None, + status: None, + }; + let mut substream = Framed::new(protocol, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); + + substream.send(msg).await?; + + let proto::HopMessage { + type_pb, + peer: _, + reservation, + limit, + status, + } = substream + .next() + .await + .ok_or(FatalUpgradeError::StreamClosed)??; + + match type_pb { + proto::HopMessageType::CONNECT => { + return Err(FatalUpgradeError::UnexpectedTypeConnect); + } + proto::HopMessageType::RESERVE => { + return Err(FatalUpgradeError::UnexpectedTypeReserve); + } + proto::HopMessageType::STATUS => {} + } + + let limit = limit.map(Into::into); + + match status.ok_or(FatalUpgradeError::MissingStatusField)? { + proto::Status::OK => {} + proto::Status::RESERVATION_REFUSED => { + return Ok(Err(ReservationFailedReason::Refused)); + } + proto::Status::RESOURCE_LIMIT_EXCEEDED => { + return Ok(Err(ReservationFailedReason::ResourceLimitExceeded)); + } + s => return Err(FatalUpgradeError::UnexpectedStatus(s)), + } + + let reservation = reservation.ok_or(FatalUpgradeError::MissingReservationField)?; + + if reservation.addrs.is_empty() { + return Err(FatalUpgradeError::NoAddressesInReservation); + } + + let addrs = reservation + .addrs + .into_iter() + .map(|b| Multiaddr::try_from(b.to_vec())) + .collect::, _>>() + .map_err(|_| FatalUpgradeError::InvalidReservationAddrs)?; + + let renewal_timeout = reservation + .expire + .checked_sub( + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(), + ) + // Renew the reservation after 3/4 of the reservation expiration timestamp. 
+ .and_then(|duration| duration.checked_sub(duration / 4)) + .map(Duration::from_secs) + .map(Delay::new) + .ok_or(FatalUpgradeError::InvalidReservationExpiration)?; + + substream.close().await?; + + Ok(Ok(Reservation { + renewal_timeout, + addrs, + limit, + to_listener, + })) +} + +pub(crate) async fn handle_connection_message_response( + protocol: Stream, + remote_peer_id: PeerId, + con_command: Command, + tx: oneshot::Sender, +) -> Result, CircuitFailedReason>, FatalUpgradeError> { + let msg = proto::HopMessage { + type_pb: proto::HopMessageType::CONNECT, + peer: Some(proto::Peer { + id: con_command.dst_peer_id.to_bytes(), + addrs: vec![], + }), + reservation: None, + limit: None, + status: None, + }; + + let mut substream = Framed::new(protocol, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); + + if substream.send(msg).await.is_err() { + return Err(FatalUpgradeError::StreamClosed); + } + + let proto::HopMessage { + type_pb, + peer: _, + reservation: _, + limit, + status, + } = match substream.next().await { + Some(Ok(r)) => r, + _ => return Err(FatalUpgradeError::StreamClosed), + }; + + match type_pb { + proto::HopMessageType::CONNECT => { + return Err(FatalUpgradeError::UnexpectedTypeConnect); + } + proto::HopMessageType::RESERVE => { + return Err(FatalUpgradeError::UnexpectedTypeReserve); + } + proto::HopMessageType::STATUS => {} + } + + match status { + Some(proto::Status::OK) => {} + Some(proto::Status::RESOURCE_LIMIT_EXCEEDED) => { + return Ok(Err(CircuitFailedReason::ResourceLimitExceeded)); + } + Some(proto::Status::CONNECTION_FAILED) => { + return Ok(Err(CircuitFailedReason::ConnectionFailed)); + } + Some(proto::Status::NO_RESERVATION) => { + return Ok(Err(CircuitFailedReason::NoReservation)); + } + Some(proto::Status::PERMISSION_DENIED) => { + return Ok(Err(CircuitFailedReason::PermissionDenied)); + } + Some(s) => { + return Err(FatalUpgradeError::UnexpectedStatus(s)); + } + None => { + return Err(FatalUpgradeError::MissingStatusField); + } + } + 
+ let limit = limit.map(Into::into); + + let FramedParts { + io, + read_buffer, + write_buffer, + .. + } = substream.into_parts(); + assert!( + write_buffer.is_empty(), + "Expect a flushed Framed to have empty write buffer." + ); + + match con_command.send_back.send(Ok(priv_client::Connection { + state: priv_client::ConnectionState::new_outbound(io, read_buffer.freeze(), tx), + })) { + Ok(()) => Ok(Ok(Some(Circuit { limit }))), + Err(_) => { + debug!( + "Oneshot to `client::transport::Dial` future dropped. \ + Dropping established relayed connection to {:?}.", + remote_peer_id, + ); + + Ok(Ok(None)) + } + } +} + +pub(crate) enum OutboundStreamInfo { + Reserve(mpsc::Sender), + CircuitConnection(Command), +} + +pub(crate) struct Command { + dst_peer_id: PeerId, + pub(crate) send_back: oneshot::Sender>, +} + +impl Command { + pub(crate) fn new( + dst_peer_id: PeerId, + send_back: oneshot::Sender>, + ) -> Self { + Self { + dst_peer_id, + send_back, + } + } } diff --git a/protocols/relay/src/protocol/outbound_stop.rs b/protocols/relay/src/protocol/outbound_stop.rs index 836468a8..e4502957 100644 --- a/protocols/relay/src/protocol/outbound_stop.rs +++ b/protocols/relay/src/protocol/outbound_stop.rs @@ -18,112 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::proto; -use crate::protocol::{MAX_MESSAGE_SIZE, STOP_PROTOCOL_NAME}; +use std::time::Duration; + use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::upgrade; -use libp2p_identity::PeerId; -use libp2p_swarm::{Stream, StreamProtocol}; -use std::convert::TryInto; -use std::iter; -use std::time::Duration; +use futures::channel::oneshot::{self}; +use futures::prelude::*; use thiserror::Error; -pub struct Upgrade { - pub src_peer_id: PeerId, - pub max_circuit_duration: Duration, - pub max_circuit_bytes: u64, -} +use libp2p_identity::PeerId; +use libp2p_swarm::{ConnectionId, Stream, StreamUpgradeError}; -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(STOP_PROTOCOL_NAME) - } -} - -impl upgrade::OutboundUpgrade for Upgrade { - type Output = (Stream, Bytes); - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, substream: Stream, _: Self::Info) -> Self::Future { - let msg = proto::StopMessage { - type_pb: proto::StopMessageType::CONNECT, - peer: Some(proto::Peer { - id: self.src_peer_id.to_bytes(), - addrs: vec![], - }), - limit: Some(proto::Limit { - duration: Some( - self.max_circuit_duration - .as_secs() - .try_into() - .expect("`max_circuit_duration` not to exceed `u32::MAX`."), - ), - data: Some(self.max_circuit_bytes), - }), - status: None, - }; - - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE), - ); - - async move { - substream.send(msg).await?; - let proto::StopMessage { - type_pb, - peer: _, - limit: _, - status, - } = substream - .next() - .await - .ok_or(FatalUpgradeError::StreamClosed)??; - - match type_pb { - proto::StopMessageType::CONNECT => { - return Err(FatalUpgradeError::UnexpectedTypeConnect.into()) - } - proto::StopMessageType::STATUS => {} - } - - match 
status.ok_or(UpgradeError::Fatal(FatalUpgradeError::MissingStatusField))? { - proto::Status::OK => {} - proto::Status::RESOURCE_LIMIT_EXCEEDED => { - return Err(CircuitFailedReason::ResourceLimitExceeded.into()) - } - proto::Status::PERMISSION_DENIED => { - return Err(CircuitFailedReason::PermissionDenied.into()) - } - s => return Err(FatalUpgradeError::UnexpectedStatus(s).into()), - } - - let FramedParts { - io, - read_buffer, - write_buffer, - .. - } = substream.into_parts(); - assert!( - write_buffer.is_empty(), - "Expect a flushed Framed to have an empty write buffer." - ); - - Ok((io, read_buffer.freeze())) - } - .boxed() - } -} +use crate::behaviour::handler::Config; +use crate::protocol::{inbound_hop, MAX_MESSAGE_SIZE}; +use crate::{proto, CircuitId}; #[derive(Debug, Error)] -pub enum UpgradeError { +pub(crate) enum UpgradeError { #[error("Circuit failed")] CircuitFailed(#[from] CircuitFailedReason), #[error("Fatal")] @@ -161,3 +72,147 @@ pub enum FatalUpgradeError { #[error("Unexpected message status '{0:?}'")] UnexpectedStatus(proto::Status), } + +/// Attempts to _connect_ to a peer via the given stream. 
+pub(crate) async fn connect( + io: Stream, + stop_command: PendingConnect, + tx: oneshot::Sender<()>, +) -> Result, FatalUpgradeError> { + let msg = proto::StopMessage { + type_pb: proto::StopMessageType::CONNECT, + peer: Some(proto::Peer { + id: stop_command.src_peer_id.to_bytes(), + addrs: vec![], + }), + limit: Some(proto::Limit { + duration: Some( + stop_command + .max_circuit_duration + .as_secs() + .try_into() + .expect("`max_circuit_duration` not to exceed `u32::MAX`."), + ), + data: Some(stop_command.max_circuit_bytes), + }), + status: None, + }; + + let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); + + if substream.send(msg).await.is_err() { + return Err(FatalUpgradeError::StreamClosed); + } + + let res = substream.next().await; + + if let None | Some(Err(_)) = res { + return Err(FatalUpgradeError::StreamClosed); + } + + let proto::StopMessage { + type_pb, + peer: _, + limit: _, + status, + } = res.unwrap().expect("should be ok"); + + match type_pb { + proto::StopMessageType::CONNECT => return Err(FatalUpgradeError::UnexpectedTypeConnect), + proto::StopMessageType::STATUS => {} + } + + match status { + Some(proto::Status::OK) => {} + Some(proto::Status::RESOURCE_LIMIT_EXCEEDED) => { + return Ok(Err(CircuitFailed { + circuit_id: stop_command.circuit_id, + src_peer_id: stop_command.src_peer_id, + src_connection_id: stop_command.src_connection_id, + inbound_circuit_req: stop_command.inbound_circuit_req, + status: proto::Status::RESOURCE_LIMIT_EXCEEDED, + error: StreamUpgradeError::Apply(CircuitFailedReason::ResourceLimitExceeded), + })) + } + Some(proto::Status::PERMISSION_DENIED) => { + return Ok(Err(CircuitFailed { + circuit_id: stop_command.circuit_id, + src_peer_id: stop_command.src_peer_id, + src_connection_id: stop_command.src_connection_id, + inbound_circuit_req: stop_command.inbound_circuit_req, + status: proto::Status::PERMISSION_DENIED, + error: StreamUpgradeError::Apply(CircuitFailedReason::PermissionDenied), 
+ })) + } + Some(s) => return Err(FatalUpgradeError::UnexpectedStatus(s)), + None => return Err(FatalUpgradeError::MissingStatusField), + } + + let FramedParts { + io, + read_buffer, + write_buffer, + .. + } = substream.into_parts(); + assert!( + write_buffer.is_empty(), + "Expect a flushed Framed to have an empty write buffer." + ); + + Ok(Ok(Circuit { + circuit_id: stop_command.circuit_id, + src_peer_id: stop_command.src_peer_id, + src_connection_id: stop_command.src_connection_id, + inbound_circuit_req: stop_command.inbound_circuit_req, + dst_handler_notifier: tx, + dst_stream: io, + dst_pending_data: read_buffer.freeze(), + })) +} + +pub(crate) struct Circuit { + pub(crate) circuit_id: CircuitId, + pub(crate) src_peer_id: PeerId, + pub(crate) src_connection_id: ConnectionId, + pub(crate) inbound_circuit_req: inbound_hop::CircuitReq, + pub(crate) dst_handler_notifier: oneshot::Sender<()>, + pub(crate) dst_stream: Stream, + pub(crate) dst_pending_data: Bytes, +} + +pub(crate) struct CircuitFailed { + pub(crate) circuit_id: CircuitId, + pub(crate) src_peer_id: PeerId, + pub(crate) src_connection_id: ConnectionId, + pub(crate) inbound_circuit_req: inbound_hop::CircuitReq, + pub(crate) status: proto::Status, + pub(crate) error: StreamUpgradeError, +} + +pub(crate) struct PendingConnect { + pub(crate) circuit_id: CircuitId, + pub(crate) inbound_circuit_req: inbound_hop::CircuitReq, + pub(crate) src_peer_id: PeerId, + pub(crate) src_connection_id: ConnectionId, + max_circuit_duration: Duration, + max_circuit_bytes: u64, +} + +impl PendingConnect { + pub(crate) fn new( + circuit_id: CircuitId, + inbound_circuit_req: inbound_hop::CircuitReq, + src_peer_id: PeerId, + src_connection_id: ConnectionId, + config: &Config, + ) -> Self { + Self { + circuit_id, + inbound_circuit_req, + src_peer_id, + src_connection_id, + max_circuit_duration: config.max_circuit_duration, + max_circuit_bytes: config.max_circuit_bytes, + } + } +} diff --git a/protocols/relay/tests/lib.rs 
b/protocols/relay/tests/lib.rs index fa79ab67..b7784d17 100644 --- a/protocols/relay/tests/lib.rs +++ b/protocols/relay/tests/lib.rs @@ -30,9 +30,8 @@ use libp2p_core::transport::{Boxed, MemoryTransport, Transport}; use libp2p_core::upgrade; use libp2p_identity as identity; use libp2p_identity::PeerId; -use libp2p_identity::PublicKey; use libp2p_ping as ping; -use libp2p_plaintext::PlainText2Config; +use libp2p_plaintext as plaintext; use libp2p_relay as relay; use libp2p_swarm::{NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; use std::time::Duration; @@ -307,10 +306,9 @@ fn reuse_connection() { fn build_relay() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); - let local_public_key = local_key.public(); - let local_peer_id = local_public_key.to_peer_id(); + let local_peer_id = local_key.public().to_peer_id(); - let transport = upgrade_transport(MemoryTransport::default().boxed(), local_public_key); + let transport = upgrade_transport(MemoryTransport::default().boxed(), &local_key); SwarmBuilder::with_async_std_executor( transport, @@ -331,13 +329,12 @@ fn build_relay() -> Swarm { fn build_client() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); - let local_public_key = local_key.public(); - let local_peer_id = local_public_key.to_peer_id(); + let local_peer_id = local_key.public().to_peer_id(); let (relay_transport, behaviour) = relay::client::new(local_peer_id); let transport = upgrade_transport( OrTransport::new(relay_transport, MemoryTransport::default()).boxed(), - local_public_key, + &local_key, ); SwarmBuilder::with_async_std_executor( @@ -353,14 +350,14 @@ fn build_client() -> Swarm { fn upgrade_transport( transport: Boxed, - local_public_key: PublicKey, + identity: &identity::Keypair, ) -> Boxed<(PeerId, StreamMuxerBox)> where StreamSink: AsyncRead + AsyncWrite + Send + Unpin + 'static, { transport .upgrade(upgrade::Version::V1) - .authenticate(PlainText2Config { local_public_key }) + 
.authenticate(plaintext::Config::new(identity)) .multiplex(libp2p_yamux::Config::default()) .boxed() } diff --git a/protocols/rendezvous/Cargo.toml b/protocols/rendezvous/Cargo.toml index 7929d3c4..b231f9d7 100644 --- a/protocols/rendezvous/Cargo.toml +++ b/protocols/rendezvous/Cargo.toml @@ -37,7 +37,7 @@ libp2p-identify = { workspace = true } libp2p-yamux = { workspace = true } libp2p-tcp = { workspace = true, features = ["tokio"] } rand = "0.8" -tokio = { version = "1.31", features = [ "rt-multi-thread", "time", "macros", "sync", "process", "fs", "net" ] } +tokio = { version = "1.32", features = [ "rt-multi-thread", "time", "macros", "sync", "process", "fs", "net" ] } libp2p-swarm-test = { path = "../../swarm-test" } # Passing arguments to the docsrs builder in order to properly document cfg's. diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index 40e65f1c..3b228fe2 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -20,8 +20,8 @@ libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } rand = "0.8" serde = { version = "1.0", optional = true} -serde_json = { version = "1.0.100", optional = true } -smallvec = "1.11.0" +serde_json = { version = "1.0.107", optional = true } +smallvec = "1.11.1" void = "1.0.2" log = "0.4.20" diff --git a/protocols/upnp/CHANGELOG.md b/protocols/upnp/CHANGELOG.md new file mode 100644 index 00000000..8ebea5e7 --- /dev/null +++ b/protocols/upnp/CHANGELOG.md @@ -0,0 +1,11 @@ +## 0.1.1 - unreleased + +- Fix high CPU usage due to repeated generation of failure events. + See [PR 4569](https://github.com/libp2p/rust-libp2p/pull/4569). + +- Fix port mapping protocol used for a UDP multiaddress. + See [PR 4542](https://github.com/libp2p/rust-libp2p/pull/4542). 
+ +## 0.1.0 + +- Initial version diff --git a/protocols/upnp/Cargo.toml b/protocols/upnp/Cargo.toml new file mode 100644 index 00000000..4121745d --- /dev/null +++ b/protocols/upnp/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "libp2p-upnp" +edition = "2021" +rust-version = "1.60.0" +description = "UPnP support for libp2p transports" +version = "0.1.1" +license = "MIT" +repository = "https://github.com/libp2p/rust-libp2p" +keywords = ["peer-to-peer", "libp2p", "networking"] +categories = ["network-programming", "asynchronous"] +publish = true + +[dependencies] +futures = "0.3.28" +futures-timer = "3.0.2" +igd-next = "0.14.2" +libp2p-core = { workspace = true } +libp2p-swarm = { workspace = true } +log = "0.4.19" +void = "1.0.2" +tokio = { version = "1.29", default-features = false, features = ["rt"], optional = true } + +[features] +tokio = ["igd-next/aio_tokio", "dep:tokio"] diff --git a/protocols/upnp/src/behaviour.rs b/protocols/upnp/src/behaviour.rs new file mode 100644 index 00000000..45b82edc --- /dev/null +++ b/protocols/upnp/src/behaviour.rs @@ -0,0 +1,550 @@ +// Copyright 2023 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +use std::{ + borrow::Borrow, + collections::{HashMap, VecDeque}, + error::Error, + hash::{Hash, Hasher}, + net::{self, IpAddr, SocketAddr, SocketAddrV4}, + ops::{Deref, DerefMut}, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use crate::tokio::{is_addr_global, Gateway}; +use futures::{channel::oneshot, Future, StreamExt}; +use futures_timer::Delay; +use igd_next::PortMappingProtocol; +use libp2p_core::{multiaddr, transport::ListenerId, Endpoint, Multiaddr}; +use libp2p_swarm::{ + derive_prelude::PeerId, dummy, ConnectionDenied, ConnectionId, ExpiredListenAddr, FromSwarm, + NetworkBehaviour, NewListenAddr, PollParameters, ToSwarm, +}; + +/// The duration in seconds of a port mapping on the gateway. +const MAPPING_DURATION: u32 = 3600; + +/// Renew the Mapping every half of `MAPPING_DURATION` to avoid the port being unmapped. +const MAPPING_TIMEOUT: u64 = MAPPING_DURATION as u64 / 2; + +/// A [`Gateway`] Request. +#[derive(Debug)] +pub(crate) enum GatewayRequest { + AddMapping { mapping: Mapping, duration: u32 }, + RemoveMapping(Mapping), +} + +/// A [`Gateway`] event. +#[derive(Debug)] +pub(crate) enum GatewayEvent { + /// Port was successfully mapped. + Mapped(Mapping), + /// There was a failure mapping port. + MapFailure(Mapping, Box), + /// Port was successfully removed. + Removed(Mapping), + /// There was a failure removing the mapped port. + RemovalFailure(Mapping, Box), +} + +/// Mapping of a Protocol and Port on the gateway. 
+#[derive(Debug, Clone)] +pub(crate) struct Mapping { + pub(crate) listener_id: ListenerId, + pub(crate) protocol: PortMappingProtocol, + pub(crate) multiaddr: Multiaddr, + pub(crate) internal_addr: SocketAddr, +} + +impl Mapping { + /// Given the input gateway address, calculate the + /// open external `Multiaddr`. + fn external_addr(&self, gateway_addr: IpAddr) -> Multiaddr { + let addr = match gateway_addr { + net::IpAddr::V4(ip) => multiaddr::Protocol::Ip4(ip), + net::IpAddr::V6(ip) => multiaddr::Protocol::Ip6(ip), + }; + self.multiaddr + .replace(0, |_| Some(addr)) + .expect("multiaddr should be valid") + } +} + +impl Hash for Mapping { + fn hash(&self, state: &mut H) { + self.listener_id.hash(state); + } +} + +impl PartialEq for Mapping { + fn eq(&self, other: &Self) -> bool { + self.listener_id == other.listener_id + } +} + +impl Eq for Mapping {} + +impl Borrow for Mapping { + fn borrow(&self) -> &ListenerId { + &self.listener_id + } +} + +/// Current state of a [`Mapping`]. +#[derive(Debug)] +enum MappingState { + /// Port mapping is inactive, will be requested or re-requested on the next iteration. + Inactive, + /// Port mapping/removal has been requested on the gateway. + Pending, + /// Port mapping is active with the inner timeout. + Active(Delay), + /// Port mapping failed, we will try again. + Failed, +} + +/// Current state of the UPnP [`Gateway`]. +enum GatewayState { + Searching(oneshot::Receiver>>), + Available(Gateway), + GatewayNotFound, + NonRoutableGateway(IpAddr), +} + +/// The event produced by `Behaviour`. +#[derive(Debug)] +pub enum Event { + /// The multiaddress is reachable externally. + NewExternalAddr(Multiaddr), + /// The renewal of the multiaddress on the gateway failed. + ExpiredExternalAddr(Multiaddr), + /// The IGD gateway was not found. + GatewayNotFound, + /// The Gateway is not exposed directly to the public network. + NonRoutableGateway, +} + +/// A list of port mappings and its state. 
+#[derive(Debug, Default)] +struct MappingList(HashMap); + +impl Deref for MappingList { + type Target = HashMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for MappingList { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl MappingList { + /// Queue for renewal the current mapped ports on the `Gateway` that are expiring, + /// and try to activate the inactive. + fn renew(&mut self, gateway: &mut Gateway, cx: &mut Context<'_>) { + for (mapping, state) in self.iter_mut() { + match state { + MappingState::Inactive | MappingState::Failed => { + let duration = MAPPING_DURATION; + if let Err(err) = gateway.sender.try_send(GatewayRequest::AddMapping { + mapping: mapping.clone(), + duration, + }) { + log::debug!( + "could not request port mapping for {} on the gateway: {}", + mapping.multiaddr, + err + ); + } + *state = MappingState::Pending; + } + MappingState::Active(timeout) => { + if Pin::new(timeout).poll(cx).is_ready() { + let duration = MAPPING_DURATION; + if let Err(err) = gateway.sender.try_send(GatewayRequest::AddMapping { + mapping: mapping.clone(), + duration, + }) { + log::debug!( + "could not request port mapping for {} on the gateway: {}", + mapping.multiaddr, + err + ); + } + } + } + MappingState::Pending => {} + } + } + } +} + +/// A [`NetworkBehaviour`] for UPnP port mapping. Automatically tries to map the external port +/// to an internal address on the gateway on a [`FromSwarm::NewListenAddr`]. +pub struct Behaviour { + /// UPnP interface state. + state: GatewayState, + + /// List of port mappings. + mappings: MappingList, + + /// Pending behaviour events to be emitted. 
+ pending_events: VecDeque, +} + +impl Default for Behaviour { + fn default() -> Self { + Self { + state: GatewayState::Searching(crate::tokio::search_gateway()), + mappings: Default::default(), + pending_events: VecDeque::new(), + } + } +} + +impl NetworkBehaviour for Behaviour { + type ConnectionHandler = dummy::ConnectionHandler; + + type ToSwarm = Event; + + fn handle_established_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _local_addr: &Multiaddr, + _remote_addr: &Multiaddr, + ) -> Result, ConnectionDenied> { + Ok(dummy::ConnectionHandler) + } + + fn handle_established_outbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _addr: &Multiaddr, + _role_override: Endpoint, + ) -> Result, libp2p_swarm::ConnectionDenied> { + Ok(dummy::ConnectionHandler) + } + + fn on_swarm_event(&mut self, event: FromSwarm) { + match event { + FromSwarm::NewListenAddr(NewListenAddr { + listener_id, + addr: multiaddr, + }) => { + let (addr, protocol) = match multiaddr_to_socketaddr_protocol(multiaddr.clone()) { + Ok(addr_port) => addr_port, + Err(()) => { + log::debug!("multiaddress not supported for UPnP {multiaddr}"); + return; + } + }; + + if let Some((mapping, _state)) = self + .mappings + .iter() + .find(|(mapping, _state)| mapping.internal_addr.port() == addr.port()) + { + log::debug!("port from multiaddress {multiaddr} is already being mapped to another multiaddr: {}", mapping.multiaddr); + return; + } + + match &mut self.state { + GatewayState::Searching(_) => { + // As the gateway is not yet available we add the mapping with `MappingState::Inactive` + // so that when and if it becomes available we map it. 
+ self.mappings.insert( + Mapping { + listener_id, + protocol, + internal_addr: addr, + multiaddr: multiaddr.clone(), + }, + MappingState::Inactive, + ); + } + GatewayState::Available(ref mut gateway) => { + let mapping = Mapping { + listener_id, + protocol, + internal_addr: addr, + multiaddr: multiaddr.clone(), + }; + + let duration = MAPPING_DURATION; + if let Err(err) = gateway.sender.try_send(GatewayRequest::AddMapping { + mapping: mapping.clone(), + duration, + }) { + log::debug!( + "could not request port mapping for {} on the gateway: {}", + mapping.multiaddr, + err + ); + } + + self.mappings.insert(mapping, MappingState::Pending); + } + GatewayState::GatewayNotFound => { + log::debug!( + "network gateway not found, UPnP port mapping of {multiaddr} discarded" + ); + } + GatewayState::NonRoutableGateway(addr) => { + log::debug!( + "the network gateway is not exposed to the public network, \ + it's ip is {addr}. UPnP port mapping of {multiaddr} discarded" + ); + } + }; + } + FromSwarm::ExpiredListenAddr(ExpiredListenAddr { + listener_id, + addr: _addr, + }) => { + if let GatewayState::Available(ref mut gateway) = &mut self.state { + if let Some((mapping, _state)) = self.mappings.remove_entry(&listener_id) { + if let Err(err) = gateway + .sender + .try_send(GatewayRequest::RemoveMapping(mapping.clone())) + { + log::debug!( + "could not request port removal for {} on the gateway: {}", + mapping.multiaddr, + err + ); + } + self.mappings.insert(mapping, MappingState::Pending); + } + } + } + FromSwarm::ConnectionEstablished(_) + | FromSwarm::ConnectionClosed(_) + | FromSwarm::AddressChange(_) + | FromSwarm::DialFailure(_) + | FromSwarm::ListenFailure(_) + | FromSwarm::NewListener(_) + | FromSwarm::ListenerError(_) + | FromSwarm::ListenerClosed(_) + | FromSwarm::NewExternalAddrCandidate(_) + | FromSwarm::ExternalAddrConfirmed(_) + | FromSwarm::ExternalAddrExpired(_) => {} + } + } + + fn on_connection_handler_event( + &mut self, + _peer_id: PeerId, + _connection_id: 
ConnectionId, + event: libp2p_swarm::THandlerOutEvent, + ) { + void::unreachable(event) + } + + fn poll( + &mut self, + cx: &mut Context<'_>, + _params: &mut impl PollParameters, + ) -> Poll>> { + // If there are pending addresses to be emitted we emit them. + if let Some(event) = self.pending_events.pop_front() { + return Poll::Ready(ToSwarm::GenerateEvent(event)); + } + + // Loop through the gateway state so that if it changes from `Searching` to `Available` + // we poll the pending mapping requests. + loop { + match self.state { + GatewayState::Searching(ref mut fut) => match Pin::new(fut).poll(cx) { + Poll::Ready(result) => { + match result.expect("sender shouldn't have been dropped") { + Ok(gateway) => { + if !is_addr_global(gateway.external_addr) { + self.state = + GatewayState::NonRoutableGateway(gateway.external_addr); + log::debug!( + "the gateway is not routable, its address is {}", + gateway.external_addr + ); + return Poll::Ready(ToSwarm::GenerateEvent( + Event::NonRoutableGateway, + )); + } + self.state = GatewayState::Available(gateway); + } + Err(err) => { + log::debug!("could not find gateway: {err}"); + self.state = GatewayState::GatewayNotFound; + return Poll::Ready(ToSwarm::GenerateEvent(Event::GatewayNotFound)); + } + } + } + Poll::Pending => return Poll::Pending, + }, + GatewayState::Available(ref mut gateway) => { + // Poll pending mapping requests. 
+ if let Poll::Ready(Some(result)) = gateway.receiver.poll_next_unpin(cx) { + match result { + GatewayEvent::Mapped(mapping) => { + let new_state = MappingState::Active(Delay::new( + Duration::from_secs(MAPPING_TIMEOUT), + )); + + match self + .mappings + .insert(mapping.clone(), new_state) + .expect("mapping should exist") + { + MappingState::Pending => { + let external_multiaddr = + mapping.external_addr(gateway.external_addr); + self.pending_events.push_back(Event::NewExternalAddr( + external_multiaddr.clone(), + )); + log::debug!( + "succcessfully mapped UPnP {} for {} protocol", + mapping.internal_addr, + mapping.protocol + ); + return Poll::Ready(ToSwarm::ExternalAddrConfirmed( + external_multiaddr, + )); + } + MappingState::Active(_) => { + log::debug!( + "succcessfully renewed UPnP mapping {} for {} protocol", + mapping.internal_addr, + mapping.protocol + ); + } + _ => unreachable!(), + } + } + GatewayEvent::MapFailure(mapping, err) => { + match self + .mappings + .insert(mapping.clone(), MappingState::Failed) + .expect("mapping should exist") + { + MappingState::Active(_) => { + log::debug!( + "failed to remap UPnP mapped {} for {} protocol: {err}", + mapping.internal_addr, + mapping.protocol + ); + let external_multiaddr = + mapping.external_addr(gateway.external_addr); + self.pending_events.push_back(Event::ExpiredExternalAddr( + external_multiaddr.clone(), + )); + return Poll::Ready(ToSwarm::ExternalAddrExpired( + external_multiaddr, + )); + } + MappingState::Pending => { + log::debug!( + "failed to map upnp mapped {} for {} protocol: {err}", + mapping.internal_addr, + mapping.protocol + ); + } + _ => { + unreachable!() + } + } + } + GatewayEvent::Removed(mapping) => { + log::debug!( + "succcessfully removed UPnP mapping {} for {} protocol", + mapping.internal_addr, + mapping.protocol + ); + self.mappings + .remove(&mapping) + .expect("mapping should exist"); + } + GatewayEvent::RemovalFailure(mapping, err) => { + log::debug!( + "could not remove UPnP 
mapping {} for {} protocol: {err}", + mapping.internal_addr, + mapping.protocol + ); + if let Err(err) = gateway + .sender + .try_send(GatewayRequest::RemoveMapping(mapping.clone())) + { + log::debug!( + "could not request port removal for {} on the gateway: {}", + mapping.multiaddr, + err + ); + } + } + } + } + + // Renew expired and request inactive mappings. + self.mappings.renew(gateway, cx); + return Poll::Pending; + } + _ => return Poll::Pending, + } + } + } +} + +/// Extracts a [`SocketAddr`] and [`PortMappingProtocol`] from a given [`Multiaddr`]. +/// +/// Fails if the given [`Multiaddr`] does not begin with an IP +/// protocol encapsulating a TCP or UDP port. +fn multiaddr_to_socketaddr_protocol( + addr: Multiaddr, +) -> Result<(SocketAddr, PortMappingProtocol), ()> { + let mut iter = addr.into_iter(); + match iter.next() { + // IGD only supports Ipv4. + Some(multiaddr::Protocol::Ip4(ipv4)) if ipv4.is_private() => match iter.next() { + Some(multiaddr::Protocol::Tcp(port)) => { + return Ok(( + SocketAddr::V4(SocketAddrV4::new(ipv4, port)), + PortMappingProtocol::TCP, + )); + } + Some(multiaddr::Protocol::Udp(port)) => { + return Ok(( + SocketAddr::V4(SocketAddrV4::new(ipv4, port)), + PortMappingProtocol::UDP, + )); + } + _ => {} + }, + _ => {} + } + Err(()) +} diff --git a/protocols/upnp/src/lib.rs b/protocols/upnp/src/lib.rs new file mode 100644 index 00000000..8a74d7e8 --- /dev/null +++ b/protocols/upnp/src/lib.rs @@ -0,0 +1,37 @@ +// Copyright 2023 Protocol Labs. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! Implementation of UPnP port mapping for libp2p. +//! +//! This crate provides a `tokio::Behaviour` which +//! implements the [`libp2p_swarm::NetworkBehaviour`] trait. +//! This struct will automatically try to map the ports externally to internal +//! addresses on the gateway. +//! + +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +#[cfg(feature = "tokio")] +mod behaviour; +#[cfg(feature = "tokio")] +pub mod tokio; + +#[cfg(feature = "tokio")] +pub use behaviour::Event; diff --git a/protocols/upnp/src/tokio.rs b/protocols/upnp/src/tokio.rs new file mode 100644 index 00000000..c6a40182 --- /dev/null +++ b/protocols/upnp/src/tokio.rs @@ -0,0 +1,169 @@ +// Copyright 2023 Protocol Labs. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::{error::Error, net::IpAddr}; + +use crate::behaviour::{GatewayEvent, GatewayRequest}; +use futures::{ + channel::{mpsc, oneshot}, + SinkExt, StreamExt, +}; +use igd_next::SearchOptions; + +pub use crate::behaviour::Behaviour; + +//TODO: remove when `IpAddr::is_global` stabilizes. 
+pub(crate) fn is_addr_global(addr: IpAddr) -> bool { + match addr { + IpAddr::V4(ip) => { + !(ip.octets()[0] == 0 // "This network" + || ip.is_private() + // code for Ipv4::is_shared() + || (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) + || ip.is_loopback() + || ip.is_link_local() + // addresses reserved for future protocols (`192.0.0.0/24`) + ||(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0) + || ip.is_documentation() + // code for Ipv4::is_benchmarking() + || (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) + // code for Ipv4::is_reserved() + || (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) + || ip.is_broadcast()) + } + IpAddr::V6(ip) => { + !(ip.is_unspecified() + || ip.is_loopback() + // IPv4-mapped Address (`::ffff:0:0/96`) + || matches!(ip.segments(), [0, 0, 0, 0, 0, 0xffff, _, _]) + // IPv4-IPv6 Translat. (`64:ff9b:1::/48`) + || matches!(ip.segments(), [0x64, 0xff9b, 1, _, _, _, _, _]) + // Discard-Only Address Block (`100::/64`) + || matches!(ip.segments(), [0x100, 0, 0, 0, _, _, _, _]) + // IETF Protocol Assignments (`2001::/23`) + || (matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200) + && !( + // Port Control Protocol Anycast (`2001:1::1`) + u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001 + // Traversal Using Relays around NAT Anycast (`2001:1::2`) + || u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002 + // AMT (`2001:3::/32`) + || matches!(ip.segments(), [0x2001, 3, _, _, _, _, _, _]) + // AS112-v6 (`2001:4:112::/48`) + || matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _]) + // ORCHIDv2 (`2001:20::/28`) + || matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b)) + )) + // code for Ipv6::is_documentation() + || (ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8) + // code for Ipv6::is_unique_local() + || (ip.segments()[0] & 0xfe00) == 0xfc00 + // code for 
Ipv6::is_unicast_link_local() + || (ip.segments()[0] & 0xffc0) == 0xfe80) + } + } +} + +/// Interface that interacts with the inner gateway by messages, +/// `GatewayRequest`s and `GatewayEvent`s. +#[derive(Debug)] +pub(crate) struct Gateway { + pub(crate) sender: mpsc::Sender, + pub(crate) receiver: mpsc::Receiver, + pub(crate) external_addr: IpAddr, +} + +pub(crate) fn search_gateway() -> oneshot::Receiver>> { + let (search_result_sender, search_result_receiver) = oneshot::channel(); + + let (events_sender, mut task_receiver) = mpsc::channel(10); + let (mut task_sender, events_queue) = mpsc::channel(0); + + tokio::spawn(async move { + let gateway = match igd_next::aio::tokio::search_gateway(SearchOptions::default()).await { + Ok(gateway) => gateway, + Err(err) => { + search_result_sender + .send(Err(err.into())) + .expect("receiver shouldn't have been dropped"); + return; + } + }; + + let external_addr = match gateway.get_external_ip().await { + Ok(addr) => addr, + Err(err) => { + search_result_sender + .send(Err(err.into())) + .expect("receiver shouldn't have been dropped"); + return; + } + }; + + search_result_sender + .send(Ok(Gateway { + sender: events_sender, + receiver: events_queue, + external_addr, + })) + .expect("receiver shouldn't have been dropped"); + + loop { + // When the request sender has been dropped, `next()` returns `None` and we exit the task. 
+ let Some(req) = task_receiver.next().await else { + return; + }; + let event = match req { + GatewayRequest::AddMapping { mapping, duration } => { + let gateway = gateway.clone(); + match gateway + .add_port( + mapping.protocol, + mapping.internal_addr.port(), + mapping.internal_addr, + duration, + "rust-libp2p mapping", + ) + .await + { + Ok(()) => GatewayEvent::Mapped(mapping), + Err(err) => GatewayEvent::MapFailure(mapping, err.into()), + } + } + GatewayRequest::RemoveMapping(mapping) => { + let gateway = gateway.clone(); + match gateway + .remove_port(mapping.protocol, mapping.internal_addr.port()) + .await + { + Ok(()) => GatewayEvent::Removed(mapping), + Err(err) => GatewayEvent::RemovalFailure(mapping, err.into()), + } + } + }; + task_sender + .send(event) + .await + .expect("receiver should be available"); + } + }); + + search_result_receiver +} diff --git a/swarm-derive/Cargo.toml b/swarm-derive/Cargo.toml index 3ec7b9ef..75a3ac29 100644 --- a/swarm-derive/Cargo.toml +++ b/swarm-derive/Cargo.toml @@ -16,8 +16,8 @@ proc-macro = true [dependencies] heck = "0.4" quote = "1.0" -syn = { version = "2.0.28", default-features = false, features = ["clone-impls", "derive", "parsing", "printing", "proc-macro"] } -proc-macro-warning = "0.4.0" +syn = { version = "2.0.37", default-features = false, features = ["clone-impls", "derive", "parsing", "printing", "proc-macro"] } +proc-macro-warning = "0.4.2" proc-macro2 = "1.0" # Passing arguments to the docsrs builder in order to properly document cfg's. 
diff --git a/swarm-test/src/lib.rs b/swarm-test/src/lib.rs index 0ed8dbce..5cc85728 100644 --- a/swarm-test/src/lib.rs +++ b/swarm-test/src/lib.rs @@ -25,7 +25,7 @@ use libp2p_core::{ multiaddr::Protocol, transport::MemoryTransport, upgrade::Version, Multiaddr, Transport, }; use libp2p_identity::{Keypair, PeerId}; -use libp2p_plaintext::PlainText2Config; +use libp2p_plaintext as plaintext; use libp2p_swarm::dial_opts::PeerCondition; use libp2p_swarm::{ dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent, THandlerErr, @@ -41,8 +41,8 @@ pub trait SwarmExt { /// Create a new [`Swarm`] with an ephemeral identity. /// - /// The swarm will use a [`MemoryTransport`] together with [`PlainText2Config`] authentication layer and - /// yamux as the multiplexer. However, these details should not be relied upon by the test + /// The swarm will use a [`MemoryTransport`] together with a [`plaintext::Config`] authentication layer and + /// [`yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test /// and may change at any time. fn new_ephemeral(behaviour_fn: impl FnOnce(Keypair) -> Self::NB) -> Self where @@ -211,14 +211,14 @@ where let transport = MemoryTransport::default() .or_transport(libp2p_tcp::async_io::Transport::default()) .upgrade(Version::V1) - .authenticate(PlainText2Config { - local_public_key: identity.public(), - }) + .authenticate(plaintext::Config::new(&identity)) .multiplex(yamux::Config::default()) .timeout(Duration::from_secs(20)) .boxed(); - SwarmBuilder::without_executor(transport, behaviour_fn(identity), peer_id).build() + SwarmBuilder::without_executor(transport, behaviour_fn(identity), peer_id) + .idle_connection_timeout(Duration::from_secs(5)) // Some tests need connections to be kept alive beyond what the individual behaviour configures. 
+ .build() } async fn connect(&mut self, other: &mut Swarm) diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index 37a5cbc6..23013486 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -1,3 +1,23 @@ +## 0.43.5 + +- Fix overflow in `KeepAlive` computation that could occur if `SwarmBuilder::idle_connection_timeout` is configured with `u64::MAX`. + See [PR 4559](https://github.com/libp2p/rust-libp2p/pull/4559). + +## 0.43.4 + +- Implement `Debug` for event structs. + See [PR 4426]. + +- Improve error message when `DialPeerCondition` prevents a dial. + See [PR 4409]. + +- Introduce `SwarmBuilder::idle_connection_timeout` and deprecate `keep_alive::Behaviour` as a result. + See [PR 4161]. + +[PR 4426]: https://github.com/libp2p/rust-libp2p/pull/4426 +[PR 4409]: https://github.com/libp2p/rust-libp2p/pull/4409 +[PR 4161]: https://github.com/libp2p/rust-libp2p/pull/4161 + ## 0.43.3 - Implement `Display` for `ConnectionId`. diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index 00c18698..a991bacd 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-swarm" edition = "2021" rust-version = { workspace = true } description = "The libp2p swarm" -version = "0.43.3" +version = "0.43.5" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -21,7 +21,7 @@ libp2p-identity = { workspace = true } libp2p-swarm-derive = { workspace = true, optional = true } log = "0.4" rand = "0.8" -smallvec = "1.11.0" +smallvec = "1.11.1" void = "1" wasm-bindgen-futures = { version = "0.4.37", optional = true } getrandom = { version = "0.2.9", features = ["js"], optional = true } # Explicit dependency to be used in `wasm-bindgen` feature @@ -30,7 +30,7 @@ multistream-select = { workspace = true } [target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies] async-std = { version = "1.6.2", optional = true } -tokio = { version = "1.31", features = ["rt"], optional 
= true } +tokio = { version = "1.32", features = ["rt"], optional = true } [features] macros = ["dep:libp2p-swarm-derive"] @@ -54,7 +54,8 @@ libp2p-yamux = { path = "../muxers/yamux" } # Using `path` here because this is quickcheck = { workspace = true } void = "1" once_cell = "1.18.0" -trybuild = "1.0.83" +trybuild = "1.0.85" +tokio = { version = "1.29.1", features = ["time", "rt", "macros"] } [[test]] name = "swarm_derive" diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index 0ecdf7b3..d80f5f0c 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -408,6 +408,7 @@ pub enum CloseConnection { /// Enumeration with the list of the possible events /// to pass to [`on_swarm_event`](NetworkBehaviour::on_swarm_event). +#[derive(Debug)] pub enum FromSwarm<'a, Handler> { /// Informs the behaviour about a newly established connection to a peer. ConnectionEstablished(ConnectionEstablished<'a>), @@ -450,7 +451,7 @@ pub enum FromSwarm<'a, Handler> { } /// [`FromSwarm`] variant that informs the behaviour about a newly established connection to a peer. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct ConnectionEstablished<'a> { pub peer_id: PeerId, pub connection_id: ConnectionId, @@ -464,6 +465,7 @@ pub struct ConnectionEstablished<'a> { /// This event is always paired with an earlier /// [`FromSwarm::ConnectionEstablished`] with the same peer ID, connection ID /// and endpoint. +#[derive(Debug)] pub struct ConnectionClosed<'a, Handler> { pub peer_id: PeerId, pub connection_id: ConnectionId, @@ -474,7 +476,7 @@ pub struct ConnectionClosed<'a, Handler> { /// [`FromSwarm`] variant that informs the behaviour that the [`ConnectedPoint`] of an existing /// connection has changed. 
-#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct AddressChange<'a> { pub peer_id: PeerId, pub connection_id: ConnectionId, @@ -484,7 +486,7 @@ pub struct AddressChange<'a> { /// [`FromSwarm`] variant that informs the behaviour that the dial to a known /// or unknown node failed. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct DialFailure<'a> { pub peer_id: Option, pub error: &'a DialError, @@ -496,7 +498,7 @@ pub struct DialFailure<'a> { /// /// This can include, for example, an error during the handshake of the encryption layer, or the /// connection unexpectedly closed. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct ListenFailure<'a> { pub local_addr: &'a Multiaddr, pub send_back_addr: &'a Multiaddr, @@ -505,14 +507,14 @@ pub struct ListenFailure<'a> { } /// [`FromSwarm`] variant that informs the behaviour that a new listener was created. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct NewListener { pub listener_id: ListenerId, } /// [`FromSwarm`] variant that informs the behaviour /// that we have started listening on a new multiaddr. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct NewListenAddr<'a> { pub listener_id: ListenerId, pub addr: &'a Multiaddr, @@ -521,40 +523,40 @@ pub struct NewListenAddr<'a> { /// [`FromSwarm`] variant that informs the behaviour that a multiaddr /// we were listening on has expired, /// which means that we are no longer listening on it. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct ExpiredListenAddr<'a> { pub listener_id: ListenerId, pub addr: &'a Multiaddr, } /// [`FromSwarm`] variant that informs the behaviour that a listener experienced an error. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct ListenerError<'a> { pub listener_id: ListenerId, pub err: &'a (dyn std::error::Error + 'static), } /// [`FromSwarm`] variant that informs the behaviour that a listener closed. 
-#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct ListenerClosed<'a> { pub listener_id: ListenerId, pub reason: Result<(), &'a std::io::Error>, } /// [`FromSwarm`] variant that informs the behaviour about a new candidate for an external address for us. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct NewExternalAddrCandidate<'a> { pub addr: &'a Multiaddr, } /// [`FromSwarm`] variant that informs the behaviour that an external address was confirmed. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct ExternalAddrConfirmed<'a> { pub addr: &'a Multiaddr, } /// [`FromSwarm`] variant that informs the behaviour that an external address was removed. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct ExternalAddrExpired<'a> { pub addr: &'a Multiaddr, } diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index 3796d9a0..d99319cf 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -52,6 +52,7 @@ use libp2p_core::upgrade; use libp2p_core::upgrade::{NegotiationError, ProtocolError}; use libp2p_core::Endpoint; use libp2p_identity::PeerId; +use std::cmp::max; use std::collections::HashSet; use std::fmt::{Display, Formatter}; use std::future::Future; @@ -156,6 +157,7 @@ where local_supported_protocols: HashSet, remote_supported_protocols: HashSet, + idle_timeout: Duration, } impl fmt::Debug for Connection @@ -183,9 +185,9 @@ where mut handler: THandler, substream_upgrade_protocol_override: Option, max_negotiating_inbound_streams: usize, + idle_timeout: Duration, ) -> Self { let initial_protocols = gather_supported_protocols(&handler); - if !initial_protocols.is_empty() { handler.on_connection_event(ConnectionEvent::LocalProtocolsChange( ProtocolsChange::Added(ProtocolsAdded::from_set(&initial_protocols)), @@ -203,6 +205,7 @@ where requested_substreams: Default::default(), local_supported_protocols: initial_protocols, remote_supported_protocols: Default::default(), + idle_timeout, } } @@ 
-234,6 +237,7 @@ where substream_upgrade_protocol_override, local_supported_protocols: supported_protocols, remote_supported_protocols, + idle_timeout, } = self.get_mut(); loop { @@ -348,17 +352,39 @@ where (Shutdown::Later(timer, deadline), KeepAlive::Until(t)) => { if *deadline != t { *deadline = t; - if let Some(dur) = deadline.checked_duration_since(Instant::now()) { - timer.reset(dur) + if let Some(new_duration) = deadline.checked_duration_since(Instant::now()) + { + let effective_keep_alive = max(new_duration, *idle_timeout); + + timer.reset(effective_keep_alive) } } } - (_, KeepAlive::Until(t)) => { - if let Some(dur) = t.checked_duration_since(Instant::now()) { - *shutdown = Shutdown::Later(Delay::new(dur), t) + (_, KeepAlive::Until(earliest_shutdown)) => { + let now = Instant::now(); + + if let Some(requested) = earliest_shutdown.checked_duration_since(now) { + let effective_keep_alive = max(requested, *idle_timeout); + + let safe_keep_alive = checked_add_fraction(now, effective_keep_alive); + + // Important: We store the _original_ `Instant` given by the `ConnectionHandler` in the `Later` instance to ensure we can compare it in the above branch. + // This is quite subtle but will hopefully become simpler soon once `KeepAlive::Until` is fully deprecated. See / + *shutdown = Shutdown::Later(Delay::new(safe_keep_alive), earliest_shutdown) } } - (_, KeepAlive::No) => *shutdown = Shutdown::Asap, + (_, KeepAlive::No) if idle_timeout == &Duration::ZERO => { + *shutdown = Shutdown::Asap; + } + (Shutdown::Later(_, _), KeepAlive::No) => { + // Do nothing, i.e. let the shutdown timer continue to tick. 
+ } + (_, KeepAlive::No) => { + let now = Instant::now(); + let safe_keep_alive = checked_add_fraction(now, *idle_timeout); + + *shutdown = Shutdown::Later(Delay::new(safe_keep_alive), now + safe_keep_alive); + } (_, KeepAlive::Yes) => *shutdown = Shutdown::None, }; @@ -456,6 +482,20 @@ fn gather_supported_protocols(handler: &impl ConnectionHandler) -> HashSet Duration { + while start.checked_add(duration).is_none() { + log::debug!("{start:?} + {duration:?} cannot be presented, halving duration"); + + duration /= 2; + } + + duration +} + /// Borrowed information about an incoming connection currently being negotiated. #[derive(Debug, Copy, Clone)] pub(crate) struct IncomingInfo<'a> { @@ -696,7 +736,7 @@ enum Shutdown { #[cfg(test)] mod tests { use super::*; - use crate::keep_alive; + use crate::dummy; use futures::future; use futures::AsyncRead; use futures::AsyncWrite; @@ -704,6 +744,7 @@ mod tests { use libp2p_core::StreamMuxer; use quickcheck::*; use std::sync::{Arc, Weak}; + use std::time::Instant; use void::Void; #[test] @@ -712,14 +753,14 @@ mod tests { let max_negotiating_inbound_streams: usize = max_negotiating_inbound_streams.into(); let alive_substream_counter = Arc::new(()); - let mut connection = Connection::new( StreamMuxerBox::new(DummyStreamMuxer { counter: alive_substream_counter.clone(), }), - keep_alive::ConnectionHandler, + MockConnectionHandler::new(Duration::ZERO), None, max_negotiating_inbound_streams, + Duration::ZERO, ); let result = connection.poll_noop_waker(); @@ -743,6 +784,7 @@ mod tests { MockConnectionHandler::new(upgrade_timeout), None, 2, + Duration::ZERO, ); connection.handler.open_new_outbound(); @@ -765,6 +807,7 @@ mod tests { ConfigurableProtocolConnectionHandler::default(), None, 0, + Duration::ZERO, ); // First, start listening on a single protocol. @@ -803,6 +846,7 @@ mod tests { ConfigurableProtocolConnectionHandler::default(), None, 0, + Duration::ZERO, ); // First, remote supports a single protocol. 
@@ -846,6 +890,151 @@ mod tests { assert_eq!(connection.handler.remote_removed, vec![vec!["/bar"]]); } + #[tokio::test] + async fn idle_timeout_with_keep_alive_no() { + let idle_timeout = Duration::from_millis(100); + + let mut connection = Connection::new( + StreamMuxerBox::new(PendingStreamMuxer), + dummy::ConnectionHandler, + None, + 0, + idle_timeout, + ); + + assert!(connection.poll_noop_waker().is_pending()); + + tokio::time::sleep(idle_timeout).await; + + assert!(matches!( + connection.poll_noop_waker(), + Poll::Ready(Err(ConnectionError::KeepAliveTimeout)) + )); + } + + #[tokio::test] + async fn idle_timeout_with_keep_alive_until_greater_than_idle_timeout() { + let idle_timeout = Duration::from_millis(100); + + let mut connection = Connection::new( + StreamMuxerBox::new(PendingStreamMuxer), + KeepAliveUntilConnectionHandler { + until: Instant::now() + idle_timeout * 2, + }, + None, + 0, + idle_timeout, + ); + + assert!(connection.poll_noop_waker().is_pending()); + + tokio::time::sleep(idle_timeout).await; + + assert!( + connection.poll_noop_waker().is_pending(), + "`KeepAlive::Until` is greater than idle-timeout, continue sleeping" + ); + + tokio::time::sleep(idle_timeout).await; + + assert!(matches!( + connection.poll_noop_waker(), + Poll::Ready(Err(ConnectionError::KeepAliveTimeout)) + )); + } + + #[tokio::test] + async fn idle_timeout_with_keep_alive_until_less_than_idle_timeout() { + let idle_timeout = Duration::from_millis(100); + + let mut connection = Connection::new( + StreamMuxerBox::new(PendingStreamMuxer), + KeepAliveUntilConnectionHandler { + until: Instant::now() + idle_timeout / 2, + }, + None, + 0, + idle_timeout, + ); + + assert!(connection.poll_noop_waker().is_pending()); + + tokio::time::sleep(idle_timeout / 2).await; + + assert!( + connection.poll_noop_waker().is_pending(), + "`KeepAlive::Until` is less than idle-timeout, honor idle-timeout" + ); + + tokio::time::sleep(idle_timeout / 2).await; + + assert!(matches!( + 
connection.poll_noop_waker(), + Poll::Ready(Err(ConnectionError::KeepAliveTimeout)) + )); + } + + #[test] + fn checked_add_fraction_can_add_u64_max() { + let _ = env_logger::try_init(); + let start = Instant::now(); + + let duration = checked_add_fraction(start, Duration::from_secs(u64::MAX)); + + assert!(start.checked_add(duration).is_some()) + } + + struct KeepAliveUntilConnectionHandler { + until: Instant, + } + + impl ConnectionHandler for KeepAliveUntilConnectionHandler { + type FromBehaviour = Void; + type ToBehaviour = Void; + type Error = Void; + type InboundProtocol = DeniedUpgrade; + type OutboundProtocol = DeniedUpgrade; + type InboundOpenInfo = (); + type OutboundOpenInfo = Void; + + fn listen_protocol( + &self, + ) -> SubstreamProtocol { + SubstreamProtocol::new(DeniedUpgrade, ()) + } + + fn connection_keep_alive(&self) -> KeepAlive { + KeepAlive::Until(self.until) + } + + fn poll( + &mut self, + _: &mut Context<'_>, + ) -> Poll< + ConnectionHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::ToBehaviour, + Self::Error, + >, + > { + Poll::Pending + } + + fn on_behaviour_event(&mut self, _: Self::FromBehaviour) {} + + fn on_connection_event( + &mut self, + _: ConnectionEvent< + Self::InboundProtocol, + Self::OutboundProtocol, + Self::InboundOpenInfo, + Self::OutboundOpenInfo, + >, + ) { + } + } + struct DummyStreamMuxer { counter: Arc<()>, } diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index e9f7504f..07fc9075 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -37,7 +37,7 @@ use futures::{ ready, stream::FuturesUnordered, }; -use instant::Instant; +use instant::{Duration, Instant}; use libp2p_core::connection::Endpoint; use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt}; use std::task::Waker; @@ -135,6 +135,9 @@ where /// Receivers for [`NewConnection`] objects that are dropped. 
new_connection_dropped_listeners: FuturesUnordered>, + + /// How long a connection should be kept alive once it starts idling. + idle_connection_timeout: Duration, } #[derive(Debug)] @@ -322,6 +325,7 @@ where substream_upgrade_protocol_override: config.substream_upgrade_protocol_override, max_negotiating_inbound_streams: config.max_negotiating_inbound_streams, per_connection_event_buffer_size: config.per_connection_event_buffer_size, + idle_connection_timeout: config.idle_connection_timeout, executor, pending_connection_events_tx, pending_connection_events_rx, @@ -518,6 +522,7 @@ where handler, self.substream_upgrade_protocol_override, self.max_negotiating_inbound_streams, + self.idle_connection_timeout, ); self.executor.spawn(task::new_for_established_connection( @@ -947,6 +952,8 @@ pub(crate) struct PoolConfig { pub(crate) per_connection_event_buffer_size: usize, /// Number of addresses concurrently dialed for a single outbound connection attempt. pub(crate) dial_concurrency_factor: NonZeroU8, + /// How long a connection should be kept alive once it is idling. + pub(crate) idle_connection_timeout: Duration, /// The configured override for substream protocol upgrades, if any. 
substream_upgrade_protocol_override: Option, @@ -963,6 +970,7 @@ impl PoolConfig { task_command_buffer_size: 32, per_connection_event_buffer_size: 7, dial_concurrency_factor: NonZeroU8::new(8).expect("8 > 0"), + idle_connection_timeout: Duration::ZERO, substream_upgrade_protocol_override: None, max_negotiating_inbound_streams: 128, } diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs index dcc7ab1c..9374903f 100644 --- a/swarm/src/handler.rs +++ b/swarm/src/handler.rs @@ -222,6 +222,42 @@ pub enum ConnectionEvent<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IO RemoteProtocolsChange(ProtocolsChange<'a>), } +impl<'a, IP, OP, IOI, OOI> fmt::Debug for ConnectionEvent<'a, IP, OP, IOI, OOI> +where + IP: InboundUpgradeSend + fmt::Debug, + IP::Output: fmt::Debug, + IP::Error: fmt::Debug, + OP: OutboundUpgradeSend + fmt::Debug, + OP::Output: fmt::Debug, + OP::Error: fmt::Debug, + IOI: fmt::Debug, + OOI: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ConnectionEvent::FullyNegotiatedInbound(v) => { + f.debug_tuple("FullyNegotiatedInbound").field(v).finish() + } + ConnectionEvent::FullyNegotiatedOutbound(v) => { + f.debug_tuple("FullyNegotiatedOutbound").field(v).finish() + } + ConnectionEvent::AddressChange(v) => f.debug_tuple("AddressChange").field(v).finish(), + ConnectionEvent::DialUpgradeError(v) => { + f.debug_tuple("DialUpgradeError").field(v).finish() + } + ConnectionEvent::ListenUpgradeError(v) => { + f.debug_tuple("ListenUpgradeError").field(v).finish() + } + ConnectionEvent::LocalProtocolsChange(v) => { + f.debug_tuple("LocalProtocolsChange").field(v).finish() + } + ConnectionEvent::RemoteProtocolsChange(v) => { + f.debug_tuple("RemoteProtocolsChange").field(v).finish() + } + } + } +} + impl<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IOI, OOI> ConnectionEvent<'a, IP, OP, IOI, OOI> { @@ -262,6 +298,7 @@ impl<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IOI, OOI> /// of simultaneously open 
negotiated inbound substreams. In other words it is up to the /// [`ConnectionHandler`] implementation to stop a malicious remote node to open and keep alive /// an excessive amount of inbound substreams. +#[derive(Debug)] pub struct FullyNegotiatedInbound { pub protocol: IP::Output, pub info: IOI, @@ -271,18 +308,20 @@ pub struct FullyNegotiatedInbound { /// /// The `protocol` field is the information that was previously passed to /// [`ConnectionHandlerEvent::OutboundSubstreamRequest`]. +#[derive(Debug)] pub struct FullyNegotiatedOutbound { pub protocol: OP::Output, pub info: OOI, } /// [`ConnectionEvent`] variant that informs the handler about a change in the address of the remote. +#[derive(Debug)] pub struct AddressChange<'a> { pub new_address: &'a Multiaddr, } /// [`ConnectionEvent`] variant that informs the handler about a change in the protocols supported on the connection. -#[derive(Clone)] +#[derive(Debug, Clone)] pub enum ProtocolsChange<'a> { Added(ProtocolsAdded<'a>), Removed(ProtocolsRemoved<'a>), @@ -352,7 +391,7 @@ impl<'a> ProtocolsChange<'a> { } /// An [`Iterator`] over all protocols that have been added. -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct ProtocolsAdded<'a> { protocols: Peekable>, } @@ -366,7 +405,7 @@ impl<'a> ProtocolsAdded<'a> { } /// An [`Iterator`] over all protocols that have been removed. -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct ProtocolsRemoved<'a> { protocols: Either< Peekable>, @@ -399,6 +438,7 @@ impl<'a> Iterator for ProtocolsRemoved<'a> { /// [`ConnectionEvent`] variant that informs the handler /// that upgrading an outbound substream to the given protocol has failed. +#[derive(Debug)] pub struct DialUpgradeError { pub info: OOI, pub error: StreamUpgradeError, @@ -406,6 +446,7 @@ pub struct DialUpgradeError { /// [`ConnectionEvent`] variant that informs the handler /// that upgrading an inbound substream to the given protocol has failed. 
+#[derive(Debug)] pub struct ListenUpgradeError { pub info: IOI, pub error: IP::Error, diff --git a/swarm/src/handler/map_in.rs b/swarm/src/handler/map_in.rs index af828999..82cb12a1 100644 --- a/swarm/src/handler/map_in.rs +++ b/swarm/src/handler/map_in.rs @@ -24,6 +24,7 @@ use crate::handler::{ use std::{fmt::Debug, marker::PhantomData, task::Context, task::Poll}; /// Wrapper around a protocol handler that turns the input event into something else. +#[derive(Debug)] pub struct MapInEvent { inner: TConnectionHandler, map: TMap, diff --git a/swarm/src/handler/map_out.rs b/swarm/src/handler/map_out.rs index e92d1403..8528b563 100644 --- a/swarm/src/handler/map_out.rs +++ b/swarm/src/handler/map_out.rs @@ -25,6 +25,7 @@ use std::fmt::Debug; use std::task::{Context, Poll}; /// Wrapper around a protocol handler that turns the output event into something else. +#[derive(Debug)] pub struct MapOutEvent { inner: TConnectionHandler, map: TMap, diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 867e4941..22433d22 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -67,6 +67,9 @@ pub mod behaviour; pub mod dial_opts; pub mod dummy; pub mod handler; +#[deprecated( + note = "Configure an appropriate idle connection timeout via `SwarmBuilder::idle_connection_timeout` instead. To keep connections alive 'forever', use `Duration::from_secs(u64::MAX)`." +)] pub mod keep_alive; mod listen_opts; @@ -146,6 +149,7 @@ use libp2p_identity::PeerId; use smallvec::SmallVec; use std::collections::{HashMap, HashSet}; use std::num::{NonZeroU32, NonZeroU8, NonZeroUsize}; +use std::time::Duration; use std::{ convert::TryFrom, error, fmt, io, @@ -1518,8 +1522,17 @@ where self } + /// How long to keep a connection alive once it is idling. + /// + /// Defaults to 0. + pub fn idle_connection_timeout(mut self, timeout: Duration) -> Self { + self.pool_config.idle_connection_timeout = timeout; + self + } + /// Builds a `Swarm` with the current configuration. 
pub fn build(self) -> Swarm { + log::info!("Local peer id: {}", self.local_peer_id); Swarm { local_peer_id: self.local_peer_id, transport: self.transport, @@ -1580,9 +1593,9 @@ impl fmt::Display for DialError { f, "Dial error: tried to dial local peer id at {endpoint:?}." ), - DialError::DialPeerConditionFalse(c) => { - write!(f, "Dial error: condition {c:?} for dialing peer was false.") - } + DialError::DialPeerConditionFalse(PeerCondition::Disconnected) => write!(f, "Dial error: dial condition was configured to only happen when disconnected (`PeerCondition::Disconnected`), but node is already connected, thus cancelling new dial."), + DialError::DialPeerConditionFalse(PeerCondition::NotDialing) => write!(f, "Dial error: dial condition was configured to only happen if there is currently no ongoing dialing attempt (`PeerCondition::NotDialing`), but a dial is in progress, thus cancelling new dial."), + DialError::DialPeerConditionFalse(PeerCondition::Always) => unreachable!("Dial peer condition is by definition true."), DialError::Aborted => write!( f, "Dial error: Pending connection attempt has been aborted." 
@@ -1715,9 +1728,9 @@ pub struct ConnectionDenied { } impl ConnectionDenied { - pub fn new(cause: impl error::Error + Send + Sync + 'static) -> Self { + pub fn new(cause: impl Into>) -> Self { Self { - inner: Box::new(cause), + inner: cause.into(), } } @@ -1807,6 +1820,7 @@ fn p2p_addr(peer: Option, addr: Multiaddr) -> Result( - handler_proto: T, - ) -> SwarmBuilder>> - where - T: ConnectionHandler + Clone, - T::ToBehaviour: Clone, - O: Send + 'static, - { + fn new_test_swarm( + ) -> SwarmBuilder>> { let id_keys = identity::Keypair::generate_ed25519(); let local_public_key = id_keys.public(); let transport = transport::MemoryTransport::default() .upgrade(upgrade::Version::V1) - .authenticate(plaintext::PlainText2Config { - local_public_key: local_public_key.clone(), - }) + .authenticate(plaintext::Config::new(&id_keys)) .multiplex(yamux::Config::default()) .boxed(); - let behaviour = CallTraceBehaviour::new(MockBehaviour::new(handler_proto)); - match ThreadPool::new().ok() { + let behaviour = CallTraceBehaviour::new(MockBehaviour::new(dummy::ConnectionHandler)); + let builder = match ThreadPool::new().ok() { Some(tp) => { SwarmBuilder::with_executor(transport, behaviour, local_public_key.into(), tp) } None => SwarmBuilder::without_executor(transport, behaviour, local_public_key.into()), - } + }; + + builder.idle_connection_timeout(Duration::from_secs(5)) } fn swarms_connected( @@ -1902,12 +1910,8 @@ mod tests { /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] #[test] fn test_swarm_disconnect() { - // Since the test does not try to open any substreams, we can - // use the dummy protocols handler. 
- let handler_proto = keep_alive::ConnectionHandler; - - let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build(); - let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build(); + let mut swarm1 = new_test_swarm().build(); + let mut swarm2 = new_test_swarm().build(); let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); @@ -1968,12 +1972,8 @@ mod tests { /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] #[test] fn test_behaviour_disconnect_all() { - // Since the test does not try to open any substreams, we can - // use the dummy protocols handler. - let handler_proto = keep_alive::ConnectionHandler; - - let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build(); - let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build(); + let mut swarm1 = new_test_swarm().build(); + let mut swarm2 = new_test_swarm().build(); let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); @@ -2038,12 +2038,8 @@ mod tests { /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] #[test] fn test_behaviour_disconnect_one() { - // Since the test does not try to open any substreams, we can - // use the dummy protocols handler. 
- let handler_proto = keep_alive::ConnectionHandler; - - let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build(); - let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build(); + let mut swarm1 = new_test_swarm().build(); + let mut swarm2 = new_test_swarm().build(); let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); @@ -2121,7 +2117,7 @@ mod tests { fn prop(concurrency_factor: DialConcurrencyFactor) { block_on(async { - let mut swarm = new_test_swarm::<_, ()>(keep_alive::ConnectionHandler) + let mut swarm = new_test_swarm() .dial_concurrency_factor(concurrency_factor.0) .build(); @@ -2157,19 +2153,14 @@ mod tests { ) .unwrap(); for mut transport in transports.into_iter() { - loop { - match futures::future::select(transport.select_next_some(), swarm.next()) - .await - { - future::Either::Left((TransportEvent::Incoming { .. }, _)) => { - break; - } - future::Either::Left(_) => { - panic!("Unexpected transport event.") - } - future::Either::Right((e, _)) => { - panic!("Expect swarm to not emit any event {e:?}") - } + match futures::future::select(transport.select_next_some(), swarm.next()).await + { + future::Either::Left((TransportEvent::Incoming { .. }, _)) => {} + future::Either::Left(_) => { + panic!("Unexpected transport event.") + } + future::Either::Right((e, _)) => { + panic!("Expect swarm to not emit any event {e:?}") } } } @@ -2189,8 +2180,8 @@ mod tests { // Checks whether dialing an address containing the wrong peer id raises an error // for the expected peer id instead of the obtained peer id. 
- let mut swarm1 = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); - let mut swarm2 = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); + let mut swarm1 = new_test_swarm().build(); + let mut swarm2 = new_test_swarm().build(); swarm1.listen_on("/memory/0".parse().unwrap()).unwrap(); @@ -2249,7 +2240,7 @@ mod tests { // // The last two can happen in any order. - let mut swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); + let mut swarm = new_test_swarm().build(); swarm.listen_on("/memory/0".parse().unwrap()).unwrap(); let local_address = @@ -2309,7 +2300,7 @@ mod tests { fn dial_self_by_id() { // Trying to dial self by passing the same `PeerId` shouldn't even be possible in the first // place. - let swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); + let swarm = new_test_swarm().build(); let peer_id = *swarm.local_peer_id(); assert!(!swarm.is_connected(&peer_id)); } @@ -2320,7 +2311,7 @@ mod tests { let target = PeerId::random(); - let mut swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); + let mut swarm = new_test_swarm().build(); let addresses = HashSet::from([ multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::())], @@ -2366,8 +2357,8 @@ mod tests { fn aborting_pending_connection_surfaces_error() { let _ = env_logger::try_init(); - let mut dialer = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); - let mut listener = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); + let mut dialer = new_test_swarm().build(); + let mut listener = new_test_swarm().build(); let listener_peer_id = *listener.local_peer_id(); listener.listen_on(multiaddr![Memory(0u64)]).unwrap(); diff --git a/swarm/tests/swarm_derive.rs b/swarm/tests/swarm_derive.rs index fa3f6c69..d0680591 100644 --- a/swarm/tests/swarm_derive.rs +++ b/swarm/tests/swarm_derive.rs @@ -98,7 +98,7 @@ fn three_fields() { struct Foo { ping: ping::Behaviour, identify: identify::Behaviour, - kad: libp2p_kad::Kademlia, + kad: 
libp2p_kad::Behaviour, } #[allow( @@ -115,7 +115,7 @@ fn three_fields() { let _: identify::Event = event; } FooEvent::Kad(event) => { - let _: libp2p_kad::KademliaEvent = event; + let _: libp2p_kad::Event = event; } } } @@ -327,7 +327,7 @@ fn with_either() { #[derive(NetworkBehaviour)] #[behaviour(prelude = "libp2p_swarm::derive_prelude")] struct Foo { - kad: libp2p_kad::Kademlia, + kad: libp2p_kad::Behaviour, ping_or_identify: Either, } @@ -351,7 +351,7 @@ fn with_generics() { fn foo() { require_net_behaviour::< Foo< - libp2p_kad::Kademlia, + libp2p_kad::Behaviour, libp2p_ping::Behaviour, >, >(); @@ -370,7 +370,7 @@ fn with_generics_mixed() { #[allow(dead_code)] fn foo() { - require_net_behaviour::>>( + require_net_behaviour::>>( ); } } @@ -381,12 +381,12 @@ fn custom_event_with_either() { #[allow(clippy::large_enum_variant)] enum BehaviourOutEvent { - Kad(libp2p_kad::KademliaEvent), + Kad(libp2p_kad::Event), PingOrIdentify(Either), } - impl From for BehaviourOutEvent { - fn from(event: libp2p_kad::KademliaEvent) -> Self { + impl From for BehaviourOutEvent { + fn from(event: libp2p_kad::Event) -> Self { BehaviourOutEvent::Kad(event) } } @@ -404,7 +404,7 @@ fn custom_event_with_either() { prelude = "libp2p_swarm::derive_prelude" )] struct Foo { - kad: libp2p_kad::Kademlia, + kad: libp2p_kad::Behaviour, ping_or_identify: Either, } diff --git a/transports/deflate/CHANGELOG.md b/transports/deflate/CHANGELOG.md index f75a7bf2..bb1b85d6 100644 --- a/transports/deflate/CHANGELOG.md +++ b/transports/deflate/CHANGELOG.md @@ -1,3 +1,9 @@ +## 0.40.1 - unreleased + +- Deprecate in preparation for removal from the workspace. + See [issue 4522](https://github.com/libp2p/rust-libp2p/issues/4522) for details. + See [PR 4540](https://github.com/libp2p/rust-libp2p/pull/4540). + ## 0.40.0 - Raise MSRV to 1.65. 
diff --git a/transports/deflate/Cargo.toml b/transports/deflate/Cargo.toml index 1d096316..95f552f6 100644 --- a/transports/deflate/Cargo.toml +++ b/transports/deflate/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-deflate" edition = "2021" rust-version = { workspace = true } description = "Deflate encryption protocol for libp2p" -version = "0.40.0" +version = "0.40.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/transports/deflate/src/lib.rs b/transports/deflate/src/lib.rs index 0d837138..54367ff2 100644 --- a/transports/deflate/src/lib.rs +++ b/transports/deflate/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(deprecated)] // Copyright 2019 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any person obtaining a @@ -17,13 +18,15 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. - #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use futures::{prelude::*, ready}; use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use std::{io, iter, pin::Pin, task::Context, task::Poll}; +#[deprecated( + note = "Will be removed in the next release, see https://github.com/libp2p/rust-libp2p/issues/4522 for details." +)] #[derive(Debug, Copy, Clone)] pub struct DeflateConfig { compression: flate2::Compression, diff --git a/transports/deflate/tests/test.rs b/transports/deflate/tests/test.rs index 504888a7..4224dcf4 100644 --- a/transports/deflate/tests/test.rs +++ b/transports/deflate/tests/test.rs @@ -1,3 +1,5 @@ +#![allow(deprecated)] + // Copyright 2019 Parity Technologies (UK) Ltd. 
// // Permission is hereby granted, free of charge, to any person obtaining a diff --git a/transports/dns/CHANGELOG.md b/transports/dns/CHANGELOG.md index a51e4165..29b5ac44 100644 --- a/transports/dns/CHANGELOG.md +++ b/transports/dns/CHANGELOG.md @@ -1,3 +1,11 @@ +## 0.40.1 - unreleased + +- Remove `Dns` prefix from types like `TokioDnsConfig` and `DnsConfig` in favor of modules that describe the different variants. + Users are encouraged to import the `libp2p::dns` module and refer to types as `dns::tokio::Transport` and `dns::async_std::Transport`. + See [PR 4505]. + +[PR 4505]: https://github.com/libp2p/rust-libp2p/pull/4505 + ## 0.40.0 - Raise MSRV to 1.65. diff --git a/transports/dns/Cargo.toml b/transports/dns/Cargo.toml index a85307aa..81ee2312 100644 --- a/transports/dns/Cargo.toml +++ b/transports/dns/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-dns" edition = "2021" rust-version = { workspace = true } description = "DNS transport implementation for libp2p" -version = "0.40.0" +version = "0.40.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,14 +11,15 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] +async-trait = "0.1.72" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } log = "0.4.20" futures = "0.3.28" -async-std-resolver = { version = "0.22", optional = true } +async-std-resolver = { version = "0.23", optional = true } parking_lot = "0.12.0" -trust-dns-resolver = { version = "0.22", default-features = false, features = ["system-config"] } -smallvec = "1.11.0" +trust-dns-resolver = { version = "0.23", default-features = false, features = ["system-config"] } +smallvec = "1.11.1" [dev-dependencies] env_logger = "0.10" @@ -34,7 +35,7 @@ tokio = ["trust-dns-resolver/tokio-runtime"] tokio-dns-over-rustls = ["tokio", "trust-dns-resolver/dns-over-rustls"] tokio-dns-over-https-rustls = ["tokio", 
"trust-dns-resolver/dns-over-https-rustls"] -# Passing arguments to the docsrs builder in order to properly document cfg's. +# Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling [package.metadata.docs.rs] all-features = true diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index 771c7dbc..b27b14a7 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -21,17 +21,17 @@ //! # [DNS name resolution](https://github.com/libp2p/specs/blob/master/addressing/README.md#ip-and-name-resolution) //! [`Transport`] for libp2p. //! -//! This crate provides the type [`GenDnsConfig`] with its instantiations -//! [`DnsConfig`] and `TokioDnsConfig` for use with `async-std` and `tokio`, +//! This crate provides the type [`async_std::Transport`] and [`tokio::Transport`] +//! for use with `async-std` and `tokio`, //! respectively. //! -//! A [`GenDnsConfig`] is an address-rewriting [`Transport`] wrapper around +//! A [`Transport`] is an address-rewriting [`libp2p_core::Transport`] wrapper around //! an inner `Transport`. The composed transport behaves like the inner -//! transport, except that [`Transport::dial`] resolves `/dns/...`, `/dns4/...`, +//! transport, except that [`libp2p_core::Transport::dial`] resolves `/dns/...`, `/dns4/...`, //! `/dns6/...` and `/dnsaddr/...` components of the given `Multiaddr` through //! a DNS, replacing them with the resolved protocols (typically TCP/IP). //! -//! The `async-std` feature and hence the `DnsConfig` are +//! The `async-std` feature and hence the [`async_std::Transport`] are //! enabled by default. Tokio users can furthermore opt-in //! to the `tokio-dns-over-rustls` and `tokio-dns-over-https-rustls` //! features. For more information about these features, please @@ -49,7 +49,7 @@ //! problematic on platforms like Android, where there's a lot of //! complexity hidden behind the system APIs. //! 
If the implementation requires different characteristics, one should -//! consider providing their own implementation of [`GenDnsConfig`] or use +//! consider providing their own implementation of [`Transport`] or use //! platform specific APIs to extract the host's DNS configuration (if possible) //! and provide a custom [`ResolverConfig`]. //! @@ -58,35 +58,108 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #[cfg(feature = "async-std")] -use async_std_resolver::{AsyncStdConnection, AsyncStdConnectionProvider}; +pub mod async_std { + use async_std_resolver::AsyncStdResolver; + use parking_lot::Mutex; + use std::{io, sync::Arc}; + use trust_dns_resolver::{ + config::{ResolverConfig, ResolverOpts}, + system_conf, + }; + + /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses + /// using `async-std` for all async I/O. + pub type Transport = crate::Transport; + + impl Transport { + /// Creates a new [`Transport`] from the OS's DNS configuration and defaults. + pub async fn system(inner: T) -> Result, io::Error> { + let (cfg, opts) = system_conf::read_system_conf()?; + Self::custom(inner, cfg, opts).await + } + + /// Creates a [`Transport`] with a custom resolver configuration and options. + pub async fn custom( + inner: T, + cfg: ResolverConfig, + opts: ResolverOpts, + ) -> Result, io::Error> { + Ok(Transport { + inner: Arc::new(Mutex::new(inner)), + resolver: async_std_resolver::resolver(cfg, opts).await, + }) + } + } +} + +#[cfg(feature = "async-std")] +#[deprecated(note = "Use `async_std::Transport` instead.")] +pub type DnsConfig = async_std::Transport; + +#[cfg(feature = "tokio")] +pub mod tokio { + use parking_lot::Mutex; + use std::sync::Arc; + use trust_dns_resolver::{system_conf, TokioAsyncResolver}; + + /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses + /// using `tokio` for all async I/O. 
+ pub type Transport = crate::Transport; + + impl Transport { + /// Creates a new [`Transport`] from the OS's DNS configuration and defaults. + pub fn system(inner: T) -> Result, std::io::Error> { + let (cfg, opts) = system_conf::read_system_conf()?; + Self::custom(inner, cfg, opts) + } + + /// Creates a [`Transport`] with a custom resolver configuration + /// and options. + pub fn custom( + inner: T, + cfg: trust_dns_resolver::config::ResolverConfig, + opts: trust_dns_resolver::config::ResolverOpts, + ) -> Result, std::io::Error> { + // TODO: Make infallible in next breaking release. Or deprecation? + Ok(Transport { + inner: Arc::new(Mutex::new(inner)), + resolver: TokioAsyncResolver::tokio(cfg, opts), + }) + } + } +} + +#[cfg(feature = "tokio")] +#[deprecated(note = "Use `tokio::Transport` instead.")] +pub type TokioDnsConfig = tokio::Transport; + +use async_trait::async_trait; use futures::{future::BoxFuture, prelude::*}; use libp2p_core::{ connection::Endpoint, multiaddr::{Multiaddr, Protocol}, transport::{ListenerId, TransportError, TransportEvent}, - Transport, }; use parking_lot::Mutex; use smallvec::SmallVec; use std::io; +use std::net::{Ipv4Addr, Ipv6Addr}; use std::{ convert::TryFrom, error, fmt, iter, - net::IpAddr, ops::DerefMut, pin::Pin, str, sync::Arc, task::{Context, Poll}, }; -#[cfg(any(feature = "async-std", feature = "tokio"))] -use trust_dns_resolver::system_conf; -use trust_dns_resolver::{proto::xfer::dns_handle::DnsHandle, AsyncResolver, ConnectionProvider}; -#[cfg(feature = "tokio")] -use trust_dns_resolver::{TokioAsyncResolver, TokioConnection, TokioConnectionProvider}; pub use trust_dns_resolver::config::{ResolverConfig, ResolverOpts}; pub use trust_dns_resolver::error::{ResolveError, ResolveErrorKind}; +use trust_dns_resolver::lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}; +use trust_dns_resolver::lookup_ip::LookupIp; +use trust_dns_resolver::name_server::ConnectionProvider; +use trust_dns_resolver::AsyncResolver; /// The prefix for 
`dnsaddr` protocol TXT record lookups. const DNSADDR_PREFIX: &str = "_dnsaddr."; @@ -106,98 +179,28 @@ const MAX_DNS_LOOKUPS: usize = 32; /// result of a single `/dnsaddr` lookup. const MAX_TXT_RECORDS: usize = 16; -/// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses -/// using `async-std` for all async I/O. -#[cfg(feature = "async-std")] -pub type DnsConfig = GenDnsConfig; - -/// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses -/// using `tokio` for all async I/O. -#[cfg(feature = "tokio")] -pub type TokioDnsConfig = GenDnsConfig; - -/// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses. -pub struct GenDnsConfig -where - C: DnsHandle, - P: ConnectionProvider, -{ +/// A [`Transport`] for performing DNS lookups when dialing `Multiaddr`esses. +/// You shouldn't need to use this type directly. Use [`tokio::Transport`] or [`async_std::Transport`] instead. +#[derive(Debug)] +pub struct Transport { /// The underlying transport. inner: Arc>, /// The DNS resolver used when dialing addresses with DNS components. - resolver: AsyncResolver, + resolver: R, } -#[cfg(feature = "async-std")] -impl DnsConfig +#[deprecated(note = "Use `async_std::Transport` or `tokio::Transport` instead.")] +pub type GenDnsConfig = Transport; + +impl libp2p_core::Transport for Transport where - T: Send, -{ - /// Creates a new [`DnsConfig`] from the OS's DNS configuration and defaults. - pub async fn system(inner: T) -> Result, io::Error> { - let (cfg, opts) = system_conf::read_system_conf()?; - Self::custom(inner, cfg, opts).await - } - - /// Creates a [`DnsConfig`] with a custom resolver configuration and options. 
- pub async fn custom( - inner: T, - cfg: ResolverConfig, - opts: ResolverOpts, - ) -> Result, io::Error> { - Ok(DnsConfig { - inner: Arc::new(Mutex::new(inner)), - resolver: async_std_resolver::resolver(cfg, opts).await?, - }) - } -} - -#[cfg(feature = "tokio")] -impl TokioDnsConfig -where - T: Send, -{ - /// Creates a new [`TokioDnsConfig`] from the OS's DNS configuration and defaults. - pub fn system(inner: T) -> Result, io::Error> { - let (cfg, opts) = system_conf::read_system_conf()?; - Self::custom(inner, cfg, opts) - } - - /// Creates a [`TokioDnsConfig`] with a custom resolver configuration - /// and options. - pub fn custom( - inner: T, - cfg: ResolverConfig, - opts: ResolverOpts, - ) -> Result, io::Error> { - Ok(TokioDnsConfig { - inner: Arc::new(Mutex::new(inner)), - resolver: TokioAsyncResolver::tokio(cfg, opts)?, - }) - } -} - -impl fmt::Debug for GenDnsConfig -where - C: DnsHandle, - P: ConnectionProvider, - T: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_tuple("GenDnsConfig").field(&self.inner).finish() - } -} - -impl Transport for GenDnsConfig -where - T: Transport + Send + Unpin + 'static, + T: libp2p_core::Transport + Send + Unpin + 'static, T::Error: Send, T::Dial: Send, - C: DnsHandle, - P: ConnectionProvider, + R: Clone + Send + Sync + Resolver + 'static, { type Output = T::Output; - type Error = DnsErr; + type Error = Error; type ListenerUpgrade = future::MapErr Self::Error>; type Dial = future::Either< future::MapErr Self::Error>, @@ -212,7 +215,7 @@ where self.inner .lock() .listen_on(id, addr) - .map_err(|e| e.map(DnsErr::Transport)) + .map_err(|e| e.map(Error::Transport)) } fn remove_listener(&mut self, id: ListenerId) -> bool { @@ -239,27 +242,29 @@ where cx: &mut Context<'_>, ) -> Poll> { let mut inner = self.inner.lock(); - Transport::poll(Pin::new(inner.deref_mut()), cx).map(|event| { + libp2p_core::Transport::poll(Pin::new(inner.deref_mut()), cx).map(|event| { event - .map_upgrade(|upgr| 
upgr.map_err::<_, fn(_) -> _>(DnsErr::Transport)) - .map_err(DnsErr::Transport) + .map_upgrade(|upgr| upgr.map_err::<_, fn(_) -> _>(Error::Transport)) + .map_err(Error::Transport) }) } } -impl GenDnsConfig +impl Transport where - T: Transport + Send + Unpin + 'static, + T: libp2p_core::Transport + Send + Unpin + 'static, T::Error: Send, T::Dial: Send, - C: DnsHandle, - P: ConnectionProvider, + R: Clone + Send + Sync + Resolver + 'static, { fn do_dial( &mut self, addr: Multiaddr, role_override: Endpoint, - ) -> Result<::Dial, TransportError<::Error>> { + ) -> Result< + ::Dial, + TransportError<::Error>, + > { let resolver = self.resolver.clone(); let inner = self.inner.clone(); @@ -289,7 +294,7 @@ where }) { if dns_lookups == MAX_DNS_LOOKUPS { log::debug!("Too many DNS lookups. Dropping unresolved {}.", addr); - last_err = Some(DnsErr::TooManyLookups); + last_err = Some(Error::TooManyLookups); // There may still be fully resolved addresses in `unresolved`, // so keep going until `unresolved` is empty. continue; @@ -354,12 +359,12 @@ where // actually accepted, i.e. for which it produced // a dialing future. dial_attempts += 1; - out.await.map_err(DnsErr::Transport) + out.await.map_err(Error::Transport) } Err(TransportError::MultiaddrNotSupported(a)) => { - Err(DnsErr::MultiaddrNotSupported(a)) + Err(Error::MultiaddrNotSupported(a)) } - Err(TransportError::Other(err)) => Err(DnsErr::Transport(err)), + Err(TransportError::Other(err)) => Err(Error::Transport(err)), }; match result { @@ -387,7 +392,7 @@ where // for the given address to begin with (i.e. DNS lookups succeeded but // produced no records relevant for the given `addr`). Err(last_err.unwrap_or_else(|| { - DnsErr::ResolveError(ResolveErrorKind::Message("No matching records found.").into()) + Error::ResolveError(ResolveErrorKind::Message("No matching records found.").into()) })) } .boxed() @@ -395,10 +400,10 @@ where } } -/// The possible errors of a [`GenDnsConfig`] wrapped transport. 
+/// The possible errors of a [`Transport`] wrapped transport. #[derive(Debug)] #[allow(clippy::large_enum_variant)] -pub enum DnsErr { +pub enum Error { /// The underlying transport encountered an error. Transport(TErr), /// DNS resolution failed. @@ -414,30 +419,33 @@ pub enum DnsErr { TooManyLookups, } -impl fmt::Display for DnsErr +#[deprecated(note = "Use `Error` instead.")] +pub type DnsErr = Error; + +impl fmt::Display for Error where TErr: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - DnsErr::Transport(err) => write!(f, "{err}"), - DnsErr::ResolveError(err) => write!(f, "{err}"), - DnsErr::MultiaddrNotSupported(a) => write!(f, "Unsupported resolved address: {a}"), - DnsErr::TooManyLookups => write!(f, "Too many DNS lookups"), + Error::Transport(err) => write!(f, "{err}"), + Error::ResolveError(err) => write!(f, "{err}"), + Error::MultiaddrNotSupported(a) => write!(f, "Unsupported resolved address: {a}"), + Error::TooManyLookups => write!(f, "Too many DNS lookups"), } } } -impl error::Error for DnsErr +impl error::Error for Error where TErr: error::Error + 'static, { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { - DnsErr::Transport(err) => Some(err), - DnsErr::ResolveError(err) => Some(err), - DnsErr::MultiaddrNotSupported(_) => None, - DnsErr::TooManyLookups => None, + Error::Transport(err) => Some(err), + Error::ResolveError(err) => Some(err), + Error::MultiaddrNotSupported(_) => None, + Error::TooManyLookups => None, } } } @@ -460,14 +468,10 @@ enum Resolved<'a> { /// Asynchronously resolves the domain name of a `Dns`, `Dns4`, `Dns6` or `Dnsaddr` protocol /// component. If the given protocol is of a different type, it is returned unchanged as a /// [`Resolved::One`]. 
-fn resolve<'a, E: 'a + Send, C, P>( +fn resolve<'a, E: 'a + Send, R: Resolver>( proto: &Protocol<'a>, - resolver: &'a AsyncResolver, -) -> BoxFuture<'a, Result, DnsErr>> -where - C: DnsHandle, - P: ConnectionProvider, -{ + resolver: &'a R, +) -> BoxFuture<'a, Result, Error>> { match proto { Protocol::Dns(ref name) => resolver .lookup_ip(name.clone().into_owned()) @@ -489,7 +493,7 @@ where Ok(Resolved::One(Protocol::from(one))) } } - Err(e) => Err(DnsErr::ResolveError(e)), + Err(e) => Err(Error::ResolveError(e)), }) .boxed(), Protocol::Dns4(ref name) => resolver @@ -505,15 +509,15 @@ where iter::once(one) .chain(iter::once(two)) .chain(ips) - .map(IpAddr::from) + .map(Ipv4Addr::from) .map(Protocol::from) .collect(), )) } else { - Ok(Resolved::One(Protocol::from(IpAddr::from(one)))) + Ok(Resolved::One(Protocol::from(Ipv4Addr::from(one)))) } } - Err(e) => Err(DnsErr::ResolveError(e)), + Err(e) => Err(Error::ResolveError(e)), }) .boxed(), Protocol::Dns6(ref name) => resolver @@ -529,15 +533,15 @@ where iter::once(one) .chain(iter::once(two)) .chain(ips) - .map(IpAddr::from) + .map(Ipv6Addr::from) .map(Protocol::from) .collect(), )) } else { - Ok(Resolved::One(Protocol::from(IpAddr::from(one)))) + Ok(Resolved::One(Protocol::from(Ipv6Addr::from(one)))) } } - Err(e) => Err(DnsErr::ResolveError(e)), + Err(e) => Err(Error::ResolveError(e)), }) .boxed(), Protocol::Dnsaddr(ref name) => { @@ -562,7 +566,7 @@ where } Ok(Resolved::Addrs(addrs)) } - Err(e) => Err(DnsErr::ResolveError(e)), + Err(e) => Err(Error::ResolveError(e)), }) .boxed() } @@ -583,6 +587,37 @@ fn invalid_data(e: impl Into>) -> io::E io::Error::new(io::ErrorKind::InvalidData, e) } +#[async_trait::async_trait] +#[doc(hidden)] +pub trait Resolver { + async fn lookup_ip(&self, name: String) -> Result; + async fn ipv4_lookup(&self, name: String) -> Result; + async fn ipv6_lookup(&self, name: String) -> Result; + async fn txt_lookup(&self, name: String) -> Result; +} + +#[async_trait] +impl Resolver for 
AsyncResolver +where + C: ConnectionProvider, +{ + async fn lookup_ip(&self, name: String) -> Result { + self.lookup_ip(name).await + } + + async fn ipv4_lookup(&self, name: String) -> Result { + self.ipv4_lookup(name).await + } + + async fn ipv6_lookup(&self, name: String) -> Result { + self.ipv6_lookup(name).await + } + + async fn txt_lookup(&self, name: String) -> Result { + self.txt_lookup(name).await + } +} + #[cfg(all(test, any(feature = "tokio", feature = "async-std")))] mod tests { use super::*; @@ -647,13 +682,12 @@ mod tests { } } - async fn run(mut transport: GenDnsConfig) + async fn run(mut transport: super::Transport) where - C: DnsHandle, - P: ConnectionProvider, T: Transport + Clone + Send + Unpin + 'static, T::Error: Send, T::Dial: Send, + R: Clone + Send + Sync + Resolver + 'static, { // Success due to existing A record for example.com. let _ = transport @@ -703,7 +737,7 @@ mod tests { .unwrap() .await { - Err(DnsErr::ResolveError(_)) => {} + Err(Error::ResolveError(_)) => {} Err(e) => panic!("Unexpected error: {e:?}"), Ok(_) => panic!("Unexpected success."), } @@ -714,7 +748,7 @@ mod tests { .unwrap() .await { - Err(DnsErr::ResolveError(e)) => match e.kind() { + Err(Error::ResolveError(e)) => match e.kind() { ResolveErrorKind::NoRecordsFound { .. 
} => {} _ => panic!("Unexpected DNS error: {e:?}"), }, @@ -730,7 +764,8 @@ mod tests { let config = ResolverConfig::quad9(); let opts = ResolverOpts::default(); async_std_crate::task::block_on( - DnsConfig::custom(CustomTransport, config, opts).then(|dns| run(dns.unwrap())), + async_std::Transport::custom(CustomTransport, config, opts) + .then(|dns| run(dns.unwrap())), ); } @@ -745,8 +780,9 @@ mod tests { .enable_time() .build() .unwrap(); + rt.block_on(run( - TokioDnsConfig::custom(CustomTransport, config, opts).unwrap() + tokio::Transport::custom(CustomTransport, config, opts).unwrap() )); } } diff --git a/transports/noise/CHANGELOG.md b/transports/noise/CHANGELOG.md index bebdf4f9..2a0c31c9 100644 --- a/transports/noise/CHANGELOG.md +++ b/transports/noise/CHANGELOG.md @@ -1,4 +1,8 @@ -## 0.43.0 +## 0.43.1 + +- Update dependencies. + +## 0.43.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/transports/noise/Cargo.toml b/transports/noise/Cargo.toml index 7db51263..3ebf0364 100644 --- a/transports/noise/Cargo.toml +++ b/transports/noise/Cargo.toml @@ -3,14 +3,14 @@ name = "libp2p-noise" edition = "2021" rust-version = { workspace = true } description = "Cryptographic handshake protocol using the noise framework." 
-version = "0.43.0" +version = "0.43.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" [dependencies] bytes = "1" -curve25519-dalek = "3.0.0" +curve25519-dalek = "4.1.1" futures = "0.3.28" libp2p-core = { workspace = true } libp2p-identity = { workspace = true, features = ["ed25519"] } @@ -22,7 +22,7 @@ quick-protobuf = "0.8" rand = "0.8.3" sha2 = "0.10.7" static_assertions = "1" -thiserror = "1.0.44" +thiserror = "1.0.48" x25519-dalek = "1.1.0" zeroize = "1" diff --git a/transports/plaintext/CHANGELOG.md b/transports/plaintext/CHANGELOG.md index 63fd538f..dbbc04b5 100644 --- a/transports/plaintext/CHANGELOG.md +++ b/transports/plaintext/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.40.1 - unreleased + +- Rename `Plaintext2Config` to `Config` to follow naming conventions across repository. + See [PR 4535](https://github.com/libp2p/rust-libp2p/pull/4535). + ## 0.40.0 - Raise MSRV to 1.65. diff --git a/transports/plaintext/Cargo.toml b/transports/plaintext/Cargo.toml index 1f15004d..57311467 100644 --- a/transports/plaintext/Cargo.toml +++ b/transports/plaintext/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-plaintext" edition = "2021" rust-version = { workspace = true } description = "Plaintext encryption dummy protocol for libp2p" -version = "0.40.0" +version = "0.40.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/transports/plaintext/src/error.rs b/transports/plaintext/src/error.rs index 23f196f1..a1e4d866 100644 --- a/transports/plaintext/src/error.rs +++ b/transports/plaintext/src/error.rs @@ -23,9 +23,9 @@ use std::fmt; use std::io::Error as IoError; #[derive(Debug)] -pub enum PlainTextError { +pub enum Error { /// I/O error. - IoError(IoError), + Io(IoError), /// Failed to parse the handshake protobuf message. 
InvalidPayload(DecodeError), @@ -55,52 +55,52 @@ impl error::Error for DecodeError { } } -impl error::Error for PlainTextError { +impl error::Error for Error { fn cause(&self) -> Option<&dyn error::Error> { match *self { - PlainTextError::IoError(ref err) => Some(err), - PlainTextError::InvalidPayload(ref err) => Some(err), - PlainTextError::InvalidPublicKey(ref err) => Some(err), - PlainTextError::InvalidPeerId(ref err) => Some(err), + Error::Io(ref err) => Some(err), + Error::InvalidPayload(ref err) => Some(err), + Error::InvalidPublicKey(ref err) => Some(err), + Error::InvalidPeerId(ref err) => Some(err), _ => None, } } } -impl fmt::Display for PlainTextError { +impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { match self { - PlainTextError::IoError(e) => write!(f, "I/O error: {e}"), - PlainTextError::InvalidPayload(_) => f.write_str("Failed to decode protobuf"), - PlainTextError::PeerIdMismatch => f.write_str( + Error::Io(e) => write!(f, "I/O error: {e}"), + Error::InvalidPayload(_) => f.write_str("Failed to decode protobuf"), + Error::PeerIdMismatch => f.write_str( "The peer id of the exchange isn't consistent with the remote public key", ), - PlainTextError::InvalidPublicKey(_) => f.write_str("Failed to decode public key"), - PlainTextError::InvalidPeerId(_) => f.write_str("Failed to decode PeerId"), + Error::InvalidPublicKey(_) => f.write_str("Failed to decode public key"), + Error::InvalidPeerId(_) => f.write_str("Failed to decode PeerId"), } } } -impl From for PlainTextError { - fn from(err: IoError) -> PlainTextError { - PlainTextError::IoError(err) +impl From for Error { + fn from(err: IoError) -> Error { + Error::Io(err) } } -impl From for PlainTextError { - fn from(err: DecodeError) -> PlainTextError { - PlainTextError::InvalidPayload(err) +impl From for Error { + fn from(err: DecodeError) -> Error { + Error::InvalidPayload(err) } } -impl From for PlainTextError { - fn from(err: 
libp2p_identity::DecodingError) -> PlainTextError { - PlainTextError::InvalidPublicKey(err) +impl From for Error { + fn from(err: libp2p_identity::DecodingError) -> Error { + Error::InvalidPublicKey(err) } } -impl From for PlainTextError { - fn from(err: libp2p_identity::ParseError) -> PlainTextError { - PlainTextError::InvalidPeerId(err) +impl From for Error { + fn from(err: libp2p_identity::ParseError) -> Error { + Error::InvalidPeerId(err) } } diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index 46dd6119..05e3b908 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -18,9 +18,9 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::error::{DecodeError, PlainTextError}; +use crate::error::{DecodeError, Error}; use crate::proto::Exchange; -use crate::PlainText2Config; +use crate::Config; use asynchronous_codec::{Framed, FramedParts}; use bytes::{Bytes, BytesMut}; @@ -32,7 +32,7 @@ use std::io::{Error as IoError, ErrorKind as IoErrorKind}; use unsigned_varint::codec::UviBytes; struct HandshakeContext { - config: PlainText2Config, + config: Config, state: T, } @@ -50,7 +50,8 @@ pub(crate) struct Remote { } impl HandshakeContext { - fn new(config: PlainText2Config) -> Self { + fn new(config: Config) -> Self { + #[allow(deprecated)] let exchange = Exchange { id: Some(config.local_public_key.to_peer_id().to_bytes()), pubkey: Some(config.local_public_key.encode_protobuf()), @@ -69,10 +70,7 @@ impl HandshakeContext { } } - fn with_remote( - self, - exchange_bytes: BytesMut, - ) -> Result, PlainTextError> { + fn with_remote(self, exchange_bytes: BytesMut) -> Result, Error> { let mut reader = BytesReader::from_bytes(&exchange_bytes); let prop = Exchange::from_reader(&mut reader, &exchange_bytes).map_err(DecodeError)?; @@ -81,7 +79,7 @@ impl HandshakeContext { // Check the validity of the remote's `Exchange`. 
if peer_id != public_key.to_peer_id() { - return Err(PlainTextError::PeerIdMismatch); + return Err(Error::PeerIdMismatch); } Ok(HandshakeContext { @@ -94,10 +92,7 @@ impl HandshakeContext { } } -pub(crate) async fn handshake( - socket: S, - config: PlainText2Config, -) -> Result<(S, Remote, Bytes), PlainTextError> +pub(crate) async fn handshake(socket: S, config: Config) -> Result<(S, Remote, Bytes), Error> where S: AsyncRead + AsyncWrite + Send + Unpin, { diff --git a/transports/plaintext/src/lib.rs b/transports/plaintext/src/lib.rs index 76e70a02..fa7cba6b 100644 --- a/transports/plaintext/src/lib.rs +++ b/transports/plaintext/src/lib.rs @@ -22,7 +22,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use crate::error::PlainTextError; +use crate::error::Error; use bytes::Bytes; use futures::future::BoxFuture; @@ -46,14 +46,29 @@ mod proto { pub(crate) use self::structs::Exchange; } -/// `PlainText2Config` is an insecure connection handshake for testing purposes only, implementing -/// the libp2p plaintext connection handshake specification. +#[deprecated(note = "Has been renamed to `Config`.")] +pub type PlainText2Config = Config; + +#[deprecated(note = "Has been renamed to `Output`.")] +pub type PlainTextOutput = Output; + +/// [`Config`] is an insecure connection handshake for testing purposes only. 
#[derive(Clone)] -pub struct PlainText2Config { +pub struct Config { + #[deprecated(note = "Will be made private in the future, please use `Config::new` instead!")] pub local_public_key: identity::PublicKey, } -impl UpgradeInfo for PlainText2Config { +impl Config { + #[allow(deprecated)] + pub fn new(identity: &identity::Keypair) -> Self { + Self { + local_public_key: identity.public(), + } + } +} + +impl UpgradeInfo for Config { type Info = &'static str; type InfoIter = iter::Once; @@ -62,12 +77,12 @@ impl UpgradeInfo for PlainText2Config { } } -impl InboundUpgrade for PlainText2Config +impl InboundUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - type Output = (PeerId, PlainTextOutput); - type Error = PlainTextError; + type Output = (PeerId, Output); + type Error = Error; type Future = BoxFuture<'static, Result>; fn upgrade_inbound(self, socket: C, _: Self::Info) -> Self::Future { @@ -75,12 +90,12 @@ where } } -impl OutboundUpgrade for PlainText2Config +impl OutboundUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - type Output = (PeerId, PlainTextOutput); - type Error = PlainTextError; + type Output = (PeerId, Output); + type Error = Error; type Future = BoxFuture<'static, Result>; fn upgrade_outbound(self, socket: C, _: Self::Info) -> Self::Future { @@ -88,8 +103,8 @@ where } } -impl PlainText2Config { - async fn handshake(self, socket: T) -> Result<(PeerId, PlainTextOutput), PlainTextError> +impl Config { + async fn handshake(self, socket: T) -> Result<(PeerId, Output), Error> where T: AsyncRead + AsyncWrite + Send + Unpin + 'static, { @@ -99,7 +114,7 @@ impl PlainText2Config { Ok(( remote.peer_id, - PlainTextOutput { + Output { socket, remote_key: remote.public_key, read_buffer, @@ -109,7 +124,7 @@ impl PlainText2Config { } /// Output of the plaintext protocol. 
-pub struct PlainTextOutput +pub struct Output where S: AsyncRead + AsyncWrite + Unpin, { @@ -123,7 +138,7 @@ where read_buffer: Bytes, } -impl AsyncRead for PlainTextOutput { +impl AsyncRead for Output { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -139,7 +154,7 @@ impl AsyncRead for PlainTextOutput { } } -impl AsyncWrite for PlainTextOutput { +impl AsyncWrite for Output { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, diff --git a/transports/plaintext/tests/smoke.rs b/transports/plaintext/tests/smoke.rs index 7147ed56..ed18fb44 100644 --- a/transports/plaintext/tests/smoke.rs +++ b/transports/plaintext/tests/smoke.rs @@ -21,7 +21,7 @@ use futures::io::{AsyncReadExt, AsyncWriteExt}; use libp2p_core::InboundUpgrade; use libp2p_identity as identity; -use libp2p_plaintext::PlainText2Config; +use libp2p_plaintext as plaintext; use log::debug; use quickcheck::QuickCheck; @@ -34,10 +34,7 @@ fn variable_msg_length() { let msg_to_receive = msg; let server_id = identity::Keypair::generate_ed25519(); - let server_id_public = server_id.public(); - let client_id = identity::Keypair::generate_ed25519(); - let client_id_public = client_id.public(); let (server, client) = futures_ringbuf::Endpoint::pair(100, 100); @@ -46,14 +43,8 @@ fn variable_msg_length() { (received_client_id, mut server_channel), (received_server_id, mut client_channel), ) = futures::future::try_join( - PlainText2Config { - local_public_key: server_id_public, - } - .upgrade_inbound(server, ""), - PlainText2Config { - local_public_key: client_id_public, - } - .upgrade_inbound(client, ""), + plaintext::Config::new(&server_id).upgrade_inbound(server, ""), + plaintext::Config::new(&client_id).upgrade_inbound(client, ""), ) .await .unwrap(); diff --git a/transports/pnet/Cargo.toml b/transports/pnet/Cargo.toml index 597709c5..5861bf2d 100644 --- a/transports/pnet/Cargo.toml +++ b/transports/pnet/Cargo.toml @@ -27,7 +27,7 @@ libp2p-tcp = { workspace = true, features = ["tokio"] } 
libp2p-websocket = { workspace = true } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } -tokio = { version = "1.31.0", features = ["full"] } +tokio = { version = "1.32.0", features = ["full"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/pnet/tests/smoke.rs b/transports/pnet/tests/smoke.rs index a7635c00..5e02ed85 100644 --- a/transports/pnet/tests/smoke.rs +++ b/transports/pnet/tests/smoke.rs @@ -6,7 +6,7 @@ use libp2p_core::upgrade::Version; use libp2p_core::Transport; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_pnet::{PnetConfig, PreSharedKey}; -use libp2p_swarm::{keep_alive, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p_swarm::{dummy, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; const TIMEOUT: Duration = Duration::from_secs(5); @@ -98,7 +98,7 @@ where assert_eq!(&outbound_peer_id, swarm1.local_peer_id()); } -fn make_swarm(transport: T, pnet: PnetConfig) -> Swarm +fn make_swarm(transport: T, pnet: PnetConfig) -> Swarm where T: Transport + Send + Unpin + 'static, ::Error: Send + Sync + 'static, @@ -113,12 +113,9 @@ where .authenticate(libp2p_noise::Config::new(&identity).unwrap()) .multiplex(libp2p_yamux::Config::default()) .boxed(); - SwarmBuilder::with_tokio_executor( - transport, - keep_alive::Behaviour, - identity.public().to_peer_id(), - ) - .build() + SwarmBuilder::with_tokio_executor(transport, dummy::Behaviour, identity.public().to_peer_id()) + .idle_connection_timeout(Duration::from_secs(5)) + .build() } async fn listen_on(swarm: &mut Swarm, addr: Multiaddr) -> Multiaddr { diff --git a/transports/quic/CHANGELOG.md b/transports/quic/CHANGELOG.md index c7da2895..a7ad810b 100644 --- a/transports/quic/CHANGELOG.md +++ b/transports/quic/CHANGELOG.md @@ -1,4 +1,10 @@ -## 0.9.2 - unreleased +## 0.9.3 - unreleased + +- Support QUIC stateless resets for supported 
`libp2p_identity::Keypair`s. See [PR 4554]. + +[PR 4554]: https://github.com/libp2p/rust-libp2p/pull/4554 + +## 0.9.2 - Cut stable release. diff --git a/transports/quic/Cargo.toml b/transports/quic/Cargo.toml index 9344970a..cac3c05a 100644 --- a/transports/quic/Cargo.toml +++ b/transports/quic/Cargo.toml @@ -10,7 +10,7 @@ license = "MIT" [dependencies] async-std = { version = "1.12.0", optional = true } -bytes = "1.4.0" +bytes = "1.5.0" futures = "0.3.28" futures-timer = "3.0.2" if-watch = "3.0.1" @@ -21,10 +21,11 @@ log = "0.4" parking_lot = "0.12.0" quinn = { version = "0.10.2", default-features = false, features = ["tls-rustls", "futures-io"] } rand = "0.8.5" -rustls = { version = "0.21.6", default-features = false } -thiserror = "1.0.44" -tokio = { version = "1.31.0", default-features = false, features = ["net", "rt", "time"], optional = true } -socket2 = "0.5.3" +rustls = { version = "0.21.7", default-features = false } +thiserror = "1.0.48" +tokio = { version = "1.32.0", default-features = false, features = ["net", "rt", "time"], optional = true } +socket2 = "0.5.4" +ring = "0.16.20" [features] tokio = ["dep:tokio", "if-watch/tokio", "quinn/runtime-tokio"] @@ -45,7 +46,7 @@ libp2p-noise = { workspace = true } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } quickcheck = "1" -tokio = { version = "1.31.0", features = ["macros", "rt-multi-thread", "time"] } +tokio = { version = "1.32.0", features = ["macros", "rt-multi-thread", "time"] } [[test]] name = "stream_compliance" diff --git a/transports/quic/src/config.rs b/transports/quic/src/config.rs index 201594e2..5351a537 100644 --- a/transports/quic/src/config.rs +++ b/transports/quic/src/config.rs @@ -61,6 +61,8 @@ pub struct Config { client_tls_config: Arc, /// TLS server config for the inner [`quinn::ServerConfig`]. server_tls_config: Arc, + /// Libp2p identity of the node. 
+ keypair: libp2p_identity::Keypair, } impl Config { @@ -80,6 +82,7 @@ impl Config { // Ensure that one stream is not consuming the whole connection. max_stream_data: 10_000_000, + keypair: keypair.clone(), } } } @@ -104,6 +107,7 @@ impl From for QuinnConfig { max_stream_data, support_draft_29, handshake_timeout: _, + keypair, } = config; let mut transport = quinn::TransportConfig::default(); // Disable uni-directional streams. @@ -128,7 +132,14 @@ impl From for QuinnConfig { let mut client_config = quinn::ClientConfig::new(client_tls_config); client_config.transport_config(transport); - let mut endpoint_config = quinn::EndpointConfig::default(); + let mut endpoint_config = keypair + .derive_secret(b"libp2p quic stateless reset key") + .map(|secret| { + let reset_key = Arc::new(ring::hmac::Key::new(ring::hmac::HMAC_SHA256, &secret)); + quinn::EndpointConfig::new(reset_key) + }) + .unwrap_or_default(); + if !support_draft_29 { endpoint_config.supported_versions(vec![1]); } diff --git a/transports/quic/tests/smoke.rs b/transports/quic/tests/smoke.rs index a0c43b9e..5581ceb7 100644 --- a/transports/quic/tests/smoke.rs +++ b/transports/quic/tests/smoke.rs @@ -730,6 +730,7 @@ async fn open_outbound_streams( } /// Helper function for driving two transports until they established a connection. +#[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] // False positive. 
async fn connect( listener: &mut Boxed<(PeerId, StreamMuxerBox)>, dialer: &mut Boxed<(PeerId, StreamMuxerBox)>, diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index 5aa01133..ffee29c5 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -15,12 +15,12 @@ async-io = { version = "1.13.0", optional = true } futures = "0.3.28" futures-timer = "3.0" if-watch = "3.0.1" -libc = "0.2.147" +libc = "0.2.148" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } log = "0.4.20" -socket2 = { version = "0.5.3", features = ["all"] } -tokio = { version = "1.31.0", default-features = false, features = ["net"], optional = true } +socket2 = { version = "0.5.4", features = ["all"] } +tokio = { version = "1.32.0", default-features = false, features = ["net"], optional = true } [features] tokio = ["dep:tokio", "if-watch/tokio"] @@ -28,12 +28,13 @@ async-io = ["dep:async-io", "if-watch/smol"] [dev-dependencies] async-std = { version = "1.6.5", features = ["attributes"] } -tokio = { version = "1.31.0", default-features = false, features = ["full"] } +tokio = { version = "1.32.0", default-features = false, features = ["full"] } env_logger = "0.10.0" # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling [package.metadata.docs.rs] all-features = true + rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] diff --git a/transports/tls/CHANGELOG.md b/transports/tls/CHANGELOG.md index 9d014acc..4c85ccf5 100644 --- a/transports/tls/CHANGELOG.md +++ b/transports/tls/CHANGELOG.md @@ -1,4 +1,13 @@ -## 0.2.0 +## 0.2.1 + +- Switch from webpki to rustls-webpki. + This is a part of the resolution of the [RUSTSEC-2023-0052]. + See [PR 4381]. + +[PR 4381]: https://github.com/libp2p/rust-libp2p/pull/4381 +[RUSTSEC-2023-0052]: https://rustsec.org/advisories/RUSTSEC-2023-0052.html + +## 0.2.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/transports/tls/Cargo.toml b/transports/tls/Cargo.toml index 81da6858..ba60164c 100644 --- a/transports/tls/Cargo.toml +++ b/transports/tls/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-tls" -version = "0.2.0" +version = "0.2.1" edition = "2021" rust-version = { workspace = true } description = "TLS configuration based on libp2p TLS specs." @@ -15,14 +15,14 @@ libp2p-core = { workspace = true } libp2p-identity = { workspace = true } rcgen = "0.10.0" ring = "0.16.20" -thiserror = "1.0.44" -webpki = { version = "0.22.0", features = ["std"] } +thiserror = "1.0.48" +webpki = { version = "0.101.4", package = "rustls-webpki", features = ["std"] } x509-parser = "0.15.1" yasna = "0.5.2" # Exposed dependencies. Breaking changes to these are breaking changes to us. [dependencies.rustls] -version = "0.21.6" +version = "0.21.7" default-features = false features = ["dangerous_configuration"] # Must enable this to allow for custom verification code. @@ -33,7 +33,7 @@ libp2p-core = { workspace = true } libp2p-identity = { workspace = true, features = ["ed25519", "rsa", "secp256k1", "ecdsa"] } libp2p-swarm = { workspace = true } libp2p-yamux = { workspace = true } -tokio = { version = "1.31.0", features = ["full"] } +tokio = { version = "1.32.0", features = ["full"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/tls/tests/smoke.rs b/transports/tls/tests/smoke.rs index 17aa959c..0db39edf 100644 --- a/transports/tls/tests/smoke.rs +++ b/transports/tls/tests/smoke.rs @@ -3,7 +3,8 @@ use libp2p_core::multiaddr::Protocol; use libp2p_core::transport::MemoryTransport; use libp2p_core::upgrade::Version; use libp2p_core::Transport; -use libp2p_swarm::{keep_alive, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p_swarm::{dummy, Swarm, SwarmBuilder, SwarmEvent}; +use std::time::Duration; #[tokio::test] async fn can_establish_connection() { @@ -55,7 +56,7 @@ async fn can_establish_connection() { assert_eq!(&outbound_peer_id, swarm1.local_peer_id()); } -fn make_swarm() -> Swarm { +fn make_swarm() -> Swarm { let identity = libp2p_identity::Keypair::generate_ed25519(); let transport = MemoryTransport::default() @@ -64,10 +65,7 @@ fn make_swarm() -> Swarm { .multiplex(libp2p_yamux::Config::default()) .boxed(); - SwarmBuilder::without_executor( - transport, - keep_alive::Behaviour, - identity.public().to_peer_id(), - ) - .build() + SwarmBuilder::without_executor(transport, dummy::Behaviour, identity.public().to_peer_id()) + .idle_connection_timeout(Duration::from_secs(5)) + .build() } diff --git a/transports/uds/Cargo.toml b/transports/uds/Cargo.toml index 269eced5..d776ac33 100644 --- a/transports/uds/Cargo.toml +++ b/transports/uds/Cargo.toml @@ -15,10 +15,10 @@ async-std = { version = "1.6.2", optional = true } libp2p-core = { workspace = true } log = "0.4.20" futures = "0.3.28" -tokio = { version = "1.31", default-features = false, features = ["net"], optional = true } +tokio = { version = "1.32", default-features = false, features = ["net"], optional = true } [dev-dependencies] -tempfile = "3.7" +tempfile = "3.8" # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/webrtc-websys/CHANGELOG.md b/transports/webrtc-websys/CHANGELOG.md new file mode 100644 index 00000000..7c40c08f --- /dev/null +++ b/transports/webrtc-websys/CHANGELOG.md @@ -0,0 +1,6 @@ +## 0.1.0-alpha + +- Initial alpha release. + See [PR 4248]. + +[PR 4248]: https://github.com/libp2p/rust-libp2p/pull/4248 diff --git a/transports/webrtc-websys/Cargo.toml b/transports/webrtc-websys/Cargo.toml new file mode 100644 index 00000000..19148068 --- /dev/null +++ b/transports/webrtc-websys/Cargo.toml @@ -0,0 +1,36 @@ +[package] +authors = ["Doug Anderson "] +categories = ["asynchronous", "network-programming", "wasm", "web-programming"] +description = "WebRTC for libp2p under WASM environment" +edition = "2021" +keywords = ["libp2p", "networking", "peer-to-peer"] +license = "MIT" +name = "libp2p-webrtc-websys" +repository = "https://github.com/libp2p/rust-libp2p" +rust-version = { workspace = true } +version = "0.1.0-alpha" +publish = true + +[dependencies] +bytes = "1" +futures = "0.3" +futures-timer = "3" +getrandom = { version = "0.2.9", features = ["js"] } +hex = "0.4.3" +js-sys = { version = "0.3" } +libp2p-core = { workspace = true } +libp2p-identity = { workspace = true } +libp2p-noise = { workspace = true } +libp2p-webrtc-utils = { workspace = true } +log = "0.4.19" +send_wrapper = { version = "0.6.0", features = ["futures"] } +serde = { version = "1.0", features = ["derive"] } +thiserror = "1" +wasm-bindgen = { version = "0.2.87" } +wasm-bindgen-futures = { version = "0.4.37" } +web-sys = { version = "0.3.64", features = ["Document", "Location", "MessageEvent", "Navigator", "RtcCertificate", "RtcConfiguration", "RtcDataChannel", "RtcDataChannelEvent", "RtcDataChannelInit", "RtcDataChannelState", "RtcDataChannelType", "RtcPeerConnection", "RtcSdpType", "RtcSessionDescription", "RtcSessionDescriptionInit", "Window"] } + +[dev-dependencies] +hex-literal = "0.4" +libp2p-ping = { 
workspace = true } +libp2p-swarm = { workspace = true, features = ["wasm-bindgen"] } diff --git a/transports/webrtc-websys/README.md b/transports/webrtc-websys/README.md new file mode 100644 index 00000000..b522f31b --- /dev/null +++ b/transports/webrtc-websys/README.md @@ -0,0 +1,9 @@ +# Rust `libp2p-webrtc-websys` + +Browser Transport made available through `web-sys` bindings. + +## Usage + +Use with `Swarm::with_wasm_executor` to enable the `wasm-bindgen` executor for the `Swarm`. + +See the [browser-webrtc](../../examples/browser-webrtc) example for a full example. diff --git a/transports/webrtc-websys/src/connection.rs b/transports/webrtc-websys/src/connection.rs new file mode 100644 index 00000000..dfdebbc9 --- /dev/null +++ b/transports/webrtc-websys/src/connection.rs @@ -0,0 +1,308 @@ +//! A libp2p connection backed by an [RtcPeerConnection](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection). + +use super::{Error, Stream}; +use crate::stream::DropListener; +use futures::channel::mpsc; +use futures::stream::FuturesUnordered; +use futures::StreamExt; +use js_sys::{Object, Reflect}; +use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; +use libp2p_webrtc_utils::Fingerprint; +use send_wrapper::SendWrapper; +use std::pin::Pin; +use std::task::Waker; +use std::task::{ready, Context, Poll}; +use wasm_bindgen::prelude::*; +use wasm_bindgen_futures::JsFuture; +use web_sys::{ + RtcConfiguration, RtcDataChannel, RtcDataChannelEvent, RtcDataChannelInit, RtcDataChannelType, + RtcSessionDescriptionInit, +}; + +/// A WebRTC Connection. +/// +/// All connections need to be [`Send`] which is why some fields are wrapped in [`SendWrapper`]. +/// This is safe because WASM is single-threaded. +pub struct Connection { + /// The [RtcPeerConnection] that is used for the WebRTC Connection + inner: SendWrapper, + + /// Whether the connection is closed + closed: bool, + /// An [`mpsc::channel`] for all inbound data channels. 
+ /// + /// Because the browser's WebRTC API is event-based, we need to use a channel to obtain all inbound data channels. + inbound_data_channels: SendWrapper>, + /// A list of futures, which, once completed, signal that a [`Stream`] has been dropped. + drop_listeners: FuturesUnordered, + no_drop_listeners_waker: Option, + + _ondatachannel_closure: SendWrapper>, +} + +impl Connection { + /// Create a new inner WebRTC Connection + pub(crate) fn new(peer_connection: RtcPeerConnection) -> Self { + // An ondatachannel Future enables us to poll for incoming data channel events in poll_incoming + let (mut tx_ondatachannel, rx_ondatachannel) = mpsc::channel(4); // we may get more than one data channel opened on a single peer connection + + let ondatachannel_closure = Closure::new(move |ev: RtcDataChannelEvent| { + log::trace!("New data channel"); + + if let Err(e) = tx_ondatachannel.try_send(ev.channel()) { + if e.is_full() { + log::warn!("Remote is opening too many data channels, we can't keep up!"); + return; + } + + if e.is_disconnected() { + log::warn!("Receiver is gone, are we shutting down?"); + } + } + }); + peer_connection + .inner + .set_ondatachannel(Some(ondatachannel_closure.as_ref().unchecked_ref())); + + Self { + inner: SendWrapper::new(peer_connection), + closed: false, + drop_listeners: FuturesUnordered::default(), + no_drop_listeners_waker: None, + inbound_data_channels: SendWrapper::new(rx_ondatachannel), + _ondatachannel_closure: SendWrapper::new(ondatachannel_closure), + } + } + + fn new_stream_from_data_channel(&mut self, data_channel: RtcDataChannel) -> Stream { + let (stream, drop_listener) = Stream::new(data_channel); + + self.drop_listeners.push(drop_listener); + if let Some(waker) = self.no_drop_listeners_waker.take() { + waker.wake() + } + stream + } + + /// Closes the Peer Connection. + /// + /// This closes the data channels also and they will return an error + /// if they are used. 
+ fn close_connection(&mut self) { + if !self.closed { + log::trace!("connection::close_connection"); + self.inner.inner.close(); + self.closed = true; + } + } +} + +impl Drop for Connection { + fn drop(&mut self) { + self.close_connection(); + } +} + +/// WebRTC native multiplexing +/// Allows users to open substreams +impl StreamMuxer for Connection { + type Substream = Stream; + type Error = Error; + + fn poll_inbound( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + match ready!(self.inbound_data_channels.poll_next_unpin(cx)) { + Some(data_channel) => { + let stream = self.new_stream_from_data_channel(data_channel); + + Poll::Ready(Ok(stream)) + } + None => { + // This only happens if the [`RtcPeerConnection::ondatachannel`] closure gets freed which means we are most likely shutting down the connection. + log::debug!("`Sender` for inbound data channels has been dropped"); + Poll::Ready(Err(Error::Connection("connection closed".to_owned()))) + } + } + } + + fn poll_outbound( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + log::trace!("Creating outbound data channel"); + + let data_channel = self.inner.new_regular_data_channel(); + let stream = self.new_stream_from_data_channel(data_channel); + + Poll::Ready(Ok(stream)) + } + + /// Closes the Peer Connection. 
+ fn poll_close( + mut self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll> { + log::trace!("connection::poll_close"); + + self.close_connection(); + Poll::Ready(Ok(())) + } + + fn poll( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + loop { + match ready!(self.drop_listeners.poll_next_unpin(cx)) { + Some(Ok(())) => {} + Some(Err(e)) => { + log::debug!("a DropListener failed: {e}") + } + None => { + self.no_drop_listeners_waker = Some(cx.waker().clone()); + return Poll::Pending; + } + } + } + } +} + +pub(crate) struct RtcPeerConnection { + inner: web_sys::RtcPeerConnection, +} + +impl RtcPeerConnection { + pub(crate) async fn new(algorithm: String) -> Result { + let algo: Object = Object::new(); + Reflect::set(&algo, &"name".into(), &"ECDSA".into()).unwrap(); + Reflect::set(&algo, &"namedCurve".into(), &"P-256".into()).unwrap(); + Reflect::set(&algo, &"hash".into(), &algorithm.into()).unwrap(); + + let certificate_promise = + web_sys::RtcPeerConnection::generate_certificate_with_object(&algo) + .expect("certificate to be valid"); + + let certificate = JsFuture::from(certificate_promise).await?; + + let mut config = RtcConfiguration::default(); + // wrap certificate in a js Array first before adding it to the config object + let certificate_arr = js_sys::Array::new(); + certificate_arr.push(&certificate); + config.certificates(&certificate_arr); + + let inner = web_sys::RtcPeerConnection::new_with_configuration(&config)?; + + Ok(Self { inner }) + } + + /// Creates the stream for the initial noise handshake. + /// + /// The underlying data channel MUST have `negotiated` set to `true` and carry the ID 0. + pub(crate) fn new_handshake_stream(&self) -> (Stream, DropListener) { + Stream::new(self.new_data_channel(true)) + } + + /// Creates a regular data channel for when the connection is already established. 
+ pub(crate) fn new_regular_data_channel(&self) -> RtcDataChannel { + self.new_data_channel(false) + } + + fn new_data_channel(&self, negotiated: bool) -> RtcDataChannel { + const LABEL: &str = ""; + + let dc = match negotiated { + true => { + let mut options = RtcDataChannelInit::new(); + options.negotiated(true).id(0); // id is only ever set to zero when negotiated is true + + self.inner + .create_data_channel_with_data_channel_dict(LABEL, &options) + } + false => self.inner.create_data_channel(LABEL), + }; + dc.set_binary_type(RtcDataChannelType::Arraybuffer); // Hardcoded here, it's the only type we use + + dc + } + + pub(crate) async fn create_offer(&self) -> Result { + let offer = JsFuture::from(self.inner.create_offer()).await?; + + let offer = Reflect::get(&offer, &JsValue::from_str("sdp")) + .expect("sdp should be valid") + .as_string() + .expect("sdp string should be valid string"); + + Ok(offer) + } + + pub(crate) async fn set_local_description( + &self, + sdp: RtcSessionDescriptionInit, + ) -> Result<(), Error> { + let promise = self.inner.set_local_description(&sdp); + JsFuture::from(promise).await?; + + Ok(()) + } + + pub(crate) fn local_fingerprint(&self) -> Result { + let sdp = &self + .inner + .local_description() + .ok_or_else(|| Error::JsError("No local description".to_string()))? + .sdp(); + + let fingerprint = parse_fingerprint(sdp) + .ok_or_else(|| Error::JsError("No fingerprint in SDP".to_string()))?; + + Ok(fingerprint) + } + + pub(crate) async fn set_remote_description( + &self, + sdp: RtcSessionDescriptionInit, + ) -> Result<(), Error> { + let promise = self.inner.set_remote_description(&sdp); + JsFuture::from(promise).await?; + + Ok(()) + } +} + +/// Parse Fingerprint from a SDP. 
+fn parse_fingerprint(sdp: &str) -> Option { + // split the sdp by new lines / carriage returns + let lines = sdp.split("\r\n"); + + // iterate through the lines to find the one starting with a=fingerprint: + // get the value after the first space + // return the value as a Fingerprint + for line in lines { + if line.starts_with("a=fingerprint:") { + let fingerprint = line.split(' ').nth(1).unwrap(); + let bytes = hex::decode(fingerprint.replace(':', "")).unwrap(); + let arr: [u8; 32] = bytes.as_slice().try_into().unwrap(); + return Some(Fingerprint::raw(arr)); + } + } + None +} + +#[cfg(test)] +mod sdp_tests { + use super::*; + + #[test] + fn test_fingerprint() { + let sdp: &str = "v=0\r\no=- 0 0 IN IP6 ::1\r\ns=-\r\nc=IN IP6 ::1\r\nt=0 0\r\na=ice-lite\r\nm=application 61885 UDP/DTLS/SCTP webrtc-datachannel\r\na=mid:0\r\na=setup:passive\r\na=ice-ufrag:libp2p+webrtc+v1/YwapWySn6fE6L9i47PhlB6X4gzNXcgFs\r\na=ice-pwd:libp2p+webrtc+v1/YwapWySn6fE6L9i47PhlB6X4gzNXcgFs\r\na=fingerprint:sha-256 A8:17:77:1E:02:7E:D1:2B:53:92:70:A6:8E:F9:02:CC:21:72:3A:92:5D:F4:97:5F:27:C4:5E:75:D4:F4:31:89\r\na=sctp-port:5000\r\na=max-message-size:16384\r\na=candidate:1467250027 1 UDP 1467250027 ::1 61885 typ host\r\n"; + let fingerprint = match parse_fingerprint(sdp) { + Some(fingerprint) => fingerprint, + None => panic!("No fingerprint found"), + }; + assert_eq!(fingerprint.algorithm(), "sha-256"); + assert_eq!(fingerprint.to_sdp_format(), "A8:17:77:1E:02:7E:D1:2B:53:92:70:A6:8E:F9:02:CC:21:72:3A:92:5D:F4:97:5F:27:C4:5E:75:D4:F4:31:89"); + } +} diff --git a/transports/webrtc-websys/src/error.rs b/transports/webrtc-websys/src/error.rs new file mode 100644 index 00000000..e226dea8 --- /dev/null +++ b/transports/webrtc-websys/src/error.rs @@ -0,0 +1,57 @@ +use wasm_bindgen::{JsCast, JsValue}; + +/// Errors that may happen on the [`Transport`](crate::Transport) or the +/// [`Connection`](crate::Connection). 
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error("Invalid multiaddr: {0}")]
+    InvalidMultiaddr(&'static str),
+
+    #[error("JavaScript error: {0}")]
+    JsError(String),
+
+    #[error("JavaScript typecasting failed")]
+    JsCastFailed,
+
+    #[error("Unknown remote peer ID")]
+    UnknownRemotePeerId,
+
+    #[error("Connection error: {0}")]
+    Connection(String),
+
+    #[error("Authentication error")]
+    Authentication(#[from] libp2p_noise::Error),
+}
+
+impl Error {
+    pub(crate) fn from_js_value(value: JsValue) -> Self {
+        let s = if value.is_instance_of::<js_sys::Error>() {
+            js_sys::Error::from(value)
+                .to_string()
+                .as_string()
+                .unwrap_or_else(|| "Unknown error".to_string())
+        } else {
+            "Unknown error".to_string()
+        };
+
+        Error::JsError(s)
+    }
+}
+
+impl std::convert::From<JsValue> for Error {
+    fn from(value: JsValue) -> Self {
+        Error::from_js_value(value)
+    }
+}
+
+impl From<String> for Error {
+    fn from(value: String) -> Self {
+        Error::JsError(value)
+    }
+}
+
+impl From<std::io::Error> for Error {
+    fn from(value: std::io::Error) -> Self {
+        Error::JsError(value.to_string())
+    }
+}
diff --git a/transports/webrtc-websys/src/lib.rs b/transports/webrtc-websys/src/lib.rs
new file mode 100644
index 00000000..04fced41
--- /dev/null
+++ b/transports/webrtc-websys/src/lib.rs
@@ -0,0 +1,13 @@
+#![doc = include_str!("../README.md")]
+
+mod connection;
+mod error;
+mod sdp;
+mod stream;
+mod transport;
+mod upgrade;
+
+pub use self::connection::Connection;
+pub use self::error::Error;
+pub use self::stream::Stream;
+pub use self::transport::{Config, Transport};
diff --git a/transports/webrtc-websys/src/sdp.rs b/transports/webrtc-websys/src/sdp.rs
new file mode 100644
index 00000000..6f50262b
--- /dev/null
+++ b/transports/webrtc-websys/src/sdp.rs
@@ -0,0 +1,55 @@
+use libp2p_webrtc_utils::Fingerprint;
+use std::net::SocketAddr;
+use web_sys::{RtcSdpType, RtcSessionDescriptionInit};
+
+/// Creates the SDP answer used by the client.
+pub(crate) fn answer(
+    addr: SocketAddr,
+    server_fingerprint: Fingerprint,
+    client_ufrag: &str,
+) -> RtcSessionDescriptionInit {
+    let mut answer_obj = RtcSessionDescriptionInit::new(RtcSdpType::Answer);
+    answer_obj.sdp(&libp2p_webrtc_utils::sdp::answer(
+        addr,
+        server_fingerprint,
+        client_ufrag,
+    ));
+    answer_obj
+}
+
+/// Creates the munged SDP offer from the Browser's given SDP offer
+///
+/// Certificate verification is disabled which is why we hardcode a dummy fingerprint here.
+pub(crate) fn offer(offer: String, client_ufrag: &str) -> RtcSessionDescriptionInit {
+    // find line and replace a=ice-ufrag: with "\r\na=ice-ufrag:{client_ufrag}\r\n"
+    // find line and replace a=ice-pwd: with "\r\na=ice-pwd:{client_ufrag}\r\n"
+
+    let mut munged_sdp_offer = String::new();
+
+    for line in offer.split("\r\n") {
+        if line.starts_with("a=ice-ufrag:") {
+            munged_sdp_offer.push_str(&format!("a=ice-ufrag:{client_ufrag}\r\n"));
+            continue;
+        }
+
+        if line.starts_with("a=ice-pwd:") {
+            munged_sdp_offer.push_str(&format!("a=ice-pwd:{client_ufrag}\r\n"));
+            continue;
+        }
+
+        if !line.is_empty() {
+            munged_sdp_offer.push_str(&format!("{}\r\n", line));
+            continue;
+        }
+    }
+
+    // remove any double \r\n
+    let munged_sdp_offer = munged_sdp_offer.replace("\r\n\r\n", "\r\n");
+
+    log::trace!("Created SDP offer: {munged_sdp_offer}");
+
+    let mut offer_obj = RtcSessionDescriptionInit::new(RtcSdpType::Offer);
+    offer_obj.sdp(&munged_sdp_offer);
+
+    offer_obj
+}
diff --git a/transports/webrtc-websys/src/stream.rs b/transports/webrtc-websys/src/stream.rs
new file mode 100644
index 00000000..812aa5af
--- /dev/null
+++ b/transports/webrtc-websys/src/stream.rs
@@ -0,0 +1,61 @@
+//! 
The WebRTC [Stream] over the Connection +use self::poll_data_channel::PollDataChannel; +use futures::{AsyncRead, AsyncWrite}; +use send_wrapper::SendWrapper; +use std::pin::Pin; +use std::task::{Context, Poll}; +use web_sys::RtcDataChannel; + +mod poll_data_channel; + +/// A stream over a WebRTC connection. +/// +/// Backed by a WebRTC data channel. +pub struct Stream { + /// Wrapper for the inner stream to make it Send + inner: SendWrapper>, +} + +pub(crate) type DropListener = SendWrapper>; + +impl Stream { + pub(crate) fn new(data_channel: RtcDataChannel) -> (Self, DropListener) { + let (inner, drop_listener) = + libp2p_webrtc_utils::Stream::new(PollDataChannel::new(data_channel)); + + ( + Self { + inner: SendWrapper::new(inner), + }, + SendWrapper::new(drop_listener), + ) + } +} + +impl AsyncRead for Stream { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + Pin::new(&mut *self.get_mut().inner).poll_read(cx, buf) + } +} + +impl AsyncWrite for Stream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut *self.get_mut().inner).poll_write(cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut *self.get_mut().inner).poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut *self.get_mut().inner).poll_close(cx) + } +} diff --git a/transports/webrtc-websys/src/stream/poll_data_channel.rs b/transports/webrtc-websys/src/stream/poll_data_channel.rs new file mode 100644 index 00000000..9c9b19cd --- /dev/null +++ b/transports/webrtc-websys/src/stream/poll_data_channel.rs @@ -0,0 +1,242 @@ +use std::cmp::min; +use std::io; +use std::pin::Pin; +use std::rc::Rc; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Mutex; +use std::task::{Context, Poll}; + +use bytes::BytesMut; +use futures::task::AtomicWaker; +use futures::{AsyncRead, AsyncWrite}; +use 
libp2p_webrtc_utils::MAX_MSG_LEN; +use wasm_bindgen::{prelude::*, JsCast}; +use web_sys::{Event, MessageEvent, RtcDataChannel, RtcDataChannelEvent, RtcDataChannelState}; + +/// [`PollDataChannel`] is a wrapper around around [`RtcDataChannel`] which implements [`AsyncRead`] and [`AsyncWrite`]. +#[derive(Debug, Clone)] +pub(crate) struct PollDataChannel { + /// The [`RtcDataChannel`] being wrapped. + inner: RtcDataChannel, + + new_data_waker: Rc, + read_buffer: Rc>, + + /// Waker for when we are waiting for the DC to be opened. + open_waker: Rc, + + /// Waker for when we are waiting to write (again) to the DC because we previously exceeded the [`MAX_MSG_LEN`] threshold. + write_waker: Rc, + + /// Waker for when we are waiting for the DC to be closed. + close_waker: Rc, + + /// Whether we've been overloaded with data by the remote. + /// + /// This is set to `true` in case `read_buffer` overflows, i.e. the remote is sending us messages faster than we can read them. + /// In that case, we return an [`std::io::Error`] from [`AsyncRead`] or [`AsyncWrite`], depending which one gets called earlier. + /// Failing these will (very likely), cause the application developer to drop the stream which resets it. + overloaded: Rc, + + // Store the closures for proper garbage collection. + // These are wrapped in an [`Rc`] so we can implement [`Clone`]. 
+ _on_open_closure: Rc>, + _on_write_closure: Rc>, + _on_close_closure: Rc>, + _on_message_closure: Rc>, +} + +impl PollDataChannel { + pub(crate) fn new(inner: RtcDataChannel) -> Self { + let open_waker = Rc::new(AtomicWaker::new()); + let on_open_closure = Closure::new({ + let open_waker = open_waker.clone(); + + move |_: RtcDataChannelEvent| { + log::trace!("DataChannel opened"); + open_waker.wake(); + } + }); + inner.set_onopen(Some(on_open_closure.as_ref().unchecked_ref())); + + let write_waker = Rc::new(AtomicWaker::new()); + inner.set_buffered_amount_low_threshold(0); + let on_write_closure = Closure::new({ + let write_waker = write_waker.clone(); + + move |_: Event| { + log::trace!("DataChannel available for writing (again)"); + write_waker.wake(); + } + }); + inner.set_onbufferedamountlow(Some(on_write_closure.as_ref().unchecked_ref())); + + let close_waker = Rc::new(AtomicWaker::new()); + let on_close_closure = Closure::new({ + let close_waker = close_waker.clone(); + + move |_: Event| { + log::trace!("DataChannel closed"); + close_waker.wake(); + } + }); + inner.set_onclose(Some(on_close_closure.as_ref().unchecked_ref())); + + let new_data_waker = Rc::new(AtomicWaker::new()); + let read_buffer = Rc::new(Mutex::new(BytesMut::new())); // We purposely don't use `with_capacity` so we don't eagerly allocate `MAX_READ_BUFFER` per stream. 
+ let overloaded = Rc::new(AtomicBool::new(false)); + + let on_message_closure = Closure::::new({ + let new_data_waker = new_data_waker.clone(); + let read_buffer = read_buffer.clone(); + let overloaded = overloaded.clone(); + + move |ev: MessageEvent| { + let data = js_sys::Uint8Array::new(&ev.data()); + + let mut read_buffer = read_buffer.lock().unwrap(); + + if read_buffer.len() + data.length() as usize > MAX_MSG_LEN { + overloaded.store(true, Ordering::SeqCst); + log::warn!("Remote is overloading us with messages, resetting stream",); + return; + } + + read_buffer.extend_from_slice(&data.to_vec()); + new_data_waker.wake(); + } + }); + inner.set_onmessage(Some(on_message_closure.as_ref().unchecked_ref())); + + Self { + inner, + new_data_waker, + read_buffer, + open_waker, + write_waker, + close_waker, + overloaded, + _on_open_closure: Rc::new(on_open_closure), + _on_write_closure: Rc::new(on_write_closure), + _on_close_closure: Rc::new(on_close_closure), + _on_message_closure: Rc::new(on_message_closure), + } + } + + /// Returns the [RtcDataChannelState] of the [RtcDataChannel] + fn ready_state(&self) -> RtcDataChannelState { + self.inner.ready_state() + } + + /// Returns the current [RtcDataChannel] BufferedAmount + fn buffered_amount(&self) -> usize { + self.inner.buffered_amount() as usize + } + + /// Whether the data channel is ready for reading or writing. 
+ fn poll_ready(&mut self, cx: &mut Context) -> Poll> { + match self.ready_state() { + RtcDataChannelState::Connecting => { + self.open_waker.register(cx.waker()); + return Poll::Pending; + } + RtcDataChannelState::Closing | RtcDataChannelState::Closed => { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + } + RtcDataChannelState::Open | RtcDataChannelState::__Nonexhaustive => {} + } + + if self.overloaded.load(Ordering::SeqCst) { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::BrokenPipe, + "remote overloaded us with messages", + ))); + } + + Poll::Ready(Ok(())) + } +} + +impl AsyncRead for PollDataChannel { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + let this = self.get_mut(); + + futures::ready!(this.poll_ready(cx))?; + + let mut read_buffer = this.read_buffer.lock().unwrap(); + + if read_buffer.is_empty() { + this.new_data_waker.register(cx.waker()); + return Poll::Pending; + } + + // Ensure that we: + // - at most return what the caller can read (`buf.len()`) + // - at most what we have (`read_buffer.len()`) + let split_index = min(buf.len(), read_buffer.len()); + + let bytes_to_return = read_buffer.split_to(split_index); + let len = bytes_to_return.len(); + buf[..len].copy_from_slice(&bytes_to_return); + + Poll::Ready(Ok(len)) + } +} + +impl AsyncWrite for PollDataChannel { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + let this = self.get_mut(); + + futures::ready!(this.poll_ready(cx))?; + + debug_assert!(this.buffered_amount() <= MAX_MSG_LEN); + let remaining_space = MAX_MSG_LEN - this.buffered_amount(); + + if remaining_space == 0 { + this.write_waker.register(cx.waker()); + return Poll::Pending; + } + + let bytes_to_send = min(buf.len(), remaining_space); + + if this + .inner + .send_with_u8_array(&buf[..bytes_to_send]) + .is_err() + { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())); + } + + 
Poll::Ready(Ok(bytes_to_send))
+    }
+
+    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+        if self.buffered_amount() == 0 {
+            return Poll::Ready(Ok(()));
+        }
+
+        self.write_waker.register(cx.waker());
+        Poll::Pending
+    }
+
+    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+        if self.ready_state() == RtcDataChannelState::Closed {
+            return Poll::Ready(Ok(()));
+        }
+
+        if self.ready_state() != RtcDataChannelState::Closing {
+            self.inner.close();
+        }
+
+        self.close_waker.register(cx.waker());
+        Poll::Pending
+    }
+}
diff --git a/transports/webrtc-websys/src/transport.rs b/transports/webrtc-websys/src/transport.rs
new file mode 100644
index 00000000..ecf137ea
--- /dev/null
+++ b/transports/webrtc-websys/src/transport.rs
@@ -0,0 +1,140 @@
+use super::upgrade;
+use super::Connection;
+use super::Error;
+use futures::future::FutureExt;
+use libp2p_core::multiaddr::Multiaddr;
+use libp2p_core::muxing::StreamMuxerBox;
+use libp2p_core::transport::{Boxed, ListenerId, Transport as _, TransportError, TransportEvent};
+use libp2p_identity::{Keypair, PeerId};
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// Config for the [`Transport`].
+#[derive(Clone)]
+pub struct Config {
+    keypair: Keypair,
+}
+
+/// A WebRTC [`Transport`](libp2p_core::Transport) that works with `web-sys`.
+pub struct Transport {
+    config: Config,
+}
+
+impl Config {
+    /// Constructs a new configuration for the [`Transport`].
+    pub fn new(keypair: &Keypair) -> Self {
+        Config {
+            keypair: keypair.to_owned(),
+        }
+    }
+}
+
+impl Transport {
+    /// Constructs a new `Transport` with the given [`Config`].
+    pub fn new(config: Config) -> Transport {
+        Transport { config }
+    }
+
+    /// Wraps `Transport` in [`Boxed`] and makes it ready to be consumed by
+    /// SwarmBuilder.
+ pub fn boxed(self) -> Boxed<(PeerId, StreamMuxerBox)> { + self.map(|(peer_id, muxer), _| (peer_id, StreamMuxerBox::new(muxer))) + .boxed() + } +} + +impl libp2p_core::Transport for Transport { + type Output = (PeerId, Connection); + type Error = Error; + type ListenerUpgrade = Pin> + Send>>; + type Dial = Pin> + Send>>; + + fn listen_on( + &mut self, + _id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + Err(TransportError::MultiaddrNotSupported(addr)) + } + + fn remove_listener(&mut self, _id: ListenerId) -> bool { + false + } + + fn dial(&mut self, addr: Multiaddr) -> Result> { + if maybe_local_firefox() { + return Err(TransportError::Other( + "Firefox does not support WebRTC over localhost or 127.0.0.1" + .to_string() + .into(), + )); + } + + let (sock_addr, server_fingerprint) = libp2p_webrtc_utils::parse_webrtc_dial_addr(&addr) + .ok_or_else(|| TransportError::MultiaddrNotSupported(addr.clone()))?; + + if sock_addr.port() == 0 || sock_addr.ip().is_unspecified() { + return Err(TransportError::MultiaddrNotSupported(addr)); + } + + let config = self.config.clone(); + + Ok(async move { + let (peer_id, connection) = + upgrade::outbound(sock_addr, server_fingerprint, config.keypair.clone()).await?; + + Ok((peer_id, connection)) + } + .boxed()) + } + + fn dial_as_listener( + &mut self, + addr: Multiaddr, + ) -> Result> { + Err(TransportError::MultiaddrNotSupported(addr)) + } + + fn poll( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Pending + } + + fn address_translation(&self, _listen: &Multiaddr, _observed: &Multiaddr) -> Option { + None + } +} + +/// Checks if local Firefox. 
+/// +/// See: `` for more details +fn maybe_local_firefox() -> bool { + let window = &web_sys::window().expect("window should be available"); + let ua = match window.navigator().user_agent() { + Ok(agent) => agent.to_lowercase(), + Err(_) => return false, + }; + + let hostname = match window + .document() + .expect("should be valid document") + .location() + { + Some(location) => match location.hostname() { + Ok(hostname) => hostname, + Err(_) => return false, + }, + None => return false, + }; + + // check if web_sys::Navigator::user_agent() matches any of the following: + // - firefox + // - seamonkey + // - iceape + // AND hostname is either localhost or "127.0.0.1" + (ua.contains("firefox") || ua.contains("seamonkey") || ua.contains("iceape")) + && (hostname == "localhost" || hostname == "127.0.0.1" || hostname == "[::1]") +} diff --git a/transports/webrtc-websys/src/upgrade.rs b/transports/webrtc-websys/src/upgrade.rs new file mode 100644 index 00000000..092baed5 --- /dev/null +++ b/transports/webrtc-websys/src/upgrade.rs @@ -0,0 +1,56 @@ +use super::Error; +use crate::connection::RtcPeerConnection; +use crate::sdp; +use crate::Connection; +use libp2p_identity::{Keypair, PeerId}; +use libp2p_webrtc_utils::noise; +use libp2p_webrtc_utils::Fingerprint; +use send_wrapper::SendWrapper; +use std::net::SocketAddr; + +/// Upgrades an outbound WebRTC connection by creating the data channel +/// and conducting a Noise handshake +pub(crate) async fn outbound( + sock_addr: SocketAddr, + remote_fingerprint: Fingerprint, + id_keys: Keypair, +) -> Result<(PeerId, Connection), Error> { + let fut = SendWrapper::new(outbound_inner(sock_addr, remote_fingerprint, id_keys)); + fut.await +} + +/// Inner outbound function that is wrapped in [SendWrapper] +async fn outbound_inner( + sock_addr: SocketAddr, + remote_fingerprint: Fingerprint, + id_keys: Keypair, +) -> Result<(PeerId, Connection), Error> { + let rtc_peer_connection = 
RtcPeerConnection::new(remote_fingerprint.algorithm()).await?; + + // Create stream for Noise handshake + // Must create data channel before Offer is created for it to be included in the SDP + let (channel, listener) = rtc_peer_connection.new_handshake_stream(); + drop(listener); + + let ufrag = libp2p_webrtc_utils::sdp::random_ufrag(); + + let offer = rtc_peer_connection.create_offer().await?; + let munged_offer = sdp::offer(offer, &ufrag); + rtc_peer_connection + .set_local_description(munged_offer) + .await?; + + let answer = sdp::answer(sock_addr, remote_fingerprint, &ufrag); + rtc_peer_connection.set_remote_description(answer).await?; + + let local_fingerprint = rtc_peer_connection.local_fingerprint()?; + + log::trace!("local_fingerprint: {:?}", local_fingerprint); + log::trace!("remote_fingerprint: {:?}", remote_fingerprint); + + let peer_id = noise::outbound(id_keys, channel, remote_fingerprint, local_fingerprint).await?; + + log::debug!("Remote peer identified as {peer_id}"); + + Ok((peer_id, Connection::new(rtc_peer_connection))) +} diff --git a/transports/webrtc/CHANGELOG.md b/transports/webrtc/CHANGELOG.md index 0eb39557..710c2e31 100644 --- a/transports/webrtc/CHANGELOG.md +++ b/transports/webrtc/CHANGELOG.md @@ -1,3 +1,10 @@ +## 0.6.1-alpha + +- Move common dependencies to `libp2p-webrtc-utils` crate. + See [PR 4248]. + +[PR 4248]: https://github.com/libp2p/rust-libp2p/pull/4248 + ## 0.6.0-alpha - Update `webrtc` dependency to `v0.8.0`. 
diff --git a/transports/webrtc/Cargo.toml b/transports/webrtc/Cargo.toml index 05271951..7cd16f2c 100644 --- a/transports/webrtc/Cargo.toml +++ b/transports/webrtc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-webrtc" -version = "0.6.0-alpha" +version = "0.6.1-alpha" authors = ["Parity Technologies "] description = "WebRTC transport for libp2p" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,7 +12,6 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-trait = "0.1" -asynchronous-codec = "0.6.2" bytes = "1" futures = "0.3" futures-timer = "3" @@ -21,40 +20,28 @@ if-watch = "3.0" libp2p-core = { workspace = true } libp2p-noise = { workspace = true } libp2p-identity = { workspace = true } +libp2p-webrtc-utils = { workspace = true } log = "0.4" -sha2 = "0.10.7" -multihash = { workspace = true } -quick-protobuf = "0.8" -quick-protobuf-codec = { workspace = true } +multihash = { workspace = true } rand = "0.8" -rcgen = "0.10.0" +rcgen = "0.11.1" serde = { version = "1.0", features = ["derive"] } -stun = "0.4" +stun = "0.5" thiserror = "1" tinytemplate = "1.2" -tokio = { version = "1.31", features = ["net"], optional = true} +tokio = { version = "1.32", features = ["net"], optional = true} tokio-util = { version = "0.7", features = ["compat"], optional = true } -webrtc = { version = "0.8.0", optional = true } +webrtc = { version = "0.9.0", optional = true } [features] tokio = ["dep:tokio", "dep:tokio-util", "dep:webrtc", "if-watch/tokio"] pem = ["webrtc?/pem"] [dev-dependencies] -anyhow = "1.0" env_logger = "0.10" -hex-literal = "0.4" -libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } -libp2p-ping = { workspace = true } -tokio = { version = "1.31", features = ["full"] } -unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } -void = "1" +tokio = { version = "1.32", features = ["full"] } quickcheck = "1.0.3" [[test]] name = "smoke" required-features = ["tokio"] - -[[example]] -name = 
"listen_ping" -required-features = ["tokio"] diff --git a/transports/webrtc/examples/listen_ping.rs b/transports/webrtc/examples/listen_ping.rs deleted file mode 100644 index 8475195a..00000000 --- a/transports/webrtc/examples/listen_ping.rs +++ /dev/null @@ -1,62 +0,0 @@ -use anyhow::Result; -use futures::StreamExt; -use libp2p_core::muxing::StreamMuxerBox; -use libp2p_core::Transport; -use libp2p_identity as identity; -use libp2p_ping as ping; -use libp2p_swarm::{keep_alive, NetworkBehaviour, Swarm, SwarmBuilder}; -use rand::thread_rng; -use void::Void; - -/// An example WebRTC server that will accept connections and run the ping protocol on them. -#[tokio::main] -async fn main() -> Result<()> { - let mut swarm = create_swarm()?; - - swarm.listen_on("/ip4/127.0.0.1/udp/0/webrtc-direct".parse()?)?; - - loop { - let event = swarm.next().await.unwrap(); - eprintln!("New event: {event:?}") - } -} - -fn create_swarm() -> Result> { - let id_keys = identity::Keypair::generate_ed25519(); - let peer_id = id_keys.public().to_peer_id(); - let transport = libp2p_webrtc::tokio::Transport::new( - id_keys, - libp2p_webrtc::tokio::Certificate::generate(&mut thread_rng())?, - ); - - let transport = transport - .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn))) - .boxed(); - - Ok(SwarmBuilder::with_tokio_executor(transport, Behaviour::default(), peer_id).build()) -} - -#[derive(NetworkBehaviour, Default)] -#[behaviour(to_swarm = "Event", prelude = "libp2p_swarm::derive_prelude")] -struct Behaviour { - ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, -} - -#[derive(Debug)] -#[allow(clippy::large_enum_variant)] -enum Event { - Ping(ping::Event), -} - -impl From for Event { - fn from(e: ping::Event) -> Self { - Event::Ping(e) - } -} - -impl From for Event { - fn from(event: Void) -> Self { - void::unreachable(event) - } -} diff --git a/transports/webrtc/src/lib.rs b/transports/webrtc/src/lib.rs index 012796a6..f71203cc 100644 --- a/transports/webrtc/src/lib.rs 
+++ b/transports/webrtc/src/lib.rs @@ -79,12 +79,5 @@ //! hand-crate the SDP answer generated by the remote, this is problematic. A way to solve this //! is to make the hash a part of the remote's multiaddr. On the server side, we turn //! certificate verification off. - -mod proto { - #![allow(unreachable_pub)] - include!("generated/mod.rs"); - pub(crate) use self::webrtc::pb::{mod_Message::Flag, Message}; -} - #[cfg(feature = "tokio")] pub mod tokio; diff --git a/transports/webrtc/src/tokio/certificate.rs b/transports/webrtc/src/tokio/certificate.rs index 748cfdb6..7c7c65f0 100644 --- a/transports/webrtc/src/tokio/certificate.rs +++ b/transports/webrtc/src/tokio/certificate.rs @@ -97,24 +97,18 @@ enum Kind { InvalidPEM(#[from] webrtc::Error), } -#[cfg(test)] +#[cfg(all(test, feature = "pem"))] mod test { - #[cfg(feature = "pem")] - use anyhow::Result; + use super::*; + use rand::thread_rng; - #[cfg(feature = "pem")] #[test] - fn test_certificate_serialize_pem_and_from_pem() -> Result<()> { - use super::*; - use rand::thread_rng; - + fn test_certificate_serialize_pem_and_from_pem() { let cert = Certificate::generate(&mut thread_rng()).unwrap(); let pem = cert.serialize_pem(); - let loaded_cert = Certificate::from_pem(&pem)?; + let loaded_cert = Certificate::from_pem(&pem).unwrap(); - assert_eq!(loaded_cert, cert); - - Ok(()) + assert_eq!(loaded_cert, cert) } } diff --git a/transports/webrtc/src/tokio/connection.rs b/transports/webrtc/src/tokio/connection.rs index 72e39ce5..29983d72 100644 --- a/transports/webrtc/src/tokio/connection.rs +++ b/transports/webrtc/src/tokio/connection.rs @@ -40,7 +40,7 @@ use std::{ task::{Context, Poll}, }; -use crate::tokio::{error::Error, substream, substream::Substream}; +use crate::tokio::{error::Error, stream, stream::Stream}; /// Maximum number of unprocessed data channels. /// See [`Connection::poll_inbound`]. @@ -56,14 +56,14 @@ pub struct Connection { /// Channel onto which incoming data channels are put. 
incoming_data_channels_rx: mpsc::Receiver>, - /// Future, which, once polled, will result in an outbound substream. + /// Future, which, once polled, will result in an outbound stream. outbound_fut: Option, Error>>>, /// Future, which, once polled, will result in closing the entire connection. close_fut: Option>>, - /// A list of futures, which, once completed, signal that a [`Substream`] has been dropped. - drop_listeners: FuturesUnordered, + /// A list of futures, which, once completed, signal that a [`Stream`] has been dropped. + drop_listeners: FuturesUnordered, no_drop_listeners_waker: Option, } @@ -147,7 +147,7 @@ impl Connection { } impl StreamMuxer for Connection { - type Substream = Substream; + type Substream = Stream; type Error = Error; fn poll_inbound( @@ -156,15 +156,15 @@ impl StreamMuxer for Connection { ) -> Poll> { match ready!(self.incoming_data_channels_rx.poll_next_unpin(cx)) { Some(detached) => { - log::trace!("Incoming substream {}", detached.stream_identifier()); + log::trace!("Incoming stream {}", detached.stream_identifier()); - let (substream, drop_listener) = Substream::new(detached); + let (stream, drop_listener) = Stream::new(detached); self.drop_listeners.push(drop_listener); if let Some(waker) = self.no_drop_listeners_waker.take() { waker.wake() } - Poll::Ready(Ok(substream)) + Poll::Ready(Ok(stream)) } None => { debug_assert!( @@ -226,15 +226,15 @@ impl StreamMuxer for Connection { Ok(detached) => { self.outbound_fut = None; - log::trace!("Outbound substream {}", detached.stream_identifier()); + log::trace!("Outbound stream {}", detached.stream_identifier()); - let (substream, drop_listener) = Substream::new(detached); + let (stream, drop_listener) = Stream::new(detached); self.drop_listeners.push(drop_listener); if let Some(waker) = self.no_drop_listeners_waker.take() { waker.wake() } - Poll::Ready(Ok(substream)) + Poll::Ready(Ok(stream)) } Err(e) => { self.outbound_fut = None; diff --git 
a/transports/webrtc/src/tokio/fingerprint.rs b/transports/webrtc/src/tokio/fingerprint.rs index c3d58d64..c075e486 100644 --- a/transports/webrtc/src/tokio/fingerprint.rs +++ b/transports/webrtc/src/tokio/fingerprint.rs @@ -18,30 +18,25 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use sha2::Digest as _; -use std::fmt; use webrtc::dtls_transport::dtls_fingerprint::RTCDtlsFingerprint; const SHA256: &str = "sha-256"; -const MULTIHASH_SHA256_CODE: u64 = 0x12; type Multihash = multihash::Multihash<64>; /// A certificate fingerprint that is assumed to be created using the SHA256 hash algorithm. -#[derive(Eq, PartialEq, Copy, Clone)] -pub struct Fingerprint([u8; 32]); +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub struct Fingerprint(libp2p_webrtc_utils::Fingerprint); impl Fingerprint { - pub(crate) const FF: Fingerprint = Fingerprint([0xFF; 32]); - #[cfg(test)] pub fn raw(bytes: [u8; 32]) -> Self { - Self(bytes) + Self(libp2p_webrtc_utils::Fingerprint::raw(bytes)) } /// Creates a fingerprint from a raw certificate. pub fn from_certificate(bytes: &[u8]) -> Self { - Fingerprint(sha2::Sha256::digest(bytes).into()) + Fingerprint(libp2p_webrtc_utils::Fingerprint::from_certificate(bytes)) } /// Converts [`RTCDtlsFingerprint`] to [`Fingerprint`]. @@ -53,58 +48,35 @@ impl Fingerprint { let mut buf = [0; 32]; hex::decode_to_slice(fp.value.replace(':', ""), &mut buf).ok()?; - Some(Self(buf)) + Some(Self(libp2p_webrtc_utils::Fingerprint::raw(buf))) } /// Converts [`Multihash`](multihash::Multihash) to [`Fingerprint`]. pub fn try_from_multihash(hash: Multihash) -> Option { - if hash.code() != MULTIHASH_SHA256_CODE { - // Only support SHA256 for now. - return None; - } - - let bytes = hash.digest().try_into().ok()?; - - Some(Self(bytes)) + Some(Self(libp2p_webrtc_utils::Fingerprint::try_from_multihash( + hash, + )?)) } /// Converts this fingerprint to [`Multihash`](multihash::Multihash). 
pub fn to_multihash(self) -> Multihash { - Multihash::wrap(MULTIHASH_SHA256_CODE, &self.0).expect("fingerprint's len to be 32 bytes") + self.0.to_multihash() } /// Formats this fingerprint as uppercase hex, separated by colons (`:`). /// /// This is the format described in . pub fn to_sdp_format(self) -> String { - self.0.map(|byte| format!("{byte:02X}")).join(":") + self.0.to_sdp_format() } /// Returns the algorithm used (e.g. "sha-256"). /// See pub fn algorithm(&self) -> String { - SHA256.to_owned() - } -} - -impl fmt::Debug for Fingerprint { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(&hex::encode(self.0)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn sdp_format() { - let fp = Fingerprint::raw(hex_literal::hex!( - "7DE3D83F81A680592A471E6B6ABB0747ABD35385A8093FDFE112C1EEBB6CC6AC" - )); - - let sdp_format = fp.to_sdp_format(); - - assert_eq!(sdp_format, "7D:E3:D8:3F:81:A6:80:59:2A:47:1E:6B:6A:BB:07:47:AB:D3:53:85:A8:09:3F:DF:E1:12:C1:EE:BB:6C:C6:AC") + self.0.algorithm() + } + + pub(crate) fn into_inner(self) -> libp2p_webrtc_utils::Fingerprint { + self.0 } } diff --git a/transports/webrtc/src/tokio/mod.rs b/transports/webrtc/src/tokio/mod.rs index 85e041bf..4f2c0dd9 100644 --- a/transports/webrtc/src/tokio/mod.rs +++ b/transports/webrtc/src/tokio/mod.rs @@ -24,7 +24,7 @@ mod error; mod fingerprint; mod req_res_chan; mod sdp; -mod substream; +mod stream; mod transport; mod udp_mux; mod upgrade; diff --git a/transports/webrtc/src/tokio/sdp.rs b/transports/webrtc/src/tokio/sdp.rs index d2f424e5..e49345a0 100644 --- a/transports/webrtc/src/tokio/sdp.rs +++ b/transports/webrtc/src/tokio/sdp.rs @@ -18,22 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use serde::Serialize; -use tinytemplate::TinyTemplate; +pub(crate) use libp2p_webrtc_utils::sdp::random_ufrag; +use libp2p_webrtc_utils::sdp::render_description; +use libp2p_webrtc_utils::Fingerprint; +use std::net::SocketAddr; use webrtc::peer_connection::sdp::session_description::RTCSessionDescription; -use std::net::{IpAddr, SocketAddr}; - -use crate::tokio::fingerprint::Fingerprint; - /// Creates the SDP answer used by the client. pub(crate) fn answer( addr: SocketAddr, - server_fingerprint: &Fingerprint, + server_fingerprint: Fingerprint, client_ufrag: &str, ) -> RTCSessionDescription { - RTCSessionDescription::answer(render_description( - SERVER_SESSION_DESCRIPTION, + RTCSessionDescription::answer(libp2p_webrtc_utils::sdp::answer( addr, server_fingerprint, client_ufrag, @@ -45,13 +42,16 @@ pub(crate) fn answer( /// /// Certificate verification is disabled which is why we hardcode a dummy fingerprint here. pub(crate) fn offer(addr: SocketAddr, client_ufrag: &str) -> RTCSessionDescription { - RTCSessionDescription::offer(render_description( + let offer = render_description( CLIENT_SESSION_DESCRIPTION, addr, - &Fingerprint::FF, + Fingerprint::FF, client_ufrag, - )) - .unwrap() + ); + + log::trace!("Created SDP offer: {offer}"); + + RTCSessionDescription::offer(offer).unwrap() } // An SDP message that constitutes the offer. @@ -142,111 +142,3 @@ a=setup:actpass a=sctp-port:5000 a=max-message-size:16384 "; - -// See [`CLIENT_SESSION_DESCRIPTION`]. -// -// a=ice-lite -// -// A lite implementation is only appropriate for devices that will *always* be connected to -// the public Internet and have a public IP address at which it can receive packets from any -// correspondent. ICE will not function when a lite implementation is placed behind a NAT -// (RFC8445). -// -// a=tls-id: -// -// "TLS ID" uniquely identifies a TLS association. 
-// The ICE protocol uses a "TLS ID" system to indicate whether a fresh DTLS connection -// must be reopened in case of ICE renegotiation. Considering that ICE renegotiations -// never happen in our use case, we can simply put a random value and not care about -// it. Note however that the TLS ID in the answer must be present if and only if the -// offer contains one. (RFC8842) -// TODO: is it true that renegotiations never happen? what about a connection closing? -// "tls-id" attribute MUST be present in the initial offer and respective answer (RFC8839). -// XXX: but right now browsers don't send it. -// -// a=setup:passive -// -// "passive" indicates that the remote DTLS server will only listen for incoming -// connections. (RFC5763) -// The answerer (server) MUST not be located behind a NAT (RFC6135). -// -// The answerer MUST use either a setup attribute value of setup:active or setup:passive. -// Note that if the answerer uses setup:passive, then the DTLS handshake will not begin until -// the answerer is received, which adds additional latency. setup:active allows the answer and -// the DTLS handshake to occur in parallel. Thus, setup:active is RECOMMENDED. -// -// a=candidate: -// -// A transport address for a candidate that can be used for connectivity checks (RFC8839). -// -// a=end-of-candidates -// -// Indicate that no more candidates will ever be sent (RFC8838). -const SERVER_SESSION_DESCRIPTION: &str = "v=0 -o=- 0 0 IN {ip_version} {target_ip} -s=- -t=0 0 -a=ice-lite -m=application {target_port} UDP/DTLS/SCTP webrtc-datachannel -c=IN {ip_version} {target_ip} -a=mid:0 -a=ice-options:ice2 -a=ice-ufrag:{ufrag} -a=ice-pwd:{pwd} -a=fingerprint:{fingerprint_algorithm} {fingerprint_value} - -a=setup:passive -a=sctp-port:5000 -a=max-message-size:16384 -a=candidate:1 1 UDP 1 {target_ip} {target_port} typ host -a=end-of-candidates -"; - -/// Indicates the IP version used in WebRTC: `IP4` or `IP6`. 
-#[derive(Serialize)] -enum IpVersion { - IP4, - IP6, -} - -/// Context passed to the templating engine, which replaces the above placeholders (e.g. -/// `{IP_VERSION}`) with real values. -#[derive(Serialize)] -struct DescriptionContext { - pub(crate) ip_version: IpVersion, - pub(crate) target_ip: IpAddr, - pub(crate) target_port: u16, - pub(crate) fingerprint_algorithm: String, - pub(crate) fingerprint_value: String, - pub(crate) ufrag: String, - pub(crate) pwd: String, -} - -/// Renders a [`TinyTemplate`] description using the provided arguments. -fn render_description( - description: &str, - addr: SocketAddr, - fingerprint: &Fingerprint, - ufrag: &str, -) -> String { - let mut tt = TinyTemplate::new(); - tt.add_template("description", description).unwrap(); - - let context = DescriptionContext { - ip_version: { - if addr.is_ipv4() { - IpVersion::IP4 - } else { - IpVersion::IP6 - } - }, - target_ip: addr.ip(), - target_port: addr.port(), - fingerprint_algorithm: fingerprint.algorithm(), - fingerprint_value: fingerprint.to_sdp_format(), - // NOTE: ufrag is equal to pwd. - ufrag: ufrag.to_owned(), - pwd: ufrag.to_owned(), - }; - tt.render("description", &context).unwrap() -} diff --git a/transports/webrtc/src/tokio/stream.rs b/transports/webrtc/src/tokio/stream.rs new file mode 100644 index 00000000..4278a751 --- /dev/null +++ b/transports/webrtc/src/tokio/stream.rs @@ -0,0 +1,80 @@ +// Copyright 2023 Protocol Labs. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::{ + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +use futures::prelude::*; +use libp2p_webrtc_utils::MAX_MSG_LEN; +use tokio_util::compat::{Compat, TokioAsyncReadCompatExt}; +use webrtc::data::data_channel::{DataChannel, PollDataChannel}; + +/// A substream on top of a WebRTC data channel. +/// +/// To be a proper libp2p substream, we need to implement [`AsyncRead`] and [`AsyncWrite`] as well +/// as support a half-closed state which we do by framing messages in a protobuf envelope. +pub struct Stream { + inner: libp2p_webrtc_utils::Stream>, +} + +pub(crate) type DropListener = libp2p_webrtc_utils::DropListener>; + +impl Stream { + /// Returns a new `Substream` and a listener, which will notify the receiver when/if the substream + /// is dropped. 
+ pub(crate) fn new(data_channel: Arc) -> (Self, DropListener) { + let mut data_channel = PollDataChannel::new(data_channel).compat(); + data_channel.get_mut().set_read_buf_capacity(MAX_MSG_LEN); + + let (inner, drop_listener) = libp2p_webrtc_utils::Stream::new(data_channel); + + (Self { inner }, drop_listener) + } +} +impl AsyncRead for Stream { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + Pin::new(&mut self.get_mut().inner).poll_read(cx, buf) + } +} + +impl AsyncWrite for Stream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.get_mut().inner).poll_write(cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.get_mut().inner).poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.get_mut().inner).poll_close(cx) + } +} diff --git a/transports/webrtc/src/tokio/transport.rs b/transports/webrtc/src/tokio/transport.rs index faac75b2..4b3f15d5 100644 --- a/transports/webrtc/src/tokio/transport.rs +++ b/transports/webrtc/src/tokio/transport.rs @@ -119,7 +119,7 @@ impl libp2p_core::Transport for Transport { } fn dial(&mut self, addr: Multiaddr) -> Result> { - let (sock_addr, server_fingerprint) = parse_webrtc_dial_addr(&addr) + let (sock_addr, server_fingerprint) = libp2p_webrtc_utils::parse_webrtc_dial_addr(&addr) .ok_or_else(|| TransportError::MultiaddrNotSupported(addr.clone()))?; if sock_addr.port() == 0 || sock_addr.ip().is_unspecified() { return Err(TransportError::MultiaddrNotSupported(addr)); @@ -140,7 +140,7 @@ impl libp2p_core::Transport for Transport { sock_addr, config.inner, udp_mux, - client_fingerprint, + client_fingerprint.into_inner(), server_fingerprint, config.id_keys, ) @@ -337,7 +337,7 @@ impl Stream for ListenStream { new_addr.addr, self.config.inner.clone(), self.udp_mux.udp_mux_handle(), - self.config.fingerprint, + 
self.config.fingerprint.into_inner(), new_addr.ufrag, self.config.id_keys.clone(), ) @@ -427,40 +427,6 @@ fn parse_webrtc_listen_addr(addr: &Multiaddr) -> Option { Some(SocketAddr::new(ip, port)) } -/// Parse the given [`Multiaddr`] into a [`SocketAddr`] and a [`Fingerprint`] for dialing. -fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerprint)> { - let mut iter = addr.iter(); - - let ip = match iter.next()? { - Protocol::Ip4(ip) => IpAddr::from(ip), - Protocol::Ip6(ip) => IpAddr::from(ip), - _ => return None, - }; - - let port = iter.next()?; - let webrtc = iter.next()?; - let certhash = iter.next()?; - - let (port, fingerprint) = match (port, webrtc, certhash) { - (Protocol::Udp(port), Protocol::WebRTCDirect, Protocol::Certhash(cert_hash)) => { - let fingerprint = Fingerprint::try_from_multihash(cert_hash)?; - - (port, fingerprint) - } - _ => return None, - }; - - match iter.next() { - Some(Protocol::P2p(_)) => {} - // peer ID is optional - None => {} - // unexpected protocol - Some(_) => return None, - } - - Some((SocketAddr::new(ip, port), fingerprint)) -} - // Tests ////////////////////////////////////////////////////////////////////////////////////////// #[cfg(test)] @@ -469,7 +435,7 @@ mod tests { use futures::future::poll_fn; use libp2p_core::{multiaddr::Protocol, Transport as _}; use rand::thread_rng; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::net::{IpAddr, Ipv6Addr}; #[test] fn missing_webrtc_protocol() { @@ -480,44 +446,6 @@ mod tests { assert!(maybe_parsed.is_none()); } - #[test] - fn parse_valid_address_with_certhash_and_p2p() { - let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" - .parse() - .unwrap(); - - let maybe_parsed = parse_webrtc_dial_addr(&addr); - - assert_eq!( - maybe_parsed, - Some(( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 39901), - Fingerprint::raw(hex_literal::hex!( - 
"e2929e4a5548242ed6b512350df8829b1e4f9d50183c5732a07f99d7c4b2b8eb" - )) - )) - ); - } - - #[test] - fn peer_id_is_not_required() { - let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w" - .parse() - .unwrap(); - - let maybe_parsed = parse_webrtc_dial_addr(&addr); - - assert_eq!( - maybe_parsed, - Some(( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 39901), - Fingerprint::raw(hex_literal::hex!( - "e2929e4a5548242ed6b512350df8829b1e4f9d50183c5732a07f99d7c4b2b8eb" - )) - )) - ); - } - #[test] fn tcp_is_invalid_protocol() { let addr = "/ip4/127.0.0.1/tcp/12345/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w" @@ -540,26 +468,6 @@ mod tests { assert!(maybe_parsed.is_none()); } - #[test] - fn parse_ipv6() { - let addr = - "/ip6/::1/udp/12345/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" - .parse() - .unwrap(); - - let maybe_parsed = parse_webrtc_dial_addr(&addr); - - assert_eq!( - maybe_parsed, - Some(( - SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 12345), - Fingerprint::raw(hex_literal::hex!( - "e2929e4a5548242ed6b512350df8829b1e4f9d50183c5732a07f99d7c4b2b8eb" - )) - )) - ); - } - #[test] fn can_parse_valid_addr_without_certhash() { let addr = "/ip6/::1/udp/12345/webrtc-direct".parse().unwrap(); diff --git a/transports/webrtc/src/tokio/upgrade.rs b/transports/webrtc/src/tokio/upgrade.rs index 2d5e3fe2..414fc272 100644 --- a/transports/webrtc/src/tokio/upgrade.rs +++ b/transports/webrtc/src/tokio/upgrade.rs @@ -18,15 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-mod noise; +use libp2p_webrtc_utils::{noise, Fingerprint}; use futures::channel::oneshot; use futures::future::Either; use futures_timer::Delay; use libp2p_identity as identity; use libp2p_identity::PeerId; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use std::{net::SocketAddr, sync::Arc, time::Duration}; use webrtc::api::setting_engine::SettingEngine; use webrtc::api::APIBuilder; use webrtc::data::data_channel::DataChannel; @@ -38,9 +37,8 @@ use webrtc::ice::udp_network::UDPNetwork; use webrtc::peer_connection::configuration::RTCConfiguration; use webrtc::peer_connection::RTCPeerConnection; -use std::{net::SocketAddr, sync::Arc, time::Duration}; - -use crate::tokio::{error::Error, fingerprint::Fingerprint, sdp, substream::Substream, Connection}; +use crate::tokio::sdp::random_ufrag; +use crate::tokio::{error::Error, sdp, stream::Stream, Connection}; /// Creates a new outbound WebRTC connection. pub(crate) async fn outbound( @@ -59,7 +57,7 @@ pub(crate) async fn outbound( log::debug!("created SDP offer for outbound connection: {:?}", offer.sdp); peer_connection.set_local_description(offer).await?; - let answer = sdp::answer(addr, &server_fingerprint, &ufrag); + let answer = sdp::answer(addr, server_fingerprint, &ufrag); log::debug!( "calculated SDP answer for outbound connection: {:?}", answer @@ -155,18 +153,6 @@ async fn new_inbound_connection( Ok(connection) } -/// Generates a random ufrag and adds a prefix according to the spec. 
-fn random_ufrag() -> String { - format!( - "libp2p+webrtc+v1/{}", - thread_rng() - .sample_iter(&Alphanumeric) - .take(64) - .map(char::from) - .collect::() - ) -} - fn setting_engine( udp_mux: Arc, ufrag: &str, @@ -203,9 +189,7 @@ async fn get_remote_fingerprint(conn: &RTCPeerConnection) -> Fingerprint { Fingerprint::from_certificate(&cert_bytes) } -async fn create_substream_for_noise_handshake( - conn: &RTCPeerConnection, -) -> Result { +async fn create_substream_for_noise_handshake(conn: &RTCPeerConnection) -> Result { // NOTE: the data channel w/ `negotiated` flag set to `true` MUST be created on both ends. let data_channel = conn .create_data_channel( @@ -234,7 +218,7 @@ async fn create_substream_for_noise_handshake( } }; - let (substream, drop_listener) = Substream::new(channel); + let (substream, drop_listener) = Stream::new(channel); drop(drop_listener); // Don't care about cancelled substreams during initial handshake. Ok(substream) diff --git a/transports/websocket/CHANGELOG.md b/transports/websocket/CHANGELOG.md index 5ae250a5..a93b1484 100644 --- a/transports/websocket/CHANGELOG.md +++ b/transports/websocket/CHANGELOG.md @@ -1,4 +1,13 @@ -## 0.42.0 +## 0.42.1 + +- Bump `futures-rustls` to `0.24.0`. + This is a part of the resolution of the [RUSTSEC-2023-0052]. + See [PR 4378]. + +[PR 4378]: https://github.com/libp2p/rust-libp2p/pull/4378 +[RUSTSEC-2023-0052]: https://rustsec.org/advisories/RUSTSEC-2023-0052.html + +## 0.42.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index b33af22f..f3ebf5af 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-websocket" edition = "2021" rust-version = { workspace = true } description = "WebSocket transport for libp2p" -version = "0.42.0" +version = "0.42.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,7 +11,7 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -futures-rustls = "0.22" +futures-rustls = "0.24.0" either = "1.9.0" futures = "0.3.28" libp2p-core = { workspace = true } @@ -21,7 +21,7 @@ parking_lot = "0.12.0" quicksink = "0.1" rw-stream-sink = { workspace = true } soketto = "0.7.0" -url = "2.1" +url = "2.4" webpki-roots = "0.25" [dev-dependencies] diff --git a/transports/websocket/src/lib.rs b/transports/websocket/src/lib.rs index 01c02b15..d7dd7628 100644 --- a/transports/websocket/src/lib.rs +++ b/transports/websocket/src/lib.rs @@ -74,7 +74,7 @@ use std::{ /// # #[async_std::main] /// # async fn main() { /// -/// let mut transport = websocket::WsConfig::new(dns::DnsConfig::system( +/// let mut transport = websocket::WsConfig::new(dns::async_std::Transport::system( /// tcp::async_io::Transport::new(tcp::Config::default()), /// ).await.unwrap()); /// diff --git a/transports/websocket/src/tls.rs b/transports/websocket/src/tls.rs index 63379db6..5bff818f 100644 --- a/transports/websocket/src/tls.rs +++ b/transports/websocket/src/tls.rs @@ -92,7 +92,7 @@ impl Config { /// Setup the rustls client configuration. 
fn client_root_store() -> rustls::RootCertStore { let mut client_root_store = rustls::RootCertStore::empty(); - client_root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { + client_root_store.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { rustls::OwnedTrustAnchor::from_subject_spki_name_constraints( ta.subject, ta.spki, diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index 695a62f4..7825f64a 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -23,7 +23,7 @@ log = "0.4.20" multiaddr = { workspace = true } multihash = { workspace = true } send_wrapper = { version = "0.6.0", features = ["futures"] } -thiserror = "1.0.44" +thiserror = "1.0.48" wasm-bindgen = "0.2.87" wasm-bindgen-futures = "0.4.37" web-sys = { version = "0.3.64", features = [ diff --git a/wasm-tests/webtransport-tests/src/lib.rs b/wasm-tests/webtransport-tests/src/lib.rs index 0ec2f0bc..1f420cd6 100644 --- a/wasm-tests/webtransport-tests/src/lib.rs +++ b/wasm-tests/webtransport-tests/src/lib.rs @@ -338,12 +338,14 @@ async fn fetch_server_addr() -> Multiaddr { .unwrap() } +#[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] // False positive. async fn create_stream(conn: &mut Connection) -> Stream { poll_fn(|cx| Pin::new(&mut *conn).poll_outbound(cx)) .await .unwrap() } +#[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] // False positive. async fn incoming_stream(conn: &mut Connection) -> Stream { let mut stream = poll_fn(|cx| Pin::new(&mut *conn).poll_inbound(cx)) .await