Merge branch 'master' into rand-feature

commit 6c03b8c6ae
Author: Ivan Boldyrev
Date: 2023-10-02 16:36:16 +04:00
235 changed files with 7802 additions and 4117 deletions


@ -1,3 +1,3 @@
[alias]
# Temporary solution to have clippy config in a single place until https://github.com/rust-lang/rust-clippy/blob/master/doc/roadmap-2021.md#lintstoml-configuration is shipped.
-custom-clippy = "clippy --workspace --all-features --all-targets -- -A clippy::type_complexity -A clippy::pedantic -W clippy::used_underscore_binding -W unreachable_pub -D warnings"
+custom-clippy = "clippy --workspace --all-features --all-targets -- -A clippy::type_complexity -A clippy::pedantic -W clippy::used_underscore_binding -W unreachable_pub"


@ -1,36 +0,0 @@
name: "Run cargo semver-checks"
description: "Install and run the cargo semver-checks tool"
inputs:
crate:
required: true
description: "The crate to run `cargo semver-checks` on."
runs:
using: "composite"
steps:
- run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.22.1/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin
shell: bash
- name: Get released version
shell: bash
id: get-released-version
run: |
MAX_STABLE_VERSION=$(curl https://crates.io/api/v1/crates/${{ inputs.crate }} --silent | jq '.crate.max_stable_version')
echo "version=${MAX_STABLE_VERSION}" >> $GITHUB_OUTPUT
- shell: bash
run: |
rustc --version | tee .rustc-version
cargo semver-checks --version | tee .semver-checks-version
- uses: actions/cache@v3
with:
path: ${{ github.workspace }}/target/semver-checks/cache
key: semver-checks-cache-${{ hashFiles('.rustc-version') }}-${{ hashFiles('.semver-checks-version') }}-${{ inputs.crate }}-${{ steps.get-released-version.outputs.version }}
- run: cargo semver-checks check-release --package ${{ inputs.crate }} --verbose
shell: bash
env:
CARGO_TERM_VERBOSE: "true"
# debugging https://github.com/libp2p/rust-libp2p/pull/3782#issuecomment-1523346255
CARGO_HTTP_DEBUG: "true"
CARGO_LOG: "cargo::ops::registry=debug"


@ -8,6 +8,11 @@ updates:
commit-message:
prefix: "deps"
rebase-strategy: "disabled"
+groups:
+trust-dns:
+patterns:
+- "trust-dns-*"
+- "async-std-resolver"
- package-ecosystem: "github-actions"
directory: "/"
schedule:


@ -18,11 +18,11 @@ jobs:
make_stable_rust_cache:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
-- uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1
+- uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
with:
shared-key: stable-cache


@ -7,7 +7,7 @@ jobs:
audit:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: actions-rs/audit-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}


@ -1,28 +0,0 @@
name: cargo deny
on:
push:
paths:
- '**/Cargo.toml'
pull_request:
paths:
- '**/Cargo.toml'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
cargo-deny:
runs-on: ubuntu-latest
strategy:
matrix:
checks:
- advisories
- bans licenses sources
steps:
- uses: actions/checkout@v3
- uses: EmbarkStudios/cargo-deny-action@v1
with:
command: check ${{ matrix.checks }}


@ -12,6 +12,7 @@ concurrency:
env:
SEGMENT_DOWNLOAD_TIMEOUT_MINS: 2 # Fail cache download after 2 minutes.
+RUSTFLAGS: '-Dwarnings' # Never tolerate warnings.
jobs:
test:
@ -31,13 +32,13 @@ jobs:
env:
CRATE: ${{ matrix.crate }}
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0
- uses: dtolnay/rust-toolchain@stable
-- uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1
+- uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
with:
shared-key: stable-cache
save-if: false
@ -48,21 +49,8 @@ jobs:
- name: Check if we compile without any features activated
run: cargo build --package "$CRATE" --no-default-features
-- run: cargo clean
-- name: Check if crate has been released
-id: check-released
-run: |
-RESPONSE_CODE=$(curl https://crates.io/api/v1/crates/"$CRATE" --silent --write-out "%{http_code}" --output /dev/null)
-echo "code=${RESPONSE_CODE}"
-echo "code=${RESPONSE_CODE}" >> $GITHUB_OUTPUT
-- uses: ./.github/actions/cargo-semver-checks
-if: steps.check-released.outputs.code == 200 && !contains(fromJSON('["libp2p-swarm-derive"]'), env.CRATE) # Workaround until https://github.com/obi1kenobi/cargo-semver-check/issues/146 is shipped.
-with:
-crate: ${{ env.CRATE }}
- name: Enforce no dependency on meta crate
+if: env.CRATE != 'libp2p-server'
run: |
cargo metadata --format-version=1 --no-deps | \
jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .dependencies | all(.name != "libp2p")'
@ -88,7 +76,7 @@ jobs:
env:
CHROMEDRIVER_VERSION: '114.0.5735.90'
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
@ -128,7 +116,7 @@ jobs:
os: windows-latest
runs-on: ${{ matrix.os }}
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
@ -136,7 +124,7 @@ jobs:
- uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0
-- uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1
+- uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
with:
key: ${{ matrix.target }}
save-if: ${{ github.ref == 'refs/heads/master' }}
@ -147,7 +135,7 @@ jobs:
name: Compile with MSRV
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- name: Extract MSRV from workspace manifest
shell: bash
@ -161,7 +149,7 @@ jobs:
- uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0
-- uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1
+- uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
with:
save-if: ${{ github.ref == 'refs/heads/master' }}
@ -176,13 +164,13 @@ jobs:
- features: "mdns tcp dns tokio"
- features: "mdns tcp dns async-std"
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0
-- uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1
+- uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
with:
key: ${{ matrix.features }}
save-if: ${{ github.ref == 'refs/heads/master' }}
@ -193,13 +181,13 @ jobs:
name: Check rustdoc intra-doc links
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0
-- uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1
+- uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
with:
save-if: ${{ github.ref == 'refs/heads/master' }}
@ -212,11 +200,11 @@ jobs:
fail-fast: false
matrix:
rust-version: [
-1.71.0, # current stable
+1.72.0, # current stable
beta
]
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
@ -225,7 +213,7 @@ jobs:
- uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0
-- uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1
+- uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
with:
save-if: ${{ github.ref == 'refs/heads/master' }}
@ -236,13 +224,13 @@ jobs:
name: IPFS Integration tests
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0
-- uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1
+- uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
with:
save-if: ${{ github.ref == 'refs/heads/master' }}
@ -252,13 +240,13 @@ jobs:
examples:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0
-- uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1
+- uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
with:
shared-key: stable-cache
save-if: false
@ -271,10 +259,18 @@ jobs:
cargo check --manifest-path "$toml";
done
+semver:
+runs-on: ubuntu-latest
+steps:
+- uses: actions/checkout@v4
+- run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.23.0/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin
+shell: bash
+- uses: obi1kenobi/cargo-semver-checks-action@v2
rustfmt:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
@ -288,7 +284,7 @@ jobs:
manifest_lint:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
@ -311,7 +307,7 @@ jobs:
outputs:
members: ${{ steps.cargo-metadata.outputs.members }}
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
@ -324,9 +320,9 @@ jobs:
name: Check for changes in proto files
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
-- uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1
+- uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
- run: cargo install --version 0.10.0 pb-rs --locked
@ -351,6 +347,14 @@ jobs:
name: Ensure that `Cargo.lock` is up-to-date
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
-- uses: Swatinem/rust-cache@578b235f6e5f613f7727f1c17bd3305b4d4d4e1f # v2.6.1
+- uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
- run: cargo metadata --locked --format-version=1 > /dev/null
+cargo-deny:
+runs-on: ubuntu-latest
+steps:
+- uses: actions/checkout@v4
+- uses: EmbarkStudios/cargo-deny-action@v1
+with:
+command: check advisories bans licenses sources

.github/workflows/docker-image.yml (new file)

@ -0,0 +1,39 @@
name: Publish docker images
on:
push:
branches:
- 'master'
tags:
- 'libp2p-server-**'
pull_request:
jobs:
server:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ github.token }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}-server
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
file: ./misc/server/Dockerfile
push: ${{ ! github.event.pull_request.head.repo.fork }} # Only push image if we have the required permissions, i.e. not running from a fork
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}


@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
-uses: actions/checkout@v3
+uses: actions/checkout@v4
- name: Install nightly toolchain
run: rustup toolchain install nightly
- name: Build Documentation


@ -17,8 +17,8 @@ jobs:
matrix:
flavour: [chromium, native]
steps:
-- uses: actions/checkout@v3
+- uses: actions/checkout@v4
-- uses: docker/setup-buildx-action@v2
+- uses: docker/setup-buildx-action@v3
- name: Build ${{ matrix.flavour }} image
run: docker buildx build --load -t ${{ matrix.flavour }}-rust-libp2p-head . -f interop-tests/Dockerfile.${{ matrix.flavour }}
- name: Run ${{ matrix.flavour }} tests


@ -20,6 +20,7 @@
- [`libp2p-relay` CHANGELOG](protocols/relay/CHANGELOG.md)
- [`libp2p-request-response` CHANGELOG](protocols/request-response/CHANGELOG.md)
- [`libp2p-rendezvous` CHANGELOG](protocols/rendezvous/CHANGELOG.md)
+- [`libp2p-upnp` CHANGELOG](protocols/upnp/CHANGELOG.md)
## Transport Protocols & Upgrades

Cargo.lock (generated): file diff suppressed because it is too large.


@ -2,7 +2,8 @@
members = [
"core",
"examples/autonat",
-"examples/chat-example",
+"examples/browser-webrtc",
+"examples/chat",
"examples/dcutr",
"examples/distributed-key-value-store",
"examples/file-sharing",
@ -10,13 +11,15 @@ members = [
"examples/ipfs-kad",
"examples/ipfs-private",
"examples/metrics",
-"examples/ping-example",
+"examples/ping",
"examples/relay-server",
"examples/rendezvous",
+"examples/upnp",
"identity",
"interop-tests",
"misc/allow-block-list",
"misc/connection-limits",
+"misc/futures-bounded",
"misc/keygen",
"misc/memory-connection-limits",
"misc/metrics",
@ -24,6 +27,8 @@ members = [
"misc/quick-protobuf-codec",
"misc/quickcheck-ext",
"misc/rw-stream-sink",
+"misc/server",
+"misc/webrtc-utils",
"muxers/mplex",
"muxers/test-harness",
"muxers/yamux",
@ -39,6 +44,7 @@ members = [
"protocols/relay",
"protocols/rendezvous",
"protocols/request-response",
+"protocols/upnp",
"swarm",
"swarm-derive",
"swarm-test",
@ -53,6 +59,7 @@ members = [
"transports/uds",
"transports/wasm-ext",
"transports/webrtc",
+"transports/webrtc-websys",
"transports/websocket",
"transports/webtransport-websys",
"wasm-tests/webtransport-tests",
@ -63,41 +70,47 @@ resolver = "2"
rust-version = "1.65.0"
[workspace.dependencies]
+futures-bounded = { version = "0.1.0", path = "misc/futures-bounded" }
+libp2p = { version = "0.52.3", path = "libp2p" }
libp2p-allow-block-list = { version = "0.2.0", path = "misc/allow-block-list" }
libp2p-autonat = { version = "0.11.0", path = "protocols/autonat" }
libp2p-connection-limits = { version = "0.2.1", path = "misc/connection-limits" }
-libp2p-core = { version = "0.40.0", path = "core" }
+libp2p-core = { version = "0.40.1", path = "core" }
libp2p-dcutr = { version = "0.10.0", path = "protocols/dcutr" }
-libp2p-deflate = { version = "0.40.0", path = "transports/deflate" }
+libp2p-deflate = { version = "0.40.1", path = "transports/deflate" }
-libp2p-dns = { version = "0.40.0", path = "transports/dns" }
+libp2p-dns = { version = "0.40.1", path = "transports/dns" }
libp2p-floodsub = { version = "0.43.0", path = "protocols/floodsub" }
libp2p-gossipsub = { version = "0.45.1", path = "protocols/gossipsub" }
-libp2p-identify = { version = "0.43.0", path = "protocols/identify" }
+libp2p-identify = { version = "0.43.1", path = "protocols/identify" }
libp2p-identity = { version = "0.2.3" }
-libp2p-kad = { version = "0.44.4", path = "protocols/kad" }
+libp2p-kad = { version = "0.44.6", path = "protocols/kad" }
libp2p-mdns = { version = "0.44.0", path = "protocols/mdns" }
libp2p-memory-connection-limits = { version = "0.1.0", path = "misc/memory-connection-limits" }
libp2p-metrics = { version = "0.13.1", path = "misc/metrics" }
libp2p-mplex = { version = "0.40.0", path = "muxers/mplex" }
libp2p-muxer-test-harness = { path = "muxers/test-harness" }
-libp2p-noise = { version = "0.43.0", path = "transports/noise" }
+libp2p-noise = { version = "0.43.1", path = "transports/noise" }
libp2p-perf = { version = "0.2.0", path = "protocols/perf" }
-libp2p-ping = { version = "0.43.0", path = "protocols/ping" }
+libp2p-ping = { version = "0.43.1", path = "protocols/ping" }
-libp2p-plaintext = { version = "0.40.0", path = "transports/plaintext" }
+libp2p-plaintext = { version = "0.40.1", path = "transports/plaintext" }
libp2p-pnet = { version = "0.23.0", path = "transports/pnet" }
libp2p-quic = { version = "0.9.2", path = "transports/quic" }
libp2p-relay = { version = "0.16.1", path = "protocols/relay" }
libp2p-rendezvous = { version = "0.13.0", path = "protocols/rendezvous" }
+libp2p-upnp = { version = "0.1.1", path = "protocols/upnp" }
libp2p-request-response = { version = "0.25.1", path = "protocols/request-response" }
-libp2p-swarm = { version = "0.43.3", path = "swarm" }
+libp2p-server = { version = "0.12.3", path = "misc/server" }
+libp2p-swarm = { version = "0.43.5", path = "swarm" }
libp2p-swarm-derive = { version = "0.33.0", path = "swarm-derive" }
libp2p-swarm-test = { version = "0.2.0", path = "swarm-test" }
libp2p-tcp = { version = "0.40.0", path = "transports/tcp" }
-libp2p-tls = { version = "0.2.0", path = "transports/tls" }
+libp2p-tls = { version = "0.2.1", path = "transports/tls" }
libp2p-uds = { version = "0.39.0", path = "transports/uds" }
libp2p-wasm-ext = { version = "0.40.0", path = "transports/wasm-ext" }
-libp2p-webrtc = { version = "0.6.0-alpha", path = "transports/webrtc" }
+libp2p-webrtc = { version = "0.6.1-alpha", path = "transports/webrtc" }
-libp2p-websocket = { version = "0.42.0", path = "transports/websocket" }
+libp2p-webrtc-utils = { version = "0.1.0", path = "misc/webrtc-utils" }
+libp2p-webrtc-websys = { version = "0.1.0-alpha", path = "transports/webrtc-websys" }
+libp2p-websocket = { version = "0.42.1", path = "transports/websocket" }
libp2p-webtransport-websys = { version = "0.1.0", path = "transports/webtransport-websys" }
libp2p-yamux = { version = "0.44.1", path = "muxers/yamux" }
multistream-select = { version = "0.13.0", path = "misc/multistream-select" }
@ -105,8 +118,7 @@ quick-protobuf-codec = { version = "0.2.0", path = "misc/quick-protobuf-codec" }
quickcheck = { package = "quickcheck-ext", path = "misc/quickcheck-ext" }
rw-stream-sink = { version = "0.4.0", path = "misc/rw-stream-sink" }
multiaddr = "0.18.0"
-multihash = "0.19.0"
+multihash = "0.19.1"
[patch.crates-io]


@ -80,15 +80,6 @@ This makes rust-libp2p a truly end-to-end solution, enabling users to use rust-l
Reduce maintenance burden and reduce dependency footprint.
-### Automate port-forwarding e.g. via UPnP
-| Category | Status | Target Completion | Tracking | Dependencies | Dependents |
-|--------------|--------|-------------------|---------------------------------------------------|--------------|------------|
-| Connectivity | todo | | https://github.com/libp2p/rust-libp2p/issues/3903 | | |
-Leverage protocols like UPnP to configure port-forwarding on ones router when behind NAT and/or firewall.
-Another technique in addition to hole punching increasing the probability for a node to become publicly reachable when behind a firewall and/or NAT.
## Done
### Alpha QUIC support
@ -176,3 +167,13 @@ Kademlia operations.
We added alpha support for QUIC in Q4/2022 wrapping `quinn-proto`.
Evaluate using `quinn` directly, replacing the wrapper.
+### Automate port-forwarding e.g. via UPnP
+| Category | Status | Target Completion | Tracking | Dependencies | Dependents |
+|--------------|--------|-------------------|---------------------------------------------------|--------------|------------|
+| Connectivity | done | | https://github.com/libp2p/rust-libp2p/pull/4156 | | |
+Leverage protocols like UPnP to configure port-forwarding on ones router when behind NAT and/or firewall.
+Another technique in addition to hole punching increasing the probability for a node to become publicly reachable when behind a firewall and/or NAT.
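For orientation only (not part of this diff): a minimal sketch of how the completed UPnP item is consumed from application code. It assumes the `upnp`, `ping`, `tokio`, and `macros` features of the libp2p 0.52 meta crate and its `libp2p::upnp` re-export; the struct and helper names are illustrative.

```rust
use libp2p::swarm::NetworkBehaviour;
use libp2p::{ping, upnp};

// Hypothetical composition: the UPnP behaviour asks the local gateway (via IGD)
// to forward whatever ports the swarm listens on, complementing hole punching.
#[derive(NetworkBehaviour)]
struct Behaviour {
    upnp: upnp::tokio::Behaviour,
    ping: ping::Behaviour,
}

fn behaviour() -> Behaviour {
    Behaviour {
        upnp: upnp::tokio::Behaviour::default(),
        ping: ping::Behaviour::default(),
    }
}
```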


@ -1,4 +1,11 @@
-## 0.40.0
+## 0.40.1
+- Implement `Debug` for `StreamMuxerEvent`.
+See [PR 4426].
+[PR 4426]: https://github.com/libp2p/rust-libp2p/pull/4426
+## 0.40.0
- Allow `ListenerId` to be user-controlled, i.e. to be provided on `Transport::listen_on`.
See [PR 3567].
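As a side note (not part of the changelog), a minimal sketch of what the new `Debug` impl enables, assuming `libp2p-core` 0.40.1 is in scope; the function name is made up for the example.

```rust
use libp2p_core::muxing::StreamMuxerEvent;

// With `StreamMuxerEvent` now deriving `Debug`, muxer events can be formatted directly.
fn on_muxer_event(event: StreamMuxerEvent) {
    println!("muxer event: {event:?}");
}
```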


@ -3,7 +3,7 @@ name = "libp2p-core"
edition = "2021" edition = "2021"
rust-version = { workspace = true } rust-version = { workspace = true }
description = "Core traits and structs of libp2p" description = "Core traits and structs of libp2p"
version = "0.40.0" version = "0.40.1"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT" license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p" repository = "https://github.com/libp2p/rust-libp2p"
@ -28,7 +28,7 @@ quick-protobuf = "0.8"
rand = "0.8" rand = "0.8"
rw-stream-sink = { workspace = true } rw-stream-sink = { workspace = true }
serde = { version = "1", optional = true, features = ["derive"] } serde = { version = "1", optional = true, features = ["derive"] }
smallvec = "1.11.0" smallvec = "1.11.1"
thiserror = "1.0" thiserror = "1.0"
unsigned-varint = "0.7" unsigned-varint = "0.7"
void = "1" void = "1"


@ -112,6 +112,7 @@ pub trait StreamMuxer {
}
/// An event produced by a [`StreamMuxer`].
+#[derive(Debug)]
pub enum StreamMuxerEvent {
/// The address of the remote has changed.
AddressChange(Multiaddr),


@ -30,8 +30,8 @@ use crate::{
TransportError, TransportEvent,
},
upgrade::{
-self, apply_inbound, apply_outbound, InboundUpgrade, InboundUpgradeApply, OutboundUpgrade,
-OutboundUpgradeApply, UpgradeError,
+self, apply_inbound, apply_outbound, InboundConnectionUpgrade, InboundUpgradeApply,
+OutboundConnectionUpgrade, OutboundUpgradeApply, UpgradeError,
},
Negotiated,
};
@ -101,8 +101,8 @@ where
T: Transport<Output = C>,
C: AsyncRead + AsyncWrite + Unpin,
D: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>, Output = (PeerId, D), Error = E>,
-U: OutboundUpgrade<Negotiated<C>, Output = (PeerId, D), Error = E> + Clone,
+U: InboundConnectionUpgrade<Negotiated<C>, Output = (PeerId, D), Error = E>,
+U: OutboundConnectionUpgrade<Negotiated<C>, Output = (PeerId, D), Error = E> + Clone,
E: Error + 'static,
{
let version = self.version;
@ -123,7 +123,7 @@ where
pub struct Authenticate<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>> + OutboundUpgrade<Negotiated<C>>,
+U: InboundConnectionUpgrade<Negotiated<C>> + OutboundConnectionUpgrade<Negotiated<C>>,
{
#[pin]
inner: EitherUpgrade<C, U>,
@ -132,11 +132,11 @@ where
impl<C, U> Future for Authenticate<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>>
-+ OutboundUpgrade<
+U: InboundConnectionUpgrade<Negotiated<C>>
++ OutboundConnectionUpgrade<
Negotiated<C>,
-Output = <U as InboundUpgrade<Negotiated<C>>>::Output,
-Error = <U as InboundUpgrade<Negotiated<C>>>::Error,
+Output = <U as InboundConnectionUpgrade<Negotiated<C>>>::Output,
+Error = <U as InboundConnectionUpgrade<Negotiated<C>>>::Error,
>,
{
type Output = <EitherUpgrade<C, U> as Future>::Output;
@ -155,7 +155,7 @@ where
pub struct Multiplex<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>> + OutboundUpgrade<Negotiated<C>>,
+U: InboundConnectionUpgrade<Negotiated<C>> + OutboundConnectionUpgrade<Negotiated<C>>,
{
peer_id: Option<PeerId>,
#[pin]
@ -165,8 +165,8 @@ where
impl<C, U, M, E> Future for Multiplex<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>, Output = M, Error = E>,
-U: OutboundUpgrade<Negotiated<C>, Output = M, Error = E>,
+U: InboundConnectionUpgrade<Negotiated<C>, Output = M, Error = E>,
+U: OutboundConnectionUpgrade<Negotiated<C>, Output = M, Error = E>,
{
type Output = Result<(PeerId, M), UpgradeError<E>>;
@ -208,8 +208,8 @@ where
T: Transport<Output = (PeerId, C)>,
C: AsyncRead + AsyncWrite + Unpin,
D: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>, Output = D, Error = E>,
-U: OutboundUpgrade<Negotiated<C>, Output = D, Error = E> + Clone,
+U: InboundConnectionUpgrade<Negotiated<C>, Output = D, Error = E>,
+U: OutboundConnectionUpgrade<Negotiated<C>, Output = D, Error = E> + Clone,
E: Error + 'static,
{
Authenticated(Builder::new(
@ -236,8 +236,8 @@ where
T: Transport<Output = (PeerId, C)>,
C: AsyncRead + AsyncWrite + Unpin,
M: StreamMuxer,
-U: InboundUpgrade<Negotiated<C>, Output = M, Error = E>,
-U: OutboundUpgrade<Negotiated<C>, Output = M, Error = E> + Clone,
+U: InboundConnectionUpgrade<Negotiated<C>, Output = M, Error = E>,
+U: OutboundConnectionUpgrade<Negotiated<C>, Output = M, Error = E> + Clone,
E: Error + 'static,
{
let version = self.0.version;
@ -269,8 +269,8 @@ where
T: Transport<Output = (PeerId, C)>,
C: AsyncRead + AsyncWrite + Unpin,
M: StreamMuxer,
-U: InboundUpgrade<Negotiated<C>, Output = M, Error = E>,
-U: OutboundUpgrade<Negotiated<C>, Output = M, Error = E> + Clone,
+U: InboundConnectionUpgrade<Negotiated<C>, Output = M, Error = E>,
+U: OutboundConnectionUpgrade<Negotiated<C>, Output = M, Error = E> + Clone,
E: Error + 'static,
F: for<'a> FnOnce(&'a PeerId, &'a ConnectedPoint) -> U + Clone,
{
@ -395,8 +395,8 @@ where
T: Transport<Output = (PeerId, C)>,
T::Error: 'static,
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>, Output = D, Error = E>,
-U: OutboundUpgrade<Negotiated<C>, Output = D, Error = E> + Clone,
+U: InboundConnectionUpgrade<Negotiated<C>, Output = D, Error = E>,
+U: OutboundConnectionUpgrade<Negotiated<C>, Output = D, Error = E> + Clone,
E: Error + 'static,
{
type Output = (PeerId, D);
@ -502,7 +502,7 @@ where
/// The [`Transport::Dial`] future of an [`Upgrade`]d transport.
pub struct DialUpgradeFuture<F, U, C>
where
-U: OutboundUpgrade<Negotiated<C>>,
+U: OutboundConnectionUpgrade<Negotiated<C>>,
C: AsyncRead + AsyncWrite + Unpin,
{
future: Pin<Box<F>>,
@ -513,7 +513,7 @@ impl<F, U, C, D> Future for DialUpgradeFuture<F, U, C>
where
F: TryFuture<Ok = (PeerId, C)>,
C: AsyncRead + AsyncWrite + Unpin,
-U: OutboundUpgrade<Negotiated<C>, Output = D>,
+U: OutboundConnectionUpgrade<Negotiated<C>, Output = D>,
U::Error: Error,
{
type Output = Result<(PeerId, D), TransportUpgradeError<F::Error, U::Error>>;
@ -553,7 +553,7 @@ where
impl<F, U, C> Unpin for DialUpgradeFuture<F, U, C>
where
-U: OutboundUpgrade<Negotiated<C>>,
+U: OutboundConnectionUpgrade<Negotiated<C>>,
C: AsyncRead + AsyncWrite + Unpin,
{
}
@ -562,7 +562,7 @@ where
pub struct ListenerUpgradeFuture<F, U, C>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>>,
+U: InboundConnectionUpgrade<Negotiated<C>>,
{
future: Pin<Box<F>>,
upgrade: future::Either<Option<U>, (PeerId, InboundUpgradeApply<C, U>)>,
@ -572,7 +572,7 @@ impl<F, U, C, D> Future for ListenerUpgradeFuture<F, U, C>
where
F: TryFuture<Ok = (PeerId, C)>,
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>, Output = D>,
+U: InboundConnectionUpgrade<Negotiated<C>, Output = D>,
U::Error: Error,
{
type Output = Result<(PeerId, D), TransportUpgradeError<F::Error, U::Error>>;
@ -613,6 +613,6 @@ where
impl<F, U, C> Unpin for ListenerUpgradeFuture<F, U, C>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>>,
+U: InboundConnectionUpgrade<Negotiated<C>>,
{
}


@ -125,3 +125,63 @@ pub trait OutboundUpgrade<C>: UpgradeInfo {
/// The `info` is the identifier of the protocol, as produced by `protocol_info`.
fn upgrade_outbound(self, socket: C, info: Self::Info) -> Self::Future;
}
/// Possible upgrade on an inbound connection
pub trait InboundConnectionUpgrade<T>: UpgradeInfo {
/// Output after the upgrade has been successfully negotiated and the handshake performed.
type Output;
/// Possible error during the handshake.
type Error;
/// Future that performs the handshake with the remote.
type Future: Future<Output = Result<Self::Output, Self::Error>>;
/// After we have determined that the remote supports one of the protocols we support, this
/// method is called to start the handshake.
///
/// The `info` is the identifier of the protocol, as produced by `protocol_info`.
fn upgrade_inbound(self, socket: T, info: Self::Info) -> Self::Future;
}
/// Possible upgrade on an outbound connection
pub trait OutboundConnectionUpgrade<T>: UpgradeInfo {
/// Output after the upgrade has been successfully negotiated and the handshake performed.
type Output;
/// Possible error during the handshake.
type Error;
/// Future that performs the handshake with the remote.
type Future: Future<Output = Result<Self::Output, Self::Error>>;
/// After we have determined that the remote supports one of the protocols we support, this
/// method is called to start the handshake.
///
/// The `info` is the identifier of the protocol, as produced by `protocol_info`.
fn upgrade_outbound(self, socket: T, info: Self::Info) -> Self::Future;
}
// Blanket implementation for InboundConnectionUpgrade based on InboundUpgrade for backwards compatibility
impl<U, T> InboundConnectionUpgrade<T> for U
where
U: InboundUpgrade<T>,
{
type Output = <U as InboundUpgrade<T>>::Output;
type Error = <U as InboundUpgrade<T>>::Error;
type Future = <U as InboundUpgrade<T>>::Future;
fn upgrade_inbound(self, socket: T, info: Self::Info) -> Self::Future {
self.upgrade_inbound(socket, info)
}
}
// Blanket implementation for OutboundConnectionUpgrade based on OutboundUpgrade for backwards compatibility
impl<U, T> OutboundConnectionUpgrade<T> for U
where
U: OutboundUpgrade<T>,
{
type Output = <U as OutboundUpgrade<T>>::Output;
type Error = <U as OutboundUpgrade<T>>::Error;
type Future = <U as OutboundUpgrade<T>>::Future;
fn upgrade_outbound(self, socket: T, info: Self::Info) -> Self::Future {
self.upgrade_outbound(socket, info)
}
}
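To make the intent of the blanket implementations above concrete, here is an illustrative, non-authoritative sketch (not part of the commit): a generic function with the new bounds, as `transport/upgrade.rs` now uses, still accepts any type that only implements the pre-existing `InboundUpgrade`/`OutboundUpgrade` traits. The function name is made up for the example.

```rust
use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade};

// Generic code can require the new connection-upgrade traits...
fn requires_connection_upgrade<C, U>(upgrade: U) -> U
where
    U: InboundConnectionUpgrade<C> + OutboundConnectionUpgrade<C>,
{
    upgrade
}

// ...and, thanks to the blanket impls, any existing `InboundUpgrade<C>` +
// `OutboundUpgrade<C>` implementation satisfies these bounds unchanged.
```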


@ -18,7 +18,7 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
-use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeError};
+use crate::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeError};
use crate::{connection::ConnectedPoint, Negotiated};
use futures::{future::Either, prelude::*};
use log::debug;
@ -37,7 +37,7 @@ pub(crate) fn apply<C, U>(
) -> Either<InboundUpgradeApply<C, U>, OutboundUpgradeApply<C, U>>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>> + OutboundUpgrade<Negotiated<C>>,
+U: InboundConnectionUpgrade<Negotiated<C>> + OutboundConnectionUpgrade<Negotiated<C>>,
{
match cp {
ConnectedPoint::Dialer { role_override, .. } if role_override.is_dialer() => {
@ -51,7 +51,7 @@ where
pub(crate) fn apply_inbound<C, U>(conn: C, up: U) -> InboundUpgradeApply<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>>,
+U: InboundConnectionUpgrade<Negotiated<C>>,
{
InboundUpgradeApply {
inner: InboundUpgradeApplyState::Init {
@ -65,7 +65,7 @@ where
pub(crate) fn apply_outbound<C, U>(conn: C, up: U, v: Version) -> OutboundUpgradeApply<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: OutboundUpgrade<Negotiated<C>>,
+U: OutboundConnectionUpgrade<Negotiated<C>>,
{
OutboundUpgradeApply {
inner: OutboundUpgradeApplyState::Init {
@ -79,7 +79,7 @@ where
pub struct InboundUpgradeApply<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>>,
+U: InboundConnectionUpgrade<Negotiated<C>>,
{
inner: InboundUpgradeApplyState<C, U>,
}
@ -88,7 +88,7 @@ where
enum InboundUpgradeApplyState<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>>,
+U: InboundConnectionUpgrade<Negotiated<C>>,
{
Init {
future: ListenerSelectFuture<C, U::Info>,
@ -104,14 +104,14 @@ where
impl<C, U> Unpin for InboundUpgradeApply<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>>,
+U: InboundConnectionUpgrade<Negotiated<C>>,
{
}
impl<C, U> Future for InboundUpgradeApply<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: InboundUpgrade<Negotiated<C>>,
+U: InboundConnectionUpgrade<Negotiated<C>>,
{
type Output = Result<U::Output, UpgradeError<U::Error>>;
@ -162,7 +162,7 @@ where
pub struct OutboundUpgradeApply<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: OutboundUpgrade<Negotiated<C>>,
+U: OutboundConnectionUpgrade<Negotiated<C>>,
{
inner: OutboundUpgradeApplyState<C, U>,
}
@ -170,7 +170,7 @@ where
enum OutboundUpgradeApplyState<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: OutboundUpgrade<Negotiated<C>>,
+U: OutboundConnectionUpgrade<Negotiated<C>>,
{
Init {
future: DialerSelectFuture<C, <U::InfoIter as IntoIterator>::IntoIter>,
@ -186,14 +186,14 @@ where
impl<C, U> Unpin for OutboundUpgradeApply<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: OutboundUpgrade<Negotiated<C>>,
+U: OutboundConnectionUpgrade<Negotiated<C>>,
{
}
impl<C, U> Future for OutboundUpgradeApply<C, U>
where
C: AsyncRead + AsyncWrite + Unpin,
-U: OutboundUpgrade<Negotiated<C>>,
+U: OutboundConnectionUpgrade<Negotiated<C>>,
{
type Output = Result<U::Output, UpgradeError<U::Error>>;


@ -7,7 +7,7 @@ A set of examples showcasing how to use rust-libp2p.
## Individual libp2p features
-- [Chat](./chat-example) A basic chat application demonstrating libp2p and the mDNS and Gossipsub protocols.
+- [Chat](./chat) A basic chat application demonstrating libp2p and the mDNS and Gossipsub protocols.
- [Distributed key-value store](./distributed-key-value-store) A basic key value store demonstrating libp2p and the mDNS and Kademlia protocol.
- [File sharing application](./file-sharing) Basic file sharing application with peers either providing or locating and getting files by name.
@ -20,6 +20,6 @@ A set of examples showcasing how to use rust-libp2p.
- [IPFS Private](./ipfs-private) Implementation using the gossipsub, ping and identify protocols to implement the ipfs private swarms feature.
-- [Ping](./ping-example) Small `ping` clone, sending a ping to a peer, expecting a pong as a response. See [tutorial](../src/tutorials/ping.rs) for a step-by-step guide building the example.
+- [Ping](./ping) Small `ping` clone, sending a ping to a peer, expecting a pong as a response. See [tutorial](../src/tutorials/ping.rs) for a step-by-step guide building the example.
- [Rendezvous](./rendezvous) Rendezvous Protocol. See [specs](https://github.com/libp2p/specs/blob/master/rendezvous/README.md).


@ -6,8 +6,8 @@ publish = false
license = "MIT" license = "MIT"
[dependencies] [dependencies]
async-std = { version = "1.12", features = ["attributes"] } tokio = { version = "1.32", features = ["full"] }
clap = { version = "4.3.21", features = ["derive"] } clap = { version = "4.3.23", features = ["derive"] }
env_logger = "0.10.0" env_logger = "0.10.0"
futures = "0.3.28" futures = "0.3.28"
libp2p = { path = "../../libp2p", features = ["async-std", "tcp", "noise", "yamux", "autonat", "identify", "macros"] } libp2p = { path = "../../libp2p", features = ["tokio", "tcp", "noise", "yamux", "autonat", "identify", "macros"] }


@ -43,7 +43,7 @@ struct Opt {
server_peer_id: PeerId,
}
-#[async_std::main]
+#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
@ -51,9 +51,8 @@ async fn main() -> Result<(), Box<dyn Error>> {
let local_key = identity::Keypair::generate_ed25519();
let local_peer_id = PeerId::from(local_key.public());
-println!("Local peer id: {local_peer_id:?}");
-let transport = tcp::async_io::Transport::default()
+let transport = tcp::tokio::Transport::default()
.upgrade(Version::V1Lazy)
.authenticate(noise::Config::new(&local_key)?)
.multiplex(yamux::Config::default())
@ -61,8 +60,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
let behaviour = Behaviour::new(local_key.public());
-let mut swarm =
-SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build();
+let mut swarm = SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build();
swarm.listen_on(
Multiaddr::empty()
.with(Protocol::Ip4(Ipv4Addr::UNSPECIFIED))


@ -35,7 +35,7 @@ struct Opt {
listen_port: Option<u16>,
}
-#[async_std::main]
+#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
@ -43,9 +43,8 @@ async fn main() -> Result<(), Box<dyn Error>> {
let local_key = identity::Keypair::generate_ed25519();
let local_peer_id = PeerId::from(local_key.public());
-println!("Local peer id: {local_peer_id:?}");
-let transport = tcp::async_io::Transport::default()
+let transport = tcp::tokio::Transport::default()
.upgrade(Version::V1Lazy)
.authenticate(noise::Config::new(&local_key)?)
.multiplex(yamux::Config::default())
@ -53,8 +52,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
let behaviour = Behaviour::new(local_key.public());
-let mut swarm =
-SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build();
+let mut swarm = SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build();
swarm.listen_on(
Multiaddr::empty()
.with(Protocol::Ip4(Ipv4Addr::UNSPECIFIED))


@ -0,0 +1,40 @@
[package]
authors = ["Doug Anderson <douganderson444@peerpiper.io>"]
description = "Example use of the WebRTC transport in a browser wasm environment"
edition = "2021"
license = "MIT"
name = "browser-webrtc-example"
publish = false
repository = "https://github.com/libp2p/rust-libp2p"
rust-version = { workspace = true }
version = "0.1.0"
[lib]
crate-type = ["cdylib"]
[dependencies]
anyhow = "1.0.72"
env_logger = "0.10"
futures = "0.3.28"
log = "0.4"
rand = "0.8"
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
axum = "0.6.19"
libp2p = { path = "../../libp2p", features = ["ed25519", "macros", "ping", "wasm-bindgen", "tokio"] }
libp2p-webrtc = { workspace = true, features = ["tokio"] }
rust-embed = { version = "8.0.0", features = ["include-exclude", "interpolate-folder-path"] }
tokio = { version = "1.29", features = ["macros", "net", "rt", "signal"] }
tokio-util = { version = "0.7", features = ["compat"] }
tower = "0.4"
tower-http = { version = "0.4.0", features = ["cors"] }
mime_guess = "2.0.4"
[target.'cfg(target_arch = "wasm32")'.dependencies]
js-sys = "0.3.64"
libp2p = { path = "../../libp2p", features = ["ed25519", "macros", "ping", "wasm-bindgen"] }
libp2p-webrtc-websys = { workspace = true }
wasm-bindgen = "0.2.84"
wasm-bindgen-futures = "0.4.37"
wasm-logger = { version = "0.2.0" }
web-sys = { version = "0.3", features = ['Document', 'Element', 'HtmlElement', 'Node', 'Response', 'Window'] }


@ -0,0 +1,18 @@
# Rust-libp2p Browser-Server WebRTC Example
This example demonstrates how to use the `libp2p-webrtc-websys` transport library in a browser to ping the WebRTC Server.
It uses [wasm-pack](https://rustwasm.github.io/docs/wasm-pack/) to build the project for use in the browser.
## Running the example
1. Build the client library:
```shell
wasm-pack build --target web --out-dir static
```
2. Start the server:
```shell
cargo run
```
3. Open the URL printed in the terminal


@ -0,0 +1,104 @@
#![cfg(target_arch = "wasm32")]
use futures::StreamExt;
use js_sys::Date;
use libp2p::core::Multiaddr;
use libp2p::identity::{Keypair, PeerId};
use libp2p::ping;
use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent};
use std::convert::From;
use std::io;
use wasm_bindgen::prelude::*;
use web_sys::{Document, HtmlElement};
#[wasm_bindgen]
pub async fn run(libp2p_endpoint: String) -> Result<(), JsError> {
wasm_logger::init(wasm_logger::Config::default());
let body = Body::from_current_window()?;
body.append_p("Let's ping the WebRTC Server!")?;
let local_key = Keypair::generate_ed25519();
let local_peer_id = PeerId::from(local_key.public());
let mut swarm = SwarmBuilder::with_wasm_executor(
libp2p_webrtc_websys::Transport::new(libp2p_webrtc_websys::Config::new(&local_key)).boxed(),
Behaviour {
ping: ping::Behaviour::new(ping::Config::new()),
keep_alive: keep_alive::Behaviour,
},
local_peer_id,
)
.build();
log::info!("Initialize swarm with identity: {local_peer_id}");
let addr = libp2p_endpoint.parse::<Multiaddr>()?;
log::info!("Dialing {addr}");
swarm.dial(addr)?;
loop {
match swarm.next().await.unwrap() {
SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { result: Err(e), .. })) => {
log::error!("Ping failed: {:?}", e);
break;
}
SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event {
peer,
result: Ok(rtt),
..
})) => {
log::info!("Ping successful: RTT: {rtt:?}, from {peer}");
body.append_p(&format!("RTT: {rtt:?} at {}", Date::new_0().to_string()))?;
}
evt => log::info!("Swarm event: {:?}", evt),
}
}
Ok(())
}
#[derive(NetworkBehaviour)]
struct Behaviour {
ping: ping::Behaviour,
keep_alive: keep_alive::Behaviour,
}
/// Convenience wrapper around the current document body
struct Body {
body: HtmlElement,
document: Document,
}
impl Body {
fn from_current_window() -> Result<Self, JsError> {
// Use `web_sys`'s global `window` function to get a handle on the global
// window object.
let document = web_sys::window()
.ok_or(js_error("no global `window` exists"))?
.document()
.ok_or(js_error("should have a document on window"))?;
let body = document
.body()
.ok_or(js_error("document should have a body"))?;
Ok(Self { body, document })
}
fn append_p(&self, msg: &str) -> Result<(), JsError> {
let val = self
.document
.create_element("p")
.map_err(|_| js_error("failed to create <p>"))?;
val.set_text_content(Some(msg));
self.body
.append_child(&val)
.map_err(|_| js_error("failed to append <p>"))?;
Ok(())
}
}
fn js_error(msg: &str) -> JsError {
io::Error::new(io::ErrorKind::Other, msg).into()
}


@ -0,0 +1,150 @@
#![allow(non_upper_case_globals)]
use anyhow::Result;
use axum::extract::{Path, State};
use axum::http::header::CONTENT_TYPE;
use axum::http::StatusCode;
use axum::response::{Html, IntoResponse};
use axum::{http::Method, routing::get, Router};
use futures::StreamExt;
use libp2p::{
core::muxing::StreamMuxerBox,
core::Transport,
identity,
multiaddr::{Multiaddr, Protocol},
ping,
swarm::{SwarmBuilder, SwarmEvent},
};
use libp2p_webrtc as webrtc;
use rand::thread_rng;
use std::net::{Ipv4Addr, SocketAddr};
use std::time::Duration;
use tower_http::cors::{Any, CorsLayer};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
env_logger::builder()
.parse_filters("browser_webrtc_example=debug,libp2p_webrtc=info,libp2p_ping=debug")
.parse_default_env()
.init();
let id_keys = identity::Keypair::generate_ed25519();
let local_peer_id = id_keys.public().to_peer_id();
let transport = webrtc::tokio::Transport::new(
id_keys,
webrtc::tokio::Certificate::generate(&mut thread_rng())?,
)
.map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn)))
.boxed();
let mut swarm =
SwarmBuilder::with_tokio_executor(transport, ping::Behaviour::default(), local_peer_id)
.idle_connection_timeout(Duration::from_secs(30)) // Allows us to observe the pings.
.build();
let address_webrtc = Multiaddr::from(Ipv4Addr::UNSPECIFIED)
.with(Protocol::Udp(0))
.with(Protocol::WebRTCDirect);
swarm.listen_on(address_webrtc.clone())?;
let address = loop {
if let SwarmEvent::NewListenAddr { address, .. } = swarm.select_next_some().await {
if address
.iter()
.any(|e| e == Protocol::Ip4(Ipv4Addr::LOCALHOST))
{
log::debug!("Ignoring localhost address to make sure the example works in Firefox");
continue;
}
log::info!("Listening on: {address}");
break address;
}
};
let addr = address.with(Protocol::P2p(*swarm.local_peer_id()));
// Serve .wasm, .js and server multiaddress over HTTP on this address.
tokio::spawn(serve(addr));
loop {
tokio::select! {
swarm_event = swarm.next() => {
log::trace!("Swarm Event: {:?}", swarm_event)
},
_ = tokio::signal::ctrl_c() => {
break;
}
}
}
Ok(())
}
#[derive(rust_embed::RustEmbed)]
#[folder = "$CARGO_MANIFEST_DIR/static"]
struct StaticFiles;
/// Serve the Multiaddr we are listening on and the host files.
pub(crate) async fn serve(libp2p_transport: Multiaddr) {
let listen_addr = match libp2p_transport.iter().next() {
Some(Protocol::Ip4(addr)) => addr,
_ => panic!("Expected 1st protocol to be IP4"),
};
let server = Router::new()
.route("/", get(get_index))
.route("/index.html", get(get_index))
.route("/:path", get(get_static_file))
.with_state(Libp2pEndpoint(libp2p_transport))
.layer(
// allow cors
CorsLayer::new()
.allow_origin(Any)
.allow_methods([Method::GET]),
);
let addr = SocketAddr::new(listen_addr.into(), 8080);
log::info!("Serving client files at http://{addr}");
axum::Server::bind(&addr)
.serve(server.into_make_service())
.await
.unwrap();
}
#[derive(Clone)]
struct Libp2pEndpoint(Multiaddr);
/// Serves the index.html file for our client.
///
/// Our server listens on a random UDP port for the WebRTC transport.
/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__` placeholder with the actual address.
async fn get_index(
State(Libp2pEndpoint(libp2p_endpoint)): State<Libp2pEndpoint>,
) -> Result<Html<String>, StatusCode> {
let content = StaticFiles::get("index.html")
.ok_or(StatusCode::NOT_FOUND)?
.data;
let html = std::str::from_utf8(&content)
.expect("index.html to be valid utf8")
.replace("__LIBP2P_ENDPOINT__", &libp2p_endpoint.to_string());
Ok(Html(html))
}
/// Serves the static files generated by `wasm-pack`.
async fn get_static_file(Path(path): Path<String>) -> Result<impl IntoResponse, StatusCode> {
log::debug!("Serving static file: {path}");
let content = StaticFiles::get(&path).ok_or(StatusCode::NOT_FOUND)?.data;
let content_type = mime_guess::from_path(path)
.first_or_octet_stream()
.to_string();
Ok(([(CONTENT_TYPE, content_type)], content))
}

View File

@ -0,0 +1,23 @@
<html>
<head>
<meta content="text/html;charset=utf-8" http-equiv="Content-Type" />
<link
rel="icon"
href="https://docs.libp2p.io/logos/libp2p_color_symbol.svg"
sizes="any"
/>
</head>
<body>
<div id="wrapper">
<h1>Rust Libp2p Demo!</h1>
</div>
<script type="module" defer>
import init, { run } from "./browser_webrtc_example.js"
await init();
run("__LIBP2P_ENDPOINT__"); // This placeholder will be replaced by the server at runtime with the actual listening address.
</script>
</body>
</html>

View File

@ -1,13 +0,0 @@
[package]
name = "chat-example"
version = "0.1.0"
edition = "2021"
publish = false
license = "MIT"
[dependencies]
async-std = { version = "1.12", features = ["attributes"] }
async-trait = "0.1"
env_logger = "0.10.0"
futures = "0.3.28"
libp2p = { path = "../../libp2p", features = ["async-std", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic"] }

13
examples/chat/Cargo.toml Normal file
View File

@ -0,0 +1,13 @@
[package]
name = "chat-example"
version = "0.1.0"
edition = "2021"
publish = false
license = "MIT"
[dependencies]
tokio = { version = "1.32", features = ["full"] }
async-trait = "0.1"
env_logger = "0.10.0"
futures = "0.3.28"
libp2p = { path = "../../libp2p", features = ["tokio", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic"] }

View File

@ -20,8 +20,7 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
use async_std::io; use futures::{future::Either, stream::StreamExt};
use futures::{future::Either, prelude::*, select};
use libp2p::{ use libp2p::{
core::{muxing::StreamMuxerBox, transport::OrTransport, upgrade}, core::{muxing::StreamMuxerBox, transport::OrTransport, upgrade},
gossipsub, identity, mdns, noise, quic, gossipsub, identity, mdns, noise, quic,
@ -33,29 +32,30 @@ use std::collections::hash_map::DefaultHasher;
use std::error::Error; use std::error::Error;
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use std::time::Duration; use std::time::Duration;
use tokio::{io, io::AsyncBufReadExt, select};
// We create a custom network behaviour that combines Gossipsub and Mdns. // We create a custom network behaviour that combines Gossipsub and Mdns.
#[derive(NetworkBehaviour)] #[derive(NetworkBehaviour)]
struct MyBehaviour { struct MyBehaviour {
gossipsub: gossipsub::Behaviour, gossipsub: gossipsub::Behaviour,
mdns: mdns::async_io::Behaviour, mdns: mdns::tokio::Behaviour,
} }
#[async_std::main] #[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> { async fn main() -> Result<(), Box<dyn Error>> {
// Create a random PeerId // Create a random PeerId
env_logger::init();
let id_keys = identity::Keypair::generate_ed25519(); let id_keys = identity::Keypair::generate_ed25519();
let local_peer_id = PeerId::from(id_keys.public()); let local_peer_id = PeerId::from(id_keys.public());
println!("Local peer id: {local_peer_id}");
// Set up an encrypted DNS-enabled TCP Transport over the yamux protocol. // Set up an encrypted DNS-enabled TCP Transport over the yamux protocol.
let tcp_transport = tcp::async_io::Transport::new(tcp::Config::default().nodelay(true)) let tcp_transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true))
.upgrade(upgrade::Version::V1Lazy) .upgrade(upgrade::Version::V1Lazy)
.authenticate(noise::Config::new(&id_keys).expect("signing libp2p-noise static keypair")) .authenticate(noise::Config::new(&id_keys).expect("signing libp2p-noise static keypair"))
.multiplex(yamux::Config::default()) .multiplex(yamux::Config::default())
.timeout(std::time::Duration::from_secs(20)) .timeout(std::time::Duration::from_secs(20))
.boxed(); .boxed();
let quic_transport = quic::async_std::Transport::new(quic::Config::new(&id_keys)); let quic_transport = quic::tokio::Transport::new(quic::Config::new(&id_keys));
let transport = OrTransport::new(quic_transport, tcp_transport) let transport = OrTransport::new(quic_transport, tcp_transport)
.map(|either_output, _| match either_output { .map(|either_output, _| match either_output {
Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)),
@ -91,13 +91,13 @@ async fn main() -> Result<(), Box<dyn Error>> {
// Create a Swarm to manage peers and events // Create a Swarm to manage peers and events
let mut swarm = { let mut swarm = {
let mdns = mdns::async_io::Behaviour::new(mdns::Config::default(), local_peer_id)?; let mdns = mdns::tokio::Behaviour::new(mdns::Config::default(), local_peer_id)?;
let behaviour = MyBehaviour { gossipsub, mdns }; let behaviour = MyBehaviour { gossipsub, mdns };
SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build() SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build()
}; };
// Read full lines from stdin // Read full lines from stdin
let mut stdin = io::BufReader::new(io::stdin()).lines().fuse(); let mut stdin = io::BufReader::new(io::stdin()).lines();
// Listen on all interfaces and whatever port the OS assigns // Listen on all interfaces and whatever port the OS assigns
swarm.listen_on("/ip4/0.0.0.0/udp/0/quic-v1".parse()?)?; swarm.listen_on("/ip4/0.0.0.0/udp/0/quic-v1".parse()?)?;
@ -108,13 +108,13 @@ async fn main() -> Result<(), Box<dyn Error>> {
// Kick it off // Kick it off
loop { loop {
select! { select! {
line = stdin.select_next_some() => { Ok(Some(line)) = stdin.next_line() => {
if let Err(e) = swarm if let Err(e) = swarm
.behaviour_mut().gossipsub .behaviour_mut().gossipsub
.publish(topic.clone(), line.expect("Stdin not to close").as_bytes()) { .publish(topic.clone(), line.as_bytes()) {
println!("Publish error: {e:?}"); println!("Publish error: {e:?}");
} }
}, }
event = swarm.select_next_some() => match event { event = swarm.select_next_some() => match event {
SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Discovered(list))) => { SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Discovered(list))) => {
for (peer_id, _multiaddr) in list { for (peer_id, _multiaddr) in list {
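
The hunk above migrates the chat example from `async-std` to `tokio`, and stdin handling moves from a fused futures stream to tokio's `next_line`. As a quick reference, here is a minimal, standalone sketch of that reading pattern only (a hedged fragment, not the full chat example; it presumes a `tokio` dependency with the `full` feature set used in the new Cargo.toml):

```rust
use tokio::io::{self, AsyncBufReadExt, BufReader};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Read full lines from stdin, as the migrated example does inside its select! loop.
    let mut stdin = BufReader::new(io::stdin()).lines();

    while let Some(line) = stdin.next_line().await? {
        // The chat example publishes this line via gossipsub instead of printing it.
        println!("read: {line}");
    }

    Ok(())
}
```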

View File

@ -1,12 +1,12 @@
[package] [package]
name = "dcutr" name = "dcutr-example"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2021"
publish = false publish = false
license = "MIT" license = "MIT"
[dependencies] [dependencies]
clap = { version = "4.3.21", features = ["derive"] } clap = { version = "4.3.23", features = ["derive"] }
env_logger = "0.10.0" env_logger = "0.10.0"
futures = "0.3.28" futures = "0.3.28"
futures-timer = "3.0" futures-timer = "3.0"

View File

@ -18,12 +18,12 @@ To run the example, follow these steps:
- Example usage in client-listen mode: - Example usage in client-listen mode:
```sh ```sh
cargo run -- --mode listen --secret-key-seed 42 --relay-address /ip4/127.0.0.1/tcp/12345 cargo run -- --mode listen --secret-key-seed 42 --relay-address /ip4/$RELAY_IP/tcp/$PORT/p2p/$RELAY_PEERID
``` ```
- Example usage in client-dial mode: - Example usage in client-dial mode:
```sh ```sh
cargo run -- --mode dial --secret-key-seed 42 --relay-address /ip4/127.0.0.1/tcp/12345 --remote-peer-id <REMOTE_PEER_ID> cargo run -- --mode dial --secret-key-seed 42 --relay-address /ip4/$RELAY_IP/tcp/$PORT/p2p/$RELAY_PEERID --remote-peer-id <REMOTE_PEER_ID>
``` ```
For this example to work, it is also necessary to turn on a relay server (you will find the related instructions in the example in the `examples/relay-server` folder). For this example to work, it is also necessary to turn on a relay server (you will find the related instructions in the example in the `examples/relay-server` folder).

View File

@ -33,9 +33,7 @@ use libp2p::{
transport::Transport, transport::Transport,
upgrade, upgrade,
}, },
dcutr, dcutr, dns, identify, identity, noise, ping, quic, relay,
dns::DnsConfig,
identify, identity, noise, ping, quic, relay,
swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent},
tcp, yamux, PeerId, tcp, yamux, PeerId,
}; };
@ -87,7 +85,6 @@ fn main() -> Result<(), Box<dyn Error>> {
let local_key = generate_ed25519(opts.secret_key_seed); let local_key = generate_ed25519(opts.secret_key_seed);
let local_peer_id = PeerId::from(local_key.public()); let local_peer_id = PeerId::from(local_key.public());
info!("Local peer id: {:?}", local_peer_id);
let (relay_transport, client) = relay::client::new(local_peer_id); let (relay_transport, client) = relay::client::new(local_peer_id);
@ -103,7 +100,7 @@ fn main() -> Result<(), Box<dyn Error>> {
&local_key, &local_key,
))); )));
block_on(DnsConfig::system(relay_tcp_quic_transport)) block_on(dns::async_std::Transport::system(relay_tcp_quic_transport))
.unwrap() .unwrap()
.map(|either_output, _| match either_output { .map(|either_output, _| match either_output {
Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)),

View File

@ -1,5 +1,5 @@
[package] [package]
name = "distributed-key-value-store" name = "distributed-key-value-store-example"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2021"
publish = false publish = false

View File

@ -23,12 +23,9 @@
use async_std::io; use async_std::io;
use futures::{prelude::*, select}; use futures::{prelude::*, select};
use libp2p::core::upgrade::Version; use libp2p::core::upgrade::Version;
use libp2p::kad;
use libp2p::kad::record::store::MemoryStore; use libp2p::kad::record::store::MemoryStore;
use libp2p::kad::Mode; use libp2p::kad::Mode;
use libp2p::kad::{
record::Key, AddProviderOk, GetProvidersOk, GetRecordOk, Kademlia, KademliaEvent, PeerRecord,
PutRecordOk, QueryResult, Quorum, Record,
};
use libp2p::{ use libp2p::{
identity, mdns, noise, identity, mdns, noise,
swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent},
@ -54,18 +51,18 @@ async fn main() -> Result<(), Box<dyn Error>> {
#[derive(NetworkBehaviour)] #[derive(NetworkBehaviour)]
#[behaviour(to_swarm = "MyBehaviourEvent")] #[behaviour(to_swarm = "MyBehaviourEvent")]
struct MyBehaviour { struct MyBehaviour {
kademlia: Kademlia<MemoryStore>, kademlia: kad::Behaviour<MemoryStore>,
mdns: mdns::async_io::Behaviour, mdns: mdns::async_io::Behaviour,
} }
#[allow(clippy::large_enum_variant)] #[allow(clippy::large_enum_variant)]
enum MyBehaviourEvent { enum MyBehaviourEvent {
Kademlia(KademliaEvent), Kademlia(kad::Event),
Mdns(mdns::Event), Mdns(mdns::Event),
} }
impl From<KademliaEvent> for MyBehaviourEvent { impl From<kad::Event> for MyBehaviourEvent {
fn from(event: KademliaEvent) -> Self { fn from(event: kad::Event) -> Self {
MyBehaviourEvent::Kademlia(event) MyBehaviourEvent::Kademlia(event)
} }
} }
@ -80,7 +77,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
let mut swarm = { let mut swarm = {
// Create a Kademlia behaviour. // Create a Kademlia behaviour.
let store = MemoryStore::new(local_peer_id); let store = MemoryStore::new(local_peer_id);
let kademlia = Kademlia::new(local_peer_id, store); let kademlia = kad::Behaviour::new(local_peer_id, store);
let mdns = mdns::async_io::Behaviour::new(mdns::Config::default(), local_peer_id)?; let mdns = mdns::async_io::Behaviour::new(mdns::Config::default(), local_peer_id)?;
let behaviour = MyBehaviour { kademlia, mdns }; let behaviour = MyBehaviour { kademlia, mdns };
SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build() SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build()
@ -107,9 +104,9 @@ async fn main() -> Result<(), Box<dyn Error>> {
swarm.behaviour_mut().kademlia.add_address(&peer_id, multiaddr); swarm.behaviour_mut().kademlia.add_address(&peer_id, multiaddr);
} }
} }
SwarmEvent::Behaviour(MyBehaviourEvent::Kademlia(KademliaEvent::OutboundQueryProgressed { result, ..})) => { SwarmEvent::Behaviour(MyBehaviourEvent::Kademlia(kad::Event::OutboundQueryProgressed { result, ..})) => {
match result { match result {
QueryResult::GetProviders(Ok(GetProvidersOk::FoundProviders { key, providers, .. })) => { kad::QueryResult::GetProviders(Ok(kad::GetProvidersOk::FoundProviders { key, providers, .. })) => {
for peer in providers { for peer in providers {
println!( println!(
"Peer {peer:?} provides key {:?}", "Peer {peer:?} provides key {:?}",
@ -117,12 +114,12 @@ async fn main() -> Result<(), Box<dyn Error>> {
); );
} }
} }
QueryResult::GetProviders(Err(err)) => { kad::QueryResult::GetProviders(Err(err)) => {
eprintln!("Failed to get providers: {err:?}"); eprintln!("Failed to get providers: {err:?}");
} }
QueryResult::GetRecord(Ok( kad::QueryResult::GetRecord(Ok(
GetRecordOk::FoundRecord(PeerRecord { kad::GetRecordOk::FoundRecord(kad::PeerRecord {
record: Record { key, value, .. }, record: kad::Record { key, value, .. },
.. ..
}) })
)) => { )) => {
@ -132,26 +129,26 @@ async fn main() -> Result<(), Box<dyn Error>> {
std::str::from_utf8(&value).unwrap(), std::str::from_utf8(&value).unwrap(),
); );
} }
QueryResult::GetRecord(Ok(_)) => {} kad::QueryResult::GetRecord(Ok(_)) => {}
QueryResult::GetRecord(Err(err)) => { kad::QueryResult::GetRecord(Err(err)) => {
eprintln!("Failed to get record: {err:?}"); eprintln!("Failed to get record: {err:?}");
} }
QueryResult::PutRecord(Ok(PutRecordOk { key })) => { kad::QueryResult::PutRecord(Ok(kad::PutRecordOk { key })) => {
println!( println!(
"Successfully put record {:?}", "Successfully put record {:?}",
std::str::from_utf8(key.as_ref()).unwrap() std::str::from_utf8(key.as_ref()).unwrap()
); );
} }
QueryResult::PutRecord(Err(err)) => { kad::QueryResult::PutRecord(Err(err)) => {
eprintln!("Failed to put record: {err:?}"); eprintln!("Failed to put record: {err:?}");
} }
QueryResult::StartProviding(Ok(AddProviderOk { key })) => { kad::QueryResult::StartProviding(Ok(kad::AddProviderOk { key })) => {
println!( println!(
"Successfully put provider record {:?}", "Successfully put provider record {:?}",
std::str::from_utf8(key.as_ref()).unwrap() std::str::from_utf8(key.as_ref()).unwrap()
); );
} }
QueryResult::StartProviding(Err(err)) => { kad::QueryResult::StartProviding(Err(err)) => {
eprintln!("Failed to put provider record: {err:?}"); eprintln!("Failed to put provider record: {err:?}");
} }
_ => {} _ => {}
@ -163,14 +160,14 @@ async fn main() -> Result<(), Box<dyn Error>> {
} }
} }
fn handle_input_line(kademlia: &mut Kademlia<MemoryStore>, line: String) { fn handle_input_line(kademlia: &mut kad::Behaviour<MemoryStore>, line: String) {
let mut args = line.split(' '); let mut args = line.split(' ');
match args.next() { match args.next() {
Some("GET") => { Some("GET") => {
let key = { let key = {
match args.next() { match args.next() {
Some(key) => Key::new(&key), Some(key) => kad::record::Key::new(&key),
None => { None => {
eprintln!("Expected key"); eprintln!("Expected key");
return; return;
@ -182,7 +179,7 @@ fn handle_input_line(kademlia: &mut Kademlia<MemoryStore>, line: String) {
Some("GET_PROVIDERS") => { Some("GET_PROVIDERS") => {
let key = { let key = {
match args.next() { match args.next() {
Some(key) => Key::new(&key), Some(key) => kad::record::Key::new(&key),
None => { None => {
eprintln!("Expected key"); eprintln!("Expected key");
return; return;
@ -194,7 +191,7 @@ fn handle_input_line(kademlia: &mut Kademlia<MemoryStore>, line: String) {
Some("PUT") => { Some("PUT") => {
let key = { let key = {
match args.next() { match args.next() {
Some(key) => Key::new(&key), Some(key) => kad::record::Key::new(&key),
None => { None => {
eprintln!("Expected key"); eprintln!("Expected key");
return; return;
@ -210,20 +207,20 @@ fn handle_input_line(kademlia: &mut Kademlia<MemoryStore>, line: String) {
} }
} }
}; };
let record = Record { let record = kad::Record {
key, key,
value, value,
publisher: None, publisher: None,
expires: None, expires: None,
}; };
kademlia kademlia
.put_record(record, Quorum::One) .put_record(record, kad::Quorum::One)
.expect("Failed to store record locally."); .expect("Failed to store record locally.");
} }
Some("PUT_PROVIDER") => { Some("PUT_PROVIDER") => {
let key = { let key = {
match args.next() { match args.next() {
Some(key) => Key::new(&key), Some(key) => kad::record::Key::new(&key),
None => { None => {
eprintln!("Expected key"); eprintln!("Expected key");
return; return;
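
The renames above fold the free-standing `Kademlia*` items into the `kad` module. A hedged sketch of the new paths, using only the calls visible in this hunk (assumes a libp2p build with the `kad` feature, as in this workspace):

```rust
use libp2p::kad::{self, record::store::MemoryStore};
use libp2p::{identity, PeerId};

fn main() {
    let key = identity::Keypair::generate_ed25519();
    let peer_id = PeerId::from(key.public());

    // `Kademlia` is now `kad::Behaviour`, backed by the in-memory record store.
    let store = MemoryStore::new(peer_id);
    let mut kademlia = kad::Behaviour::new(peer_id, store);

    // `Record`, `Key` and `Quorum` likewise move under `kad::`.
    let record = kad::Record {
        key: kad::record::Key::new(&"example-key"),
        value: b"example-value".to_vec(),
        publisher: None,
        expires: None,
    };
    kademlia
        .put_record(record, kad::Quorum::One)
        .expect("record to be stored locally");
}
```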

View File

@ -1,5 +1,5 @@
[package] [package]
name = "file-sharing" name = "file-sharing-example"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2021"
publish = false publish = false
@ -8,7 +8,7 @@ license = "MIT"
[dependencies] [dependencies]
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
async-std = { version = "1.12", features = ["attributes"] } async-std = { version = "1.12", features = ["attributes"] }
clap = { version = "4.3.21", features = ["derive"] } clap = { version = "4.3.23", features = ["derive"] }
either = "1.9" either = "1.9"
env_logger = "0.10" env_logger = "0.10"
futures = "0.3.28" futures = "0.3.28"

View File

@ -5,11 +5,7 @@ use futures::prelude::*;
use libp2p::{ use libp2p::{
core::Multiaddr, core::Multiaddr,
identity, identity, kad,
kad::{
self, record::store::MemoryStore, GetProvidersOk, Kademlia, KademliaEvent, QueryId,
QueryResult,
},
multiaddr::Protocol, multiaddr::Protocol,
noise, noise,
request_response::{self, ProtocolSupport, RequestId, ResponseChannel}, request_response::{self, ProtocolSupport, RequestId, ResponseChannel},
@ -56,7 +52,7 @@ pub(crate) async fn new(
let mut swarm = SwarmBuilder::with_async_std_executor( let mut swarm = SwarmBuilder::with_async_std_executor(
transport, transport,
ComposedBehaviour { ComposedBehaviour {
kademlia: Kademlia::new(peer_id, MemoryStore::new(peer_id)), kademlia: kad::Behaviour::new(peer_id, kad::record::store::MemoryStore::new(peer_id)),
request_response: request_response::cbor::Behaviour::new( request_response: request_response::cbor::Behaviour::new(
[( [(
StreamProtocol::new("/file-exchange/1"), StreamProtocol::new("/file-exchange/1"),
@ -179,8 +175,8 @@ pub(crate) struct EventLoop {
command_receiver: mpsc::Receiver<Command>, command_receiver: mpsc::Receiver<Command>,
event_sender: mpsc::Sender<Event>, event_sender: mpsc::Sender<Event>,
pending_dial: HashMap<PeerId, oneshot::Sender<Result<(), Box<dyn Error + Send>>>>, pending_dial: HashMap<PeerId, oneshot::Sender<Result<(), Box<dyn Error + Send>>>>,
pending_start_providing: HashMap<QueryId, oneshot::Sender<()>>, pending_start_providing: HashMap<kad::QueryId, oneshot::Sender<()>>,
pending_get_providers: HashMap<QueryId, oneshot::Sender<HashSet<PeerId>>>, pending_get_providers: HashMap<kad::QueryId, oneshot::Sender<HashSet<PeerId>>>,
pending_request_file: pending_request_file:
HashMap<RequestId, oneshot::Sender<Result<Vec<u8>, Box<dyn Error + Send>>>>, HashMap<RequestId, oneshot::Sender<Result<Vec<u8>, Box<dyn Error + Send>>>>,
} }
@ -221,9 +217,9 @@ impl EventLoop {
) { ) {
match event { match event {
SwarmEvent::Behaviour(ComposedEvent::Kademlia( SwarmEvent::Behaviour(ComposedEvent::Kademlia(
KademliaEvent::OutboundQueryProgressed { kad::Event::OutboundQueryProgressed {
id, id,
result: QueryResult::StartProviding(_), result: kad::QueryResult::StartProviding(_),
.. ..
}, },
)) => { )) => {
@ -234,11 +230,12 @@ impl EventLoop {
let _ = sender.send(()); let _ = sender.send(());
} }
SwarmEvent::Behaviour(ComposedEvent::Kademlia( SwarmEvent::Behaviour(ComposedEvent::Kademlia(
KademliaEvent::OutboundQueryProgressed { kad::Event::OutboundQueryProgressed {
id, id,
result: result:
QueryResult::GetProviders(Ok(GetProvidersOk::FoundProviders { kad::QueryResult::GetProviders(Ok(kad::GetProvidersOk::FoundProviders {
providers, .. providers,
..
})), })),
.. ..
}, },
@ -256,11 +253,11 @@ impl EventLoop {
} }
} }
SwarmEvent::Behaviour(ComposedEvent::Kademlia( SwarmEvent::Behaviour(ComposedEvent::Kademlia(
KademliaEvent::OutboundQueryProgressed { kad::Event::OutboundQueryProgressed {
result: result:
QueryResult::GetProviders(Ok(GetProvidersOk::FinishedWithNoAdditionalRecord { kad::QueryResult::GetProviders(Ok(
.. kad::GetProvidersOk::FinishedWithNoAdditionalRecord { .. },
})), )),
.. ..
}, },
)) => {} )) => {}
@ -412,13 +409,13 @@ impl EventLoop {
#[behaviour(to_swarm = "ComposedEvent")] #[behaviour(to_swarm = "ComposedEvent")]
struct ComposedBehaviour { struct ComposedBehaviour {
request_response: request_response::cbor::Behaviour<FileRequest, FileResponse>, request_response: request_response::cbor::Behaviour<FileRequest, FileResponse>,
kademlia: Kademlia<MemoryStore>, kademlia: kad::Behaviour<kad::record::store::MemoryStore>,
} }
#[derive(Debug)] #[derive(Debug)]
enum ComposedEvent { enum ComposedEvent {
RequestResponse(request_response::Event<FileRequest, FileResponse>), RequestResponse(request_response::Event<FileRequest, FileResponse>),
Kademlia(KademliaEvent), Kademlia(kad::Event),
} }
impl From<request_response::Event<FileRequest, FileResponse>> for ComposedEvent { impl From<request_response::Event<FileRequest, FileResponse>> for ComposedEvent {
@ -427,8 +424,8 @@ impl From<request_response::Event<FileRequest, FileResponse>> for ComposedEvent
} }
} }
impl From<KademliaEvent> for ComposedEvent { impl From<kad::Event> for ComposedEvent {
fn from(event: KademliaEvent) -> Self { fn from(event: kad::Event) -> Self {
ComposedEvent::Kademlia(event) ComposedEvent::Kademlia(event)
} }
} }

View File

@ -1,5 +1,5 @@
[package] [package]
name = "identify" name = "identify-example"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2021"
publish = false publish = false
@ -8,5 +8,6 @@ license = "MIT"
[dependencies] [dependencies]
async-std = { version = "1.12", features = ["attributes"] } async-std = { version = "1.12", features = ["attributes"] }
async-trait = "0.1" async-trait = "0.1"
env_logger = "0.10"
futures = "0.3.28" futures = "0.3.28"
libp2p = { path = "../../libp2p", features = ["async-std", "dns", "dcutr", "identify", "macros", "noise", "ping", "relay", "rendezvous", "tcp", "tokio", "yamux"] } libp2p = { path = "../../libp2p", features = ["async-std", "dns", "dcutr", "identify", "macros", "noise", "ping", "relay", "rendezvous", "tcp", "tokio", "yamux"] }

View File

@ -31,9 +31,9 @@ use std::error::Error;
#[async_std::main] #[async_std::main]
async fn main() -> Result<(), Box<dyn Error>> { async fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
let local_key = identity::Keypair::generate_ed25519(); let local_key = identity::Keypair::generate_ed25519();
let local_peer_id = PeerId::from(local_key.public()); let local_peer_id = PeerId::from(local_key.public());
println!("Local peer id: {local_peer_id:?}");
let transport = tcp::async_io::Transport::default() let transport = tcp::async_io::Transport::default()
.upgrade(Version::V1Lazy) .upgrade(Version::V1Lazy)

View File

@ -1,5 +1,5 @@
[package] [package]
name = "ipfs-kad" name = "ipfs-kad-example"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2021"
publish = false publish = false

View File

@ -21,8 +21,8 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
use futures::StreamExt; use futures::StreamExt;
use libp2p::kad;
use libp2p::kad::record::store::MemoryStore; use libp2p::kad::record::store::MemoryStore;
use libp2p::kad::{GetClosestPeersError, Kademlia, KademliaConfig, KademliaEvent, QueryResult};
use libp2p::{ use libp2p::{
development_transport, identity, development_transport, identity,
swarm::{SwarmBuilder, SwarmEvent}, swarm::{SwarmBuilder, SwarmEvent},
@ -51,10 +51,10 @@ async fn main() -> Result<(), Box<dyn Error>> {
// Create a swarm to manage peers and events. // Create a swarm to manage peers and events.
let mut swarm = { let mut swarm = {
// Create a Kademlia behaviour. // Create a Kademlia behaviour.
let mut cfg = KademliaConfig::default(); let mut cfg = kad::Config::default();
cfg.set_query_timeout(Duration::from_secs(5 * 60)); cfg.set_query_timeout(Duration::from_secs(5 * 60));
let store = MemoryStore::new(local_peer_id); let store = MemoryStore::new(local_peer_id);
let mut behaviour = Kademlia::with_config(local_peer_id, store, cfg); let mut behaviour = kad::Behaviour::with_config(local_peer_id, store, cfg);
// Add the bootnodes to the local routing table. `libp2p-dns` built // Add the bootnodes to the local routing table. `libp2p-dns` built
// into the `transport` resolves the `dnsaddr` when Kademlia tries // into the `transport` resolves the `dnsaddr` when Kademlia tries
@ -78,8 +78,8 @@ async fn main() -> Result<(), Box<dyn Error>> {
loop { loop {
let event = swarm.select_next_some().await; let event = swarm.select_next_some().await;
if let SwarmEvent::Behaviour(KademliaEvent::OutboundQueryProgressed { if let SwarmEvent::Behaviour(kad::Event::OutboundQueryProgressed {
result: QueryResult::GetClosestPeers(result), result: kad::QueryResult::GetClosestPeers(result),
.. ..
}) = event }) = event
{ {
@ -93,7 +93,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
println!("Query finished with no closest peers.") println!("Query finished with no closest peers.")
} }
} }
Err(GetClosestPeersError::Timeout { peers, .. }) => { Err(kad::GetClosestPeersError::Timeout { peers, .. }) => {
if !peers.is_empty() { if !peers.is_empty() {
println!("Query timed out with closest peers: {peers:#?}") println!("Query timed out with closest peers: {peers:#?}")
} else { } else {
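
Same rename family here: `KademliaConfig` becomes `kad::Config` and construction moves to `kad::Behaviour::with_config`. A hedged sketch restricted to the calls shown in the hunk above:

```rust
use libp2p::kad::{self, record::store::MemoryStore};
use libp2p::{identity, PeerId};
use std::time::Duration;

fn main() {
    let key = identity::Keypair::generate_ed25519();
    let local_peer_id = PeerId::from(key.public());

    // `KademliaConfig` -> `kad::Config`.
    let mut cfg = kad::Config::default();
    cfg.set_query_timeout(Duration::from_secs(5 * 60));

    // `Kademlia::with_config` -> `kad::Behaviour::with_config`.
    let store = MemoryStore::new(local_peer_id);
    let _behaviour = kad::Behaviour::with_config(local_peer_id, store, cfg);
}
```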

View File

@ -1,5 +1,5 @@
[package] [package]
name = "ipfs-private" name = "ipfs-private-example"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2021"
publish = false publish = false

View File

@ -26,12 +26,13 @@ use futures::stream::StreamExt;
use libp2p::core::{upgrade::Version, Multiaddr, Transport}; use libp2p::core::{upgrade::Version, Multiaddr, Transport};
use libp2p::identity::PeerId; use libp2p::identity::PeerId;
use libp2p::metrics::{Metrics, Recorder}; use libp2p::metrics::{Metrics, Recorder};
use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}; use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent};
use libp2p::{identify, identity, noise, ping, tcp, yamux}; use libp2p::{identify, identity, noise, ping, tcp, yamux};
use log::info; use log::info;
use prometheus_client::registry::Registry; use prometheus_client::registry::Registry;
use std::error::Error; use std::error::Error;
use std::thread; use std::thread;
use std::time::Duration;
mod http_service; mod http_service;
@ -41,7 +42,6 @@ fn main() -> Result<(), Box<dyn Error>> {
let local_key = identity::Keypair::generate_ed25519(); let local_key = identity::Keypair::generate_ed25519();
let local_peer_id = PeerId::from(local_key.public()); let local_peer_id = PeerId::from(local_key.public());
let local_pub_key = local_key.public(); let local_pub_key = local_key.public();
info!("Local peer id: {local_peer_id:?}");
let mut swarm = SwarmBuilder::without_executor( let mut swarm = SwarmBuilder::without_executor(
tcp::async_io::Transport::default() tcp::async_io::Transport::default()
@ -52,6 +52,7 @@ fn main() -> Result<(), Box<dyn Error>> {
Behaviour::new(local_pub_key), Behaviour::new(local_pub_key),
local_peer_id, local_peer_id,
) )
.idle_connection_timeout(Duration::from_secs(60))
.build(); .build();
swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;
@ -88,13 +89,9 @@ fn main() -> Result<(), Box<dyn Error>> {
} }
/// Our network behaviour. /// Our network behaviour.
///
/// For illustrative purposes, this includes the [`keep_alive::Behaviour`]) behaviour so the ping actually happen
/// and can be observed via the metrics.
#[derive(NetworkBehaviour)] #[derive(NetworkBehaviour)]
struct Behaviour { struct Behaviour {
identify: identify::Behaviour, identify: identify::Behaviour,
keep_alive: keep_alive::Behaviour,
ping: ping::Behaviour, ping: ping::Behaviour,
} }
@ -106,7 +103,6 @@ impl Behaviour {
"/ipfs/0.1.0".into(), "/ipfs/0.1.0".into(),
local_pub_key, local_pub_key,
)), )),
keep_alive: keep_alive::Behaviour,
} }
} }
} }

View File

@ -8,5 +8,6 @@ license = "MIT"
[dependencies] [dependencies]
async-std = { version = "1.12", features = ["attributes"] } async-std = { version = "1.12", features = ["attributes"] }
async-trait = "0.1" async-trait = "0.1"
env_logger = "0.10.0"
futures = "0.3.28" futures = "0.3.28"
libp2p = { path = "../../libp2p", features = ["async-std", "dns", "macros", "noise", "ping", "tcp", "websocket", "yamux"] } libp2p = { path = "../../libp2p", features = ["async-std", "dns", "macros", "noise", "ping", "tcp", "websocket", "yamux"] }

View File

@ -24,16 +24,17 @@ use futures::prelude::*;
use libp2p::core::upgrade::Version; use libp2p::core::upgrade::Version;
use libp2p::{ use libp2p::{
identity, noise, ping, identity, noise, ping,
swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, swarm::{SwarmBuilder, SwarmEvent},
tcp, yamux, Multiaddr, PeerId, Transport, tcp, yamux, Multiaddr, PeerId, Transport,
}; };
use std::error::Error; use std::error::Error;
use std::time::Duration;
#[async_std::main] #[async_std::main]
async fn main() -> Result<(), Box<dyn Error>> { async fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
let local_key = identity::Keypair::generate_ed25519(); let local_key = identity::Keypair::generate_ed25519();
let local_peer_id = PeerId::from(local_key.public()); let local_peer_id = PeerId::from(local_key.public());
println!("Local peer id: {local_peer_id:?}");
let transport = tcp::async_io::Transport::default() let transport = tcp::async_io::Transport::default()
.upgrade(Version::V1Lazy) .upgrade(Version::V1Lazy)
@ -42,7 +43,8 @@ async fn main() -> Result<(), Box<dyn Error>> {
.boxed(); .boxed();
let mut swarm = let mut swarm =
SwarmBuilder::with_async_std_executor(transport, Behaviour::default(), local_peer_id) SwarmBuilder::with_async_std_executor(transport, ping::Behaviour::default(), local_peer_id)
.idle_connection_timeout(Duration::from_secs(60)) // For illustrative purposes, keep idle connections alive for a minute so we can observe a few pings.
.build(); .build();
// Tell the swarm to listen on all interfaces and a random, OS-assigned // Tell the swarm to listen on all interfaces and a random, OS-assigned
@ -65,13 +67,3 @@ async fn main() -> Result<(), Box<dyn Error>> {
} }
} }
} }
/// Our network behaviour.
///
/// For illustrative purposes, this includes the [`KeepAlive`](keep_alive::Behaviour) behaviour so a continuous sequence of
/// pings can be observed.
#[derive(NetworkBehaviour, Default)]
struct Behaviour {
keep_alive: keep_alive::Behaviour,
ping: ping::Behaviour,
}
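
The removed doc comment and `keep_alive::Behaviour` above are superseded by the builder's `idle_connection_timeout`. Below is a condensed, single-column sketch of what the ping example looks like after this change, assembled only from lines visible in this diff (async-std flavour; treat it as a reading aid, not the canonical example):

```rust
use futures::StreamExt;
use libp2p::core::upgrade::Version;
use libp2p::{
    identity, noise, ping,
    swarm::{SwarmBuilder, SwarmEvent},
    tcp, yamux, PeerId, Transport,
};
use std::{error::Error, time::Duration};

#[async_std::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let local_key = identity::Keypair::generate_ed25519();
    let local_peer_id = PeerId::from(local_key.public());

    let transport = tcp::async_io::Transport::default()
        .upgrade(Version::V1Lazy)
        .authenticate(noise::Config::new(&local_key)?)
        .multiplex(yamux::Config::default())
        .boxed();

    let mut swarm =
        SwarmBuilder::with_async_std_executor(transport, ping::Behaviour::default(), local_peer_id)
            // Replaces keep_alive::Behaviour: idle connections stay open for a minute.
            .idle_connection_timeout(Duration::from_secs(60))
            .build();

    swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;

    loop {
        match swarm.select_next_some().await {
            SwarmEvent::NewListenAddr { address, .. } => println!("Listening on {address:?}"),
            SwarmEvent::Behaviour(event) => println!("{event:?}"),
            _ => {}
        }
    }
}
```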

View File

@ -6,7 +6,7 @@ publish = false
license = "MIT" license = "MIT"
[dependencies] [dependencies]
clap = { version = "4.3.21", features = ["derive"] } clap = { version = "4.3.23", features = ["derive"] }
async-std = { version = "1.12", features = ["attributes"] } async-std = { version = "1.12", features = ["attributes"] }
async-trait = "0.1" async-trait = "0.1"
env_logger = "0.10.0" env_logger = "0.10.0"

View File

@ -47,7 +47,6 @@ fn main() -> Result<(), Box<dyn Error>> {
// Create a static known PeerId based on given secret // Create a static known PeerId based on given secret
let local_key: identity::Keypair = generate_ed25519(opt.secret_key_seed); let local_key: identity::Keypair = generate_ed25519(opt.secret_key_seed);
let local_peer_id = PeerId::from(local_key.public()); let local_peer_id = PeerId::from(local_key.public());
println!("Local peer id: {local_peer_id:?}");
let tcp_transport = tcp::async_io::Transport::default(); let tcp_transport = tcp::async_io::Transport::default();

View File

@ -12,4 +12,4 @@ env_logger = "0.10.0"
futures = "0.3.28" futures = "0.3.28"
libp2p = { path = "../../libp2p", features = ["async-std", "identify", "macros", "noise", "ping", "rendezvous", "tcp", "tokio", "yamux"] } libp2p = { path = "../../libp2p", features = ["async-std", "identify", "macros", "noise", "ping", "rendezvous", "tcp", "tokio", "yamux"] }
log = "0.4" log = "0.4"
tokio = { version = "1.31", features = [ "rt-multi-thread", "macros", "time" ] } tokio = { version = "1.32", features = [ "rt-multi-thread", "macros", "time" ] }

View File

@ -24,7 +24,7 @@ use libp2p::{
identity, identity,
multiaddr::Protocol, multiaddr::Protocol,
noise, ping, rendezvous, noise, ping, rendezvous,
swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent},
tcp, yamux, Multiaddr, PeerId, Transport, tcp, yamux, Multiaddr, PeerId, Transport,
}; };
use std::time::Duration; use std::time::Duration;
@ -50,14 +50,12 @@ async fn main() {
MyBehaviour { MyBehaviour {
rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()), rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()),
ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))),
keep_alive: keep_alive::Behaviour,
}, },
PeerId::from(key_pair.public()), PeerId::from(key_pair.public()),
) )
.idle_connection_timeout(Duration::from_secs(5))
.build(); .build();
log::info!("Local peer id: {}", swarm.local_peer_id());
swarm.dial(rendezvous_point_address.clone()).unwrap(); swarm.dial(rendezvous_point_address.clone()).unwrap();
let mut discover_tick = tokio::time::interval(Duration::from_secs(30)); let mut discover_tick = tokio::time::interval(Duration::from_secs(30));
@ -129,5 +127,4 @@ async fn main() {
struct MyBehaviour { struct MyBehaviour {
rendezvous: rendezvous::client::Behaviour, rendezvous: rendezvous::client::Behaviour,
ping: ping::Behaviour, ping: ping::Behaviour,
keep_alive: keep_alive::Behaviour,
} }

View File

@ -22,7 +22,7 @@ use futures::StreamExt;
use libp2p::{ use libp2p::{
core::transport::upgrade::Version, core::transport::upgrade::Version,
identify, identity, noise, ping, rendezvous, identify, identity, noise, ping, rendezvous,
swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent},
tcp, yamux, Multiaddr, PeerId, Transport, tcp, yamux, Multiaddr, PeerId, Transport,
}; };
use std::time::Duration; use std::time::Duration;
@ -50,14 +50,12 @@ async fn main() {
)), )),
rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()), rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()),
ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))),
keep_alive: keep_alive::Behaviour,
}, },
PeerId::from(key_pair.public()), PeerId::from(key_pair.public()),
) )
.idle_connection_timeout(Duration::from_secs(5))
.build(); .build();
log::info!("Local peer id: {}", swarm.local_peer_id());
let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()); let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap());
swarm.dial(rendezvous_point_address.clone()).unwrap(); swarm.dial(rendezvous_point_address.clone()).unwrap();
@ -135,5 +133,4 @@ struct MyBehaviour {
identify: identify::Behaviour, identify: identify::Behaviour,
rendezvous: rendezvous::client::Behaviour, rendezvous: rendezvous::client::Behaviour,
ping: ping::Behaviour, ping: ping::Behaviour,
keep_alive: keep_alive::Behaviour,
} }

View File

@ -22,7 +22,7 @@ use futures::StreamExt;
use libp2p::{ use libp2p::{
core::transport::upgrade::Version, core::transport::upgrade::Version,
identity, noise, ping, rendezvous, identity, noise, ping, rendezvous,
swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent},
tcp, yamux, Multiaddr, PeerId, Transport, tcp, yamux, Multiaddr, PeerId, Transport,
}; };
use std::time::Duration; use std::time::Duration;
@ -46,10 +46,10 @@ async fn main() {
MyBehaviour { MyBehaviour {
rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()), rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()),
ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))),
keep_alive: keep_alive::Behaviour,
}, },
PeerId::from(key_pair.public()), PeerId::from(key_pair.public()),
) )
.idle_connection_timeout(Duration::from_secs(5))
.build(); .build();
// In production the external address should be the publicly facing IP address of the rendezvous point. // In production the external address should be the publicly facing IP address of the rendezvous point.
@ -57,8 +57,6 @@ async fn main() {
let external_address = "/ip4/127.0.0.1/tcp/0".parse::<Multiaddr>().unwrap(); let external_address = "/ip4/127.0.0.1/tcp/0".parse::<Multiaddr>().unwrap();
swarm.add_external_address(external_address); swarm.add_external_address(external_address);
log::info!("Local peer id: {}", swarm.local_peer_id());
swarm.dial(rendezvous_point_address.clone()).unwrap(); swarm.dial(rendezvous_point_address.clone()).unwrap();
while let Some(event) = swarm.next().await { while let Some(event) = swarm.next().await {
@ -132,5 +130,4 @@ async fn main() {
struct MyBehaviour { struct MyBehaviour {
rendezvous: rendezvous::client::Behaviour, rendezvous: rendezvous::client::Behaviour,
ping: ping::Behaviour, ping: ping::Behaviour,
keep_alive: keep_alive::Behaviour,
} }

View File

@ -24,7 +24,7 @@ use futures::StreamExt;
use libp2p::{ use libp2p::{
core::transport::upgrade::Version, core::transport::upgrade::Version,
identify, identity, noise, ping, rendezvous, identify, identity, noise, ping, rendezvous,
swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent},
tcp, yamux, PeerId, Transport, tcp, yamux, PeerId, Transport,
}; };
use std::time::Duration; use std::time::Duration;
@ -48,14 +48,12 @@ async fn main() {
)), )),
rendezvous: rendezvous::server::Behaviour::new(rendezvous::server::Config::default()), rendezvous: rendezvous::server::Behaviour::new(rendezvous::server::Config::default()),
ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))),
keep_alive: keep_alive::Behaviour,
}, },
PeerId::from(key_pair.public()), PeerId::from(key_pair.public()),
) )
.idle_connection_timeout(Duration::from_secs(5))
.build(); .build();
log::info!("Local peer id: {}", swarm.local_peer_id());
let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/62649".parse().unwrap()); let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/62649".parse().unwrap());
while let Some(event) = swarm.next().await { while let Some(event) = swarm.next().await {
@ -99,5 +97,4 @@ struct MyBehaviour {
identify: identify::Behaviour, identify: identify::Behaviour,
rendezvous: rendezvous::server::Behaviour, rendezvous: rendezvous::server::Behaviour,
ping: ping::Behaviour, ping: ping::Behaviour,
keep_alive: keep_alive::Behaviour,
} }

11
examples/upnp/Cargo.toml Normal file
View File

@ -0,0 +1,11 @@
[package]
name = "upnp-example"
version = "0.1.0"
edition = "2021"
publish = false
license = "MIT"
[dependencies]
tokio = { version = "1", features = [ "rt-multi-thread", "macros"] }
futures = "0.3.28"
libp2p = { path = "../../libp2p", features = ["tokio", "dns", "macros", "noise", "ping", "tcp", "websocket", "yamux", "upnp"] }

23
examples/upnp/README.md Normal file
View File

@ -0,0 +1,23 @@
## Description
The upnp example showcases how to use the `upnp` network behaviour to open external ports on the network gateway.
## Usage
To run the example, follow these steps:
1. In a terminal window, run the following command:
```sh
cargo run
```
2. This command starts the swarm and prints a `NewExternalAddr` event if the gateway supports `UPnP`, or a `GatewayNotFound` event if it doesn't.
## Conclusion
The upnp example demonstrates how **libp2p** can be used to open an external port on the gateway if it supports [`UPnP`](https://en.wikipedia.org/wiki/Universal_Plug_and_Play).
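
The complete program is in `examples/upnp/src/main.rs`, added below. As a quick reference for the two outcomes this README names, the relevant events can be matched like this (a hedged fragment; it assumes the `upnp::Event` variants used in that file and a libp2p build with the `upnp` feature):

```rust
use libp2p::upnp;

fn describe(event: &upnp::Event) -> &'static str {
    match event {
        upnp::Event::NewExternalAddr(_) => "gateway supports UPnP; external port mapped",
        upnp::Event::GatewayNotFound => "gateway does not support UPnP",
        upnp::Event::NonRoutableGateway => "gateway found, but it is not publicly routable",
        // Any other events (e.g. expired mappings) are ignored in this sketch.
        _ => "other UPnP event",
    }
}

fn main() {
    println!("{}", describe(&upnp::Event::GatewayNotFound));
}
```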

81
examples/upnp/src/main.rs Normal file
View File

@ -0,0 +1,81 @@
// Copyright 2023 Protocol Labs.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
#![doc = include_str!("../README.md")]
use futures::prelude::*;
use libp2p::core::upgrade::Version;
use libp2p::{
identity, noise,
swarm::{SwarmBuilder, SwarmEvent},
tcp, upnp, yamux, Multiaddr, PeerId, Transport,
};
use std::error::Error;
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let local_key = identity::Keypair::generate_ed25519();
let local_peer_id = PeerId::from(local_key.public());
println!("Local peer id: {local_peer_id:?}");
let transport = tcp::tokio::Transport::default()
.upgrade(Version::V1Lazy)
.authenticate(noise::Config::new(&local_key)?)
.multiplex(yamux::Config::default())
.boxed();
let mut swarm = SwarmBuilder::with_tokio_executor(
transport,
upnp::tokio::Behaviour::default(),
local_peer_id,
)
.build();
// Tell the swarm to listen on all interfaces and a random, OS-assigned
// port.
swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;
// Dial the peer identified by the multi-address given as the second
// command-line argument, if any.
if let Some(addr) = std::env::args().nth(1) {
let remote: Multiaddr = addr.parse()?;
swarm.dial(remote)?;
println!("Dialed {addr}")
}
loop {
match swarm.select_next_some().await {
SwarmEvent::NewListenAddr { address, .. } => println!("Listening on {address:?}"),
SwarmEvent::Behaviour(upnp::Event::NewExternalAddr(addr)) => {
println!("New external address: {addr}");
}
SwarmEvent::Behaviour(upnp::Event::GatewayNotFound) => {
println!("Gateway does not support UPnP");
break;
}
SwarmEvent::Behaviour(upnp::Event::NonRoutableGateway) => {
println!("Gateway is not exposed directly to the public Internet, i.e. it itself has a private IP address.");
break;
}
_ => {}
}
}
Ok(())
}

View File

@ -1,3 +1,10 @@
## 0.2.4 - unreleased
- Implement `Keypair::derive_secret`, to deterministically derive a new secret from the embedded secret key.
See [PR 4554].
[PR 4554]: https://github.com/libp2p/rust-libp2p/pull/4554
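
A hedged usage sketch for the new API, mirroring the doc example added to `Keypair` later in this diff (assumes a `libp2p-identity` dependency with the `ed25519` and `rand` features):

```rust
use libp2p_identity as identity;

fn main() {
    let key = identity::Keypair::generate_ed25519();

    // Deterministic, domain-separated secret; `derive_secret` returns `None` only for RSA keys.
    let secret: [u8; 32] = key
        .derive_secret(b"my encryption key")
        .expect("ed25519 keys support secret derivation");

    println!("derived a {}-byte secret", secret.len());
}
```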
## 0.2.3 ## 0.2.3
- Fix [RUSTSEC-2022-0093] by updating `ed25519-dalek` to `2.0`. - Fix [RUSTSEC-2022-0093] by updating `ed25519-dalek` to `2.0`.

View File

@ -15,9 +15,10 @@ categories = ["cryptography"]
asn1_der = { version = "0.7.6", optional = true } asn1_der = { version = "0.7.6", optional = true }
bs58 = { version = "0.5.0", optional = true } bs58 = { version = "0.5.0", optional = true }
ed25519-dalek = { version = "2.0", optional = true } ed25519-dalek = { version = "2.0", optional = true }
hkdf = { version = "0.12.3", optional = true }
libsecp256k1 = { version = "0.7.0", optional = true } libsecp256k1 = { version = "0.7.0", optional = true }
log = "0.4" log = "0.4"
multihash = { version = "0.19.0", optional = true } multihash = { version = "0.19.1", optional = true }
p256 = { version = "0.13", default-features = false, features = ["ecdsa", "std", "pem"], optional = true } p256 = { version = "0.13", default-features = false, features = ["ecdsa", "std", "pem"], optional = true }
quick-protobuf = "0.8.1" quick-protobuf = "0.8.1"
rand = { version = "0.8", optional = true } rand = { version = "0.8", optional = true }
@ -32,16 +33,18 @@ zeroize = { version = "1.6", optional = true }
ring = { version = "0.16.9", features = ["alloc", "std"], default-features = false, optional = true} ring = { version = "0.16.9", features = ["alloc", "std"], default-features = false, optional = true}
[features] [features]
secp256k1 = [ "dep:libsecp256k1", "dep:asn1_der", "dep:sha2", "dep:zeroize" ] secp256k1 = [ "dep:libsecp256k1", "dep:asn1_der", "dep:sha2", "dep:hkdf", "dep:zeroize" ]
ecdsa = [ "dep:p256", "dep:void", "dep:zeroize", "dep:sec1" ] ecdsa = [ "dep:p256", "dep:void", "dep:zeroize", "dep:sec1", "dep:sha2", "dep:hkdf" ]
rsa = [ "dep:ring", "dep:asn1_der", "rand", "dep:zeroize" ] rsa = [ "dep:ring", "dep:asn1_der", "dep:rand", "dep:zeroize" ]
ed25519 = [ "dep:ed25519-dalek", "dep:zeroize" ] ed25519 = [ "dep:ed25519-dalek", "dep:rand", "dep:zeroize", "dep:sha2", "dep:hkdf" ]
peerid = [ "dep:multihash", "dep:bs58", "dep:thiserror", "dep:sha2" ] peerid = [ "dep:multihash", "dep:bs58", "dep:thiserror", "dep:sha2", "dep:hkdf" ]
rand = ["dep:rand", "ed25519-dalek?/rand_core"] rand = ["dep:rand", "ed25519-dalek?/rand_core"]
default = ["rand"]
[dev-dependencies] [dev-dependencies]
quickcheck = { workspace = true } quickcheck = { workspace = true }
base64 = "0.21.2" base64 = "0.21.4"
serde_json = "1.0" serde_json = "1.0"
rmp-serde = "1.1" rmp-serde = "1.1"
criterion = "0.5" criterion = "0.5"

View File

@ -129,7 +129,7 @@ impl hash::Hash for PublicKey {
impl cmp::PartialOrd for PublicKey { impl cmp::PartialOrd for PublicKey {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
self.0.as_bytes().partial_cmp(other.0.as_bytes()) Some(self.cmp(other))
} }
} }
@ -199,6 +199,10 @@ impl SecretKey {
sk_bytes.zeroize(); sk_bytes.zeroize();
Ok(SecretKey(secret)) Ok(SecretKey(secret))
} }
pub(crate) fn to_bytes(&self) -> [u8; 32] {
self.0
}
} }
#[cfg(test)] #[cfg(test)]

View File

@ -77,11 +77,7 @@ impl DecodingError {
} }
} }
#[cfg(any( #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))]
all(feature = "rsa", not(target_arch = "wasm32")),
feature = "secp256k1",
feature = "ecdsa"
))]
pub(crate) fn encoding_unsupported(key_type: &'static str) -> Self { pub(crate) fn encoding_unsupported(key_type: &'static str) -> Self {
Self { Self {
msg: format!("encoding {key_type} key to Protobuf is unsupported"), msg: format!("encoding {key_type} key to Protobuf is unsupported"),
@ -111,7 +107,7 @@ pub struct SigningError {
/// An error during encoding of key material. /// An error during encoding of key material.
impl SigningError { impl SigningError {
#[cfg(any(feature = "secp256k1", feature = "rsa"))] #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))]
pub(crate) fn new<S: ToString>(msg: S) -> Self { pub(crate) fn new<S: ToString>(msg: S) -> Self {
Self { Self {
msg: msg.to_string(), msg: msg.to_string(),
@ -119,7 +115,7 @@ impl SigningError {
} }
} }
#[cfg(feature = "rsa")] #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))]
pub(crate) fn source(self, source: impl Error + Send + Sync + 'static) -> Self { pub(crate) fn source(self, source: impl Error + Send + Sync + 'static) -> Self {
Self { Self {
source: Some(Box::new(source)), source: Some(Box::new(source)),

View File

@ -342,6 +342,74 @@ impl Keypair {
KeyPairInner::Ecdsa(_) => KeyType::Ecdsa, KeyPairInner::Ecdsa(_) => KeyType::Ecdsa,
} }
} }
#[doc = r##"
Deterministically derive a new secret from this [`Keypair`], taking into account the provided domain.
This works for all key types except RSA where it returns `None`.
# Example
"##]
#[cfg_attr(
feature = "rand",
doc = r##"
```
"##
)]
#[cfg_attr(
not(feature = "rand"),
doc = r##"
```ignore
"##
)]
#[doc = r##"
# fn main() {
# use libp2p_identity as identity;
let key = identity::Keypair::generate_ed25519();
let new_key = key.derive_secret(b"my encryption key").expect("can derive secret for ed25519");
# }
```
"##
]
#[allow(unused_variables, unreachable_code)]
pub fn derive_secret(&self, domain: &[u8]) -> Option<[u8; 32]> {
#[cfg(any(
feature = "ecdsa",
feature = "secp256k1",
feature = "ed25519",
feature = "rsa"
))]
return Some(
hkdf::Hkdf::<sha2::Sha256>::extract(None, &[domain, &self.secret()?].concat())
.0
.into(),
);
None
}
/// Return the secret key of the [`Keypair`].
#[allow(dead_code)]
pub(crate) fn secret(&self) -> Option<[u8; 32]> {
match self.keypair {
#[cfg(feature = "ed25519")]
KeyPairInner::Ed25519(ref inner) => Some(inner.secret().to_bytes()),
#[cfg(all(feature = "rsa", not(target_arch = "wasm32")))]
KeyPairInner::Rsa(_) => return None,
#[cfg(feature = "secp256k1")]
KeyPairInner::Secp256k1(ref inner) => Some(inner.secret().to_bytes()),
#[cfg(feature = "ecdsa")]
KeyPairInner::Ecdsa(ref inner) => Some(
inner
.secret()
.to_bytes()
.try_into()
.expect("Ecdsa's private key should be 32 bytes"),
),
}
}
} }
#[cfg(feature = "ecdsa")] #[cfg(feature = "ecdsa")]
@ -901,4 +969,11 @@ mod tests {
assert_eq!(converted_pubkey, pubkey); assert_eq!(converted_pubkey, pubkey);
assert_eq!(converted_pubkey.key_type(), KeyType::Ecdsa) assert_eq!(converted_pubkey.key_type(), KeyType::Ecdsa)
} }
#[test]
#[cfg(feature = "ecdsa")]
fn test_secret_from_ecdsa_private_key() {
let keypair = Keypair::generate_ecdsa();
assert!(keypair.derive_secret(b"domain separator!").is_some())
}
} }

View File

@ -180,7 +180,7 @@ impl hash::Hash for PublicKey {
impl cmp::PartialOrd for PublicKey { impl cmp::PartialOrd for PublicKey {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
self.to_bytes().partial_cmp(&other.to_bytes()) Some(self.cmp(other))
} }
} }

View File

@ -23,17 +23,18 @@ libp2p = { path = "../libp2p", features = ["ping", "noise", "tls", "rsa", "macro
libp2p-webrtc = { workspace = true, features = ["tokio"] } libp2p-webrtc = { workspace = true, features = ["tokio"] }
libp2p-mplex = { path = "../muxers/mplex" } libp2p-mplex = { path = "../muxers/mplex" }
mime_guess = "2.0" mime_guess = "2.0"
redis = { version = "0.23.2", default-features = false, features = ["tokio-comp"] } redis = { version = "0.23.3", default-features = false, features = ["tokio-comp"] }
rust-embed = "6.8" rust-embed = "8.0"
serde_json = "1" serde_json = "1"
thirtyfour = "=0.32.0-rc.8" # https://github.com/stevepryde/thirtyfour/issues/169 thirtyfour = "=0.32.0-rc.8" # https://github.com/stevepryde/thirtyfour/issues/169
tokio = { version = "1.31.0", features = ["full"] } tokio = { version = "1.32.0", features = ["full"] }
tower-http = { version = "0.4", features = ["cors", "fs", "trace"] } tower-http = { version = "0.4", features = ["cors", "fs", "trace"] }
tracing = "0.1" tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-subscriber = { version = "0.3", features = ["env-filter"] }
[target.'cfg(target_arch = "wasm32")'.dependencies] [target.'cfg(target_arch = "wasm32")'.dependencies]
libp2p = { path = "../libp2p", features = ["ping", "macros", "webtransport-websys", "wasm-bindgen", "identify"] } libp2p = { path = "../libp2p", features = ["ping", "macros", "webtransport-websys", "wasm-bindgen", "identify"] }
libp2p-webrtc-websys = { workspace = true }
wasm-bindgen = { version = "0.2" } wasm-bindgen = { version = "0.2" }
wasm-bindgen-futures = { version = "0.4" } wasm-bindgen-futures = { version = "0.4" }
wasm-logger = { version = "0.2.0" } wasm-logger = { version = "0.2.0" }

View File

@ -6,11 +6,8 @@ ADD . .
RUN rustup target add wasm32-unknown-unknown RUN rustup target add wasm32-unknown-unknown
RUN --mount=type=cache,target=/usr/local/cargo/registry \ RUN wget -q -O- https://github.com/rustwasm/wasm-pack/releases/download/v0.12.1/wasm-pack-v0.12.1-x86_64-unknown-linux-musl.tar.gz | tar -zx -C /usr/local/bin --strip-components 1 --wildcards "wasm-pack-*/wasm-pack"
cargo install wasm-pack@0.11.1 --locked RUN wget -q -O- https://github.com/WebAssembly/binaryen/releases/download/version_115/binaryen-version_115-x86_64-linux.tar.gz | tar -zx -C /usr/local/bin --strip-components 2 --wildcards "binaryen-version_*/bin/wasm-opt"
RUN --mount=type=cache,target=/usr/local/cargo/registry \
cargo install wasm-opt@0.113.0 --locked
RUN --mount=type=cache,target=./target \ RUN --mount=type=cache,target=./target \
--mount=type=cache,target=/usr/local/cargo/registry \ --mount=type=cache,target=/usr/local/cargo/registry \
@ -23,7 +20,7 @@ RUN --mount=type=cache,target=./target \
RUN --mount=type=cache,target=./target \ RUN --mount=type=cache,target=./target \
mv ./target/release/wasm_ping /usr/local/bin/testplan mv ./target/release/wasm_ping /usr/local/bin/testplan
FROM selenium/standalone-chrome:112.0 FROM selenium/standalone-chrome:115.0
COPY --from=builder /usr/local/bin/testplan /usr/local/bin/testplan COPY --from=builder /usr/local/bin/testplan /usr/local/bin/testplan
ENV RUST_BACKTRACE=1 ENV RUST_BACKTRACE=1

View File

@ -8,13 +8,9 @@ You can run this test locally by having a local Redis instance and by having
another peer that this test can dial or listen for. For example to test that we another peer that this test can dial or listen for. For example to test that we
can dial/listen for ourselves we can do the following: can dial/listen for ourselves we can do the following:
1. Start redis (needed by the tests): `docker run --rm -it -p 6379:6379 1. Start redis (needed by the tests): `docker run --rm -p 6379:6379 redis:7-alpine`.
redis/redis-stack`. 2. In one terminal run the dialer: `redis_addr=localhost:6379 ip="0.0.0.0" transport=quic-v1 security=quic muxer=quic is_dialer="true" cargo run --bin ping`
2. In one terminal run the dialer: `redis_addr=localhost:6379 ip="0.0.0.0" 3. In another terminal, run the listener: `redis_addr=localhost:6379 ip="0.0.0.0" transport=quic-v1 security=quic muxer=quic is_dialer="false" cargo run --bin native_ping`
transport=quic-v1 security=quic muxer=quic is_dialer="true" cargo run --bin ping`
3. In another terminal, run the listener: `redis_addr=localhost:6379
ip="0.0.0.0" transport=quic-v1 security=quic muxer=quic is_dialer="false" cargo run --bin native_ping`
To test the interop with other versions do something similar, except replace one To test the interop with other versions do something similar, except replace one
of these nodes with the other version's interop test. of these nodes with the other version's interop test.
@ -29,6 +25,15 @@ Firefox is not yet supported as it doesn't support all required features yet
1. Build the wasm package: `wasm-pack build --target web` 1. Build the wasm package: `wasm-pack build --target web`
2. Run the dialer: `redis_addr=127.0.0.1:6379 ip=0.0.0.0 transport=webtransport is_dialer=true cargo run --bin wasm_ping` 2. Run the dialer: `redis_addr=127.0.0.1:6379 ip=0.0.0.0 transport=webtransport is_dialer=true cargo run --bin wasm_ping`
# Running this test with webrtc-direct
To run the webrtc-direct test, you'll need a `chromedriver` in your `$PATH` that is compatible with your Chrome browser version.
1. Start redis: `docker run --rm -p 6379:6379 redis:7-alpine`.
1. Build the wasm package: `wasm-pack build --target web`
1. Start the webrtc-direct listener: `RUST_LOG=debug,webrtc=off,webrtc_sctp=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer="false" cargo run --bin native_ping`
1. Run the webrtc-direct dialer: `RUST_LOG=debug,hyper=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer=true cargo run --bin wasm_ping`
# Running all interop tests locally with Compose # Running all interop tests locally with Compose
To run this test against all released libp2p versions you'll need to have the To run this test against all released libp2p versions you'll need to have the

View File

@ -1,7 +1,10 @@
{ {
"id": "chromium-rust-libp2p-head", "id": "chromium-rust-libp2p-head",
"containerImageID": "chromium-rust-libp2p-head", "containerImageID": "chromium-rust-libp2p-head",
"transports": [{ "name": "webtransport", "onlyDial": true }], "transports": [
{ "name": "webtransport", "onlyDial": true },
{ "name": "webrtc-direct", "onlyDial": true }
],
"secureChannels": [], "secureChannels": [],
"muxers": [] "muxers": []
} }

View File

@ -159,6 +159,7 @@ pub(crate) mod wasm {
use libp2p::identity::Keypair; use libp2p::identity::Keypair;
use libp2p::swarm::{NetworkBehaviour, SwarmBuilder}; use libp2p::swarm::{NetworkBehaviour, SwarmBuilder};
use libp2p::PeerId; use libp2p::PeerId;
use libp2p_webrtc_websys as webrtc;
use std::time::Duration; use std::time::Duration;
use crate::{BlpopRequest, Transport}; use crate::{BlpopRequest, Transport};
@ -181,16 +182,19 @@ pub(crate) mod wasm {
ip: &str, ip: &str,
transport: Transport, transport: Transport,
) -> Result<(BoxedTransport, String)> { ) -> Result<(BoxedTransport, String)> {
if let Transport::Webtransport = transport { match transport {
Ok(( Transport::Webtransport => Ok((
libp2p::webtransport_websys::Transport::new( libp2p::webtransport_websys::Transport::new(
libp2p::webtransport_websys::Config::new(&local_key), libp2p::webtransport_websys::Config::new(&local_key),
) )
.boxed(), .boxed(),
format!("/ip4/{ip}/udp/0/quic/webtransport"), format!("/ip4/{ip}/udp/0/quic/webtransport"),
)) )),
} else { Transport::WebRtcDirect => Ok((
bail!("Only webtransport supported with wasm") webrtc::Transport::new(webrtc::Config::new(&local_key)).boxed(),
format!("/ip4/{ip}/udp/0/webrtc-direct"),
)),
_ => bail!("Only webtransport and webrtc-direct are supported with wasm"),
} }
} }

View File

@ -1,3 +1,4 @@
#![allow(non_upper_case_globals)]
use std::process::Stdio; use std::process::Stdio;
use std::time::Duration; use std::time::Duration;
@ -103,7 +104,12 @@ async fn open_in_browser() -> Result<(Child, WebDriver)> {
// start a webdriver process // start a webdriver process
// currently only the chromedriver is supported as firefox doesn't // currently only the chromedriver is supported as firefox doesn't
// have support yet for the certhashes // have support yet for the certhashes
let mut chrome = tokio::process::Command::new("chromedriver") let chromedriver = if cfg!(windows) {
"chromedriver.cmd"
} else {
"chromedriver"
};
let mut chrome = tokio::process::Command::new(chromedriver)
.arg("--port=45782") .arg("--port=45782")
.stdout(Stdio::piped()) .stdout(Stdio::piped())
.spawn()?; .spawn()?;

View File

@ -3,8 +3,8 @@ use std::time::Duration;
use anyhow::{bail, Context, Result}; use anyhow::{bail, Context, Result};
use futures::{FutureExt, StreamExt}; use futures::{FutureExt, StreamExt};
use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmEvent}; use libp2p::swarm::SwarmEvent;
use libp2p::{identify, identity, ping, Multiaddr, PeerId}; use libp2p::{identify, identity, ping, swarm::NetworkBehaviour, Multiaddr, PeerId};
#[cfg(target_arch = "wasm32")] #[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*; use wasm_bindgen::prelude::*;
@ -33,8 +33,7 @@ pub async fn run_test(
let mut swarm = swarm_builder( let mut swarm = swarm_builder(
boxed_transport, boxed_transport,
Behaviour { Behaviour {
ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(10))),
keep_alive: keep_alive::Behaviour,
// Need to include identify until https://github.com/status-im/nim-libp2p/issues/924 is resolved. // Need to include identify until https://github.com/status-im/nim-libp2p/issues/924 is resolved.
identify: identify::Behaviour::new(identify::Config::new( identify: identify::Behaviour::new(identify::Config::new(
"/interop-tests".to_owned(), "/interop-tests".to_owned(),
@ -43,6 +42,7 @@ pub async fn run_test(
}, },
local_peer_id, local_peer_id,
) )
.idle_connection_timeout(Duration::from_secs(5))
.build(); .build();
log::info!("Running ping test: {}", swarm.local_peer_id()); log::info!("Running ping test: {}", swarm.local_peer_id());
@ -50,6 +50,7 @@ pub async fn run_test(
let mut maybe_id = None; let mut maybe_id = None;
// See https://github.com/libp2p/rust-libp2p/issues/4071. // See https://github.com/libp2p/rust-libp2p/issues/4071.
#[cfg(not(target_arch = "wasm32"))]
if transport == Transport::WebRtcDirect { if transport == Transport::WebRtcDirect {
maybe_id = Some(swarm.listen_on(local_addr.parse()?)?); maybe_id = Some(swarm.listen_on(local_addr.parse()?)?);
} }
@ -241,7 +242,6 @@ impl FromStr for SecProtocol {
#[derive(NetworkBehaviour)] #[derive(NetworkBehaviour)]
struct Behaviour { struct Behaviour {
ping: ping::Behaviour, ping: ping::Behaviour,
keep_alive: keep_alive::Behaviour,
identify: identify::Behaviour, identify: identify::Behaviour,
} }

View File

@ -1,4 +1,4 @@
## 0.52.3 - unreleased ## 0.52.3
- Add `libp2p-quic` stable release. - Add `libp2p-quic` stable release.
@ -10,10 +10,14 @@
- Add `json` feature which exposes `request_response::json`. - Add `json` feature which exposes `request_response::json`.
See [PR 4188]. See [PR 4188].
- Add support for UPnP via the IGD protocol.
See [PR 4156].
- Add `libp2p-memory-connection-limits` providing memory usage based connection limit configurations. - Add `libp2p-memory-connection-limits` providing memory usage based connection limit configurations.
See [PR 4281]. See [PR 4281].
[PR 4188]: https://github.com/libp2p/rust-libp2p/pull/4188 [PR 4188]: https://github.com/libp2p/rust-libp2p/pull/4188
[PR 4156]: https://github.com/libp2p/rust-libp2p/pull/4156
[PR 4217]: https://github.com/libp2p/rust-libp2p/pull/4217 [PR 4217]: https://github.com/libp2p/rust-libp2p/pull/4217
[PR 4281]: https://github.com/libp2p/rust-libp2p/pull/4281 [PR 4281]: https://github.com/libp2p/rust-libp2p/pull/4281

View File

@ -50,6 +50,7 @@ full = [
"websocket", "websocket",
"webtransport-websys", "webtransport-websys",
"yamux", "yamux",
"upnp"
] ]
async-std = ["libp2p-swarm/async-std", "libp2p-mdns?/async-io", "libp2p-tcp?/async-io", "libp2p-dns?/async-std", "libp2p-quic?/async-std"] async-std = ["libp2p-swarm/async-std", "libp2p-mdns?/async-io", "libp2p-tcp?/async-io", "libp2p-dns?/async-std", "libp2p-quic?/async-std"]
@ -82,7 +83,7 @@ secp256k1 = ["libp2p-identity/secp256k1"]
serde = ["libp2p-core/serde", "libp2p-kad?/serde", "libp2p-gossipsub?/serde"] serde = ["libp2p-core/serde", "libp2p-kad?/serde", "libp2p-gossipsub?/serde"]
tcp = ["dep:libp2p-tcp"] tcp = ["dep:libp2p-tcp"]
tls = ["dep:libp2p-tls"] tls = ["dep:libp2p-tls"]
tokio = ["libp2p-swarm/tokio", "libp2p-mdns?/tokio", "libp2p-tcp?/tokio", "libp2p-dns?/tokio", "libp2p-quic?/tokio"] tokio = ["libp2p-swarm/tokio", "libp2p-mdns?/tokio", "libp2p-tcp?/tokio", "libp2p-dns?/tokio", "libp2p-quic?/tokio", "libp2p-upnp?/tokio"]
uds = ["dep:libp2p-uds"] uds = ["dep:libp2p-uds"]
wasm-bindgen = ["futures-timer/wasm-bindgen", "instant/wasm-bindgen", "getrandom/js", "libp2p-swarm/wasm-bindgen", "libp2p-gossipsub?/wasm-bindgen"] wasm-bindgen = ["futures-timer/wasm-bindgen", "instant/wasm-bindgen", "getrandom/js", "libp2p-swarm/wasm-bindgen", "libp2p-gossipsub?/wasm-bindgen"]
wasm-ext = ["dep:libp2p-wasm-ext"] wasm-ext = ["dep:libp2p-wasm-ext"]
@ -90,6 +91,7 @@ wasm-ext-websocket = ["wasm-ext", "libp2p-wasm-ext?/websocket"]
websocket = ["dep:libp2p-websocket"] websocket = ["dep:libp2p-websocket"]
webtransport-websys = ["dep:libp2p-webtransport-websys"] webtransport-websys = ["dep:libp2p-webtransport-websys"]
yamux = ["dep:libp2p-yamux"] yamux = ["dep:libp2p-yamux"]
upnp = ["dep:libp2p-upnp"]
[dependencies] [dependencies]
bytes = "1" bytes = "1"
@ -133,6 +135,7 @@ libp2p-quic = { workspace = true, optional = true }
libp2p-tcp = { workspace = true, optional = true } libp2p-tcp = { workspace = true, optional = true }
libp2p-tls = { workspace = true, optional = true } libp2p-tls = { workspace = true, optional = true }
libp2p-uds = { workspace = true, optional = true } libp2p-uds = { workspace = true, optional = true }
libp2p-upnp = { workspace = true, optional = true }
libp2p-websocket = { workspace = true, optional = true } libp2p-websocket = { workspace = true, optional = true }
[dev-dependencies] [dev-dependencies]
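The new `upnp` feature above pulls in `libp2p-upnp` and wires its `tokio` support into the existing `tokio` feature. A minimal sketch of how an application might opt in; the `upnp::tokio::Behaviour` path and its `Default` impl are inferred from the `libp2p-upnp?/tokio` wiring and are assumptions, not confirmed by this diff:

```rust
// Hedged sketch: enabling UPnP port mapping alongside ping.
// Assumes `libp2p` is built with the `upnp`, `tokio`, `ping` and `macros` features
// and that `libp2p::upnp::tokio::Behaviour` implements `Default` (assumption).
use libp2p::{ping, swarm::NetworkBehaviour, upnp};

#[derive(NetworkBehaviour)]
struct Behaviour {
    // Asks an IGD-capable gateway to map our listen ports (assumed type path).
    upnp: upnp::tokio::Behaviour,
    ping: ping::Behaviour,
}

fn behaviour() -> Behaviour {
    Behaviour {
        upnp: upnp::tokio::Behaviour::default(),
        ping: ping::Behaviour::default(),
    }
}
```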

View File

@ -52,10 +52,15 @@ pub use libp2p_core as core;
#[cfg(feature = "dcutr")] #[cfg(feature = "dcutr")]
#[doc(inline)] #[doc(inline)]
pub use libp2p_dcutr as dcutr; pub use libp2p_dcutr as dcutr;
#[cfg(feature = "deflate")] #[cfg(feature = "deflate")]
#[cfg(not(target_arch = "wasm32"))] #[cfg(not(target_arch = "wasm32"))]
#[doc(inline)] #[deprecated(
pub use libp2p_deflate as deflate; note = "Will be removed in the next release, see https://github.com/libp2p/rust-libp2p/issues/4522 for details."
)]
pub mod deflate {
pub use libp2p_deflate::*;
}
#[cfg(feature = "dns")] #[cfg(feature = "dns")]
#[cfg_attr(docsrs, doc(cfg(feature = "dns")))] #[cfg_attr(docsrs, doc(cfg(feature = "dns")))]
#[cfg(not(target_arch = "wasm32"))] #[cfg(not(target_arch = "wasm32"))]
@ -127,6 +132,10 @@ pub use libp2p_tls as tls;
#[cfg(not(target_arch = "wasm32"))] #[cfg(not(target_arch = "wasm32"))]
#[doc(inline)] #[doc(inline)]
pub use libp2p_uds as uds; pub use libp2p_uds as uds;
#[cfg(feature = "upnp")]
#[cfg(not(target_arch = "wasm32"))]
#[doc(inline)]
pub use libp2p_upnp as upnp;
#[cfg(feature = "wasm-ext")] #[cfg(feature = "wasm-ext")]
#[doc(inline)] #[doc(inline)]
pub use libp2p_wasm_ext as wasm_ext; pub use libp2p_wasm_ext as wasm_ext;
@ -185,12 +194,12 @@ pub async fn development_transport(
keypair: identity::Keypair, keypair: identity::Keypair,
) -> std::io::Result<core::transport::Boxed<(PeerId, core::muxing::StreamMuxerBox)>> { ) -> std::io::Result<core::transport::Boxed<(PeerId, core::muxing::StreamMuxerBox)>> {
let transport = { let transport = {
let dns_tcp = dns::DnsConfig::system(tcp::async_io::Transport::new( let dns_tcp = dns::async_std::Transport::system(tcp::async_io::Transport::new(
tcp::Config::new().nodelay(true), tcp::Config::new().nodelay(true),
)) ))
.await?; .await?;
let ws_dns_tcp = websocket::WsConfig::new( let ws_dns_tcp = websocket::WsConfig::new(
dns::DnsConfig::system(tcp::async_io::Transport::new( dns::async_std::Transport::system(tcp::async_io::Transport::new(
tcp::Config::new().nodelay(true), tcp::Config::new().nodelay(true),
)) ))
.await?, .await?,
@ -230,10 +239,10 @@ pub fn tokio_development_transport(
keypair: identity::Keypair, keypair: identity::Keypair,
) -> std::io::Result<core::transport::Boxed<(PeerId, core::muxing::StreamMuxerBox)>> { ) -> std::io::Result<core::transport::Boxed<(PeerId, core::muxing::StreamMuxerBox)>> {
let transport = { let transport = {
let dns_tcp = dns::TokioDnsConfig::system(tcp::tokio::Transport::new( let dns_tcp = dns::tokio::Transport::system(tcp::tokio::Transport::new(
tcp::Config::new().nodelay(true), tcp::Config::new().nodelay(true),
))?; ))?;
let ws_dns_tcp = websocket::WsConfig::new(dns::TokioDnsConfig::system( let ws_dns_tcp = websocket::WsConfig::new(dns::tokio::Transport::system(
tcp::tokio::Transport::new(tcp::Config::new().nodelay(true)), tcp::tokio::Transport::new(tcp::Config::new().nodelay(true)),
)?); )?);
dns_tcp.or_transport(ws_dns_tcp) dns_tcp.or_transport(ws_dns_tcp)

View File

@ -54,16 +54,16 @@
//! //!
//! ``` bash //! ``` bash
//! ## Inside the rust-libp2p repository. //! ## Inside the rust-libp2p repository.
//! cargo build --example relay_v2 -p libp2p-relay //! cargo build --bin relay-server-example
//! ``` //! ```
//! //!
//! You can find the binary at `target/debug/examples/relay_v2`. In case you built it locally, copy //! You can find the binary at `target/debug/relay-server-example`. In case you built it locally, copy
//! it to your server. //! it to your server.
//! //!
//! On your server, start the relay server binary: //! On your server, start the relay server binary:
//! //!
//! ``` bash //! ``` bash
//! ./relay_v2 --port 4001 --secret-key-seed 0 //! ./relay-server-example --port 4001 --secret-key-seed 0
//! ``` //! ```
//! //!
//! Now let's make sure that the server is public, in other words let's make sure one can reach it //! Now let's make sure that the server is public, in other words let's make sure one can reach it
@ -122,16 +122,16 @@
//! //!
//! ``` bash //! ``` bash
//! ## Inside the rust-libp2p repository. //! ## Inside the rust-libp2p repository.
//! cargo build --example client -p libp2p-dcutr //! cargo build --bin dcutr-example
//! ``` //! ```
//! //!
//! You can find the binary at `target/debug/examples/client`. In case you built it locally, copy //! You can find the binary at `target/debug/dcutr-example`. In case you built it locally, copy
//! it to your listening client machine. //! it to your listening client machine.
//! //!
//! On the listening client machine: //! On the listening client machine:
//! //!
//! ``` bash //! ``` bash
//! RUST_LOG=info ./client --secret-key-seed 1 --mode listen --relay-address /ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN //! RUST_LOG=info ./dcutr-example --secret-key-seed 1 --mode listen --relay-address /ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN
//! //!
//! [2022-05-11T10:38:52Z INFO client] Local peer id: PeerId("XXX") //! [2022-05-11T10:38:52Z INFO client] Local peer id: PeerId("XXX")
//! [2022-05-11T10:38:52Z INFO client] Listening on "/ip4/127.0.0.1/tcp/44703" //! [2022-05-11T10:38:52Z INFO client] Listening on "/ip4/127.0.0.1/tcp/44703"
@ -153,7 +153,7 @@
//! ## Connecting to the listening client from the dialing client //! ## Connecting to the listening client from the dialing client
//! //!
//! ``` bash //! ``` bash
//! RUST_LOG=info ./client --secret-key-seed 2 --mode dial --relay-address /ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN --remote-peer-id 12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X //! RUST_LOG=info ./dcutr-example --secret-key-seed 2 --mode dial --relay-address /ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN --remote-peer-id 12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X
//! ``` //! ```
//! //!
//! You should see the following logs appear: //! You should see the following logs appear:

View File

@ -57,6 +57,7 @@
//! [dependencies] //! [dependencies]
//! libp2p = { version = "0.50", features = ["tcp", "dns", "async-std", "noise", "yamux", "websocket", "ping", "macros"] } //! libp2p = { version = "0.50", features = ["tcp", "dns", "async-std", "noise", "yamux", "websocket", "ping", "macros"] }
//! futures = "0.3.21" //! futures = "0.3.21"
//! env_logger = "0.10.0"
//! async-std = { version = "1.12.0", features = ["attributes"] } //! async-std = { version = "1.12.0", features = ["attributes"] }
//! ``` //! ```
//! //!
@ -142,7 +143,7 @@
//! With the above in mind, let's extend our example, creating a [`ping::Behaviour`](crate::ping::Behaviour) at the end: //! With the above in mind, let's extend our example, creating a [`ping::Behaviour`](crate::ping::Behaviour) at the end:
//! //!
//! ```rust //! ```rust
//! use libp2p::swarm::{keep_alive, NetworkBehaviour}; //! use libp2p::swarm::NetworkBehaviour;
//! use libp2p::{identity, ping, PeerId}; //! use libp2p::{identity, ping, PeerId};
//! use std::error::Error; //! use std::error::Error;
//! //!
@ -154,20 +155,10 @@
//! //!
//! let transport = libp2p::development_transport(local_key).await?; //! let transport = libp2p::development_transport(local_key).await?;
//! //!
//! let behaviour = Behaviour::default(); //! let behaviour = ping::Behaviour::default();
//! //!
//! Ok(()) //! Ok(())
//! } //! }
//!
//! /// Our network behaviour.
//! ///
//! /// For illustrative purposes, this includes the [`KeepAlive`](behaviour::KeepAlive) behaviour so a continuous sequence of
//! /// pings can be observed.
//! #[derive(NetworkBehaviour, Default)]
//! struct Behaviour {
//! keep_alive: keep_alive::Behaviour,
//! ping: ping::Behaviour,
//! }
//! ``` //! ```
//! //!
//! ## Swarm //! ## Swarm
@ -177,36 +168,65 @@
//! carried out by a [`Swarm`]. Put simply, a [`Swarm`] drives both a //! carried out by a [`Swarm`]. Put simply, a [`Swarm`] drives both a
//! [`Transport`] and a [`NetworkBehaviour`] forward, passing commands from the //! [`Transport`] and a [`NetworkBehaviour`] forward, passing commands from the
//! [`NetworkBehaviour`] to the [`Transport`] as well as events from the //! [`NetworkBehaviour`] to the [`Transport`] as well as events from the
//! [`Transport`] to the [`NetworkBehaviour`]. //! [`Transport`] to the [`NetworkBehaviour`]. As you can see, after [`Swarm`] initialization, we
//! removed the print of the local [`PeerId`](crate::PeerId) because every time a [`Swarm`] is
//! created, it prints the local [`PeerId`](crate::PeerId) in the logs at the INFO level. In order
//! to continue to see the local [`PeerId`](crate::PeerId), you must initialize the logger
//! (in our example, `env_logger` is used).
//! //!
//! ```rust //! ```rust
//! use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmBuilder}; //! use libp2p::swarm::{NetworkBehaviour, SwarmBuilder};
//! use libp2p::{identity, ping, PeerId}; //! use libp2p::{identity, ping, PeerId};
//! use std::error::Error; //! use std::error::Error;
//! //!
//! #[async_std::main] //! #[async_std::main]
//! async fn main() -> Result<(), Box<dyn Error>> { //! async fn main() -> Result<(), Box<dyn Error>> {
//! env_logger::init();
//! let local_key = identity::Keypair::generate_ed25519(); //! let local_key = identity::Keypair::generate_ed25519();
//! let local_peer_id = PeerId::from(local_key.public()); //! let local_peer_id = PeerId::from(local_key.public());
//! println!("Local peer id: {local_peer_id:?}");
//! //!
//! let transport = libp2p::development_transport(local_key).await?; //! let transport = libp2p::development_transport(local_key).await?;
//! //!
//! let behaviour = Behaviour::default(); //! let behaviour = ping::Behaviour::default();
//! //!
//! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); //! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build();
//! //!
//! Ok(()) //! Ok(())
//! } //! }
//! ```
//! //!
//! /// Our network behaviour. //! ## Idle connection timeout
//! /// //!
//! /// For illustrative purposes, this includes the [`KeepAlive`](behaviour:: //! Now, for this example in particular, we need to set the idle connection timeout.
//! /// KeepAlive) behaviour so a continuous sequence of pings can be observed. //! Otherwise, the connection will be closed immediately.
//! #[derive(NetworkBehaviour, Default)] //!
//! struct Behaviour { //! Whether you need to set this in your application too depends on your use case.
//! keep_alive: keep_alive::Behaviour, //! Typically, connections are kept alive if they are "in use" by a certain protocol.
//! ping: ping::Behaviour, //! The ping protocol, however, is only an "auxiliary" kind of protocol.
//! Thus, without any other behaviour in place, we would not be able to observe the pings.
//!
//! ```rust
//! use libp2p::swarm::{NetworkBehaviour, SwarmBuilder};
//! use libp2p::{identity, ping, PeerId};
//! use std::error::Error;
//! use std::time::Duration;
//!
//! #[async_std::main]
//! async fn main() -> Result<(), Box<dyn Error>> {
//! use std::time::Duration;
//! let local_key = identity::Keypair::generate_ed25519();
//! let local_peer_id = PeerId::from(local_key.public());
//! println!("Local peer id: {local_peer_id:?}");
//!
//! let transport = libp2p::development_transport(local_key).await?;
//!
//! let behaviour = ping::Behaviour::default();
//!
//! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id)
//! .idle_connection_timeout(Duration::from_secs(30)) // Allows us to observe pings for 30 seconds.
//! .build();
//!
//! Ok(())
//! } //! }
//! ``` //! ```
//! //!
@ -237,21 +257,24 @@
//! remote peer. //! remote peer.
//! //!
//! ```rust //! ```rust
//! use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmBuilder}; //! use libp2p::swarm::{NetworkBehaviour, SwarmBuilder};
//! use libp2p::{identity, ping, Multiaddr, PeerId}; //! use libp2p::{identity, ping, Multiaddr, PeerId};
//! use std::error::Error; //! use std::error::Error;
//! use std::time::Duration;
//! //!
//! #[async_std::main] //! #[async_std::main]
//! async fn main() -> Result<(), Box<dyn Error>> { //! async fn main() -> Result<(), Box<dyn Error>> {
//! env_logger::init();
//! let local_key = identity::Keypair::generate_ed25519(); //! let local_key = identity::Keypair::generate_ed25519();
//! let local_peer_id = PeerId::from(local_key.public()); //! let local_peer_id = PeerId::from(local_key.public());
//! println!("Local peer id: {local_peer_id:?}");
//! //!
//! let transport = libp2p::development_transport(local_key).await?; //! let transport = libp2p::development_transport(local_key).await?;
//! //!
//! let behaviour = Behaviour::default(); //! let behaviour = ping::Behaviour::default();
//! //!
//! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); //! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id)
//! .idle_connection_timeout(Duration::from_secs(30)) // Allows us to observe pings for 30 seconds.
//! .build();
//! //!
//! // Tell the swarm to listen on all interfaces and a random, OS-assigned //! // Tell the swarm to listen on all interfaces and a random, OS-assigned
//! // port. //! // port.
@ -267,16 +290,6 @@
//! //!
//! Ok(()) //! Ok(())
//! } //! }
//!
//! /// Our network behaviour.
//! ///
//! /// For illustrative purposes, this includes the [`KeepAlive`](behaviour::KeepAlive) behaviour so a continuous sequence of
//! /// pings can be observed.
//! #[derive(NetworkBehaviour, Default)]
//! struct Behaviour {
//! keep_alive: keep_alive::Behaviour,
//! ping: ping::Behaviour,
//! }
//! ``` //! ```
//! //!
//! ## Continuously polling the Swarm //! ## Continuously polling the Swarm
@ -287,21 +300,24 @@
//! //!
//! ```no_run //! ```no_run
//! use futures::prelude::*; //! use futures::prelude::*;
//! use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmEvent, SwarmBuilder}; //! use libp2p::swarm::{NetworkBehaviour, SwarmEvent, SwarmBuilder};
//! use libp2p::{identity, ping, Multiaddr, PeerId}; //! use libp2p::{identity, ping, Multiaddr, PeerId};
//! use std::error::Error; //! use std::error::Error;
//! use std::time::Duration;
//! //!
//! #[async_std::main] //! #[async_std::main]
//! async fn main() -> Result<(), Box<dyn Error>> { //! async fn main() -> Result<(), Box<dyn Error>> {
//! env_logger::init();
//! let local_key = identity::Keypair::generate_ed25519(); //! let local_key = identity::Keypair::generate_ed25519();
//! let local_peer_id = PeerId::from(local_key.public()); //! let local_peer_id = PeerId::from(local_key.public());
//! println!("Local peer id: {local_peer_id:?}");
//! //!
//! let transport = libp2p::development_transport(local_key).await?; //! let transport = libp2p::development_transport(local_key).await?;
//! //!
//! let behaviour = Behaviour::default(); //! let behaviour = ping::Behaviour::default();
//! //!
//! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); //! let mut swarm = SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id)
//! .idle_connection_timeout(Duration::from_secs(30)) // Allows us to observe pings for 30 seconds.
//! .build();
//! //!
//! // Tell the swarm to listen on all interfaces and a random, OS-assigned //! // Tell the swarm to listen on all interfaces and a random, OS-assigned
//! // port. //! // port.
@ -323,16 +339,6 @@
//! } //! }
//! } //! }
//! } //! }
//!
//! /// Our network behaviour.
//! ///
//! /// For illustrative purposes, this includes the [`KeepAlive`](behaviour::KeepAlive) behaviour so a continuous sequence of
//! /// pings can be observed.
//! #[derive(NetworkBehaviour, Default)]
//! struct Behaviour {
//! keep_alive: keep_alive::Behaviour,
//! ping: ping::Behaviour,
//! }
//! ``` //! ```
//! //!
//! ## Running two nodes //! ## Running two nodes
@ -349,9 +355,8 @@
//! cargo run --example ping //! cargo run --example ping
//! ``` //! ```
//! //!
//! It will print the PeerId and the new listening addresses, e.g. //! It will print the new listening addresses, e.g.
//! ```sh //! ```sh
//! Local peer id: PeerId("12D3KooWT1As4mwh3KYBnNTw9bSrRbYQGJTm9SSte82JSumqgCQG")
//! Listening on "/ip4/127.0.0.1/tcp/24915" //! Listening on "/ip4/127.0.0.1/tcp/24915"
//! Listening on "/ip4/192.168.178.25/tcp/24915" //! Listening on "/ip4/192.168.178.25/tcp/24915"
//! Listening on "/ip4/172.17.0.1/tcp/24915" //! Listening on "/ip4/172.17.0.1/tcp/24915"

View File

@ -283,14 +283,11 @@ mod tests {
#[async_std::test] #[async_std::test]
async fn cannot_dial_blocked_peer() { async fn cannot_dial_blocked_peer() {
let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::new()); let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::default());
let mut listener = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::new()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::default());
listener.listen().await; listener.listen().await;
dialer dialer.behaviour_mut().block_peer(*listener.local_peer_id());
.behaviour_mut()
.list
.block_peer(*listener.local_peer_id());
let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else { let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else {
panic!("unexpected dial error") panic!("unexpected dial error")
@ -300,17 +297,13 @@ mod tests {
#[async_std::test] #[async_std::test]
async fn can_dial_unblocked_peer() { async fn can_dial_unblocked_peer() {
let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::new()); let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::default());
let mut listener = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::new()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::default());
listener.listen().await; listener.listen().await;
dialer.behaviour_mut().block_peer(*listener.local_peer_id());
dialer dialer
.behaviour_mut() .behaviour_mut()
.list
.block_peer(*listener.local_peer_id());
dialer
.behaviour_mut()
.list
.unblock_peer(*listener.local_peer_id()); .unblock_peer(*listener.local_peer_id());
dial(&mut dialer, &listener).unwrap(); dial(&mut dialer, &listener).unwrap();
@ -318,14 +311,11 @@ mod tests {
#[async_std::test] #[async_std::test]
async fn blocked_peer_cannot_dial_us() { async fn blocked_peer_cannot_dial_us() {
let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::new()); let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::default());
let mut listener = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::new()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::default());
listener.listen().await; listener.listen().await;
listener listener.behaviour_mut().block_peer(*dialer.local_peer_id());
.behaviour_mut()
.list
.block_peer(*dialer.local_peer_id());
dial(&mut dialer, &listener).unwrap(); dial(&mut dialer, &listener).unwrap();
async_std::task::spawn(dialer.loop_on_next()); async_std::task::spawn(dialer.loop_on_next());
@ -343,15 +333,12 @@ mod tests {
#[async_std::test] #[async_std::test]
async fn connections_get_closed_upon_blocked() { async fn connections_get_closed_upon_blocked() {
let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::new()); let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::default());
let mut listener = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::new()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::<BlockedPeers>::default());
listener.listen().await; listener.listen().await;
dialer.connect(&mut listener).await; dialer.connect(&mut listener).await;
dialer dialer.behaviour_mut().block_peer(*listener.local_peer_id());
.behaviour_mut()
.list
.block_peer(*listener.local_peer_id());
let ( let (
[SwarmEvent::ConnectionClosed { [SwarmEvent::ConnectionClosed {
@ -372,8 +359,8 @@ mod tests {
#[async_std::test] #[async_std::test]
async fn cannot_dial_peer_unless_allowed() { async fn cannot_dial_peer_unless_allowed() {
let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::new()); let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::default());
let mut listener = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::new()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::default());
listener.listen().await; listener.listen().await;
let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else { let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else {
@ -381,26 +368,19 @@ mod tests {
}; };
assert!(cause.downcast::<NotAllowed>().is_ok()); assert!(cause.downcast::<NotAllowed>().is_ok());
dialer dialer.behaviour_mut().allow_peer(*listener.local_peer_id());
.behaviour_mut()
.list
.allow_peer(*listener.local_peer_id());
assert!(dial(&mut dialer, &listener).is_ok()); assert!(dial(&mut dialer, &listener).is_ok());
} }
#[async_std::test] #[async_std::test]
async fn cannot_dial_disallowed_peer() { async fn cannot_dial_disallowed_peer() {
let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::new()); let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::default());
let mut listener = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::new()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::default());
listener.listen().await; listener.listen().await;
dialer.behaviour_mut().allow_peer(*listener.local_peer_id());
dialer dialer
.behaviour_mut() .behaviour_mut()
.list
.allow_peer(*listener.local_peer_id());
dialer
.behaviour_mut()
.list
.disallow_peer(*listener.local_peer_id()); .disallow_peer(*listener.local_peer_id());
let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else { let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else {
@ -411,8 +391,8 @@ mod tests {
#[async_std::test] #[async_std::test]
async fn not_allowed_peer_cannot_dial_us() { async fn not_allowed_peer_cannot_dial_us() {
let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::new()); let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::default());
let mut listener = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::new()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::default());
listener.listen().await; listener.listen().await;
dialer dialer
@ -448,23 +428,16 @@ mod tests {
#[async_std::test] #[async_std::test]
async fn connections_get_closed_upon_disallow() { async fn connections_get_closed_upon_disallow() {
let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::new()); let mut dialer = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::default());
let mut listener = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::new()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::<AllowedPeers>::default());
listener.listen().await; listener.listen().await;
dialer dialer.behaviour_mut().allow_peer(*listener.local_peer_id());
.behaviour_mut() listener.behaviour_mut().allow_peer(*dialer.local_peer_id());
.list
.allow_peer(*listener.local_peer_id());
listener
.behaviour_mut()
.list
.allow_peer(*dialer.local_peer_id());
dialer.connect(&mut listener).await; dialer.connect(&mut listener).await;
dialer dialer
.behaviour_mut() .behaviour_mut()
.list
.disallow_peer(*listener.local_peer_id()); .disallow_peer(*listener.local_peer_id());
let ( let (
[SwarmEvent::ConnectionClosed { [SwarmEvent::ConnectionClosed {
@ -496,27 +469,4 @@ mod tests {
.build(), .build(),
) )
} }
#[derive(libp2p_swarm_derive::NetworkBehaviour)]
#[behaviour(prelude = "libp2p_swarm::derive_prelude")]
struct Behaviour<S> {
list: super::Behaviour<S>,
keep_alive: libp2p_swarm::keep_alive::Behaviour,
}
impl<S> Behaviour<S>
where
S: Default,
{
fn new() -> Self {
Self {
list: super::Behaviour {
waker: None,
close_connections: VecDeque::new(),
state: S::default(),
},
keep_alive: libp2p_swarm::keep_alive::Behaviour,
}
}
}
} }
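The tests above use the behaviour directly now that the keep-alive wrapper is gone: construct it via `Default` and call `block_peer` on it. A rough sketch of the same pattern outside the test module, assuming `Behaviour` and `BlockedPeers` are the crate's public exports and `PeerId` comes from `libp2p-identity`:

```rust
// Hedged sketch: blocking a peer with the standalone behaviour, as the tests above do.
use libp2p_allow_block_list::{Behaviour, BlockedPeers};
use libp2p_identity::PeerId;

fn make() -> Behaviour<BlockedPeers> {
    Behaviour::default()
}

fn block(behaviour: &mut Behaviour<BlockedPeers>, bad_peer: PeerId) {
    // Existing connections to `bad_peer` are closed and future dials are denied,
    // per the `connections_get_closed_upon_blocked` test above.
    behaviour.block_peer(bad_peer);
}
```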

View File

@ -529,7 +529,6 @@ mod tests {
#[behaviour(prelude = "libp2p_swarm::derive_prelude")] #[behaviour(prelude = "libp2p_swarm::derive_prelude")]
struct Behaviour { struct Behaviour {
limits: super::Behaviour, limits: super::Behaviour,
keep_alive: libp2p_swarm::keep_alive::Behaviour,
connection_denier: Toggle<ConnectionDenier>, connection_denier: Toggle<ConnectionDenier>,
} }
@ -537,14 +536,12 @@ mod tests {
fn new(limits: ConnectionLimits) -> Self { fn new(limits: ConnectionLimits) -> Self {
Self { Self {
limits: super::Behaviour::new(limits), limits: super::Behaviour::new(limits),
keep_alive: libp2p_swarm::keep_alive::Behaviour,
connection_denier: None.into(), connection_denier: None.into(),
} }
} }
fn new_with_connection_denier(limits: ConnectionLimits) -> Self { fn new_with_connection_denier(limits: ConnectionLimits) -> Self {
Self { Self {
limits: super::Behaviour::new(limits), limits: super::Behaviour::new(limits),
keep_alive: libp2p_swarm::keep_alive::Behaviour,
connection_denier: Some(ConnectionDenier {}).into(), connection_denier: Some(ConnectionDenier {}).into(),
} }
} }

View File

@ -0,0 +1,3 @@
## 0.1.0
Initial release.

View File

@ -0,0 +1,20 @@
[package]
name = "futures-bounded"
version = "0.1.0"
edition = "2021"
rust-version.workspace = true
license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p"
keywords = ["futures", "async", "backpressure"]
categories = ["data-structures", "asynchronous"]
description = "Utilities for bounding futures in size and time."
publish = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
futures-util = { version = "0.3.28" }
futures-timer = "3.0.2"
[dev-dependencies]
tokio = { version = "1.29.1", features = ["macros", "rt"] }

View File

@ -0,0 +1,28 @@
mod map;
mod set;
pub use map::{FuturesMap, PushError};
pub use set::FuturesSet;
use std::fmt;
use std::fmt::Formatter;
use std::time::Duration;
/// A future failed to complete within the given timeout.
#[derive(Debug)]
pub struct Timeout {
limit: Duration,
}
impl Timeout {
fn new(duration: Duration) -> Self {
Self { limit: duration }
}
}
impl fmt::Display for Timeout {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "future failed to complete within {:?}", self.limit)
}
}
impl std::error::Error for Timeout {}

View File

@ -0,0 +1,268 @@
use std::future::Future;
use std::hash::Hash;
use std::mem;
use std::pin::Pin;
use std::task::{Context, Poll, Waker};
use std::time::Duration;
use futures_timer::Delay;
use futures_util::future::BoxFuture;
use futures_util::stream::FuturesUnordered;
use futures_util::{FutureExt, StreamExt};
use crate::Timeout;
/// Represents a map of [`Future`]s.
///
/// Each future must finish within the specified time and the map never outgrows its capacity.
pub struct FuturesMap<ID, O> {
timeout: Duration,
capacity: usize,
inner: FuturesUnordered<TaggedFuture<ID, TimeoutFuture<BoxFuture<'static, O>>>>,
empty_waker: Option<Waker>,
full_waker: Option<Waker>,
}
/// Error returned when pushing a future into the collection.
#[derive(PartialEq, Debug)]
pub enum PushError<F> {
/// The collection is already at capacity; the future was not inserted.
BeyondCapacity(F),
/// A future with the given ID was already present; it has been replaced by the new one.
ReplacedFuture(F),
}
impl<ID, O> FuturesMap<ID, O> {
pub fn new(timeout: Duration, capacity: usize) -> Self {
Self {
timeout,
capacity,
inner: Default::default(),
empty_waker: None,
full_waker: None,
}
}
}
impl<ID, O> FuturesMap<ID, O>
where
ID: Clone + Hash + Eq + Send + Unpin + 'static,
{
/// Push a future into the map.
///
/// This method inserts the given future under the given `future_id` into the map.
/// If the map is already at capacity, this method returns [PushError::BeyondCapacity],
/// which contains the passed future. In that case, the future is not inserted into the map.
/// If a future with the given `future_id` already exists, the old future is replaced by the new one.
/// In that case, the returned error [PushError::ReplacedFuture] contains the old future.
pub fn try_push<F>(&mut self, future_id: ID, future: F) -> Result<(), PushError<BoxFuture<O>>>
where
F: Future<Output = O> + Send + 'static,
{
if self.inner.len() >= self.capacity {
return Err(PushError::BeyondCapacity(future.boxed()));
}
if let Some(waker) = self.empty_waker.take() {
waker.wake();
}
match self.inner.iter_mut().find(|tagged| tagged.tag == future_id) {
None => {
self.inner.push(TaggedFuture {
tag: future_id,
inner: TimeoutFuture {
inner: future.boxed(),
timeout: Delay::new(self.timeout),
},
});
Ok(())
}
Some(existing) => {
let old_future = mem::replace(
&mut existing.inner,
TimeoutFuture {
inner: future.boxed(),
timeout: Delay::new(self.timeout),
},
);
Err(PushError::ReplacedFuture(old_future.inner))
}
}
}
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
#[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] // &mut Context is idiomatic.
pub fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<()> {
if self.inner.len() < self.capacity {
return Poll::Ready(());
}
self.full_waker = Some(cx.waker().clone());
Poll::Pending
}
pub fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll<(ID, Result<O, Timeout>)> {
let maybe_result = futures_util::ready!(self.inner.poll_next_unpin(cx));
match maybe_result {
None => {
self.empty_waker = Some(cx.waker().clone());
Poll::Pending
}
Some((id, Ok(output))) => Poll::Ready((id, Ok(output))),
Some((id, Err(_timeout))) => Poll::Ready((id, Err(Timeout::new(self.timeout)))),
}
}
}
struct TimeoutFuture<F> {
inner: F,
timeout: Delay,
}
impl<F> Future for TimeoutFuture<F>
where
F: Future + Unpin,
{
type Output = Result<F::Output, ()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.timeout.poll_unpin(cx).is_ready() {
return Poll::Ready(Err(()));
}
self.inner.poll_unpin(cx).map(Ok)
}
}
struct TaggedFuture<T, F> {
tag: T,
inner: F,
}
impl<T, F> Future for TaggedFuture<T, F>
where
T: Clone + Unpin,
F: Future + Unpin,
{
type Output = (T, F::Output);
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let output = futures_util::ready!(self.inner.poll_unpin(cx));
Poll::Ready((self.tag.clone(), output))
}
}
#[cfg(test)]
mod tests {
use std::future::{pending, poll_fn, ready};
use std::pin::Pin;
use std::time::Instant;
use super::*;
#[test]
fn cannot_push_more_than_capacity_tasks() {
let mut futures = FuturesMap::new(Duration::from_secs(10), 1);
assert!(futures.try_push("ID_1", ready(())).is_ok());
assert!(matches!(
futures.try_push("ID_2", ready(())),
Err(PushError::BeyondCapacity(_))
));
}
#[test]
fn cannot_push_the_same_id_few_times() {
let mut futures = FuturesMap::new(Duration::from_secs(10), 5);
assert!(futures.try_push("ID", ready(())).is_ok());
assert!(matches!(
futures.try_push("ID", ready(())),
Err(PushError::ReplacedFuture(_))
));
}
#[tokio::test]
async fn futures_timeout() {
let mut futures = FuturesMap::new(Duration::from_millis(100), 1);
let _ = futures.try_push("ID", pending::<()>());
Delay::new(Duration::from_millis(150)).await;
let (_, result) = poll_fn(|cx| futures.poll_unpin(cx)).await;
assert!(result.is_err())
}
// Each future causes a delay and `Task` only has a capacity of 1, so the futures must be processed in sequence.
// We stop after NUM_FUTURES futures, so the overall execution must take at least DELAY * NUM_FUTURES.
#[tokio::test]
async fn backpressure() {
const DELAY: Duration = Duration::from_millis(100);
const NUM_FUTURES: u32 = 10;
let start = Instant::now();
Task::new(DELAY, NUM_FUTURES, 1).await;
let duration = start.elapsed();
assert!(duration >= DELAY * NUM_FUTURES);
}
struct Task {
future: Duration,
num_futures: usize,
num_processed: usize,
inner: FuturesMap<u8, ()>,
}
impl Task {
fn new(future: Duration, num_futures: u32, capacity: usize) -> Self {
Self {
future,
num_futures: num_futures as usize,
num_processed: 0,
inner: FuturesMap::new(Duration::from_secs(60), capacity),
}
}
}
impl Future for Task {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
while this.num_processed < this.num_futures {
if let Poll::Ready((_, result)) = this.inner.poll_unpin(cx) {
if result.is_err() {
panic!("Timeout is great than future delay")
}
this.num_processed += 1;
continue;
}
if let Poll::Ready(()) = this.inner.poll_ready_unpin(cx) {
// We push a future with the same constant ID to prove that an ID can be reused
// once its previous future has finished.
let maybe_future = this.inner.try_push(1u8, Delay::new(this.future));
assert!(maybe_future.is_ok(), "we polled for readiness");
continue;
}
return Poll::Pending;
}
Poll::Ready(())
}
}
}
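The new `FuturesMap` bounds both the number of in-flight futures and the time each may take. A small usage sketch based only on the API shown above (`new`, `try_push`, `poll_unpin`); the tokio attribute assumes the same `macros`/`rt` features as the dev-dependency:

```rust
// Hedged sketch: pushing keyed futures and awaiting their (possibly timed-out) results.
use std::future::{pending, poll_fn, ready};
use std::time::Duration;

use futures_bounded::FuturesMap;

#[tokio::main(flavor = "current_thread")]
async fn main() {
    // At most 2 futures in flight, each limited to 1 second.
    let mut futures = FuturesMap::new(Duration::from_secs(1), 2);

    assert!(futures.try_push("quick", ready(42)).is_ok());
    assert!(futures.try_push("slow", pending::<i32>()).is_ok());

    // `poll_unpin` yields `(id, Ok(output))` for finished futures
    // and `(id, Err(Timeout))` for futures that ran out of time.
    let (id, first) = poll_fn(|cx| futures.poll_unpin(cx)).await;
    assert_eq!(id, "quick");
    assert_eq!(first.unwrap(), 42);

    let (id, second) = poll_fn(|cx| futures.poll_unpin(cx)).await;
    assert_eq!(id, "slow");
    assert!(second.is_err()); // hit the 1-second timeout
}
```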

View File

@ -0,0 +1,58 @@
use std::future::Future;
use std::task::{ready, Context, Poll};
use std::time::Duration;
use futures_util::future::BoxFuture;
use crate::{FuturesMap, PushError, Timeout};
/// Represents a list of [Future]s.
///
/// Each future must finish within the specified time and the list never outgrows its capacity.
pub struct FuturesSet<O> {
id: u32,
inner: FuturesMap<u32, O>,
}
impl<O> FuturesSet<O> {
pub fn new(timeout: Duration, capacity: usize) -> Self {
Self {
id: 0,
inner: FuturesMap::new(timeout, capacity),
}
}
}
impl<O> FuturesSet<O> {
/// Push a future into the list.
///
/// This method adds the given future to the list.
/// If the list is already at capacity, this method returns an error that contains the passed future.
/// In that case, the future is not added to the list.
pub fn try_push<F>(&mut self, future: F) -> Result<(), BoxFuture<O>>
where
F: Future<Output = O> + Send + 'static,
{
self.id = self.id.wrapping_add(1);
match self.inner.try_push(self.id, future) {
Ok(()) => Ok(()),
Err(PushError::BeyondCapacity(w)) => Err(w),
Err(PushError::ReplacedFuture(_)) => unreachable!("we never reuse IDs"),
}
}
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
pub fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<()> {
self.inner.poll_ready_unpin(cx)
}
pub fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll<Result<O, Timeout>> {
let (_, res) = ready!(self.inner.poll_unpin(cx));
Poll::Ready(res)
}
}
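`FuturesSet` layers internally generated IDs on top of `FuturesMap`, so callers only deal with capacity and timeouts. A hedged sketch using the `poll_ready_unpin`/`try_push`/`poll_unpin` calls shown above, with the same tokio assumptions as before:

```rust
// Hedged sketch: a bounded set of unnamed futures with backpressure via `poll_ready_unpin`.
use std::future::{poll_fn, ready};
use std::time::Duration;

use futures_bounded::FuturesSet;

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let mut set = FuturesSet::new(Duration::from_secs(5), 10);

    // Wait until there is room, then push; `try_push` only errors when at capacity.
    poll_fn(|cx| set.poll_ready_unpin(cx)).await;
    assert!(set.try_push(ready("done")).is_ok());

    let result = poll_fn(|cx| set.poll_unpin(cx)).await;
    assert_eq!(result.unwrap(), "done");
}
```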

View File

@ -10,10 +10,10 @@ categories = ["network-programming", "asynchronous"]
publish = false publish = false
[dependencies] [dependencies]
clap = { version = "4.3.21", features = ["derive"] } clap = { version = "4.3.23", features = ["derive"] }
zeroize = "1" zeroize = "1"
serde = { version = "1.0.183", features = ["derive"] } serde = { version = "1.0.188", features = ["derive"] }
serde_json = "1.0.100" serde_json = "1.0.107"
libp2p-core = { workspace = true } libp2p-core = { workspace = true }
base64 = "0.21.2" base64 = "0.21.4"
libp2p-identity = { workspace = true } libp2p-identity = { workspace = true }

View File

@ -159,10 +159,10 @@ impl Metrics {
} }
} }
impl super::Recorder<libp2p_kad::KademliaEvent> for Metrics { impl super::Recorder<libp2p_kad::Event> for Metrics {
fn record(&self, event: &libp2p_kad::KademliaEvent) { fn record(&self, event: &libp2p_kad::Event) {
match event { match event {
libp2p_kad::KademliaEvent::OutboundQueryProgressed { result, stats, .. } => { libp2p_kad::Event::OutboundQueryProgressed { result, stats, .. } => {
self.query_result_num_requests self.query_result_num_requests
.get_or_create(&result.into()) .get_or_create(&result.into())
.observe(stats.num_requests().into()); .observe(stats.num_requests().into());
@ -217,7 +217,7 @@ impl super::Recorder<libp2p_kad::KademliaEvent> for Metrics {
_ => {} _ => {}
} }
} }
libp2p_kad::KademliaEvent::RoutingUpdated { libp2p_kad::Event::RoutingUpdated {
is_new_peer, is_new_peer,
old_peer, old_peer,
bucket_range: (low, _high), bucket_range: (low, _high),
@ -250,7 +250,7 @@ impl super::Recorder<libp2p_kad::KademliaEvent> for Metrics {
} }
} }
libp2p_kad::KademliaEvent::InboundRequest { request } => { libp2p_kad::Event::InboundRequest { request } => {
self.inbound_requests.get_or_create(&request.into()).inc(); self.inbound_requests.get_or_create(&request.into()).inc();
} }
_ => {} _ => {}

View File

@ -118,8 +118,8 @@ impl Recorder<libp2p_identify::Event> for Metrics {
} }
#[cfg(feature = "kad")] #[cfg(feature = "kad")]
impl Recorder<libp2p_kad::KademliaEvent> for Metrics { impl Recorder<libp2p_kad::Event> for Metrics {
fn record(&self, event: &libp2p_kad::KademliaEvent) { fn record(&self, event: &libp2p_kad::Event) {
self.kad.record(event) self.kad.record(event)
} }
} }
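Both hunks track the rename of `libp2p_kad::KademliaEvent` to `libp2p_kad::Event`. A hedged sketch of a call site after the rename, reusing only items visible above; the `libp2p_metrics::{Metrics, Recorder}` paths and the `log` dependency are assumptions:

```rust
// Hedged sketch: recording the renamed Kademlia event type.
fn on_kademlia_event(metrics: &libp2p_metrics::Metrics, event: &libp2p_kad::Event) {
    use libp2p_metrics::Recorder;

    // `Metrics` implements `Recorder<libp2p_kad::Event>` after this change.
    metrics.record(event);

    if let libp2p_kad::Event::OutboundQueryProgressed { stats, .. } = event {
        log::debug!("outbound query made {} requests so far", stats.num_requests());
    }
}
```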

View File

@ -1 +0,0 @@
Moved to https://github.com/multiformats/rust-multiaddr.

View File

@ -15,7 +15,7 @@ bytes = "1"
futures = "0.3" futures = "0.3"
log = "0.4" log = "0.4"
pin-project = "1.1.3" pin-project = "1.1.3"
smallvec = "1.11.0" smallvec = "1.11.1"
unsigned-varint = "0.7" unsigned-varint = "0.7"
[dev-dependencies] [dev-dependencies]

71
misc/server/CHANGELOG.md Normal file
View File

@ -0,0 +1,71 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.12.3]
### Changed
- Add libp2p-lookup to Dockerfile to enable healthchecks.
### Fixed
- Disable QUIC `draft-29` support.
Listening on `/quic` and `/quic-v1` addresses with the same port would otherwise result in an "Address already in use" error by the OS.
See [PR 4467].
[PR 4467]: https://github.com/libp2p/rust-libp2p/pull/4467
## [0.12.2]
### Fixed
- Adhere to `--metrics-path` flag and listen on `0.0.0.0:8888` (default IPFS metrics port).
[PR 4392]
[PR 4392]: https://github.com/libp2p/rust-libp2p/pull/4392
## [0.12.1]
### Changed
- Move to tokio and hyper.
See [PR 4311].
- Move to distroless Docker base image.
See [PR 4311].
[PR 4311]: https://github.com/libp2p/rust-libp2p/pull/4311
## [0.8.0]
### Changed
- Remove mplex support.
## [0.7.0]
### Changed
- Update to libp2p v0.47.0.
## [0.6.0] - [2022-05-05]
### Changed
- Update to libp2p v0.44.0.
## [0.5.4] - [2022-01-11]
### Changed
- Pull latest autonat changes.
## [0.5.3] - [2021-12-25]
### Changed
- Update dependencies.
- Pull in autonat fixes.
## [0.5.2] - [2021-12-20]
### Added
- Add support for libp2p autonat protocol via `--enable-autonat`.
## [0.5.1] - [2021-12-20]
### Fixed
- Update dependencies.
- Fix typo in command line flag `--enable-kademlia`.
## [0.5.0] - 2021-11-18
### Changed
- Disable Kademlia protocol by default.
## [0.4.0] - 2021-11-18
### Fixed
- Update dependencies.

27
misc/server/Cargo.toml Normal file
View File

@ -0,0 +1,27 @@
[package]
name = "libp2p-server"
version = "0.12.3"
authors = ["Max Inden <mail@max-inden.de>"]
edition = "2021"
repository = "https://github.com/libp2p/rust-libp2p"
rust-version = { workspace = true }
description = "A rust-libp2p server binary."
license = "MIT"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
base64 = "0.21"
clap = { version = "4.3.12", features = ["derive"] }
env_logger = "0.10.0"
futures = "0.3"
futures-timer = "3"
hyper = { version = "0.14", features = ["server", "tcp", "http1"] }
libp2p = { workspace = true, features = ["autonat", "dns", "tokio", "noise", "tcp", "yamux", "identify", "kad", "ping", "relay", "metrics", "rsa", "macros", "quic"] }
log = "0.4"
prometheus-client = "0.21.2"
serde = "1.0.188"
serde_derive = "1.0.125"
serde_json = "1.0"
tokio = { version = "1", features = ["rt-multi-thread", "macros"] }
zeroize = "1"

20
misc/server/Dockerfile Normal file
View File

@ -0,0 +1,20 @@
FROM rust:1.72-bullseye as builder
WORKDIR /usr/src/rust-libp2p-server
# Run with access to the target cache to speed up builds
WORKDIR /workspace
RUN --mount=type=cache,target=/usr/local/cargo/registry \
cargo install --locked --root /usr/local libp2p-lookup --version 0.6.4
ADD . .
RUN --mount=type=cache,target=./target \
--mount=type=cache,target=/usr/local/cargo/registry \
cargo build --release --package libp2p-server
RUN --mount=type=cache,target=./target \
mv ./target/release/libp2p-server /usr/local/bin/libp2p-server
FROM gcr.io/distroless/cc
COPY --from=builder /usr/local/bin/libp2p-server /usr/local/bin/libp2p-lookup /usr/local/bin/
CMD ["libp2p-server"]

41
misc/server/README.md Normal file
View File

@ -0,0 +1,41 @@
# Rust libp2p Server
A rust-libp2p based server implementation running:
- the [Kademlia protocol](https://github.com/libp2p/specs/tree/master/kad-dht)
- the [Circuit Relay v2 protocol](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md)
- the [AutoNAT protocol](https://github.com/libp2p/specs/blob/master/autonat/README.md)
## Usage
```
cargo run -- --help
A rust-libp2p server binary.
Usage: libp2p-server [OPTIONS] --config <CONFIG>
Options:
--config <CONFIG> Path to IPFS config file
--metrics-path <METRICS_PATH> Metric endpoint path [default: /metrics]
--enable-kademlia Whether to run the libp2p Kademlia protocol and join the IPFS DHT
--enable-autonat Whether to run the libp2p Autonat protocol
-h, --help Print help
```
```
cargo run -- --config ~/.ipfs/config
Local peer id: PeerId("12D3KooWSa1YEeQVSwvoqAMhwjKQ6kqZQckhWPb3RWEGV3sZGU6Z")
Listening on "/ip4/127.0.0.1/udp/4001/quic"
[...]
```
The Docker container includes [libp2p-lookup](https://github.com/mxinden/libp2p-lookup/) so that a proper healthcheck can be configured for container startup, e.g.
``` shell
docker run --health-cmd 'libp2p-lookup direct --address /ip4/127.0.0.1/tcp/4001/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa' -v /home/ipfs/.ipfs:/ipfs ghcr.io/libp2p/rust-libp2p-server --config /ipfs/config
```

View File

@ -0,0 +1,78 @@
use libp2p::autonat;
use libp2p::identify;
use libp2p::kad;
use libp2p::ping;
use libp2p::relay;
use libp2p::swarm::behaviour::toggle::Toggle;
use libp2p::{identity, swarm::NetworkBehaviour, Multiaddr, PeerId};
use std::str::FromStr;
use std::time::Duration;
const BOOTNODES: [&str; 4] = [
"QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
"QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
"QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb",
"QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
];
#[derive(NetworkBehaviour)]
pub(crate) struct Behaviour {
relay: relay::Behaviour,
ping: ping::Behaviour,
identify: identify::Behaviour,
pub(crate) kademlia: Toggle<kad::Behaviour<kad::record::store::MemoryStore>>,
autonat: Toggle<autonat::Behaviour>,
}
impl Behaviour {
pub(crate) fn new(
pub_key: identity::PublicKey,
enable_kademlia: bool,
enable_autonat: bool,
) -> Self {
let kademlia = if enable_kademlia {
let mut kademlia_config = kad::Config::default();
// Instantly remove records and provider records.
//
// TODO: Replace hack with option to disable both.
kademlia_config.set_record_ttl(Some(Duration::from_secs(0)));
kademlia_config.set_provider_record_ttl(Some(Duration::from_secs(0)));
let mut kademlia = kad::Behaviour::with_config(
pub_key.to_peer_id(),
kad::record::store::MemoryStore::new(pub_key.to_peer_id()),
kademlia_config,
);
let bootaddr = Multiaddr::from_str("/dnsaddr/bootstrap.libp2p.io").unwrap();
for peer in &BOOTNODES {
kademlia.add_address(&PeerId::from_str(peer).unwrap(), bootaddr.clone());
}
kademlia.bootstrap().unwrap();
Some(kademlia)
} else {
None
}
.into();
let autonat = if enable_autonat {
Some(autonat::Behaviour::new(
PeerId::from(pub_key.clone()),
Default::default(),
))
} else {
None
}
.into();
Self {
relay: relay::Behaviour::new(PeerId::from(pub_key.clone()), Default::default()),
ping: ping::Behaviour::new(ping::Config::new()),
identify: identify::Behaviour::new(
identify::Config::new("ipfs/0.1.0".to_string(), pub_key).with_agent_version(
format!("rust-libp2p-server/{}", env!("CARGO_PKG_VERSION")),
),
),
kademlia,
autonat,
}
}
}
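The `Toggle` wrapper is what lets Kademlia and AutoNAT be switched on per command-line flag: an `Option<Behaviour>` converts into a `Toggle`, and a disabled behaviour becomes a no-op. A condensed sketch of that pattern, using the same calls as `Behaviour::new` above:

```rust
// Hedged sketch of the `Toggle` pattern used by the server behaviour above.
use libp2p::autonat;
use libp2p::swarm::behaviour::toggle::Toggle;
use libp2p::PeerId;

fn maybe_autonat(enable: bool, local_peer_id: PeerId) -> Toggle<autonat::Behaviour> {
    // `Some(..)` enables the protocol, `None` turns it into a no-op behaviour.
    let behaviour = enable.then(|| autonat::Behaviour::new(local_peer_id, Default::default()));
    behaviour.into()
}
```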

39
misc/server/src/config.rs Normal file
View File

@ -0,0 +1,39 @@
use libp2p::Multiaddr;
use serde_derive::Deserialize;
use std::error::Error;
use std::path::Path;
#[derive(Clone, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub(crate) struct Config {
pub(crate) identity: Identity,
pub(crate) addresses: Addresses,
}
impl Config {
pub(crate) fn from_file(path: &Path) -> Result<Self, Box<dyn Error>> {
Ok(serde_json::from_str(&std::fs::read_to_string(path)?)?)
}
}
#[derive(Clone, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub(crate) struct Identity {
#[serde(rename = "PeerID")]
pub(crate) peer_id: String,
pub(crate) priv_key: String,
}
#[derive(Clone, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub(crate) struct Addresses {
pub(crate) swarm: Vec<Multiaddr>,
pub(crate) append_announce: Vec<Multiaddr>,
}
impl zeroize::Zeroize for Config {
fn zeroize(&mut self) {
self.identity.peer_id.zeroize();
self.identity.priv_key.zeroize();
}
}
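Given the `rename_all = "PascalCase"` and `rename = "PeerID"` attributes, the config file this code deserializes looks roughly like the sketch below. The mirrored structs keep the snippet self-contained (the real ones are `pub(crate)`), the values are placeholders, and `String` stands in for `Multiaddr`; `serde` with the `derive` feature and `serde_json` are assumed:

```rust
// Hedged sketch: the JSON shape `config.rs` above expects (an IPFS-style config file).
use serde::Deserialize;

#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
struct Config {
    identity: Identity,
    addresses: Addresses,
}

#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
struct Identity {
    #[serde(rename = "PeerID")]
    peer_id: String,
    priv_key: String,
}

#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
struct Addresses {
    swarm: Vec<String>,           // `Multiaddr` in the real config
    append_announce: Vec<String>,
}

fn main() {
    let raw = r#"{
        "Identity": { "PeerID": "<peer id>", "PrivKey": "<base64 protobuf keypair>" },
        "Addresses": { "Swarm": ["/ip4/0.0.0.0/tcp/4001"], "AppendAnnounce": [] }
    }"#;
    let cfg: Config = serde_json::from_str(raw).expect("placeholder config parses");
    assert_eq!(cfg.addresses.swarm.len(), 1);
    let _ = (cfg.identity.peer_id, cfg.identity.priv_key, cfg.addresses.append_announce);
}
```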

View File

@ -0,0 +1,137 @@
// Copyright 2022 Protocol Labs.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use hyper::http::StatusCode;
use hyper::service::Service;
use hyper::{Body, Method, Request, Response, Server};
use log::info;
use prometheus_client::encoding::text::encode;
use prometheus_client::registry::Registry;
use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};
const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0";
pub(crate) async fn metrics_server(
registry: Registry,
metrics_path: String,
) -> Result<(), hyper::Error> {
// Serve on all interfaces on port 8888 (the default IPFS metrics port).
let addr = ([0, 0, 0, 0], 8888).into();
let server = Server::bind(&addr).serve(MakeMetricService::new(registry, metrics_path.clone()));
info!(
"Metrics server on http://{}{}",
server.local_addr(),
metrics_path
);
server.await?;
Ok(())
}
pub(crate) struct MetricService {
reg: Arc<Mutex<Registry>>,
metrics_path: String,
}
type SharedRegistry = Arc<Mutex<Registry>>;
impl MetricService {
fn get_reg(&mut self) -> SharedRegistry {
Arc::clone(&self.reg)
}
fn respond_with_metrics(&mut self) -> Response<String> {
let mut response: Response<String> = Response::default();
response.headers_mut().insert(
hyper::header::CONTENT_TYPE,
METRICS_CONTENT_TYPE.try_into().unwrap(),
);
let reg = self.get_reg();
encode(&mut response.body_mut(), &reg.lock().unwrap()).unwrap();
*response.status_mut() = StatusCode::OK;
response
}
fn respond_with_404_not_found(&mut self) -> Response<String> {
Response::builder()
.status(StatusCode::NOT_FOUND)
.body(format!(
"Not found try localhost:[port]/{}",
self.metrics_path
))
.unwrap()
}
}
impl Service<Request<Body>> for MetricService {
type Response = Response<String>;
type Error = hyper::Error;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Request<Body>) -> Self::Future {
let req_path = req.uri().path();
let req_method = req.method();
let resp = if (req_method == Method::GET) && (req_path == self.metrics_path) {
// Encode and serve metrics from registry.
self.respond_with_metrics()
} else {
self.respond_with_404_not_found()
};
Box::pin(async { Ok(resp) })
}
}
pub(crate) struct MakeMetricService {
reg: SharedRegistry,
metrics_path: String,
}
impl MakeMetricService {
pub(crate) fn new(registry: Registry, metrics_path: String) -> MakeMetricService {
MakeMetricService {
reg: Arc::new(Mutex::new(registry)),
metrics_path,
}
}
}
impl<T> Service<T> for MakeMetricService {
type Response = MetricService;
type Error = hyper::Error;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, _: T) -> Self::Future {
let reg = self.reg.clone();
let metrics_path = self.metrics_path.clone();
let fut = async move { Ok(MetricService { reg, metrics_path }) };
Box::pin(fut)
}
}

211
misc/server/src/main.rs Normal file
View File

@ -0,0 +1,211 @@
use base64::Engine;
use clap::Parser;
use futures::future::Either;
use futures::stream::StreamExt;
use futures_timer::Delay;
use libp2p::core::muxing::StreamMuxerBox;
use libp2p::core::upgrade;
use libp2p::dns;
use libp2p::identify;
use libp2p::identity;
use libp2p::identity::PeerId;
use libp2p::kad;
use libp2p::metrics::{Metrics, Recorder};
use libp2p::noise;
use libp2p::quic;
use libp2p::swarm::{SwarmBuilder, SwarmEvent};
use libp2p::tcp;
use libp2p::yamux;
use libp2p::Transport;
use log::{debug, info, warn};
use prometheus_client::metrics::info::Info;
use prometheus_client::registry::Registry;
use std::error::Error;
use std::io;
use std::path::PathBuf;
use std::str::FromStr;
use std::task::Poll;
use std::time::Duration;
use zeroize::Zeroizing;
mod behaviour;
mod config;
mod http_service;
const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(5 * 60);
#[derive(Debug, Parser)]
#[clap(name = "libp2p server", about = "A rust-libp2p server binary.")]
struct Opts {
/// Path to IPFS config file.
#[clap(long)]
config: PathBuf,
/// Metric endpoint path.
#[clap(long, default_value = "/metrics")]
metrics_path: String,
/// Whether to run the libp2p Kademlia protocol and join the IPFS DHT.
#[clap(long)]
enable_kademlia: bool,
/// Whether to run the libp2p Autonat protocol.
#[clap(long)]
enable_autonat: bool,
}
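// Example invocation (the config path is a placeholder; `cargo run --` avoids assuming the binary name):
//
//   cargo run -- --config ./config.json --metrics-path /metrics --enable-kademlia --enable-autonat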
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
let opt = Opts::parse();
let config = Zeroizing::new(config::Config::from_file(opt.config.as_path())?);
let (local_peer_id, local_keypair) = {
let keypair = identity::Keypair::from_protobuf_encoding(&Zeroizing::new(
base64::engine::general_purpose::STANDARD
.decode(config.identity.priv_key.as_bytes())?,
))?;
let peer_id = keypair.public().into();
assert_eq!(
PeerId::from_str(&config.identity.peer_id)?,
peer_id,
"Expect peer id derived from private key and peer id retrieved from config to match."
);
(peer_id, keypair)
};
let transport = {
let tcp_transport =
tcp::tokio::Transport::new(tcp::Config::new().port_reuse(true).nodelay(true))
.upgrade(upgrade::Version::V1)
.authenticate(noise::Config::new(&local_keypair)?)
.multiplex(yamux::Config::default())
.timeout(Duration::from_secs(20));
let quic_transport = quic::tokio::Transport::new(quic::Config::new(&local_keypair));
dns::tokio::Transport::system(libp2p::core::transport::OrTransport::new(
quic_transport,
tcp_transport,
))?
.map(|either_output, _| match either_output {
Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)),
Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)),
})
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
.boxed()
};
let behaviour = behaviour::Behaviour::new(
local_keypair.public(),
opt.enable_kademlia,
opt.enable_autonat,
);
let mut swarm = SwarmBuilder::with_tokio_executor(transport, behaviour, local_peer_id).build();
if config.addresses.swarm.is_empty() {
warn!("No listen addresses configured.");
}
for address in &config.addresses.swarm {
match swarm.listen_on(address.clone()) {
Ok(_) => {}
Err(e @ libp2p::TransportError::MultiaddrNotSupported(_)) => {
warn!("Failed to listen on {address}, continuing anyways, {e}")
}
Err(e) => return Err(e.into()),
}
}
if config.addresses.append_announce.is_empty() {
warn!("No external addresses configured.");
}
for address in &config.addresses.append_announce {
swarm.add_external_address(address.clone())
}
info!(
"External addresses: {:?}",
swarm.external_addresses().collect::<Vec<_>>()
);
let mut metric_registry = Registry::default();
let metrics = Metrics::new(&mut metric_registry);
let build_info = Info::new(vec![("version".to_string(), env!("CARGO_PKG_VERSION"))]);
metric_registry.register(
"build",
"A metric with a constant '1' value labeled by version",
build_info,
);
tokio::spawn(async move {
if let Err(e) = http_service::metrics_server(metric_registry, opt.metrics_path).await {
log::error!("Metrics server failed: {e}");
}
});
let mut bootstrap_timer = Delay::new(BOOTSTRAP_INTERVAL);
loop {
if let Poll::Ready(()) = futures::poll!(&mut bootstrap_timer) {
bootstrap_timer.reset(BOOTSTRAP_INTERVAL);
let _ = swarm
.behaviour_mut()
.kademlia
.as_mut()
.map(|k| k.bootstrap());
}
let event = swarm.next().await.expect("Swarm not to terminate.");
metrics.record(&event);
match event {
SwarmEvent::Behaviour(behaviour::BehaviourEvent::Identify(e)) => {
info!("{:?}", e);
metrics.record(&e);
if let identify::Event::Received {
peer_id,
info:
identify::Info {
listen_addrs,
protocols,
..
},
} = e
{
if protocols.iter().any(|p| *p == kad::PROTOCOL_NAME) {
for addr in listen_addrs {
swarm
.behaviour_mut()
.kademlia
.as_mut()
.map(|k| k.add_address(&peer_id, addr));
}
}
}
}
SwarmEvent::Behaviour(behaviour::BehaviourEvent::Ping(e)) => {
debug!("{:?}", e);
metrics.record(&e);
}
SwarmEvent::Behaviour(behaviour::BehaviourEvent::Kademlia(e)) => {
debug!("{:?}", e);
metrics.record(&e);
}
SwarmEvent::Behaviour(behaviour::BehaviourEvent::Relay(e)) => {
info!("{:?}", e);
metrics.record(&e)
}
SwarmEvent::Behaviour(behaviour::BehaviourEvent::Autonat(e)) => {
info!("{:?}", e);
// TODO: Add metric recording for `NatStatus`.
// metrics.record(&e)
}
SwarmEvent::NewListenAddr { address, .. } => {
info!("Listening on {address:?}");
}
_ => {}
}
}
}
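The `identity.priv_key` value decoded above is a base64-encoded protobuf keypair, and `identity.peer_id` must be the peer ID derived from it. A rough sketch for producing a matching pair with the same `libp2p::identity` and `base64` APIs used in this file (the exact layout of `config::Config` is not shown in this diff and is not assumed here):

// Sketch only: generate an Ed25519 keypair and print the two values the
// server's config expects.
use base64::Engine;
use libp2p::identity;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let keypair = identity::Keypair::generate_ed25519();
    let peer_id = identity::PeerId::from(keypair.public());

    // Same encoding that `main` reverses with base64 STANDARD + `from_protobuf_encoding`.
    let priv_key = base64::engine::general_purpose::STANDARD.encode(keypair.to_protobuf_encoding()?);

    println!("peer_id:  {peer_id}");
    println!("priv_key: {priv_key}");
    Ok(())
}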

(new file, path not shown)

@@ -0,0 +1,6 @@
## 0.1.0
- Initial release. See [PR 4248].

[PR 4248]: https://github.com/libp2p/rust-libp2p/pull/4248

(new file, path not shown)

@@ -0,0 +1,32 @@
[package]
authors = ["Doug Anderson <DougAnderson444@peerpiper.io>"]
categories = ["network-programming"]
description = "Utilities for WebRTC in libp2p"
edition = "2021"
license = "MIT"
name = "libp2p-webrtc-utils"
repository = "https://github.com/libp2p/rust-libp2p"
rust-version = { workspace = true }
version = "0.1.0"
publish = true
[dependencies]
asynchronous-codec = "0.6"
bytes = "1"
futures = "0.3"
hex = "0.4"
libp2p-core = { workspace = true }
libp2p-identity = { workspace = true }
libp2p-noise = { workspace = true }
log = "0.4.19"
quick-protobuf = "0.8"
quick-protobuf-codec = { workspace = true }
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
sha2 = "0.10.7"
thiserror = "1"
tinytemplate = "1.2"
[dev-dependencies]
hex-literal = "0.4"
unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] }

(new file, path not shown)

@@ -0,0 +1,109 @@
// Copyright 2023 Doug Anderson.
// Copyright 2022 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use libp2p_core::multihash;
use sha2::Digest as _;
use std::fmt;
pub const SHA256: &str = "sha-256";
const MULTIHASH_SHA256_CODE: u64 = 0x12;
type Multihash = multihash::Multihash<64>;
/// A certificate fingerprint that is assumed to be created using the SHA256 hash algorithm.
#[derive(Eq, PartialEq, Copy, Clone)]
pub struct Fingerprint([u8; 32]);
impl Fingerprint {
pub const FF: Fingerprint = Fingerprint([0xFF; 32]);
pub const fn raw(digest: [u8; 32]) -> Self {
Fingerprint(digest)
}
/// Creates a new [Fingerprint] from a raw certificate by hashing the given bytes with SHA256.
pub fn from_certificate(bytes: &[u8]) -> Self {
Fingerprint(sha2::Sha256::digest(bytes).into())
}
/// Converts [`Multihash`](multihash::Multihash) to [`Fingerprint`].
pub fn try_from_multihash(hash: Multihash) -> Option<Self> {
if hash.code() != MULTIHASH_SHA256_CODE {
// Only support SHA256 for now.
return None;
}
let bytes = hash.digest().try_into().ok()?;
Some(Self(bytes))
}
/// Converts this fingerprint to [`Multihash`](multihash::Multihash).
pub fn to_multihash(self) -> Multihash {
Multihash::wrap(MULTIHASH_SHA256_CODE, &self.0).expect("fingerprint's len to be 32 bytes")
}
/// Formats this fingerprint as uppercase hex, separated by colons (`:`).
///
/// This is the format described in <https://www.rfc-editor.org/rfc/rfc4572#section-5>.
pub fn to_sdp_format(self) -> String {
self.0.map(|byte| format!("{byte:02X}")).join(":")
}
/// Returns the algorithm used (e.g. "sha-256").
/// See <https://datatracker.ietf.org/doc/html/rfc8122#section-5>
pub fn algorithm(&self) -> String {
SHA256.to_owned()
}
}
impl fmt::Debug for Fingerprint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&hex::encode(self.0))
}
}
#[cfg(test)]
mod tests {
use super::*;
const SDP_FORMAT: &str = "7D:E3:D8:3F:81:A6:80:59:2A:47:1E:6B:6A:BB:07:47:AB:D3:53:85:A8:09:3F:DF:E1:12:C1:EE:BB:6C:C6:AC";
const REGULAR_FORMAT: [u8; 32] =
hex_literal::hex!("7DE3D83F81A680592A471E6B6ABB0747ABD35385A8093FDFE112C1EEBB6CC6AC");
#[test]
fn sdp_format() {
let fp = Fingerprint::raw(REGULAR_FORMAT);
let formatted = fp.to_sdp_format();
assert_eq!(formatted, SDP_FORMAT)
}
#[test]
fn from_sdp() {
let mut bytes = [0; 32];
bytes.copy_from_slice(&hex::decode(SDP_FORMAT.replace(':', "")).unwrap());
let fp = Fingerprint::raw(bytes);
assert_eq!(fp, Fingerprint::raw(REGULAR_FORMAT));
}
}
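A short usage sketch of the API above; the import path assumes `Fingerprint` is re-exported from the crate root, and the certificate bytes are a made-up placeholder:

// Sketch: hash arbitrary bytes into a Fingerprint and round-trip it through a
// multihash. `cert_der` stands in for real DER-encoded certificate bytes.
use libp2p_webrtc_utils::Fingerprint;

fn main() {
    let cert_der: &[u8] = b"example certificate bytes";

    let fp = Fingerprint::from_certificate(cert_der);
    assert_eq!(fp.algorithm(), "sha-256");

    // Colon-separated uppercase hex, as used in an SDP `a=fingerprint:` attribute.
    println!("a=fingerprint:{} {}", fp.algorithm(), fp.to_sdp_format());

    // SHA-256 multihash (code 0x12) round trip.
    assert_eq!(Fingerprint::try_from_multihash(fp.to_multihash()), Some(fp));
}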

Some files were not shown because too many files have changed in this diff.