From 3cfe4bbfac46dac3c7875f891909b17f39fa7c85 Mon Sep 17 00:00:00 2001 From: Rakesh Shrestha <38497578+aomini@users.noreply.github.com> Date: Mon, 28 Feb 2022 22:31:56 +0545 Subject: [PATCH 1/5] docs: update auto relay example code usage (#1163) Co-authored-by: aomini daiki --- examples/auto-relay/README.md | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/examples/auto-relay/README.md b/examples/auto-relay/README.md index 17ed5554..0f88b758 100644 --- a/examples/auto-relay/README.md +++ b/examples/auto-relay/README.md @@ -106,18 +106,15 @@ console.log(`Node started with id ${node.peerId.toB58String()}`) const conn = await node.dial(relayAddr) -// Wait for connection and relay to be bind for the example purpose -await new Promise((resolve) => { - node.peerStore.on('change:multiaddrs', ({ peerId }) => { - // Updated self multiaddrs? - if (peerId.equals(node.peerId)) { - resolve() - } - }) -}) - console.log(`Connected to the HOP relay ${conn.remotePeer.toString()}`) -console.log(`Advertising with a relay address of ${node.multiaddrs[0].toString()}/p2p/${node.peerId.toB58String()}`) + +// Wait for connection and relay to be bind for the example purpose +node.peerStore.on('change:multiaddrs', ({ peerId }) => { + // Updated self multiaddrs? + if (peerId.equals(node.peerId)) { + console.log(`Advertising with a relay address of ${node.multiaddrs[0].toString()}/p2p/${node.peerId.toB58String()}`) + } +}) ``` As you can see in the code, we need to provide the relay address, `relayAddr`, as a process argument. This node will dial the provided relay address and automatically bind to it. @@ -189,4 +186,4 @@ As you can see from the output, the remote address of the established connection Before moving into production, there are a few things that you should take into account. -A relay node should not advertise its private address in a real world scenario, as the node would not be reachable by others. You should provide an array of public addresses in the libp2p `addresses.announce` option. If you are using websockets, bear in mind that due to browser’s security policies you cannot establish unencrypted connection from secure context. The simplest solution is to setup SSL with nginx and proxy to the node and setup a domain name for the certificate. +A relay node should not advertise its private address in a real world scenario, as the node would not be reachable by others. You should provide an array of public addresses in the libp2p `addresses.announce` option. If you are using websockets, bear in mind that due to browser’s security policies you cannot establish unencrypted connection from secure context. The simplest solution is to setup SSL with nginx and proxy to the node and setup a domain name for the certificate. \ No newline at end of file From 199395de4d8139cc77d0b408626f37c9b8520d28 Mon Sep 17 00:00:00 2001 From: Alex Potsides Date: Mon, 28 Mar 2022 14:30:27 +0100 Subject: [PATCH 2/5] feat: convert to typescript (#1172) Converts this module to typescript. - Ecosystem modules renamed from (e.g.) `libp2p-tcp` to `@libp2p/tcp` - Ecosystem module now have named exports - Configuration has been updated, now pass instances of modules instead of classes: - Some configuration keys have been renamed to make them more descriptive. `transport` -> `transports`, `connEncryption` -> `connectionEncryption`. In general where we pass multiple things, the key is now plural, e.g. `streamMuxer` -> `streamMuxers`, `contentRouting` -> `contentRouters`, etc. 
Where we are configuring a singleton the config key is singular, e.g. `connProtector` -> `connectionProtector` etc. - Properties of the `modules` config key have been moved to the root - Properties of the `config` config key have been moved to the root ```js // before import Libp2p from 'libp2p' import TCP from 'libp2p-tcp' await Libp2p.create({ modules: { transport: [ TCP ], } config: { transport: { [TCP.tag]: { foo: 'bar' } }, relay: { enabled: true, hop: { enabled: true, active: true } } } }) ``` ```js // after import { createLibp2p } from 'libp2p' import { TCP } from '@libp2p/tcp' await createLibp2p({ transports: [ new TCP({ foo: 'bar' }) ], relay: { enabled: true, hop: { enabled: true, active: true } } }) ``` - Use of `enabled` flag has been reduced - previously you could pass a module but disable it with config. Now if you don't want a feature, just don't pass an implementation. Eg: ```js // before await Libp2p.create({ modules: { transport: [ TCP ], pubsub: Gossipsub }, config: { pubsub: { enabled: false } } }) ``` ```js // after await createLibp2p({ transports: [ new TCP() ] }) ``` - `.multiaddrs` renamed to `.getMultiaddrs()` because it's not a property accessor, work is done by that method to calculate announce addresses, observed addresses, etc - `/p2p/${peerId}` is now appended to all addresses returned by `.getMultiaddrs()` so they can be used opaquely (every consumer has to append the peer ID to the address to actually use it otherwise). If you need low-level unadulterated addresses, call methods on the address manager. BREAKING CHANGE: types are no longer hand crafted, this module is now ESM only --- .aegir.cjs | 63 ++ .aegir.js | 70 -- .github/dependabot.yml | 2 +- .github/workflows/main.yml | 72 +- LICENSE | 24 +- LICENSE-APACHE | 5 + LICENSE-MIT | 19 + README.md | 23 +- doc/API.md | 50 +- doc/CONFIGURATION.md | 499 +++++------ doc/GETTING_STARTED.md | 107 ++- doc/PEER_DISCOVERY.md | 2 +- doc/STREAMING_ITERABLES.md | 4 +- doc/migrations/v0.26-v0.27.md | 30 +- doc/migrations/v0.27-v0.28.md | 6 +- doc/migrations/v0.28-v0.29.md | 14 +- doc/migrations/v0.29-v0.30.md | 44 +- doc/migrations/v0.30-v0.31.md | 2 +- examples/auto-relay/README.md | 78 +- examples/auto-relay/dialer.js | 28 +- examples/auto-relay/listener.js | 46 +- examples/auto-relay/relay.js | 44 +- examples/auto-relay/test.js | 17 +- examples/chat/src/dialer.js | 21 +- examples/chat/src/libp2p.js | 35 +- examples/chat/src/listener.js | 19 +- ...{peer-id-dialer.json => peer-id-dialer.js} | 2 +- .../src/peer-id-listener.js} | 2 +- examples/chat/src/stream.js | 21 +- examples/chat/test.js | 15 +- examples/connection-encryption/1.js | 31 +- examples/connection-encryption/README.md | 20 +- examples/connection-encryption/test.js | 12 +- examples/delegated-routing/package.json | 23 +- .../delegated-routing/src/libp2p-bundle.js | 82 +- examples/discovery-mechanisms/1.js | 50 +- examples/discovery-mechanisms/2.js | 45 +- examples/discovery-mechanisms/3.js | 92 +- examples/discovery-mechanisms/README.md | 158 ++-- .../{bootstrapers.js => bootstrappers.js} | 6 +- examples/discovery-mechanisms/test-1.js | 11 +- examples/discovery-mechanisms/test-2.js | 32 +- examples/discovery-mechanisms/test-3.js | 32 +- examples/discovery-mechanisms/test.js | 12 +- examples/echo/src/dialer.js | 24 +- examples/echo/src/{id-d.json => id-d.js} | 2 +- .../src/id-l.js} | 2 +- examples/echo/src/libp2p.js | 36 +- examples/echo/src/listener.js | 19 +- examples/echo/test.js | 15 +- examples/libp2p-in-the-browser/.babelrc | 3 - 
examples/libp2p-in-the-browser/index.js | 74 +- examples/libp2p-in-the-browser/package.json | 25 +- examples/libp2p-in-the-browser/test.js | 29 +- examples/package.json | 8 +- examples/peer-and-content-routing/1.js | 37 +- examples/peer-and-content-routing/2.js | 43 +- examples/peer-and-content-routing/README.md | 30 +- examples/peer-and-content-routing/test-1.js | 11 +- examples/peer-and-content-routing/test-2.js | 11 +- examples/peer-and-content-routing/test.js | 10 +- examples/pnet/index.js | 16 +- examples/pnet/libp2p-node.js | 45 +- examples/pnet/test.js | 11 +- examples/pnet/utils.js | 4 +- examples/protocol-and-stream-muxing/1.js | 33 +- examples/protocol-and-stream-muxing/2.js | 35 +- examples/protocol-and-stream-muxing/3.js | 34 +- examples/protocol-and-stream-muxing/README.md | 22 +- examples/protocol-and-stream-muxing/test-1.js | 11 +- examples/protocol-and-stream-muxing/test-2.js | 11 +- examples/protocol-and-stream-muxing/test-3.js | 11 +- examples/protocol-and-stream-muxing/test.js | 12 +- examples/pubsub/1.js | 42 +- examples/pubsub/README.md | 28 +- examples/pubsub/message-filtering/1.js | 48 +- examples/pubsub/message-filtering/README.md | 22 +- examples/pubsub/message-filtering/test.js | 15 +- examples/pubsub/test-1.js | 15 +- examples/pubsub/test.js | 10 +- examples/test-all.js | 14 +- examples/test.js | 15 +- examples/transports/1.js | 25 +- examples/transports/2.js | 36 +- examples/transports/3.js | 48 +- examples/transports/4.js | 65 +- examples/transports/README.md | 54 +- examples/transports/test-1.js | 11 +- examples/transports/test-2.js | 11 +- examples/transports/test-3.js | 11 +- examples/transports/test-4.js | 11 +- examples/transports/test.js | 14 +- examples/utils.js | 18 +- examples/webrtc-direct/README.md | 9 +- examples/webrtc-direct/dialer.js | 50 +- examples/webrtc-direct/listener.js | 37 +- examples/webrtc-direct/package.json | 33 +- examples/webrtc-direct/test.js | 48 +- package.json | 342 ++++---- scripts/node-globals.js | 2 - src/address-manager/index.js | 96 --- src/address-manager/index.ts | 129 +++ src/circuit/README.md | 27 +- src/circuit/auto-relay.js | 302 ------- src/circuit/auto-relay.ts | 284 ++++++ src/circuit/circuit/hop.js | 205 ----- src/circuit/circuit/hop.ts | 211 +++++ src/circuit/circuit/stop.js | 81 -- src/circuit/circuit/stop.ts | 78 ++ src/circuit/circuit/stream-handler.js | 94 -- src/circuit/circuit/stream-handler.ts | 87 ++ src/circuit/circuit/{utils.js => utils.ts} | 34 +- src/circuit/constants.js | 12 - src/circuit/constants.ts | 31 + src/circuit/index.js | 102 --- src/circuit/index.ts | 120 +++ src/circuit/{listener.js => listener.ts} | 48 +- src/circuit/multicodec.js | 5 - src/circuit/multicodec.ts | 2 + src/circuit/{protocol => pb}/index.d.ts | 0 src/circuit/{protocol => pb}/index.js | 16 +- src/circuit/{protocol => pb}/index.proto | 0 src/circuit/transport.js | 229 ----- src/circuit/transport.ts | 216 +++++ src/circuit/utils.js | 17 - src/circuit/utils.ts | 12 + src/config.js | 114 --- src/config.ts | 101 +++ src/connection-manager/auto-dialler.js | 132 --- src/connection-manager/auto-dialler.ts | 154 ++++ src/connection-manager/index.js | 374 -------- src/connection-manager/index.ts | 422 +++++++++ src/connection-manager/latency-monitor.js | 264 ------ src/connection-manager/latency-monitor.ts | 319 +++++++ ...mitter.js => visibility-change-emitter.ts} | 86 +- src/constants.js | 18 - src/constants.ts | 31 + src/content-routing/index.js | 163 ---- src/content-routing/index.ts | 143 +++ src/content-routing/utils.js | 89 -- 
src/content-routing/utils.ts | 54 ++ src/dht/dht-content-routing.js | 44 - src/dht/dht-content-routing.ts | 43 + src/dht/dht-peer-routing.js | 51 -- src/dht/dht-peer-routing.ts | 35 + src/dialer/auto-dialer.ts | 40 + .../{dial-request.js => dial-request.ts} | 103 ++- src/dialer/index.js | 376 -------- src/dialer/index.ts | 374 ++++++++ src/errors.js | 66 -- src/errors.ts | 71 ++ src/fetch/README.md | 2 +- src/fetch/constants.js | 6 - src/fetch/constants.ts | 3 + src/fetch/{index.js => index.ts} | 151 ++-- src/fetch/{ => pb}/proto.d.ts | 0 src/fetch/{ => pb}/proto.js | 16 +- src/fetch/{ => pb}/proto.proto | 0 src/get-peer.js | 49 -- src/get-peer.ts | 57 ++ src/identify/consts.js | 15 - src/identify/consts.ts | 13 + src/identify/index.js | 384 --------- src/identify/index.ts | 445 ++++++++++ src/identify/{ => pb}/message.d.ts | 25 +- src/identify/{ => pb}/message.js | 125 ++- src/identify/{ => pb}/message.proto | 0 src/index.js | 813 ------------------ src/index.ts | 234 +++++ src/insecure/index.ts | 97 +++ src/insecure/{ => pb}/proto.d.ts | 8 +- src/insecure/{ => pb}/proto.js | 61 +- src/insecure/{ => pb}/proto.proto | 0 src/insecure/plaintext.js | 95 -- src/keychain/{cms.js => cms.ts} | 116 +-- src/keychain/index.js | 561 ------------ src/keychain/index.ts | 588 +++++++++++++ src/keychain/{util.js => util.ts} | 24 +- src/libp2p.ts | 501 +++++++++++ src/metrics/index.js | 290 ------- src/metrics/index.ts | 310 +++++++ src/metrics/moving-average.ts | 53 ++ src/metrics/old-peers.js | 16 - src/metrics/stats.js | 270 ------ src/metrics/stats.ts | 243 ++++++ src/metrics/tracked-map.js | 94 -- src/nat-manager.js | 197 ----- src/nat-manager.ts | 194 +++++ src/peer-record-updater.ts | 55 ++ src/peer-routing.js | 176 ---- src/peer-routing.ts | 185 ++++ src/peer-store/README.md | 145 ---- src/peer-store/address-book.js | 382 -------- src/peer-store/index.js | 121 --- src/peer-store/key-book.js | 141 --- src/peer-store/metadata-book.js | 250 ------ src/peer-store/pb/peer.d.ts | 222 ----- src/peer-store/pb/peer.js | 643 -------------- src/peer-store/pb/peer.proto | 31 - src/peer-store/proto-book.js | 237 ----- src/peer-store/store.js | 263 ------ src/peer-store/types.ts | 245 ------ src/ping/README.md | 2 +- src/ping/constants.js | 8 - src/ping/constants.ts | 5 + src/ping/index.js | 84 -- src/ping/index.ts | 83 ++ src/ping/util.js | 18 - src/pnet/README.md | 4 +- src/pnet/crypto.js | 84 -- src/pnet/crypto.ts | 67 ++ src/pnet/errors.js | 7 - src/pnet/errors.ts | 5 + src/pnet/index.js | 86 -- src/pnet/index.ts | 95 ++ src/pnet/key-generator.js | 33 - src/pnet/key-generator.ts | 28 + src/pubsub-adapter.js | 61 -- src/record/README.md | 130 --- src/record/envelope/envelope.d.ts | 77 -- src/record/envelope/envelope.js | 243 ------ src/record/envelope/envelope.proto | 19 - src/record/envelope/index.js | 183 ---- src/record/peer-record/consts.js | 14 - src/record/peer-record/index.js | 113 --- src/record/peer-record/peer-record.d.ts | 133 --- src/record/peer-record/peer-record.js | 367 -------- src/record/peer-record/peer-record.proto | 18 - src/record/utils.js | 25 - src/registrar.js | 127 --- src/registrar.ts | 205 +++++ src/transport-manager.js | 269 ------ src/transport-manager.ts | 279 ++++++ src/types.ts | 98 --- src/upgrader.js | 492 ----------- src/upgrader.ts | 499 +++++++++++ src/version.ts | 3 + test/addresses/address-manager.spec.js | 146 ---- test/addresses/address-manager.spec.ts | 188 ++++ .../{addresses.node.js => addresses.node.ts} | 93 +- test/addresses/utils.js | 16 - 
test/addresses/utils.ts | 10 + ...prefix.node.js => protocol-prefix.node.ts} | 26 +- test/configuration/pubsub.spec.js | 129 --- test/configuration/pubsub.spec.ts | 106 +++ test/configuration/utils.js | 52 -- test/configuration/utils.ts | 76 ++ test/connection-manager/auto-dialler.spec.js | 64 -- test/connection-manager/auto-dialler.spec.ts | 61 ++ .../{index.node.js => index.node.ts} | 395 +++++---- test/connection-manager/index.spec.js | 132 --- test/connection-manager/index.spec.ts | 143 +++ ...outing.node.js => content-routing.node.ts} | 293 ++++--- .../content-routing/dht/configuration.node.js | 94 -- .../content-routing/dht/configuration.node.ts | 27 + test/content-routing/dht/operation.node.js | 146 ---- test/content-routing/dht/operation.node.ts | 178 ++++ test/content-routing/dht/utils.js | 35 - test/content-routing/dht/utils.ts | 15 + test/content-routing/utils.js | 21 - test/content-routing/utils.ts | 11 + test/core/consume-peer-record.spec.js | 46 - test/core/consume-peer-record.spec.ts | 51 ++ test/core/encryption.spec.js | 54 -- test/core/encryption.spec.ts | 54 ++ .../{listening.node.js => listening.node.ts} | 34 +- test/core/ping.node.js | 84 -- test/core/ping.node.ts | 75 ++ test/dialing/dial-request.spec.js | 224 ----- test/dialing/dial-request.spec.ts | 218 +++++ test/dialing/direct.node.js | 572 ------------ test/dialing/direct.node.ts | 572 ++++++++++++ test/dialing/direct.spec.js | 637 -------------- test/dialing/direct.spec.ts | 589 +++++++++++++ test/dialing/resolver.spec.js | 180 ---- test/dialing/resolver.spec.ts | 226 +++++ test/fetch/{fetch.node.js => fetch.node.ts} | 97 ++- test/fixtures/{browser.js => browser.ts} | 5 +- test/fixtures/{peers.js => peers.ts} | 4 +- test/fixtures/{swarm.key.js => swarm.key.ts} | 3 +- test/identify/index.spec.js | 605 ------------- test/identify/index.spec.ts | 619 +++++++++++++ test/insecure/compliance.spec.js | 13 - test/insecure/compliance.spec.ts | 15 + test/insecure/plaintext.spec.js | 67 -- test/insecure/plaintext.spec.ts | 74 ++ test/interop.ts | 159 ++++ ...ms-interop.spec.js => cms-interop.spec.ts} | 16 +- .../{keychain.spec.js => keychain.spec.ts} | 374 +++----- .../{peerid.spec.js => peerid.spec.ts} | 53 +- test/metrics/index.node.js | 146 ---- test/metrics/index.node.ts | 187 ++++ test/metrics/index.spec.js | 275 ------ test/metrics/index.spec.ts | 301 +++++++ test/nat-manager/nat-manager.node.js | 295 ------- test/nat-manager/nat-manager.node.ts | 231 +++++ test/peer-discovery/index.node.js | 206 ----- test/peer-discovery/index.node.ts | 217 +++++ test/peer-discovery/index.spec.js | 137 --- test/peer-discovery/index.spec.ts | 101 +++ test/peer-routing/peer-routing.node.js | 665 -------------- test/peer-routing/peer-routing.node.ts | 788 +++++++++++++++++ test/peer-routing/utils.js | 21 - test/peer-routing/utils.ts | 11 + test/peer-store/address-book.spec.js | 745 ---------------- test/peer-store/key-book.spec.js | 114 --- test/peer-store/metadata-book.spec.js | 384 --------- test/peer-store/peer-store.node.js | 50 -- test/peer-store/peer-store.spec.js | 227 ----- test/peer-store/proto-book.spec.js | 416 --------- test/pnet/index.spec.js | 92 -- test/pnet/index.spec.ts | 115 +++ test/record/envelope.spec.js | 87 -- test/record/peer-record.spec.js | 157 ---- test/registrar/registrar.spec.js | 198 ----- test/registrar/registrar.spec.ts | 228 +++++ ...{auto-relay.node.js => auto-relay.node.ts} | 377 ++++---- test/relay/relay.node.js | 165 ---- test/relay/relay.node.ts | 173 ++++ test/relay/utils.ts | 34 + 
test/transports/transport-manager.node.js | 106 --- test/transports/transport-manager.node.ts | 123 +++ test/transports/transport-manager.spec.js | 247 ------ test/transports/transport-manager.spec.ts | 158 ++++ test/ts-use/package.json | 25 - test/ts-use/src/main.ts | 195 ----- test/ts-use/tsconfig.json | 7 - test/upgrading/upgrader.spec.js | 477 ---------- test/upgrading/upgrader.spec.ts | 517 +++++++++++ test/utils/base-options.browser.js | 29 - test/utils/base-options.browser.ts | 31 + test/utils/base-options.js | 24 - test/utils/base-options.ts | 30 + test/utils/creators/peer.js | 72 -- test/utils/creators/peer.ts | 104 +++ test/utils/mock-connection-gater.js | 19 - test/utils/mockConnection.js | 155 ---- test/utils/mockCrypto.js | 24 - test/utils/mockMultiaddrConn.js | 44 - test/utils/mockUpgrader.js | 6 - tsconfig.json | 21 +- 341 files changed, 17035 insertions(+), 23548 deletions(-) create mode 100644 .aegir.cjs delete mode 100644 .aegir.js create mode 100644 LICENSE-APACHE create mode 100644 LICENSE-MIT rename examples/chat/src/{peer-id-dialer.json => peer-id-dialer.js} (99%) rename examples/{echo/src/id-l.json => chat/src/peer-id-listener.js} (99%) rename examples/discovery-mechanisms/{bootstrapers.js => bootstrappers.js} (90%) rename examples/echo/src/{id-d.json => id-d.js} (99%) rename examples/{chat/src/peer-id-listener.json => echo/src/id-l.js} (99%) delete mode 100644 examples/libp2p-in-the-browser/.babelrc delete mode 100644 scripts/node-globals.js delete mode 100644 src/address-manager/index.js create mode 100644 src/address-manager/index.ts delete mode 100644 src/circuit/auto-relay.js create mode 100644 src/circuit/auto-relay.ts delete mode 100644 src/circuit/circuit/hop.js create mode 100644 src/circuit/circuit/hop.ts delete mode 100644 src/circuit/circuit/stop.js create mode 100644 src/circuit/circuit/stop.ts delete mode 100644 src/circuit/circuit/stream-handler.js create mode 100644 src/circuit/circuit/stream-handler.ts rename src/circuit/circuit/{utils.js => utils.ts} (50%) delete mode 100644 src/circuit/constants.js create mode 100644 src/circuit/constants.ts delete mode 100644 src/circuit/index.js create mode 100644 src/circuit/index.ts rename src/circuit/{listener.js => listener.ts} (54%) delete mode 100644 src/circuit/multicodec.js create mode 100644 src/circuit/multicodec.ts rename src/circuit/{protocol => pb}/index.d.ts (100%) rename src/circuit/{protocol => pb}/index.js (97%) rename src/circuit/{protocol => pb}/index.proto (100%) delete mode 100644 src/circuit/transport.js create mode 100644 src/circuit/transport.ts delete mode 100644 src/circuit/utils.js create mode 100644 src/circuit/utils.ts delete mode 100644 src/config.js create mode 100644 src/config.ts delete mode 100644 src/connection-manager/auto-dialler.js create mode 100644 src/connection-manager/auto-dialler.ts delete mode 100644 src/connection-manager/index.js create mode 100644 src/connection-manager/index.ts delete mode 100644 src/connection-manager/latency-monitor.js create mode 100644 src/connection-manager/latency-monitor.ts rename src/connection-manager/{visibility-change-emitter.js => visibility-change-emitter.ts} (53%) delete mode 100644 src/constants.js create mode 100644 src/constants.ts delete mode 100644 src/content-routing/index.js create mode 100644 src/content-routing/index.ts delete mode 100644 src/content-routing/utils.js create mode 100644 src/content-routing/utils.ts delete mode 100644 src/dht/dht-content-routing.js create mode 100644 src/dht/dht-content-routing.ts delete mode 
100644 src/dht/dht-peer-routing.js create mode 100644 src/dht/dht-peer-routing.ts create mode 100644 src/dialer/auto-dialer.ts rename src/dialer/{dial-request.js => dial-request.ts} (51%) delete mode 100644 src/dialer/index.js create mode 100644 src/dialer/index.ts delete mode 100644 src/errors.js create mode 100644 src/errors.ts delete mode 100644 src/fetch/constants.js create mode 100644 src/fetch/constants.ts rename src/fetch/{index.js => index.ts} (52%) rename src/fetch/{ => pb}/proto.d.ts (100%) rename src/fetch/{ => pb}/proto.js (95%) rename src/fetch/{ => pb}/proto.proto (100%) delete mode 100644 src/get-peer.js create mode 100644 src/get-peer.ts delete mode 100644 src/identify/consts.js create mode 100644 src/identify/consts.ts delete mode 100644 src/identify/index.js create mode 100644 src/identify/index.ts rename src/identify/{ => pb}/message.d.ts (80%) rename src/identify/{ => pb}/message.js (77%) rename src/identify/{ => pb}/message.proto (100%) delete mode 100644 src/index.js create mode 100644 src/index.ts create mode 100644 src/insecure/index.ts rename src/insecure/{ => pb}/proto.d.ts (96%) rename src/insecure/{ => pb}/proto.js (88%) rename src/insecure/{ => pb}/proto.proto (100%) delete mode 100644 src/insecure/plaintext.js rename src/keychain/{cms.js => cms.ts} (50%) delete mode 100644 src/keychain/index.js create mode 100644 src/keychain/index.ts rename src/keychain/{util.js => util.ts} (78%) create mode 100644 src/libp2p.ts delete mode 100644 src/metrics/index.js create mode 100644 src/metrics/index.ts create mode 100644 src/metrics/moving-average.ts delete mode 100644 src/metrics/old-peers.js delete mode 100644 src/metrics/stats.js create mode 100644 src/metrics/stats.ts delete mode 100644 src/metrics/tracked-map.js delete mode 100644 src/nat-manager.js create mode 100644 src/nat-manager.ts create mode 100644 src/peer-record-updater.ts delete mode 100644 src/peer-routing.js create mode 100644 src/peer-routing.ts delete mode 100644 src/peer-store/README.md delete mode 100644 src/peer-store/address-book.js delete mode 100644 src/peer-store/index.js delete mode 100644 src/peer-store/key-book.js delete mode 100644 src/peer-store/metadata-book.js delete mode 100644 src/peer-store/pb/peer.d.ts delete mode 100644 src/peer-store/pb/peer.js delete mode 100644 src/peer-store/pb/peer.proto delete mode 100644 src/peer-store/proto-book.js delete mode 100644 src/peer-store/store.js delete mode 100644 src/peer-store/types.ts delete mode 100644 src/ping/constants.js create mode 100644 src/ping/constants.ts delete mode 100644 src/ping/index.js create mode 100644 src/ping/index.ts delete mode 100644 src/ping/util.js delete mode 100644 src/pnet/crypto.js create mode 100644 src/pnet/crypto.ts delete mode 100644 src/pnet/errors.js create mode 100644 src/pnet/errors.ts delete mode 100644 src/pnet/index.js create mode 100644 src/pnet/index.ts delete mode 100644 src/pnet/key-generator.js create mode 100644 src/pnet/key-generator.ts delete mode 100644 src/pubsub-adapter.js delete mode 100644 src/record/README.md delete mode 100644 src/record/envelope/envelope.d.ts delete mode 100644 src/record/envelope/envelope.js delete mode 100644 src/record/envelope/envelope.proto delete mode 100644 src/record/envelope/index.js delete mode 100644 src/record/peer-record/consts.js delete mode 100644 src/record/peer-record/index.js delete mode 100644 src/record/peer-record/peer-record.d.ts delete mode 100644 src/record/peer-record/peer-record.js delete mode 100644 src/record/peer-record/peer-record.proto delete 
mode 100644 src/record/utils.js delete mode 100644 src/registrar.js create mode 100644 src/registrar.ts delete mode 100644 src/transport-manager.js create mode 100644 src/transport-manager.ts delete mode 100644 src/types.ts delete mode 100644 src/upgrader.js create mode 100644 src/upgrader.ts create mode 100644 src/version.ts delete mode 100644 test/addresses/address-manager.spec.js create mode 100644 test/addresses/address-manager.spec.ts rename test/addresses/{addresses.node.js => addresses.node.ts} (50%) delete mode 100644 test/addresses/utils.js create mode 100644 test/addresses/utils.ts rename test/configuration/{protocol-prefix.node.js => protocol-prefix.node.ts} (61%) delete mode 100644 test/configuration/pubsub.spec.js create mode 100644 test/configuration/pubsub.spec.ts delete mode 100644 test/configuration/utils.js create mode 100644 test/configuration/utils.ts delete mode 100644 test/connection-manager/auto-dialler.spec.js create mode 100644 test/connection-manager/auto-dialler.spec.ts rename test/connection-manager/{index.node.js => index.node.ts} (53%) delete mode 100644 test/connection-manager/index.spec.js create mode 100644 test/connection-manager/index.spec.ts rename test/content-routing/{content-routing.node.js => content-routing.node.ts} (56%) delete mode 100644 test/content-routing/dht/configuration.node.js create mode 100644 test/content-routing/dht/configuration.node.ts delete mode 100644 test/content-routing/dht/operation.node.js create mode 100644 test/content-routing/dht/operation.node.ts delete mode 100644 test/content-routing/dht/utils.js create mode 100644 test/content-routing/dht/utils.ts delete mode 100644 test/content-routing/utils.js create mode 100644 test/content-routing/utils.ts delete mode 100644 test/core/consume-peer-record.spec.js create mode 100644 test/core/consume-peer-record.spec.ts delete mode 100644 test/core/encryption.spec.js create mode 100644 test/core/encryption.spec.ts rename test/core/{listening.node.js => listening.node.ts} (57%) delete mode 100644 test/core/ping.node.js create mode 100644 test/core/ping.node.ts delete mode 100644 test/dialing/dial-request.spec.js create mode 100644 test/dialing/dial-request.spec.ts delete mode 100644 test/dialing/direct.node.js create mode 100644 test/dialing/direct.node.ts delete mode 100644 test/dialing/direct.spec.js create mode 100644 test/dialing/direct.spec.ts delete mode 100644 test/dialing/resolver.spec.js create mode 100644 test/dialing/resolver.spec.ts rename test/fetch/{fetch.node.js => fetch.node.ts} (72%) rename test/fixtures/{browser.js => browser.ts} (52%) rename test/fixtures/{peers.js => peers.ts} (98%) rename test/fixtures/{swarm.key.js => swarm.key.ts} (60%) delete mode 100644 test/identify/index.spec.js create mode 100644 test/identify/index.spec.ts delete mode 100644 test/insecure/compliance.spec.js create mode 100644 test/insecure/compliance.spec.ts delete mode 100644 test/insecure/plaintext.spec.js create mode 100644 test/insecure/plaintext.spec.ts create mode 100644 test/interop.ts rename test/keychain/{cms-interop.spec.js => cms-interop.spec.ts} (84%) rename test/keychain/{keychain.spec.js => keychain.spec.ts} (52%) rename test/keychain/{peerid.spec.js => peerid.spec.ts} (71%) delete mode 100644 test/metrics/index.node.js create mode 100644 test/metrics/index.node.ts delete mode 100644 test/metrics/index.spec.js create mode 100644 test/metrics/index.spec.ts delete mode 100644 test/nat-manager/nat-manager.node.js create mode 100644 test/nat-manager/nat-manager.node.ts delete mode 
100644 test/peer-discovery/index.node.js create mode 100644 test/peer-discovery/index.node.ts delete mode 100644 test/peer-discovery/index.spec.js create mode 100644 test/peer-discovery/index.spec.ts delete mode 100644 test/peer-routing/peer-routing.node.js create mode 100644 test/peer-routing/peer-routing.node.ts delete mode 100644 test/peer-routing/utils.js create mode 100644 test/peer-routing/utils.ts delete mode 100644 test/peer-store/address-book.spec.js delete mode 100644 test/peer-store/key-book.spec.js delete mode 100644 test/peer-store/metadata-book.spec.js delete mode 100644 test/peer-store/peer-store.node.js delete mode 100644 test/peer-store/peer-store.spec.js delete mode 100644 test/peer-store/proto-book.spec.js delete mode 100644 test/pnet/index.spec.js create mode 100644 test/pnet/index.spec.ts delete mode 100644 test/record/envelope.spec.js delete mode 100644 test/record/peer-record.spec.js delete mode 100644 test/registrar/registrar.spec.js create mode 100644 test/registrar/registrar.spec.ts rename test/relay/{auto-relay.node.js => auto-relay.node.ts} (56%) delete mode 100644 test/relay/relay.node.js create mode 100644 test/relay/relay.node.ts create mode 100644 test/relay/utils.ts delete mode 100644 test/transports/transport-manager.node.js create mode 100644 test/transports/transport-manager.node.ts delete mode 100644 test/transports/transport-manager.spec.js create mode 100644 test/transports/transport-manager.spec.ts delete mode 100644 test/ts-use/package.json delete mode 100644 test/ts-use/src/main.ts delete mode 100644 test/ts-use/tsconfig.json delete mode 100644 test/upgrading/upgrader.spec.js create mode 100644 test/upgrading/upgrader.spec.ts delete mode 100644 test/utils/base-options.browser.js create mode 100644 test/utils/base-options.browser.ts delete mode 100644 test/utils/base-options.js create mode 100644 test/utils/base-options.ts delete mode 100644 test/utils/creators/peer.js create mode 100644 test/utils/creators/peer.ts delete mode 100644 test/utils/mock-connection-gater.js delete mode 100644 test/utils/mockConnection.js delete mode 100644 test/utils/mockCrypto.js delete mode 100644 test/utils/mockMultiaddrConn.js delete mode 100644 test/utils/mockUpgrader.js diff --git a/.aegir.cjs b/.aegir.cjs new file mode 100644 index 00000000..48d03697 --- /dev/null +++ b/.aegir.cjs @@ -0,0 +1,63 @@ +'use strict' + +/** @type {import('aegir').PartialOptions} */ +module.exports = { + build: { + bundlesizeMax: '253kB' + }, + test: { + before: async () => { + const { createLibp2p } = await import('./dist/src/index.js') + const { MULTIADDRS_WEBSOCKETS } = await import('./dist/test/fixtures/browser.js') + const { default: Peers } = await import('./dist/test/fixtures/peers.js') + const { WebSockets } = await import('@libp2p/websockets') + const { Mplex } = await import('@libp2p/mplex') + const { NOISE } = await import('@chainsafe/libp2p-noise') + const { Plaintext } = await import('./dist/src/insecure/index.js') + const { pipe } = await import('it-pipe') + const { createFromJSON } = await import('@libp2p/peer-id-factory') + + // Use the last peer + const peerId = await createFromJSON(Peers[Peers.length - 1]) + const libp2p = await createLibp2p({ + addresses: { + listen: [MULTIADDRS_WEBSOCKETS[0]] + }, + peerId, + transports: [ + new WebSockets() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE, + new Plaintext() + ], + relay: { + enabled: true, + hop: { + enabled: true, + active: false + } + }, + nat: { + enabled: false + } + }) + // Add the 
echo protocol + await libp2p.handle('/echo/1.0.0', ({ stream }) => { + pipe(stream, stream) + .catch() // sometimes connections are closed before multistream-select finishes which causes an error + }) + await libp2p.start() + + return { + libp2p + } + }, + after: async (_, before) => { + await before.libp2p.stop() + } + } +} diff --git a/.aegir.js b/.aegir.js deleted file mode 100644 index 92fe923d..00000000 --- a/.aegir.js +++ /dev/null @@ -1,70 +0,0 @@ -'use strict' - -const path = require('path') -const Libp2p = require('./src') -const { MULTIADDRS_WEBSOCKETS } = require('./test/fixtures/browser') -const Peers = require('./test/fixtures/peers') -const PeerId = require('peer-id') -const WebSockets = require('libp2p-websockets') -const Muxer = require('libp2p-mplex') -const { NOISE: Crypto } = require('@chainsafe/libp2p-noise') -const pipe = require('it-pipe') -let libp2p - -const before = async () => { - // Use the last peer - const peerId = await PeerId.createFromJSON(Peers[Peers.length - 1]) - - libp2p = new Libp2p({ - addresses: { - listen: [MULTIADDRS_WEBSOCKETS[0]] - }, - peerId, - modules: { - transport: [WebSockets], - streamMuxer: [Muxer], - connEncryption: [Crypto] - }, - config: { - relay: { - enabled: true, - hop: { - enabled: true, - active: false - } - }, - nat: { - enabled: false - } - } - }) - // Add the echo protocol - libp2p.handle('/echo/1.0.0', ({ stream }) => pipe(stream, stream)) - - await libp2p.start() -} - -const after = async () => { - await libp2p.stop() -} - -/** @type {import('aegir').Options["build"]["config"]} */ -const esbuild = { - inject: [path.join(__dirname, './scripts/node-globals.js')] -} - -/** @type {import('aegir').PartialOptions} */ -module.exports = { - build: { - bundlesizeMax: '253kB' - }, - test: { - before, - after, - browser: { - config: { - buildConfig: esbuild - } - } - } -} diff --git a/.github/dependabot.yml b/.github/dependabot.yml index de46e326..290ad028 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,5 +4,5 @@ updates: directory: "/" schedule: interval: daily - time: "11:00" + time: "10:00" open-pull-requests-limit: 10 diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 4054a7b0..1e08cff4 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -8,21 +8,8 @@ on: - '**' jobs: - build: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest] - node: [16] - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-node@v2 - with: - node-version: 16 - - uses: ipfs/aegir/actions/cache-node-modules@master check: - needs: build runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -30,15 +17,11 @@ jobs: with: node-version: lts/* - uses: ipfs/aegir/actions/cache-node-modules@master - - run: npx aegir lint - - run: npx aegir dep-check - - uses: ipfs/aegir/actions/bundle-size@master - name: size - with: - github_token: ${{ secrets.GITHUB_TOKEN }} + - run: npm run --if-present lint + - run: npm run --if-present dep-check test-node: - needs: build + needs: check runs-on: ${{ matrix.os }} strategy: matrix: @@ -51,14 +34,14 @@ jobs: with: node-version: ${{ matrix.node }} - uses: ipfs/aegir/actions/cache-node-modules@master - - run: npm run test:node -- --cov --bail + - run: npm run --if-present test:node - uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # v2.1.0 with: directory: ./.nyc_output flags: node test-chrome: - needs: build + needs: check runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -66,14 +49,14 @@ jobs: with: 
node-version: lts/* - uses: ipfs/aegir/actions/cache-node-modules@master - - run: npm run test:browser -- -t browser --cov --bail + - run: npm run --if-present test:chrome - uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # v2.1.0 with: directory: ./.nyc_output flags: chrome test-chrome-webworker: - needs: build + needs: check runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -81,14 +64,14 @@ jobs: with: node-version: lts/* - uses: ipfs/aegir/actions/cache-node-modules@master - - run: npm run test:browser -- -t webworker --bail + - run: npm run --if-present test:chrome-webworker - uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # v2.1.0 with: directory: ./.nyc_output flags: chrome-webworker test-firefox: - needs: build + needs: check runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -96,14 +79,14 @@ jobs: with: node-version: lts/* - uses: ipfs/aegir/actions/cache-node-modules@master - - run: npm run test:browser -- -t browser --bail -- --browser firefox + - run: npm run --if-present test:firefox - uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # v2.1.0 with: directory: ./.nyc_output flags: firefox test-firefox-webworker: - needs: build + needs: check runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -111,14 +94,14 @@ jobs: with: node-version: lts/* - uses: ipfs/aegir/actions/cache-node-modules@master - - run: npm run test:browser -- -t webworker --bail -- --browser firefox + - run: npm run --if-present test:firefox-webworker - uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # v2.1.0 with: directory: ./.nyc_output flags: firefox-webworker - test-ts: - needs: build + test-electron-main: + needs: check runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -126,10 +109,29 @@ jobs: with: node-version: lts/* - uses: ipfs/aegir/actions/cache-node-modules@master - - run: npm run test:ts + - run: npx xvfb-maybe npm run --if-present test:electron-main + - uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # v2.1.0 + with: + directory: ./.nyc_output + flags: electron-main + + test-electron-renderer: + needs: check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v2 + with: + node-version: lts/* + - uses: ipfs/aegir/actions/cache-node-modules@master + - run: npx xvfb-maybe npm run --if-present test:electron-renderer + - uses: codecov/codecov-action@f32b3a3741e1053eb607407145bc9619351dc93b # v2.1.0 + with: + directory: ./.nyc_output + flags: electron-renderer test-interop: - needs: build + needs: check runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -137,11 +139,11 @@ jobs: with: node-version: lts/* - uses: ipfs/aegir/actions/cache-node-modules@master - - run: npm run test:interop -- --bail -- --exit + - run: npm run test:interop -- --bail release: runs-on: ubuntu-latest - needs: [test-node, test-chrome, test-chrome-webworker, test-firefox, test-firefox-webworker, test-ts, test-interop] + needs: [test-node, test-chrome, test-chrome-webworker, test-firefox, test-firefox-webworker, test-electron-main, test-electron-renderer, test-interop] if: github.event_name == 'push' && github.ref == 'refs/heads/master' steps: - uses: GoogleCloudPlatform/release-please-action@v2 diff --git a/LICENSE b/LICENSE index 59a33bab..20ce483c 100644 --- a/LICENSE +++ b/LICENSE @@ -1,22 +1,4 @@ -The MIT License (MIT) - -Copyright (c) 2015 David Dias - -Permission is hereby granted, free of charge, to any person obtaining a 
copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +This project is dual licensed under MIT and Apache-2.0. +MIT: https://www.opensource.org/licenses/mit +Apache-2.0: https://www.apache.org/licenses/license-2.0 diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 00000000..14478a3b --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,5 @@ +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 00000000..72dc60d8 --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/README.md b/README.md index 90f3ac9f..028a4594 100644 --- a/README.md +++ b/README.md @@ -39,26 +39,22 @@ If you are looking for the documentation of the latest release, you can view the **Want to update libp2p in your project?** Check our [migrations folder](./doc/migrations). 
-[**`Weekly Core Dev Calls`**](https://github.com/libp2p/team-mgmt/issues/16) - -## Lead Maintainer - -[Jacob Heun](https://github.com/jacobheun/) - -## Table of Contents +## Table of Contents - [Background](#background) - [Install](#install) - [Usage](#usage) - [Configuration](#configuration) - [API](#api) - - [Getting Started](#getting-started) + - [Getting started](#getting-started) - [Tutorials and Examples](#tutorials-and-examples) - [Development](#development) - [Tests](#tests) + - [Run unit tests](#run-unit-tests) - [Packages](#packages) - [Contribute](#contribute) - [License](#license) + - [Contribution](#contribution) ## Background @@ -123,7 +119,7 @@ You can find multiple examples on the [examples folder](./examples) that will gu > npm run test:node # run just Browser tests (Chrome) -> npm run test:browser +> npm run test:chrome ``` ### Packages @@ -183,4 +179,11 @@ The libp2p implementation in JavaScript is a work in progress. As such, there ar ## License -[MIT](LICENSE) © Protocol Labs +Licensed under either of + + * Apache 2.0, ([LICENSE-APACHE](LICENSE-APACHE) / http://www.apache.org/licenses/LICENSE-2.0) + * MIT ([LICENSE-MIT](LICENSE-MIT) / http://opensource.org/licenses/MIT) + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. diff --git a/doc/API.md b/doc/API.md index fd2cd824..908fff23 100644 --- a/doc/API.md +++ b/doc/API.md @@ -119,23 +119,21 @@ For Libp2p configurations and modules details read the [Configuration Document]( #### Example ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' async function main () { // specify options const options = { - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - } + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()] } // create libp2p - const libp2p = await Libp2p.create(options) + const libp2p = await createLibp2p(options) } main() @@ -149,11 +147,11 @@ As an alternative, it is possible to create a Libp2p instance with the construct #### Example ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') -const PeerId = require('peer-id') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' + async function main () { const peerId = await PeerId.create(); @@ -162,11 +160,9 @@ async function main () { // peerId is required when Libp2p is instantiated via the constructor const options = { peerId, - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - } + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()] } // create libp2p @@ -200,11 +196,11 @@ Load keychain keys from the datastore, importing the private key as 'self', if n #### Example ```js -const Libp2p = require('libp2p') +import { createLibp2p } from 'libp2p' // ... -const libp2p = await Libp2p.create({ +const libp2p = await createLibp2p({ // ... 
keychain: { pass: '0123456789pass1234567890' @@ -230,11 +226,11 @@ Starts the libp2p node. #### Example ```js -const Libp2p = require('libp2p') +import { createLibp2p } from 'libp2p' // ... -const libp2p = await Libp2p.create(options) +const libp2p = await createLibp2p(options) // start libp2p await libp2p.start() @@ -255,10 +251,10 @@ Stops the libp2p node. #### Example ```js -const Libp2p = require('libp2p') +import { createLibp2p } from 'libp2p' // ... -const libp2p = await Libp2p.create(options) +const libp2p = await createLibp2p(options) // ... // stop libp2p @@ -354,7 +350,7 @@ Dials to another peer in the network and selects a protocol to communicate with ```js // ... -const pipe = require('it-pipe') +import { pipe } from 'it-pipe' const { stream, protocol } = await libp2p.dialProtocol(remotePeerId, protocols) diff --git a/doc/CONFIGURATION.md b/doc/CONFIGURATION.md index 0276c014..41cd0e7a 100644 --- a/doc/CONFIGURATION.md +++ b/doc/CONFIGURATION.md @@ -199,9 +199,9 @@ When [creating a libp2p node](./API.md#create), the modules needed should be spe ```js const modules = { - transport: [], - streamMuxer: [], - connEncryption: [], + transports: [], + streamMuxers: [], + connectionEncryption: [], contentRouting: [], peerRouting: [], peerDiscovery: [], @@ -235,67 +235,59 @@ Besides the `modules` and `config`, libp2p allows other internal options and con // dht: kad-dht // pubsub: gossipsub -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const WS = require('libp2p-websockets') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') -const MulticastDNS = require('libp2p-mdns') -const DHT = require('libp2p-kad-dht') -const GossipSub = require('libp2p-gossipsub') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { WebSockets } from '@libp2p/websockets' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { MulticastDNS } from '@libp2p/mdns' +import { KadDHT } from '@libp2p/kad-dht' +import { GossipSub } from 'libp2p-gossipsub' -const node = await Libp2p.create({ - modules: { - transport: [ - TCP, - new WS() // It can take instances too! - ], - streamMuxer: [MPLEX], - connEncryption: [NOISE], - peerDiscovery: [MulticastDNS], - dht: DHT, - pubsub: GossipSub - } +const node = await createLibp2p({ + transports: [ + TCP, + new WS() // It can take instances too! + ], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], + peerDiscovery: [MulticastDNS], + dht: DHT, + pubsub: GossipSub }) ``` #### Customizing Peer Discovery ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') -const MulticastDNS = require('libp2p-mdns') -const Bootstrap = require('libp2p-bootstrap') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { MulticastDNS } from '@libp2p/mdns' +import { Bootstrap } from '@libp2p/bootstrap' -const node = await Libp2p.create({ - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE], - peerDiscovery: [MulticastDNS, Bootstrap] - }, - config: { - peerDiscovery: { - autoDial: true, // Auto connect to discovered peers (limited by ConnectionManager minConnections) - // The `tag` property will be searched when creating the instance of your Peer Discovery service. 
- // The associated object, will be passed to the service when it is instantiated. - [MulticastDNS.tag]: { - interval: 1000, - enabled: true - }, - [Bootstrap.tag]: { - list: [ // A list of bootstrap peers to connect to starting up the node - "/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", - "/dnsaddr/bootstrap.libp2p.io/ipfs/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", - "/dnsaddr/bootstrap.libp2p.io/ipfs/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", - ], - interval: 2000, - enabled: true - } - // .. other discovery module options. - } +const node = await createLibp2p({ + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], + peerDiscovery: [ + new MulticastDNS({ + interval: 1000 + }), + new Bootstrap( + list: [ // A list of bootstrap peers to connect to starting up the node + "/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", + "/dnsaddr/bootstrap.libp2p.io/ipfs/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", + "/dnsaddr/bootstrap.libp2p.io/ipfs/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa", + ], + interval: 2000 + ) + ], + connectionManager: { + autoDial: true // Auto connect to discovered peers (limited by ConnectionManager minConnections) + // The `tag` property will be searched when creating the instance of your Peer Discovery service. + // The associated object, will be passed to the service when it is instantiated. } }) ``` @@ -303,56 +295,50 @@ const node = await Libp2p.create({ #### Setup webrtc transport and discovery ```js +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' +import { WebRTCStar } from '@libp2p/webrtc-star' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' -const Libp2p = require('libp2p') -const WS = require('libp2p-websockets') -const WebRTCStar = require('libp2p-webrtc-star') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') - -const node = await Libp2p.create({ - modules: { - transport: [ - WS, - WebRTCStar - ], - streamMuxer: [MPLEX], - connEncryption: [NOISE], - }, - config: { - peerDiscovery: { - [WebRTCStar.tag]: { - enabled: true - } - } - } +const node = await createLibp2p({ + transports: [ + new WebSockets(), + new WebRTCStar() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Noise() + ] }) ``` #### Customizing Pubsub ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') -const GossipSub = require('libp2p-gossipsub') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { GossipSub } from 'libp2p-gossipsub' +import { SignaturePolicy } from '@libp2p/interfaces/pubsub' -const { SignaturePolicy } = require('libp2p-interfaces/src/pubsub/signature-policy') - -const node = await Libp2p.create({ - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE], - pubsub: GossipSub - }, - config: { - pubsub: { // The pubsub options (and defaults) can be found in the pubsub router documentation - enabled: true, +const node = await createLibp2p({ + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Noise() + ], + pubsub: new GossipSub({ emitSelf: false, // whether the node should emit to self on publish globalSignaturePolicy: 
SignaturePolicy.StrictSign // message signing policy - } + }) } }) ``` @@ -360,64 +346,66 @@ const node = await Libp2p.create({ #### Customizing DHT ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') -const DHT = require('libp2p-kad-dht') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { KadDHT } from '@libp2p/kad-dht' -const node = await Libp2p.create({ - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE], - dht: DHT - }, - config: { - dht: { // The DHT options (and defaults) can be found in its documentation - kBucketSize: 20, - enabled: true, // This flag is required for DHT to run (disabled by default) - clientMode: false // Whether to run the WAN DHT in client or server mode (default: client mode) - } - } +const node = await createLibp2p({ + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Noise() + ], + dht: new KadDHT({ + kBucketSize: 20, + clientMode: false // Whether to run the WAN DHT in client or server mode (default: client mode) + }) }) ``` #### Setup with Content and Peer Routing ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') -const ipfsHttpClient = require('ipfs-http-client') -const DelegatedPeerRouter = require('libp2p-delegated-peer-routing') -const DelegatedContentRouter = require('libp2p-delegated-content-routing') -const PeerId = require('peer-id') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { create as ipfsHttpClient } from 'ipfs-http-client' +import { DelegatedPeerRouting } from '@libp2p/delegated-peer-routing' +import { DelegatedContentRouting} from '@libp2p/delegated-content-routing' + // create a peerId const peerId = await PeerId.create() -const delegatedPeerRouting = new DelegatedPeerRouter(ipfsHttpClient.create({ +const delegatedPeerRouting = new DelegatedPeerRouting(ipfsHttpClient.create({ host: 'node0.delegate.ipfs.io', // In production you should setup your own delegates protocol: 'https', port: 443 })) -const delegatedContentRouting = new DelegatedContentRouter(peerId, ipfsHttpClient.create({ +const delegatedContentRouting = new DelegatedContentRouting(peerId, ipfsHttpClient.create({ host: 'node0.delegate.ipfs.io', // In production you should setup your own delegates protocol: 'https', port: 443 })) -const node = await Libp2p.create({ - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE], - contentRouting: [delegatedContentRouting], - peerRouting: [delegatedPeerRouting], - }, +const node = await createLibp2p({ + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], + contentRouting: [ + delegatedContentRouting + ], + peerRouting: [ + delegatedPeerRouting + ], peerId, peerRouting: { // Peer routing configuration refreshManager: { // Refresh known and connected closest peers @@ -432,29 +420,25 @@ const node = await Libp2p.create({ #### Setup with Relay ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') +import { createLibp2p } from 'libp2p' +import { 
TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' -const node = await Libp2p.create({ - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - }, - config: { - relay: { // Circuit Relay options (this config is part of libp2p core configurations) - enabled: true, // Allows you to dial and accept relayed connections. Does not make you a relay. - hop: { - enabled: true, // Allows you to be a relay for other peers - active: true // You will attempt to dial destination peers if you are not connected to them - }, - advertise: { - bootDelay: 15 * 60 * 1000, // Delay before HOP relay service is advertised on the network - enabled: true, // Allows you to disable the advertise of the Hop service - ttl: 30 * 60 * 1000 // Delay Between HOP relay service advertisements on the network - } +const node = await createLibp2p({ + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], + relay: { // Circuit Relay options (this config is part of libp2p core configurations) + enabled: true, // Allows you to dial and accept relayed connections. Does not make you a relay. + hop: { + enabled: true, // Allows you to be a relay for other peers + active: true // You will attempt to dial destination peers if you are not connected to them + }, + advertise: { + bootDelay: 15 * 60 * 1000, // Delay before HOP relay service is advertised on the network + enabled: true, // Allows you to disable the advertise of the Hop service + ttl: 30 * 60 * 1000 // Delay Between HOP relay service advertisements on the network } } }) @@ -463,24 +447,20 @@ const node = await Libp2p.create({ #### Setup with Auto Relay ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' -const node = await Libp2p.create({ - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - }, - config: { - relay: { // Circuit Relay options (this config is part of libp2p core configurations) - enabled: true, // Allows you to dial and accept relayed connections. Does not make you a relay. - autoRelay: { - enabled: true, // Allows you to bind to relays with HOP enabled for improving node dialability - maxListeners: 2 // Configure maximum number of HOP relays to use - } +const node = await createLibp2p({ + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()] + relay: { // Circuit Relay options (this config is part of libp2p core configurations) + enabled: true, // Allows you to dial and accept relayed connections. Does not make you a relay. + autoRelay: { + enabled: true, // Allows you to bind to relays with HOP enabled for improving node dialability + maxListeners: 2 // Configure maximum number of HOP relays to use } } }) @@ -496,21 +476,19 @@ Libp2p allows you to setup a secure keychain to manage your keys. 
The keychain c | datastore | `object` | must implement [ipfs/interface-datastore](https://github.com/ipfs/interface-datastore) | ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') -const { LevelDatastore } = require('datastore-level') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { LevelDatastore } from 'datastore-level' const datastore = new LevelDatastore('path/to/store') await datastore.open() -const node = await Libp2p.create({ - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - }, +const node = await createLibp2p({ + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], keychain: { pass: 'notsafepassword123456789', datastore: dsInstant, @@ -536,20 +514,18 @@ Dialing in libp2p can be configured to limit the rate of dialing, and how long d The below configuration example shows how the dialer should be configured, with the current defaults: ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' -const { dnsaddrResolver } = require('multiaddr/src/resolvers') -const { publicAddressesFirst } = require('libp2p-utils/src/address-sort') +import { dnsaddrResolver } from '@multiformats/multiaddr/resolvers' +import { publicAddressesFirst } from '@libp2p-utils/address-sort' -const node = await Libp2p.create({ - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - }, +const node = await createLibp2p({ + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], dialer: { maxParallelDials: 100, maxAddrsToDial: 25, @@ -567,17 +543,15 @@ const node = await Libp2p.create({ The Connection Manager prunes Connections in libp2p whenever certain limits are exceeded. If Metrics are enabled, you can also configure the Connection Manager to monitor the bandwidth of libp2p and prune connections as needed. You can read more about what Connection Manager does at [./CONNECTION_MANAGER.md](./CONNECTION_MANAGER.md). The configuration values below show the defaults for Connection Manager. See [./CONNECTION_MANAGER.md](./CONNECTION_MANAGER.md#options) for a full description of the parameters. ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' -const node = await Libp2p.create({ - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - }, +const node = await createLibp2p({ + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], connectionManager: { maxConnections: Infinity, minConnections: 0, @@ -615,7 +589,7 @@ The order in which methods are called is as follows: 3. `connectionGater.denyInboundUpgradedConnection(...)` ```js -const node = await Libp2p.create({ +const node = await createLibp2p({ // .. 
other config connectionGater: { /** @@ -719,19 +693,17 @@ const node = await Libp2p.create({ The Transport Manager is responsible for managing the libp2p transports life cycle. This includes starting listeners for the provided listen addresses, closing these listeners and dialing using the provided transports. By default, if a libp2p node has a list of multiaddrs for listening on and there are no valid transports for those multiaddrs, libp2p will throw an error on startup and shutdown. However, for some applications it is perfectly acceptable for libp2p nodes to start in dial only mode if all the listen multiaddrs failed. This error tolerance can be enabled as follows: ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' -const { FaultTolerance } = require('libp2p/src/transport-manager') +const { FaultTolerance } from 'libp2p/src/transport-manager') -const node = await Libp2p.create({ - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - }, +const node = await createLibp2p({ + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], transportManager: { faultTolerance: FaultTolerance.NO_FATAL } @@ -753,17 +725,15 @@ Metrics are disabled in libp2p by default. You can enable and configure them as The below configuration example shows how the metrics should be configured. Aside from enabled being `false` by default, the following default configuration options are listed below: ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' -const node = await Libp2p.create({ - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - }, +const node = await createLibp2p({ + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()] metrics: { enabled: true, computeThrottleMaxQueueSize: 1000, @@ -792,22 +762,20 @@ The threshold number represents the maximum number of "dirty peers" allowed in t The below configuration example shows how the PeerStore should be configured. 
Aside from persistence being `false` by default, the following default configuration options are listed below: ```js -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') -const LevelDatastore = require('datastore-level') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { LevelDatastore } from 'datastore-level' const datastore = new LevelDatastore('path/to/store') await datastore.open() // level database must be ready before node boot -const node = await Libp2p.create({ +const node = await createLibp2p({ datastore, // pass the opened datastore - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - }, + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], peerStore: { persistence: true, threshold: 5 @@ -820,19 +788,23 @@ const node = await Libp2p.create({ Some Transports can be passed additional options when they are created. For example, `libp2p-webrtc-star` accepts an optional, custom `wrtc` implementation. In addition to libp2p passing itself and an `Upgrader` to handle connection upgrading, libp2p will also pass the options, if they are provided, from `config.transport`. ```js -const Libp2p = require('libp2p') -const WebRTCStar = require('libp2p-webrtc-star') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') -const wrtc = require('wrtc') +import { createLibp2p } from 'libp2p' +import { WebRTCStar } from '@libp2p/webrtc-star' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import wrtc from 'wrtc' const transportKey = WebRTCStar.prototype[Symbol.toStringTag] -const node = await Libp2p.create({ - modules: { - transport: [WebRTCStar], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - }, +const node = await createLibp2p({ + transports: [ + new WebRTCStar() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Noise() + ], config: { transport: { [transportKey]: { @@ -847,12 +819,16 @@ During Libp2p startup, transport listeners will be created for the configured li ```js const transportKey = WebRTCStar.prototype[Symbol.toStringTag] -const node = await Libp2p.create({ - modules: { - transport: [WebRTCStar], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - }, +const node = await createLibp2p({ + transports: [ + new WebRTCStar() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Noise() + ], addresses: { listen: ['/dns4/your-wrtc-star.pub/tcp/443/wss/p2p-webrtc-star'] // your webrtc dns multiaddr }, @@ -879,18 +855,16 @@ Network Address Translation (NAT) is a function performed by your router to enab The NAT manager can be configured as follows: ```js -const node = await Libp2p.create({ +const node = await createLibp2p({ config: { nat: { description: 'my-node', // set as the port mapping description on the router, defaults the current libp2p version and your peer id enabled: true, // defaults to true gateway: '192.168.1.1', // leave unset to auto-discover externalIp: '80.1.1.1', // leave unset to auto-discover + localAddress: '129.168.1.123', // leave unset to auto-discover ttl: 7200, // TTL for port mappings (min 20 minutes) keepAlive: true, // Refresh port mapping after TTL expires - pmp: { - enabled: false, // defaults to false - } } } }) @@ -911,10 +885,8 @@ By default under nodejs libp2p 
will attempt to use [UPnP](https://en.wikipedia.o Changing the protocol name prefix can isolate default public network (IPFS) for custom purposes. ```js -const node = await Libp2p.create({ - config: { - protocolPrefix: 'ipfs' // default - } +const node = await createLibp2p({ + protocolPrefix: 'ipfs' // default }) /* protocols: [ @@ -925,7 +897,6 @@ protocols: [ */ ``` - ## Configuration examples As libp2p is designed to be a modular networking library, its usage will vary based on individual project needs. We've included links to some existing project configurations for your reference, in case you wish to replicate their configuration: diff --git a/doc/GETTING_STARTED.md b/doc/GETTING_STARTED.md index fd180e84..c6169f1f 100644 --- a/doc/GETTING_STARTED.md +++ b/doc/GETTING_STARTED.md @@ -12,7 +12,6 @@ Welcome to libp2p! This guide will walk you through setting up a fully functiona - [Running Libp2p](#running-libp2p) - [Custom setup](#custom-setup) - [Peer Discovery](#peer-discovery) - - [Pubsub](#pubsub) - [What is next](#what-is-next) ## Install @@ -46,13 +45,11 @@ npm install libp2p-websockets Now that we have the module installed, let's configure libp2p to use the Transport. We'll use the [`Libp2p.create`](./API.md#create) method, which takes a single configuration object as its only parameter. We can add the Transport by passing it into the `modules.transport` array: ```js -const Libp2p = require('libp2p') -const WebSockets = require('libp2p-websockets') +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' -const node = await Libp2p.create({ - modules: { - transport: [WebSockets] - } +const node = await createLibp2p({ + transports: [new WebSockets()] }) ``` @@ -78,15 +75,13 @@ npm install libp2p-noise With `libp2p-noise` installed, we can add it to our existing configuration by importing it and adding it to the `modules.connEncryption` array: ```js -const Libp2p = require('libp2p') -const WebSockets = require('libp2p-websockets') -const { NOISE } = require('libp2p-noise') +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' +import { Noise } from '@chainsafe/libp2p-noise' -const node = await Libp2p.create({ - modules: { - transport: [WebSockets], - connEncryption: [NOISE] - } +const node = await createLibp2p({ + transports: [new WebSockets()], + connectionEncryption: [new Noise()] }) ``` @@ -110,17 +105,15 @@ npm install libp2p-mplex ``` ```js -const Libp2p = require('libp2p') -const WebSockets = require('libp2p-websockets') -const { NOISE } = require('libp2p-noise') -const MPLEX = require('libp2p-mplex') +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' -const node = await Libp2p.create({ - modules: { - transport: [WebSockets], - connEncryption: [NOISE], - streamMuxer: [MPLEX] - } +const node = await createLibp2p({ + transports: [new WebSockets()], + connectionEncryption: [new Noise()], + streamMuxers: [new Mplex()] }) ``` @@ -137,20 +130,18 @@ If you want to know more about libp2p stream multiplexing, you should read the f Now that you have configured a [**Transport**][transport], [**Crypto**][crypto] and [**Stream Multiplexer**](streamMuxer) module, you can start your libp2p node. We can start and stop libp2p using the [`libp2p.start()`](./API.md#start) and [`libp2p.stop()`](./API.md#stop) methods. 
```js -const Libp2p = require('libp2p') -const WebSockets = require('libp2p-websockets') -const { NOISE } = require('libp2p-noise') -const MPLEX = require('libp2p-mplex') +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' -const node = await Libp2p.create({ +const node = await createLibp2p({ addresses: { listen: ['/ip4/127.0.0.1/tcp/8000/ws'] }, - modules: { - transport: [WebSockets], - connEncryption: [NOISE], - streamMuxer: [MPLEX] - } + transports: [new WebSockets()], + connectionEncryption: [new Noise()], + streamMuxers: [new Mplex()] }) // start libp2p @@ -195,12 +186,12 @@ npm install libp2p-bootstrap We can provide specific configurations for each protocol within a `config.peerDiscovery` property in the options as shown below. ```js -const Libp2p = require('libp2p') -const WebSockets = require('libp2p-websockets') -const { NOISE } = require('libp2p-noise') -const MPLEX = require('libp2p-mplex') +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' -const Bootstrap = require('libp2p-bootstrap') +import { Bootstrap } from '@libp2p/bootstrap' // Known peers addresses const bootstrapMultiaddrs = [ @@ -208,23 +199,25 @@ const bootstrapMultiaddrs = [ '/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN' ] -const node = await Libp2p.create({ - modules: { - transport: [WebSockets], - connEncryption: [NOISE], - streamMuxer: [MPLEX], - peerDiscovery: [Bootstrap] - }, - config: { - peerDiscovery: { - autoDial: true, // Auto connect to discovered peers (limited by ConnectionManager minConnections) - // The `tag` property will be searched when creating the instance of your Peer Discovery service. - // The associated object, will be passed to the service when it is instantiated. - [Bootstrap.tag]: { - enabled: true, - list: bootstrapMultiaddrs // provide array of multiaddrs - } - } +const node = await createLibp2p({ + transports: [ + new WebSockets() + ], + connectionEncryption: [ + new Noise() + ], + streamMuxers: [ + new Mplex() + ], + peerDiscovery: [ + new Bootstrap({ + list: bootstrapMultiaddrs // provide array of multiaddrs + }) + ], + connectionManager: { + autoDial: true, // Auto connect to discovered peers (limited by ConnectionManager minConnections) + // The `tag` property will be searched when creating the instance of your Peer Discovery service. + // The associated object, will be passed to the service when it is instantiated. } }) diff --git a/doc/PEER_DISCOVERY.md b/doc/PEER_DISCOVERY.md index 65f93e19..4b9aea09 100644 --- a/doc/PEER_DISCOVERY.md +++ b/doc/PEER_DISCOVERY.md @@ -3,7 +3,7 @@ **Synopsis**: * All peers discovered are emitted via `peer:discovery` so applications can take any desired action. * Libp2p defaults to automatically connecting to new peers, when under the [ConnectionManager](https://github.com/libp2p/js-libp2p-connection-manager) low watermark (minimum peers). - * Applications can disable this via the `peerDiscovery.autoDial` config property, and handle connections themselves. + * Applications can disable this via the `connectionManager.autoDial` config property, and handle connections themselves. * Applications who have not disabled this should **never** connect on peer discovery. Applications should use the `peer:connect` event if they wish to take a specific action on new peers. 
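For illustration, here is a rough sketch of what handling discovery yourself can look like once `autoDial` is disabled. It reuses the module names, events and public bootstrap address shown in the examples in this repository; treat it as a starting point rather than a complete recipe.

```js
import { createLibp2p } from 'libp2p'
import { TCP } from '@libp2p/tcp'
import { Mplex } from '@libp2p/mplex'
import { Noise } from '@chainsafe/libp2p-noise'
import { Bootstrap } from '@libp2p/bootstrap'

const node = await createLibp2p({
  transports: [new TCP()],
  streamMuxers: [new Mplex()],
  connectionEncryption: [new Noise()],
  peerDiscovery: [
    new Bootstrap({
      list: [
        '/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN'
      ]
    })
  ],
  connectionManager: {
    // do not dial discovered peers automatically, the application decides
    autoDial: false
  }
})

// every discovered peer is emitted via the `peer:discovery` event
node.addEventListener('peer:discovery', (evt) => {
  const peer = evt.detail

  // apply your own policy here, e.g. dial straight away
  node.dial(peer.id).catch((err) => {
    console.error(`could not dial ${peer.id.toString()}`, err)
  })
})

await node.start()
```

Dialing from the discovery handler is only one option; an application can just as well record discovered peers and connect later based on its own criteria.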
 ## Scenarios
diff --git a/doc/STREAMING_ITERABLES.md b/doc/STREAMING_ITERABLES.md
index 5804ea48..fe6a349d 100644
--- a/doc/STREAMING_ITERABLES.md
+++ b/doc/STREAMING_ITERABLES.md
@@ -22,8 +22,8 @@ Sometimes you may need to wrap an existing duplex stream in order to perform incoming and outgoing [transforms](#transform) on data. This type of wrapping is commonly used in stream encryption/decryption. Using [it-pair][it-pair] and [it-pipe][it-pipe], we can do this rather easily, given an existing [duplex iterable](#duplex).
 
 ```js
-const duplexPair = require('it-pair/duplex')
-const pipe = require('it-pipe')
+import { duplexPair } from 'it-pair/duplex'
+import { pipe } from 'it-pipe'
 
 // Wrapper is what we will write and read from
 // This gives us two duplex iterables that are internally connected
diff --git a/doc/migrations/v0.26-v0.27.md b/doc/migrations/v0.26-v0.27.md
index b7d3acaf..67dd44c6 100644
--- a/doc/migrations/v0.26-v0.27.md
+++ b/doc/migrations/v0.26-v0.27.md
@@ -4,16 +4,18 @@ A migration guide for refactoring your application code from libp2p v0.26.x to v0.27.x.
 
 ## Table of Contents
 
-- [Migrating from callbacks](#migrating-from-callbacks)
-- [Pull Streams to Streaming Iterables](#pull-streams-to-streaming-iterables)
-- [Sample API Migrations](#sample-api-migrations)
-  - [Registering Protocol Handlers](#registering-protocol-handlers)
-  - [Dialing and Sending Data](#dialing-and-sending-data)
-  - [Checking if a peer is connected](#checking-if-a-peer-is-connected)
-  - [Pinging another peer](#pinging-another-peer)
-  - [Pubsub](#pubsub)
-  - [Getting subscribers](#getting-subscribers)
-  - [Getting subscribed topics](#getting-subscribed-topics)
+- [Migrating to the libp2p@0.27 API](#migrating-to-the-libp2p027-api)
+  - [Table of Contents](#table-of-contents)
+  - [Migrating from callbacks](#migrating-from-callbacks)
+  - [Pull Streams to Streaming Iterables](#pull-streams-to-streaming-iterables)
+  - [Sample API Migrations](#sample-api-migrations)
+    - [Registering Protocol Handlers](#registering-protocol-handlers)
+    - [Dialing and Sending Data](#dialing-and-sending-data)
+    - [Checking if a peer is connected](#checking-if-a-peer-is-connected)
+    - [Pinging another peer](#pinging-another-peer)
+    - [Pubsub](#pubsub)
+      - [Getting subscribers](#getting-subscribers)
+      - [Getting subscribed topics](#getting-subscribed-topics)
 
 ## Migrating from callbacks
 
@@ -47,13 +49,13 @@ Protocol registration is very similar to how it previously was, however, the han
 
 **Before**
 ```js
-const pull = require('pull-stream')
+import pull from 'pull-stream'
 libp2p.handle('/echo/1.0.0', (protocol, conn) => pull(conn, conn))
 ```
 
 **After**
 ```js
-const pipe = require('it-pipe')
+import { pipe } from 'it-pipe'
 libp2p.handle(['/echo/1.0.0'], ({ protocol, stream }) => pipe(stream, stream))
 ```
 
@@ -63,7 +65,7 @@
 
 **Before**
 ```js
-const pull = require('pull-stream')
+import pull from 'pull-stream'
 libp2p.dialProtocol(peerInfo, '/echo/1.0.0', (err, conn) => {
   if (err) { throw err }
   pull(
@@ -80,7 +82,7 @@
 
 **After**
 ```js
-const pipe = require('it-pipe')
+import { pipe } from 'it-pipe'
 const { protocol, stream } = await libp2p.dialProtocol(peerInfo, '/echo/1.0.0')
 await pipe(
   ['hey'],
diff --git a/doc/migrations/v0.27-v0.28.md b/doc/migrations/v0.27-v0.28.md
index 67c9882b..23c8f9d5 100644
--- a/doc/migrations/v0.27-v0.28.md
+++ b/doc/migrations/v0.27-v0.28.md
@@ -16,7 +16,7 @@ A migration guide for 
refactoring your application code from libp2p v0.27.x to v In `libp2p@0.27` we integrated the PeerStore (former [peer-book](https://github.com/libp2p/js-peer-book)) into the codebase. By that time, it was not documented in the [API DOC](../API.md) since it kept the same API as the `peer-book` and it was expected to be completelly rewritten in `libp2p@0.28`. -Moving towards a separation of concerns regarding known peers' data, as well as enabling PeerStore persistence, the PeerStore is now divided into four main components: `AddressBook`, `ProtoBook`, `KeyBook` and `MetadataBook`. This resulted in API changes in the PeerStore, since each type of peer data should now be added in an atomic fashion. +Moving towards a separation of concerns regarding known peers' data, as well as enabling PeerStore persistence, the PeerStore is now divided into four main components: `AddressBook`, `ProtoBook`, `KeyBook` and `MetadataBook`. This resulted in API changes in the PeerStore, since each type of peer data should now be added in an atomic fashion. ### Adding a Peer @@ -109,7 +109,7 @@ const peers = libp2p.peerStore.peers Since this PeerInfo instances were navigating through the entire codebases, some data inconsistencies could be observed in libp2p. Different libp2p subsystems were running with different visions of the known peers data. For instance, a libp2p subsystem receives a copy of this instance with the peer multiaddrs and protocols, but if new data of the peer is obtained from other subsystem, it would not be updated on the former. Moreover, considering that several subsystems were modifying the peer data, libp2p had no way to determine the accurate data. -Considering the complete revamp of the libp2p PeerStore towards its second version, the PeerStore now acts as the single source of truth, we do not need to carry [`PeerInfo`][peer-info] instances around. This also solves all the problems stated above, since subsystems will report new observations to the PeerStore. +Considering the complete revamp of the libp2p PeerStore towards its second version, the PeerStore now acts as the single source of truth, we do not need to carry [`PeerInfo`][peer-info] instances around. This also solves all the problems stated above, since subsystems will report new observations to the PeerStore. ### Create @@ -211,7 +211,7 @@ await libp2p.start() #### Peer Dialing, Hangup and Ping -`libp2p.dial`, `libp2p.dialProtocol`, `libp2p.hangup` and `libp2p.ping` supported as the target parameter a [`PeerInfo`](peer-info), a [`PeerId`](peer-id), a [`Multiaddr`][multiaddr] and a string representation of the multiaddr. Considering that [`PeerInfo`](peer-info) is being removed from libp2p, all these methods will now support the other 3 possibilities. +`libp2p.dial`, `libp2p.dialProtocol`, `libp2p.hangup` and `libp2p.ping` supported as the target parameter a [`PeerInfo`](peer-info), a [`PeerId`](peer-id), a [`Multiaddr`][multiaddr] and a string representation of the multiaddr. Considering that [`PeerInfo`](peer-info) is being removed from libp2p, all these methods will now support the other 3 possibilities. There is one relevant aspect to consider with this change. When using a [`PeerId`](peer-id), the PeerStore **MUST** have known addresses for that peer in its AddressBook, so that it can perform the request. This was also true in the past, but it is important pointing it out because it might not be enough to switch from using [`PeerInfo`](peer-info) to [`PeerId`](peer-id). 
When using a [`PeerInfo`](peer-info), the PeerStore was not required to have the multiaddrs when they existed on the PeerInfo instance. diff --git a/doc/migrations/v0.28-v0.29.md b/doc/migrations/v0.28-v0.29.md index 131b97ae..18568652 100644 --- a/doc/migrations/v0.28-v0.29.md +++ b/doc/migrations/v0.28-v0.29.md @@ -46,18 +46,18 @@ Publish uses `Uint8Array` data instead of `Buffer`. const topic = 'topic' const data = Buffer.from('data') -await libp2p.pubsub.publish(topic, data) +await libp2p.pubsub.publish(topic, data) ``` **After** ```js -const uint8ArrayFromString = require('uint8arrays/from-string') +const uint8ArrayFromString from 'uint8arrays/from-string') const topic = 'topic' const data = uint8ArrayFromString('data') -await libp2p.pubsub.publish(topic, data) +await libp2p.pubsub.publish(topic, data) ``` #### Subscribe @@ -79,7 +79,7 @@ libp2p.pubsub.subscribe(topic, handler) **After** ```js -const uint8ArrayToString = require('uint8arrays/to-string') +const uint8ArrayToString from 'uint8arrays/to-string') const topic = 'topic' const handler = (msg) => { @@ -106,7 +106,7 @@ libp2p.pubsub.subscribe(topics, handler) **After** ```js -const uint8ArrayToString = require('uint8arrays/to-string') +const uint8ArrayToString from 'uint8arrays/to-string') const topics = ['a', 'b'] const handler = (msg) => { @@ -177,8 +177,8 @@ Aiming to improve libp2p browser support, we are moving away from node core modu We use the [uint8arrays](https://www.npmjs.com/package/uint8arrays) utilities module to deal with `Uint8Arrays` easily and we recommend its usage in the application layer. Thanks for the module [@achingbrain](https://github.com/achingbrain)! It includes utilities like `compare`, `concat`, `equals`, `fromString` and `toString`. In this migration examples, we will be using the following: ```js -const uint8ArrayFromString = require('uint8arrays/from-string') -const uint8ArrayToString = require('uint8arrays/to-string') +const uint8ArrayFromString from 'uint8arrays/from-string') +const uint8ArrayToString from 'uint8arrays/to-string') ``` #### contentRouting.put diff --git a/doc/migrations/v0.29-v0.30.md b/doc/migrations/v0.29-v0.30.md index ffec86f9..2d90ee0f 100644 --- a/doc/migrations/v0.29-v0.30.md +++ b/doc/migrations/v0.29-v0.30.md @@ -5,9 +5,13 @@ A migration guide for refactoring your application code from libp2p v0.29.x to v ## Table of Contents -- [API](#api) -- [Development and Testing](#development-and-testing) -- [Module Updates](#module-updates) +- [Migrating to libp2p@30](#migrating-to-libp2p30) + - [Table of Contents](#table-of-contents) + - [API](#api) + - [Pubsub](#pubsub) + - [Addresses](#addresses) + - [Development and Testing](#development-and-testing) + - [Module Updates](#module-updates) ## API @@ -20,8 +24,8 @@ Now `js-libp2p` does not overwrite the pubsub router options anymore. 
Upstream p **Before** ```js -const Gossipsub = require('libp2p-gossipsub') -const Libp2p = require('libp2p') +const Gossipsub from 'libp2p-gossipsub') +const Libp2p from 'libp2p') const libp2p = await Libp2p.create({ modules: { @@ -34,8 +38,8 @@ const libp2p = await Libp2p.create({ **After** ```js -const Gossipsub = require('libp2p-gossipsub') -const Libp2p = require('libp2p') +const Gossipsub from 'libp2p-gossipsub') +const Libp2p from 'libp2p') const libp2p = await Libp2p.create({ modules: { @@ -57,8 +61,8 @@ The signing property is now based on a `globalSignaturePolicy` option instead of **Before** ```js -const Gossipsub = require('libp2p-gossipsub') -const Libp2p = require('libp2p') +const Gossipsub from 'libp2p-gossipsub') +const Libp2p from 'libp2p') const libp2p = await Libp2p.create({ modules: { @@ -77,9 +81,9 @@ const libp2p = await Libp2p.create({ **After** ```js -const Gossipsub = require('libp2p-gossipsub') -const { SignaturePolicy } = require('libp2p-interfaces/src/pubsub/signature-policy') -const Libp2p = require('libp2p') +const Gossipsub from 'libp2p-gossipsub') +const { SignaturePolicy } from 'libp2p-interfaces/src/pubsub/signature-policy') +const Libp2p from 'libp2p') const libp2p = await Libp2p.create({ modules: { @@ -101,7 +105,7 @@ Libp2p has supported `noAnnounce` addresses configuration for some time now. How **Before** ```js -const Libp2p = require('libp2p') +const Libp2p from 'libp2p') const libp2p = await Libp2p.create({ addresses: { @@ -115,10 +119,10 @@ const libp2p = await Libp2p.create({ **After** ```js -const Libp2p = require('libp2p') +const Libp2p from 'libp2p') // Libp2p utils has several multiaddr utils you can leverage -const isPrivate = require('libp2p-utils/src/multiaddr/is-private') +const isPrivate from 'libp2p-utils/src/multiaddr/is-private') const libp2p = await Libp2p.create({ addresses: { @@ -131,7 +135,7 @@ const libp2p = await Libp2p.create({ ``` It is important pointing out another change regarding address advertising. This is not an API breaking change, but it might have influence on your libp2p setup. -Previously, when using the addresses `announce` property, its multiaddrs were concatenated with the `listen` multiaddrs and then they were filtered out by the `noAnnounce` multiaddrs, in order to create the list of multiaddrs to advertise. +Previously, when using the addresses `announce` property, its multiaddrs were concatenated with the `listen` multiaddrs and then they were filtered out by the `noAnnounce` multiaddrs, in order to create the list of multiaddrs to advertise. 
In `libp2p@0.30` the logic now operates as follows: - If `announce` addresses are provided, only they will be announced (no filters are applied) @@ -145,9 +149,9 @@ While this is not an API breaking change, there was a behavioral breaking change With this new behavior, if you need to use non DNS addresses, you can configure your libp2p node as follows: ```js -const Websockets = require('libp2p-websockets') -const filters = require('libp2p-websockets/src/filters') -const Libp2p = require('libp2p') +const Websockets from 'libp2p-websockets') +const filters from 'libp2p-websockets/src/filters') +const Libp2p from 'libp2p') const transportKey = Websockets.prototype[Symbol.toStringTag] const libp2p = await Libp2p.create({ @@ -170,7 +174,7 @@ const libp2p = await Libp2p.create({ With this release you should update the following libp2p modules if you are relying on them: diff --git a/doc/migrations/v0.30-v0.31.md b/doc/migrations/v0.30-v0.31.md index 8ccb12d1..9065adc5 100644 --- a/doc/migrations/v0.30-v0.31.md +++ b/doc/migrations/v0.30-v0.31.md @@ -100,7 +100,7 @@ const keychain = new Keychain(datastore, { With this release you should update the following libp2p modules if you are relying on them: diff --git a/examples/auto-relay/README.md b/examples/auto-relay/README.md index 0f88b758..e03d4939 100644 --- a/examples/auto-relay/README.md +++ b/examples/auto-relay/README.md @@ -16,31 +16,27 @@ In the first step of this example, we need to configure and run a relay node in The relay node will need to have its relay subsystem enabled, as well as its HOP capability. It can be configured as follows: ```js -const Libp2p = require('libp2p') -const Websockets = require('libp2p-websockets') -const { NOISE } = require('@chainsafe/libp2p-noise') -const MPLEX = require('libp2p-mplex') +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' -const node = await Libp2p.create({ - modules: { - transport: [Websockets], - connEncryption: [NOISE], - streamMuxer: [MPLEX] - }, +const node = await createLibp2p({ + transports: [new WebSockets()], + connectionEncryption: [new Noise()], + streamMuxers: [new Mplex()] addresses: { listen: ['/ip4/0.0.0.0/tcp/0/ws'] // TODO check "What is next?" section // announce: ['/dns4/auto-relay.libp2p.io/tcp/443/wss/p2p/QmWDn2LY8nannvSWJzruUYoLZ4vV83vfCBwd8DipvdgQc3'] }, - config: { - relay: { + relay: { + enabled: true, + hop: { + enabled: true + }, + advertise: { enabled: true, - hop: { - enabled: true - }, - advertise: { - enabled: true, - } } } }) @@ -74,29 +70,25 @@ Listening on: One of the typical use cases for Auto Relay is nodes behind a NAT or browser nodes due to their inability to expose a public address. 
For running a libp2p node that automatically binds itself to connected HOP relays, you can see the following: ```js -const Libp2p = require('libp2p') -const Websockets = require('libp2p-websockets') -const { NOISE } = require('@chainsafe/libp2p-noise') -const MPLEX = require('libp2p-mplex') +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' const relayAddr = process.argv[2] if (!relayAddr) { throw new Error('the relay address needs to be specified as a parameter') } -const node = await Libp2p.create({ - modules: { - transport: [Websockets], - connEncryption: [NOISE], - streamMuxer: [MPLEX] - }, - config: { - relay: { +const node = await createLibp2p({ + transports: [new WebSockets()], + connectionEncryption: [new Noise()], + streamMuxers: [new Mplex()], + relay: { + enabled: true, + autoRelay: { enabled: true, - autoRelay: { - enabled: true, - maxListeners: 2 - } + maxListeners: 2 } } }) @@ -142,22 +134,20 @@ Instead of dialing this relay manually, you could set up this node with the Boot Now that you have a relay node and a node bound to that relay, you can test connecting to the auto relay node via the relay. ```js -const Libp2p = require('libp2p') -const Websockets = require('libp2p-websockets') -const { NOISE } = require('@chainsafe/libp2p-noise') -const MPLEX = require('libp2p-mplex') +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' const autoRelayNodeAddr = process.argv[2] if (!autoRelayNodeAddr) { throw new Error('the auto relay node address needs to be specified') } -const node = await Libp2p.create({ - modules: { - transport: [Websockets], - connEncryption: [NOISE], - streamMuxer: [MPLEX] - } +const node = await createLibp2p({ + transports: [new WebSockets()], + connectionEncryption: [new Noise()], + streamMuxers: [new Mplex()] }) await node.start() diff --git a/examples/auto-relay/dialer.js b/examples/auto-relay/dialer.js index 5f6423ed..e358e099 100644 --- a/examples/auto-relay/dialer.js +++ b/examples/auto-relay/dialer.js @@ -1,9 +1,7 @@ -'use strict' - -const Libp2p = require('libp2p') -const Websockets = require('libp2p-websockets') -const { NOISE } = require('@chainsafe/libp2p-noise') -const MPLEX = require('libp2p-mplex') +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' async function main () { const autoRelayNodeAddr = process.argv[2] @@ -11,16 +9,20 @@ async function main () { throw new Error('the auto relay node address needs to be specified') } - const node = await Libp2p.create({ - modules: { - transport: [Websockets], - connEncryption: [NOISE], - streamMuxer: [MPLEX] - } + const node = await createLibp2p({ + transports: [ + new WebSockets() + ], + connectionEncryption: [ + new Noise() + ], + streamMuxers: [ + new Mplex() + ] }) await node.start() - console.log(`Node started with id ${node.peerId.toB58String()}`) + console.log(`Node started with id ${node.peerId.toString()}`) const conn = await node.dial(autoRelayNodeAddr) console.log(`Connected to the auto relay node via ${conn.remoteAddr.toString()}`) diff --git a/examples/auto-relay/listener.js b/examples/auto-relay/listener.js index 4f762c67..50aae8bb 100644 --- a/examples/auto-relay/listener.js +++ b/examples/auto-relay/listener.js @@ -1,9 
+1,7 @@ -'use strict' - -const Libp2p = require('libp2p') -const Websockets = require('libp2p-websockets') -const { NOISE } = require('@chainsafe/libp2p-noise') -const MPLEX = require('libp2p-mplex') +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' async function main () { const relayAddr = process.argv[2] @@ -11,37 +9,41 @@ async function main () { throw new Error('the relay address needs to be specified as a parameter') } - const node = await Libp2p.create({ - modules: { - transport: [Websockets], - connEncryption: [NOISE], - streamMuxer: [MPLEX] - }, - config: { - relay: { + const node = await createLibp2p({ + transports: [ + new WebSockets() + ], + connectionEncryption: [ + new Noise() + ], + streamMuxers: [ + new Mplex() + ], + relay: { + enabled: true, + autoRelay: { enabled: true, - autoRelay: { - enabled: true, - maxListeners: 2 - } + maxListeners: 2 } } }) await node.start() - console.log(`Node started with id ${node.peerId.toB58String()}`) + console.log(`Node started with id ${node.peerId.toString()}`) const conn = await node.dial(relayAddr) console.log(`Connected to the HOP relay ${conn.remotePeer.toString()}`) // Wait for connection and relay to be bind for the example purpose - node.peerStore.on('change:multiaddrs', ({ peerId }) => { + node.peerStore.addEventListener('change:multiaddrs', (evt) => { + const { peerId } = evt.detail + // Updated self multiaddrs? if (peerId.equals(node.peerId)) { - console.log(`Advertising with a relay address of ${node.multiaddrs[0].toString()}/p2p/${node.peerId.toB58String()}`) + console.log(`Advertising with a relay address of ${node.getMultiaddrs()[0].toString()}`) } - }) + }) } main() diff --git a/examples/auto-relay/relay.js b/examples/auto-relay/relay.js index 18a1df51..94ff3ace 100644 --- a/examples/auto-relay/relay.js +++ b/examples/auto-relay/relay.js @@ -1,40 +1,40 @@ -'use strict' - -const Libp2p = require('libp2p') -const Websockets = require('libp2p-websockets') -const { NOISE } = require('@chainsafe/libp2p-noise') -const MPLEX = require('libp2p-mplex') +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' async function main () { - const node = await Libp2p.create({ - modules: { - transport: [Websockets], - connEncryption: [NOISE], - streamMuxer: [MPLEX] - }, + const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0/ws'] // TODO check "What is next?" 
section // announce: ['/dns4/auto-relay.libp2p.io/tcp/443/wss/p2p/QmWDn2LY8nannvSWJzruUYoLZ4vV83vfCBwd8DipvdgQc3'] }, - config: { - relay: { + transports: [ + new WebSockets() + ], + connectionEncryption: [ + new Noise() + ], + streamMuxers: [ + new Mplex() + ], + relay: { + enabled: true, + hop: { + enabled: true + }, + advertise: { enabled: true, - hop: { - enabled: true - }, - advertise: { - enabled: true, - } } } }) await node.start() - console.log(`Node started with id ${node.peerId.toB58String()}`) + console.log(`Node started with id ${node.peerId.toString()}`) console.log('Listening on:') - node.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${node.peerId.toB58String()}`)) + node.getMultiaddrs().forEach((ma) => console.log(ma.toString())) } main() diff --git a/examples/auto-relay/test.js b/examples/auto-relay/test.js index ac5945ab..4c7541b9 100644 --- a/examples/auto-relay/test.js +++ b/examples/auto-relay/test.js @@ -1,9 +1,10 @@ -'use strict' +import path from 'path' +import execa from 'execa' +import pDefer from 'p-defer' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fileURLToPath } from 'url' -const path = require('path') -const execa = require('execa') -const pDefer = require('p-defer') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) function startProcess (name, args = []) { return execa('node', [path.join(__dirname, name), ...args], { @@ -12,7 +13,7 @@ function startProcess (name, args = []) { }) } -async function test () { +export async function test () { let output1 = '' let output2 = '' let output3 = '' @@ -50,7 +51,7 @@ async function test () { output2 += uint8ArrayToString(data) if (output2.includes('Advertising with a relay address of') && output2.includes('/p2p/')) { - autoRelayAddr = output2.trim().split('Advertising with a relay address of ')[1] + autoRelayAddr = output2.trim().split('Advertising with a relay address of ')[1].trim() proc2Ready.resolve() } }) @@ -90,5 +91,3 @@ async function test () { } }) } - -module.exports = test \ No newline at end of file diff --git a/examples/chat/src/dialer.js b/examples/chat/src/dialer.js index 2815448b..e9d59262 100644 --- a/examples/chat/src/dialer.js +++ b/examples/chat/src/dialer.js @@ -1,15 +1,16 @@ -'use strict' /* eslint-disable no-console */ -const PeerId = require('peer-id') -const { Multiaddr } = require('multiaddr') -const createLibp2p = require('./libp2p') -const { stdinToStream, streamToConsole } = require('./stream') +import { Multiaddr } from '@multiformats/multiaddr' +import { createLibp2p } from './libp2p.js' +import { stdinToStream, streamToConsole } from './stream.js' +import { createFromJSON } from '@libp2p/peer-id-factory' +import peerIdDialerJson from './peer-id-dialer.js' +import peerIdListenerJson from './peer-id-listener.js' async function run () { const [idDialer, idListener] = await Promise.all([ - PeerId.createFromJSON(require('./peer-id-dialer')), - PeerId.createFromJSON(require('./peer-id-listener')) + createFromJSON(peerIdDialerJson), + createFromJSON(peerIdListenerJson) ]) // Create a new libp2p node on localhost with a randomly chosen port @@ -25,12 +26,12 @@ async function run () { // Output this node's address console.log('Dialer ready, listening on:') - nodeDialer.multiaddrs.forEach((ma) => { - console.log(ma.toString() + '/p2p/' + idDialer.toB58String()) + nodeDialer.getMultiaddrs().forEach((ma) => { + console.log(ma.toString()) }) // Dial to the 
remote peer (the "listener") - const listenerMa = new Multiaddr(`/ip4/127.0.0.1/tcp/10333/p2p/${idListener.toB58String()}`) + const listenerMa = new Multiaddr(`/ip4/127.0.0.1/tcp/10333/p2p/${idListener.toString()}`) const { stream } = await nodeDialer.dialProtocol(listenerMa, '/chat/1.0.0') console.log('Dialer dialed to listener on protocol: /chat/1.0.0') diff --git a/examples/chat/src/libp2p.js b/examples/chat/src/libp2p.js index fbce60e9..610dae07 100644 --- a/examples/chat/src/libp2p.js +++ b/examples/chat/src/libp2p.js @@ -1,22 +1,23 @@ -'use strict' +import { TCP } from '@libp2p/tcp' +import { WebSockets } from '@libp2p/websockets' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import defaultsDeep from '@nodeutils/defaults-deep' +import { createLibp2p as create } from 'libp2p' -const TCP = require('libp2p-tcp') -const WS = require('libp2p-websockets') -const mplex = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') -const defaultsDeep = require('@nodeutils/defaults-deep') -const libp2p = require('../../..') - -async function createLibp2p(_options) { +export async function createLibp2p(_options) { const defaults = { - modules: { - transport: [TCP, WS], - streamMuxer: [mplex], - connEncryption: [NOISE], - }, + transports: [ + new TCP(), + new WebSockets() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Noise() + ] } - return libp2p.create(defaultsDeep(_options, defaults)) + return create(defaultsDeep(_options, defaults)) } - -module.exports = createLibp2p diff --git a/examples/chat/src/listener.js b/examples/chat/src/listener.js index 9dc928ca..dbaf0c2a 100644 --- a/examples/chat/src/listener.js +++ b/examples/chat/src/listener.js @@ -1,13 +1,13 @@ -'use strict' /* eslint-disable no-console */ -const PeerId = require('peer-id') -const createLibp2p = require('./libp2p.js') -const { stdinToStream, streamToConsole } = require('./stream') +import { createLibp2p } from './libp2p.js' +import { stdinToStream, streamToConsole } from './stream.js' +import { createFromJSON } from '@libp2p/peer-id-factory' +import peerIdListenerJson from './peer-id-listener.js' async function run () { // Create a new libp2p node with the given multi-address - const idListener = await PeerId.createFromJSON(require('./peer-id-listener')) + const idListener = await createFromJSON(peerIdListenerJson) const nodeListener = await createLibp2p({ peerId: idListener, addresses: { @@ -16,8 +16,9 @@ async function run () { }) // Log a message when a remote peer connects to us - nodeListener.connectionManager.on('peer:connect', (connection) => { - console.log('connected to: ', connection.remotePeer.toB58String()) + nodeListener.connectionManager.addEventListener('peer:connect', (evt) => { + const connection = evt.detail + console.log('connected to: ', connection.remotePeer.toString()) }) // Handle messages for the protocol @@ -33,8 +34,8 @@ async function run () { // Output listen addresses to the console console.log('Listener ready, listening on:') - nodeListener.multiaddrs.forEach((ma) => { - console.log(ma.toString() + '/p2p/' + idListener.toB58String()) + nodeListener.getMultiaddrs().forEach((ma) => { + console.log(ma.toString()) }) } diff --git a/examples/chat/src/peer-id-dialer.json b/examples/chat/src/peer-id-dialer.js similarity index 99% rename from examples/chat/src/peer-id-dialer.json rename to examples/chat/src/peer-id-dialer.js index 5716d74b..4c5f00de 100644 --- a/examples/chat/src/peer-id-dialer.json +++ 
b/examples/chat/src/peer-id-dialer.js @@ -1,4 +1,4 @@ -{ +export default { "id": "Qma3GsJmB47xYuyahPZPSadh1avvxfyYQwk8R3UnFrQ6aP", "privKey": "CAASpwkwggSjAgEAAoIBAQCaNSDOjPz6T8HZsf7LDpxiQRiN2OjeyIHUS05p8QWOr3EFUCFsC31R4moihE5HN+FxNalUyyFZU//yjf1pdnlMJqrVByJSMa+y2y4x2FucpoCAO97Tx+iWzwlZ2UXEUXM1Y81mhPbeWXy+wP2xElTgIER0Tsn/thoA0SD2u9wJuVvM7dB7cBcHYmqV6JH+KWCedRTum6O1BssqP/4Lbm2+rkrbZ4+oVRoU2DRLoFhKqwqLtylrbuj4XOI3XykMXV5+uQXz1JzubNOB9lsc6K+eRC+w8hhhDuFMgzkZ4qomCnx3uhO67KaICd8yqqBa6PJ/+fBM5Xk4hjyR40bwcf41AgMBAAECggEAZnrCJ6IYiLyyRdr9SbKXCNDb4YByGYPEi/HT1aHgIJfFE1PSMjxcdytxfyjP4JJpVtPjiT9JFVU2ddoYu5qJN6tGwjVwgJEWg1UXmPaAw1T/drjS94kVsAs82qICtFmwp52Apg3dBZ0Qwq/8qE1XbG7lLyohIbfCBiL0tiPYMfkcsN9gnFT/kFCX0LVs2pa9fHCRMY9rqCc4/rWJa1w8sMuQ23y4lDaxKF9OZVvOHFQkbBDrkquWHE4r55fchCz/rJklkPJUNENuncBRu0/2X+p4IKFD1DnttXNwb8j4LPiSlLro1T0hiUr5gO2QmdYwXFF63Q3mjQy0+5I4eNbjjQKBgQDZvZy3gUKS/nQNkYfq9za80uLbIj/cWbO+ZZjXCsj0fNIcQFJcKMBoA7DjJvu2S/lf86/41YHkPdmrLAEQAkJ+5BBNOycjYK9minTEjIMMmZDTXXugZ62wnU6F46uLkgEChTqEP57Y6xwwV+JaEDFEsW5N1eE9lEVX9nGIr4phMwKBgQC1TazLuEt1WBx/iUT83ita7obXqoKNzwsS/MWfY2innzYZKDOqeSYZzLtt9uTtp4X4uLyPbYs0qFYhXLsUYMoGHNN8+NdjoyxCjQRJRBkMtaNR0lc5lVDWl3bTuJovjFCgAr9uqJrmI5OHcCIk/cDpdWb3nWaMihVlePmiTcTy9wKBgQCU0u7c1jKkudqks4XM6a+2HAYGdUBk4cLjLhnrUWnNAcuyl5wzdX8dGPi8KZb+IKuQE8WBNJ2VXVj7kBYh1QmSJVunDflQSvNYCOaKuOeRoxzD+y9Wkca74qkbBmPn/6FFEb7PSZTO+tPHjyodGNgz9XpJJRjQuBk1aDJtlF3m1QKBgE5SAr5ym65SZOU3UGUIOKRsfDW4Q/OsqDUImvpywCgBICaX9lHDShFFHwau7FA52ScL7vDquoMB4UtCOtLfyQYA9995w9oYCCurrVlVIJkb8jSLcADBHw3EmqF1kq3NqJqm9TmBfoDCh52vdCCUufxgKh33kfBOSlXuf7B8dgMbAoGAZ3r0/mBQX6S+s5+xCETMTSNv7TQzxgtURIpVs+ZVr2cMhWhiv+n0Omab9X9Z50se8cWl5lkvx8vn3D/XHHIPrMF6qk7RAXtvReb+PeitNvm0odqjFv0J2qki6fDs0HKwq4kojAXI1Md8Th0eobNjsy21fEEJT7uKMJdovI/SErI=", "pubKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCaNSDOjPz6T8HZsf7LDpxiQRiN2OjeyIHUS05p8QWOr3EFUCFsC31R4moihE5HN+FxNalUyyFZU//yjf1pdnlMJqrVByJSMa+y2y4x2FucpoCAO97Tx+iWzwlZ2UXEUXM1Y81mhPbeWXy+wP2xElTgIER0Tsn/thoA0SD2u9wJuVvM7dB7cBcHYmqV6JH+KWCedRTum6O1BssqP/4Lbm2+rkrbZ4+oVRoU2DRLoFhKqwqLtylrbuj4XOI3XykMXV5+uQXz1JzubNOB9lsc6K+eRC+w8hhhDuFMgzkZ4qomCnx3uhO67KaICd8yqqBa6PJ/+fBM5Xk4hjyR40bwcf41AgMBAAE=" diff --git a/examples/echo/src/id-l.json b/examples/chat/src/peer-id-listener.js similarity index 99% rename from examples/echo/src/id-l.json rename to examples/chat/src/peer-id-listener.js index 7acb97c8..722f95eb 100644 --- a/examples/echo/src/id-l.json +++ b/examples/chat/src/peer-id-listener.js @@ -1,4 +1,4 @@ -{ +export default { "id": "QmcrQZ6RJdpYuGvZqD5QEHAv6qX4BrQLJLQPQUrTrzdcgm", "privKey": 
"CAASqAkwggSkAgEAAoIBAQDLZZcGcbe4urMBVlcHgN0fpBymY+xcr14ewvamG70QZODJ1h9sljlExZ7byLiqRB3SjGbfpZ1FweznwNxWtWpjHkQjTVXeoM4EEgDSNO/Cg7KNlU0EJvgPJXeEPycAZX9qASbVJ6EECQ40VR/7+SuSqsdL1hrmG1phpIju+D64gLyWpw9WEALfzMpH5I/KvdYDW3N4g6zOD2mZNp5y1gHeXINHWzMF596O72/6cxwyiXV1eJ000k1NVnUyrPjXtqWdVLRk5IU1LFpoQoXZU5X1hKj1a2qt/lZfH5eOrF/ramHcwhrYYw1txf8JHXWO/bbNnyemTHAvutZpTNrsWATfAgMBAAECggEAQj0obPnVyjxLFZFnsFLgMHDCv9Fk5V5bOYtmxfvcm50us6ye+T8HEYWGUa9RrGmYiLweuJD34gLgwyzE1RwptHPj3tdNsr4NubefOtXwixlWqdNIjKSgPlaGULQ8YF2tm/kaC2rnfifwz0w1qVqhPReO5fypL+0ShyANVD3WN0Fo2ugzrniCXHUpR2sHXSg6K+2+qWdveyjNWog34b7CgpV73Ln96BWae6ElU8PR5AWdMnRaA9ucA+/HWWJIWB3Fb4+6uwlxhu2L50Ckq1gwYZCtGw63q5L4CglmXMfIKnQAuEzazq9T4YxEkp+XDnVZAOgnQGUBYpetlgMmkkh9qQKBgQDvsEs0ThzFLgnhtC2Jy//ZOrOvIAKAZZf/mS08AqWH3L0/Rjm8ZYbLsRcoWU78sl8UFFwAQhMRDBP9G+RPojWVahBL/B7emdKKnFR1NfwKjFdDVaoX5uNvZEKSl9UubbC4WZJ65u/cd5jEnj+w3ir9G8n+P1gp/0yBz02nZXFgSwKBgQDZPQr4HBxZL7Kx7D49ormIlB7CCn2i7mT11Cppn5ifUTrp7DbFJ2t9e8UNk6tgvbENgCKXvXWsmflSo9gmMxeEOD40AgAkO8Pn2R4OYhrwd89dECiKM34HrVNBzGoB5+YsAno6zGvOzLKbNwMG++2iuNXqXTk4uV9GcI8OnU5ZPQKBgCZUGrKSiyc85XeiSGXwqUkjifhHNh8yH8xPwlwGUFIZimnD4RevZI7OEtXw8iCWpX2gg9XGuyXOuKORAkF5vvfVriV4e7c9Ad4Igbj8mQFWz92EpV6NHXGCpuKqRPzXrZrNOA9PPqwSs+s9IxI1dMpk1zhBCOguWx2m+NP79NVhAoGBAI6WSoTfrpu7ewbdkVzTWgQTdLzYNe6jmxDf2ZbKclrf7lNr/+cYIK2Ud5qZunsdBwFdgVcnu/02czeS42TvVBgs8mcgiQc/Uy7yi4/VROlhOnJTEMjlU2umkGc3zLzDgYiRd7jwRDLQmMrYKNyEr02HFKFn3w8kXSzW5I8rISnhAoGBANhchHVtJd3VMYvxNcQb909FiwTnT9kl9pkjhwivx+f8/K8pDfYCjYSBYCfPTM5Pskv5dXzOdnNuCj6Y2H/9m2SsObukBwF0z5Qijgu1DsxvADVIKZ4rzrGb4uSEmM6200qjJ/9U98fVM7rvOraakrhcf9gRwuspguJQnSO9cLj6", "pubKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLZZcGcbe4urMBVlcHgN0fpBymY+xcr14ewvamG70QZODJ1h9sljlExZ7byLiqRB3SjGbfpZ1FweznwNxWtWpjHkQjTVXeoM4EEgDSNO/Cg7KNlU0EJvgPJXeEPycAZX9qASbVJ6EECQ40VR/7+SuSqsdL1hrmG1phpIju+D64gLyWpw9WEALfzMpH5I/KvdYDW3N4g6zOD2mZNp5y1gHeXINHWzMF596O72/6cxwyiXV1eJ000k1NVnUyrPjXtqWdVLRk5IU1LFpoQoXZU5X1hKj1a2qt/lZfH5eOrF/ramHcwhrYYw1txf8JHXWO/bbNnyemTHAvutZpTNrsWATfAgMBAAE=" diff --git a/examples/chat/src/stream.js b/examples/chat/src/stream.js index 7e883db0..391ac986 100644 --- a/examples/chat/src/stream.js +++ b/examples/chat/src/stream.js @@ -1,15 +1,19 @@ -'use strict' /* eslint-disable no-console */ -const pipe = require('it-pipe') -const lp = require('it-length-prefixed') +import { pipe } from 'it-pipe' +import * as lp from 'it-length-prefixed' +import map from 'it-map' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' -function stdinToStream(stream) { +export function stdinToStream(stream) { // Read utf-8 from stdin process.stdin.setEncoding('utf8') pipe( // Read from stdin (the source) process.stdin, + // Turn strings into buffers + (source) => map(source, (string) => uint8ArrayFromString(string)), // Encode with length prefix (so receiving side knows how much data is coming) lp.encode(), // Write to the stream (the sink) @@ -17,12 +21,14 @@ function stdinToStream(stream) { ) } -function streamToConsole(stream) { +export function streamToConsole(stream) { pipe( // Read from the stream (the source) stream.source, // Decode length-prefixed data lp.decode(), + // Turn buffers into strings + (source) => map(source, (buf) => uint8ArrayToString(buf)), // Sink function async function (source) { // For each chunk of data @@ -33,8 +39,3 @@ function streamToConsole(stream) { } ) } - -module.exports = { - stdinToStream, - streamToConsole -} diff --git a/examples/chat/test.js 
b/examples/chat/test.js index 0d9b40d4..dd75cb91 100644 --- a/examples/chat/test.js +++ b/examples/chat/test.js @@ -1,9 +1,10 @@ -'use strict' +import path from 'path' +import execa from 'execa' +import pDefer from 'p-defer' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fileURLToPath } from 'url' -const path = require('path') -const execa = require('execa') -const pDefer = require('p-defer') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) function startProcess(name) { return execa('node', [path.join(__dirname, name)], { @@ -12,7 +13,7 @@ function startProcess(name) { }) } -async function test () { +export async function test () { const message = 'test message' let listenerOutput = '' let dialerOutput = '' @@ -73,5 +74,3 @@ async function test () { } }) } - -module.exports = test diff --git a/examples/connection-encryption/1.js b/examples/connection-encryption/1.js index fccb7537..eafb4ff7 100644 --- a/examples/connection-encryption/1.js +++ b/examples/connection-encryption/1.js @@ -1,22 +1,19 @@ -'use strict' - -const Libp2p = require('../..') -const TCP = require('libp2p-tcp') -const Mplex = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') - -const pipe = require('it-pipe') +import { createLibp2p } from '../../dist/src/index.js' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { pipe } from 'it-pipe' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' const createNode = async () => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - streamMuxer: [Mplex], - connEncryption: [NOISE] - } + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()] }) await node.start() @@ -30,14 +27,14 @@ const createNode = async () => { createNode() ]) - await node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs) + await node1.peerStore.addressBook.set(node2.peerId, node2.getMultiaddrs()) node2.handle('/a-protocol', ({ stream }) => { pipe( stream, async function (source) { for await (const msg of source) { - console.log(msg.toString()) + console.log(uint8ArrayToString(msg)) } } ) @@ -46,7 +43,7 @@ const createNode = async () => { const { stream } = await node1.dialProtocol(node2.peerId, '/a-protocol') await pipe( - ['This information is sent out encrypted to the other peer'], + [uint8ArrayFromString('This information is sent out encrypted to the other peer')], stream ) })(); diff --git a/examples/connection-encryption/README.md b/examples/connection-encryption/README.md index ac824bf9..ccc8cee1 100644 --- a/examples/connection-encryption/README.md +++ b/examples/connection-encryption/README.md @@ -13,17 +13,17 @@ We will build this example on top of example for [Protocol and Stream Multiplexi To add them to your libp2p configuration, all you have to do is: ```JavaScript -const Libp2p = require('libp2p') -const { NOISE } = require('@chainsafe/libp2p-noise') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' -const createNode = () => { - return Libp2p.create({ - modules: { - transport: [ TCP ], - streamMuxer: [ Mplex ], - // 
Attach noise as the crypto channel to use
-      connEncryption: [ NOISE ]
-    }
+const createNode = async () => {
+  return await createLibp2p({
+    transports: [ new TCP() ],
+    streamMuxers: [ new Mplex() ],
+    // Attach noise as the crypto channel to use
+    connectionEncryption: [ new Noise() ]
   })
 }
 ```
diff --git a/examples/connection-encryption/test.js b/examples/connection-encryption/test.js
index b6ea10ad..8111be9c 100644
--- a/examples/connection-encryption/test.js
+++ b/examples/connection-encryption/test.js
@@ -1,14 +1,14 @@
-'use strict'
-
-const path = require('path')
-const { waitForOutput } = require('../utils')
+import path from 'path'
+import { waitForOutput } from '../utils.js'
+import { fileURLToPath } from 'url'
 
-async function test () {
+const __dirname = path.dirname(fileURLToPath(import.meta.url))
+
+export async function test () {
   process.stdout.write('1.js\n')
 
   await waitForOutput('This information is sent out encrypted to the other peer', 'node', [path.join(__dirname, '1.js')], {
     cwd: __dirname
   })
 }
-
-module.exports = test
diff --git a/examples/delegated-routing/package.json b/examples/delegated-routing/package.json
index 6893edc3..acdbb035 100644
--- a/examples/delegated-routing/package.json
+++ b/examples/delegated-routing/package.json
@@ -3,19 +3,18 @@
   "version": "0.1.0",
   "private": true,
   "dependencies": {
-    "@chainsafe/libp2p-noise": "^5.0.2",
-    "ipfs-core": "^0.13.0",
+    "@chainsafe/libp2p-noise": "^6.0.1",
+    "ipfs-core": "^0.14.1",
     "libp2p": "../../",
-    "libp2p-delegated-content-routing": "^0.11.0",
-    "libp2p-delegated-peer-routing": "^0.11.1",
-    "libp2p-kad-dht": "^0.28.6",
-    "libp2p-mplex": "^0.10.4",
-    "libp2p-webrtc-star": "^0.25.0",
-    "libp2p-websocket-star": "^0.10.2",
-    "libp2p-websockets": "^0.16.2",
-    "react": "^16.8.6",
-    "react-dom": "^16.8.6",
-    "react-scripts": "2.1.8"
+    "@libp2p/delegated-content-routing": "^1.0.1",
+    "@libp2p/delegated-peer-routing": "^1.0.1",
+    "@libp2p/kad-dht": "^1.0.1",
+    "@libp2p/mplex": "^1.0.2",
+    "@libp2p/webrtc-star": "^1.0.6",
+    "@libp2p/websockets": "^1.0.3",
+    "react": "^17.0.2",
+    "react-dom": "^17.0.2",
+    "react-scripts": "5.0.0"
   },
   "scripts": {
     "start": "react-scripts start"
diff --git a/examples/delegated-routing/src/libp2p-bundle.js b/examples/delegated-routing/src/libp2p-bundle.js
index 0ff6786e..515c2d1c 100644
--- a/examples/delegated-routing/src/libp2p-bundle.js
+++ b/examples/delegated-routing/src/libp2p-bundle.js
@@ -1,26 +1,23 @@
 // eslint-disable-next-line
 'use strict'
 
-const Libp2p = require('libp2p')
-const Websockets = require('libp2p-websockets')
-const WebSocketStar = require('libp2p-websocket-star')
-const WebRTCStar = require('libp2p-webrtc-star')
-const MPLEX = require('libp2p-mplex')
-const { NOISE } = require('@chainsafe/libp2p-noise')
-const KadDHT = require('libp2p-kad-dht')
-const DelegatedPeerRouter = require('libp2p-delegated-peer-routing')
-const DelegatedContentRouter = require('libp2p-delegated-content-routing')
+import { createLibp2p } from 'libp2p'
+import { WebSockets } from '@libp2p/websockets'
+import { WebRTCStar } from '@libp2p/webrtc-star'
+import { Mplex } from '@libp2p/mplex'
+import { Noise } from '@chainsafe/libp2p-noise'
+import { DelegatedPeerRouting } from '@libp2p/delegated-peer-routing'
+import { DelegatedContentRouting } from '@libp2p/delegated-content-routing'
 
 export default function Libp2pBundle ({peerInfo, peerBook}) {
-  const wrtcstar = new WebRTCStar({id: peerInfo.id})
-  const wsstar = new WebSocketStar({id: peerInfo.id})
+  const wrtcstar = new WebRTCStar()
   const 
delegatedApiOptions = {
     host: '0.0.0.0',
     protocol: 'http',
     port: '8080'
   }
 
-  return new Libp2p({
+  return createLibp2p({
     peerInfo,
     peerBook,
     // Lets limit the connection managers peers and have it check peer health less frequently
@@ -28,48 +25,29 @@ export default function Libp2pBundle ({peerInfo, peerBook}) {
       maxPeers: 10,
       pollInterval: 5000
     },
-    modules: {
-      contentRouting: [
-        new DelegatedContentRouter(peerInfo.id, delegatedApiOptions)
-      ],
-      peerRouting: [
-        new DelegatedPeerRouter(delegatedApiOptions)
-      ],
-      peerDiscovery: [
-        wrtcstar.discovery,
-        wsstar.discovery
-      ],
-      transport: [
-        wrtcstar,
-        wsstar,
-        Websockets
-      ],
-      streamMuxer: [
-        MPLEX
-      ],
-      connEncryption: [
-        NOISE
-      ],
-      dht: KadDHT
+    contentRouting: [
+      new DelegatedContentRouting(peerInfo.id, delegatedApiOptions)
+    ],
+    peerRouting: [
+      new DelegatedPeerRouting(delegatedApiOptions)
+    ],
+    transports: [
+      wrtcstar,
+      new WebSockets()
+    ],
+    streamMuxers: [
+      new Mplex()
+    ],
+    connectionEncryption: [
+      new Noise()
+    ],
+    connectionManager: {
+      autoDial: false
     },
-    config: {
-      peerDiscovery: {
-        autoDial: false,
-        webrtcStar: {
-          enabled: false
-        },
-        websocketStar: {
-          enabled: false
-        }
-      },
-      dht: {
+    relay: {
+      enabled: true,
+      hop: {
         enabled: false
-      },
-      relay: {
-        enabled: true,
-        hop: {
-          enabled: false
-        }
       }
     }
   })
diff --git a/examples/discovery-mechanisms/1.js b/examples/discovery-mechanisms/1.js
index 8ac3cc48..6b099bbf 100644
--- a/examples/discovery-mechanisms/1.js
+++ b/examples/discovery-mechanisms/1.js
@@ -1,43 +1,37 @@
 /* eslint-disable no-console */
-'use strict'
 
-const Libp2p = require('../../')
-const TCP = require('libp2p-tcp')
-const Mplex = require('libp2p-mplex')
-const { NOISE } = require('@chainsafe/libp2p-noise')
-const Bootstrap = require('libp2p-bootstrap')
-
-const bootstrapers = require('./bootstrapers')
+import { createLibp2p } from 'libp2p'
+import { TCP } from '@libp2p/tcp'
+import { Mplex } from '@libp2p/mplex'
+import { Noise } from '@chainsafe/libp2p-noise'
+import { Bootstrap } from '@libp2p/bootstrap'
+import bootstrapers from './bootstrappers.js'
 
 ;(async () => {
-  const node = await Libp2p.create({
+  const node = await createLibp2p({
     addresses: {
       listen: ['/ip4/0.0.0.0/tcp/0']
    },
-    modules: {
-      transport: [TCP],
-      streamMuxer: [Mplex],
-      connEncryption: [NOISE],
-      peerDiscovery: [Bootstrap]
-    },
-    config: {
-      peerDiscovery: {
-        bootstrap: {
-          interval: 60e3,
-          enabled: true,
-          list: bootstrapers
-        }
-      }
-    }
+    transports: [new TCP()],
+    streamMuxers: [new Mplex()],
+    connectionEncryption: [new Noise()],
+    peerDiscovery: [
+      new Bootstrap({
+        interval: 60e3,
+        list: bootstrapers
+      })
+    ]
   })
 
-  node.connectionManager.on('peer:connect', (connection) => {
-    console.log('Connection established to:', connection.remotePeer.toB58String()) // Emitted when a peer has been found
+  node.connectionManager.addEventListener('peer:connect', (evt) => {
+    const connection = evt.detail
+    console.log('Connection established to:', connection.remotePeer.toString()) // Emitted when a peer has been found
   })
 
-  node.on('peer:discovery', (peerId) => {
+  node.addEventListener('peer:discovery', (evt) => {
+    const peer = evt.detail
     // No need to dial, autoDial is on
-    console.log('Discovered:', peerId.toB58String())
+    console.log('Discovered:', peer.id.toString())
   })
 
   await node.start()
diff --git a/examples/discovery-mechanisms/2.js b/examples/discovery-mechanisms/2.js
index bd5f8ddc..bad5a13f 100644
--- a/examples/discovery-mechanisms/2.js
+++ b/examples/discovery-mechanisms/2.js
@@ -1,31 +1,30 @@
 /*
eslint-disable no-console */ -'use strict' -const Libp2p = require('../../') -const TCP = require('libp2p-tcp') -const Mplex = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') -const MulticastDNS = require('libp2p-mdns') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { MulticastDNS } from '@libp2p/mdns' const createNode = async () => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - streamMuxer: [Mplex], - connEncryption: [NOISE], - peerDiscovery: [MulticastDNS] - }, - config: { - peerDiscovery: { - [MulticastDNS.tag]: { - interval: 20e3, - enabled: true - } - } - } + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Noise() + ], + peerDiscovery: [ + new MulticastDNS({ + interval: 20e3 + }) + ] }) return node @@ -37,8 +36,8 @@ const createNode = async () => { createNode() ]) - node1.on('peer:discovery', (peerId) => console.log('Discovered:', peerId.toB58String())) - node2.on('peer:discovery', (peerId) => console.log('Discovered:', peerId.toB58String())) + node1.addEventListener('peer:discovery', (evt) => console.log('Discovered:', evt.detail.id.toString())) + node2.addEventListener('peer:discovery', (evt) => console.log('Discovered:', evt.detail.id.toString())) await Promise.all([ node1.start(), diff --git a/examples/discovery-mechanisms/3.js b/examples/discovery-mechanisms/3.js index 645025d4..5cf6522b 100644 --- a/examples/discovery-mechanisms/3.js +++ b/examples/discovery-mechanisms/3.js @@ -1,66 +1,78 @@ /* eslint-disable no-console */ -'use strict' -const Libp2p = require('../../') -const TCP = require('libp2p-tcp') -const Mplex = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') -const Gossipsub = require('@achingbrain/libp2p-gossipsub') -const Bootstrap = require('libp2p-bootstrap') -const PubsubPeerDiscovery = require('libp2p-pubsub-peer-discovery') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { Gossipsub } from '@achingbrain/libp2p-gossipsub' +import { Bootstrap } from '@libp2p/bootstrap' +import { PubSubPeerDiscovery } from '@libp2p/pubsub-peer-discovery' -const createRelayServer = require('libp2p-relay-server') - -const createNode = async (bootstrapers) => { - const node = await Libp2p.create({ +const createNode = async (bootstrappers) => { + const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - streamMuxer: [Mplex], - connEncryption: [NOISE], - pubsub: Gossipsub, - peerDiscovery: [Bootstrap, PubsubPeerDiscovery] - }, - config: { - peerDiscovery: { - [PubsubPeerDiscovery.tag]: { - interval: 1000, - enabled: true - }, - [Bootstrap.tag]: { - enabled: true, - list: bootstrapers - } - } - } + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], + pubsub: new Gossipsub(), + peerDiscovery: [ + new Bootstrap({ + list: bootstrappers + }), + new PubSubPeerDiscovery({ + interval: 1000 + }) + ] }) return node } ;(async () => { - const relay = await createRelayServer({ - listenAddresses: ['/ip4/0.0.0.0/tcp/0'] + const relay = await createLibp2p({ + addresses: { + listen: [ + '/ip4/0.0.0.0/tcp/0' + ] + }, + transports: [new TCP()], + 
streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], + pubsub: new Gossipsub(), + peerDiscovery: [ + new PubSubPeerDiscovery({ + interval: 1000 + }) + ], + relay: { + enabled: true, // Allows you to dial and accept relayed connections. Does not make you a relay. + hop: { + enabled: true // Allows you to be a relay for other peers + } + } }) - console.log(`libp2p relay starting with id: ${relay.peerId.toB58String()}`) + console.log(`libp2p relay starting with id: ${relay.peerId.toString()}`) await relay.start() - const relayMultiaddrs = relay.multiaddrs.map((m) => `${m.toString()}/p2p/${relay.peerId.toB58String()}`) + + const relayMultiaddrs = relay.getMultiaddrs().map((m) => m.toString()) const [node1, node2] = await Promise.all([ createNode(relayMultiaddrs), createNode(relayMultiaddrs) ]) - node1.on('peer:discovery', (peerId) => { - console.log(`Peer ${node1.peerId.toB58String()} discovered: ${peerId.toB58String()}`) + node1.addEventListener('peer:discovery', (evt) => { + const peer = evt.detail + console.log(`Peer ${node1.peerId.toString()} discovered: ${peer.id.toString()}`) }) - node2.on('peer:discovery', (peerId) => { - console.log(`Peer ${node2.peerId.toB58String()} discovered: ${peerId.toB58String()}`) + node2.addEventListener('peer:discovery',(evt) => { + const peer = evt.detail + console.log(`Peer ${node2.peerId.toString()} discovered: ${peer.id.toString()}`) }) - ;[node1, node2].forEach((node, index) => console.log(`Node ${index} starting with id: ${node.peerId.toB58String()}`)) + ;[node1, node2].forEach((node, index) => console.log(`Node ${index} starting with id: ${node.peerId.toString()}`)) await Promise.all([ node1.start(), node2.start() diff --git a/examples/discovery-mechanisms/README.md b/examples/discovery-mechanisms/README.md index 6d5c647e..b7d24518 100644 --- a/examples/discovery-mechanisms/README.md +++ b/examples/discovery-mechanisms/README.md @@ -13,25 +13,25 @@ For this demo, we will connect to IPFS default bootstrapper nodes and so, we wil First, we create our libp2p node. ```JavaScript -const Libp2p = require('libp2p') -const Bootstrap = require('libp2p-bootstrap') +import { createLibp2p } from 'libp2p' +import { Bootstrap } from '@libp2p/bootstrap' -const node = await Libp2p.create({ - modules: { - transport: [ TCP ], - streamMuxer: [ Mplex ], - connEncryption: [ NOISE ], - peerDiscovery: [ Bootstrap ] - }, - config: { - peerDiscovery: { - bootstrap: { - interval: 60e3, - enabled: true, - list: bootstrapers - } - } - } +const node = await createLibp2p({ + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Noise() + ], + peerDiscovery: [ + new Bootstrap({ + interval: 60e3, + list: bootstrapers + }) + ] }) ``` @@ -51,26 +51,26 @@ const bootstrapers = [ Now, once we create and start the node, we can listen for events such as `peer:discovery` and `peer:connect`, these events tell us when we found a peer, independently of the discovery mechanism used and when we actually dialed to that peer. 
```JavaScript
-const node = await Libp2p.create({
+const node = await createLibp2p({
   peerId,
   addresses: {
     listen: ['/ip4/0.0.0.0/tcp/0']
   },
-  modules: {
-    transport: [ TCP ],
-    streamMuxer: [ Mplex ],
-    connEncryption: [ NOISE ],
-    peerDiscovery: [ Bootstrap ]
-  },
-  config: {
-    peerDiscovery: {
-      bootstrap: {
-        interval: 60e3,
-        enabled: true,
-        list: bootstrapers
-      }
-    }
-  }
+  transports: [
+    new TCP()
+  ],
+  streamMuxers: [
+    new Mplex()
+  ],
+  connectionEncryption: [
+    new Noise()
+  ],
+  peerDiscovery: [
+    new Bootstrap({
+      interval: 60e3,
+      list: bootstrapers
+    })
+  ]
 })
 
 node.connectionManager.on('peer:connect', (connection) => {
@@ -110,28 +110,28 @@ For this example, we need `libp2p-mdns`, go ahead and `npm install` it. You can
 Update your libp2p configuration to include MulticastDNS.
 
 ```JavaScript
-const Libp2p = require('libp2p')
-const MulticastDNS = require('libp2p-mdns')
+import { createLibp2p } from 'libp2p'
+import { MulticastDNS } from '@libp2p/mdns'
 
 const createNode = () => {
-  return Libp2p.create({
+  return createLibp2p({
     addresses: {
       listen: ['/ip4/0.0.0.0/tcp/0']
     },
-    modules: {
-      transport: [ TCP ],
-      streamMuxer: [ Mplex ],
-      connEncryption: [ NOISE ],
-      peerDiscovery: [ MulticastDNS ]
-    },
-    config: {
-      peerDiscovery: {
-        mdns: {
-          interval: 20e3,
-          enabled: true
-        }
-      }
-    }
+    transports: [
+      new TCP()
+    ],
+    streamMuxers: [
+      new Mplex()
+    ],
+    connectionEncryption: [
+      new Noise()
+    ],
+    peerDiscovery: [
+      new MulticastDNS({
+        interval: 20e3
+      })
+    ]
   })
 }
```
@@ -170,39 +170,37 @@ In the context of this example, we will create and run the `libp2p-relay-server`
 
 You can create your libp2p nodes as follows:
 
 ```js
-const Libp2p = require('libp2p')
-const TCP = require('libp2p-tcp')
-const Mplex = require('libp2p-mplex')
-const { NOISE } = require('@chainsafe/libp2p-noise')
-const Gossipsub = require('libp2p-gossipsub')
-const Bootstrap = require('libp2p-bootstrap')
-const PubsubPeerDiscovery = require('libp2p-pubsub-peer-discovery')
+import { createLibp2p } from 'libp2p'
+import { TCP } from '@libp2p/tcp'
+import { Mplex } from '@libp2p/mplex'
+import { Noise } from '@chainsafe/libp2p-noise'
+import { Gossipsub } from 'libp2p-gossipsub'
+import { Bootstrap } from '@libp2p/bootstrap'
+import { PubSubPeerDiscovery } from '@libp2p/pubsub-peer-discovery'
 
 const createNode = async (bootstrapers) => {
-  const node = await Libp2p.create({
+  const node = await createLibp2p({
     addresses: {
       listen: ['/ip4/0.0.0.0/tcp/0']
     },
-    modules: {
-      transport: [TCP],
-      streamMuxer: [Mplex],
-      connEncryption: [NOISE],
-      pubsub: Gossipsub,
-      peerDiscovery: [Bootstrap, PubsubPeerDiscovery]
-    },
-    config: {
-      peerDiscovery: {
-        [PubsubPeerDiscovery.tag]: {
-          interval: 1000,
-          enabled: true
-        },
-        [Bootstrap.tag]: {
-          enabled: true,
-          list: bootstrapers
-        }
-      }
-    }
-  })
+    transports: [
+      new TCP()
+    ],
+    streamMuxers: [
+      new Mplex()
+    ],
+    connectionEncryption: [
+      new Noise()
+    ],
+    pubsub: new Gossipsub(),
+    peerDiscovery: [
+      new Bootstrap({
+        interval: 60e3,
+        list: bootstrapers
+      }),
+      new PubSubPeerDiscovery({
+        interval: 1000
+      })
+    ]
+  })
 
   return node
 }
@@ -212,7 +210,9 @@ We will use the `libp2p-relay-server` as bootstrap nodes for the libp2p nodes, s
 
 ```js
 const relay = await createRelayServer({
-  listenAddresses: ['/ip4/0.0.0.0/tcp/0']
+  addresses: {
+    listen: ['/ip4/0.0.0.0/tcp/0']
+  }
 })
 console.log(`libp2p relay starting with id: ${relay.peerId.toB58String()}`)
 await relay.start()
diff --git a/examples/discovery-mechanisms/bootstrapers.js b/examples/discovery-mechanisms/bootstrappers.js
similarity index 90%
rename from
examples/discovery-mechanisms/bootstrapers.js rename to examples/discovery-mechanisms/bootstrappers.js index 8ebedb1f..7f88820e 100644 --- a/examples/discovery-mechanisms/bootstrapers.js +++ b/examples/discovery-mechanisms/bootstrappers.js @@ -1,7 +1,5 @@ -'use strict' - // Find this list at: https://github.com/ipfs/js-ipfs/blob/master/packages/ipfs-core-config/src/config.js -const bootstrapers = [ +export default [ '/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ', '/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN', '/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb', @@ -9,5 +7,3 @@ const bootstrapers = [ '/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa', '/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt', ] - -module.exports = bootstrapers diff --git a/examples/discovery-mechanisms/test-1.js b/examples/discovery-mechanisms/test-1.js index 73e90ad5..554f2c2e 100644 --- a/examples/discovery-mechanisms/test-1.js +++ b/examples/discovery-mechanisms/test-1.js @@ -1,14 +1,13 @@ -'use strict' +import path from 'path' +import { waitForOutput } from '../utils.js' +import { fileURLToPath } from 'url' -const path = require('path') -const { waitForOutput } = require('../utils') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function test () { +export async function test () { process.stdout.write('1.js\n') await waitForOutput('Connection established to:', 'node', [path.join(__dirname, '1.js')], { cwd: __dirname }) } - -module.exports = test diff --git a/examples/discovery-mechanisms/test-2.js b/examples/discovery-mechanisms/test-2.js index 86183c81..b40da4a3 100644 --- a/examples/discovery-mechanisms/test-2.js +++ b/examples/discovery-mechanisms/test-2.js @@ -1,14 +1,13 @@ -'use strict' +import path from 'path' +import execa from 'execa' +import pWaitFor from 'p-wait-for' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fileURLToPath } from 'url' -const path = require('path') -const execa = require('execa') -const pWaitFor = require('p-wait-for') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -const discoveredCopy = 'Discovered:' - -async function test() { - const discoveredNodes = [] +export async function test () { + let discoveredNodes = 0 process.stdout.write('2.js\n') @@ -19,17 +18,16 @@ async function test() { proc.all.on('data', async (data) => { process.stdout.write(data) - const line = uint8ArrayToString(data) + const str = uint8ArrayToString(data) - if (line.includes(discoveredCopy)) { - const id = line.trim().split(discoveredCopy)[1] - discoveredNodes.push(id) - } + str.split('\n').forEach(line => { + if (line.includes('Discovered:')) { + discoveredNodes++ + } + }) }) - await pWaitFor(() => discoveredNodes.length === 2) + await pWaitFor(() => discoveredNodes > 1) proc.kill() } - -module.exports = test diff --git a/examples/discovery-mechanisms/test-3.js b/examples/discovery-mechanisms/test-3.js index f73744cd..375ef6f8 100644 --- a/examples/discovery-mechanisms/test-3.js +++ b/examples/discovery-mechanisms/test-3.js @@ -1,14 +1,13 @@ -'use strict' +import path from 'path' +import execa from 'execa' +import pWaitFor from 'p-wait-for' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fileURLToPath } from 'url' -const path = require('path') -const execa = 
require('execa') -const pWaitFor = require('p-wait-for') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -const discoveredCopy = 'discovered:' - -async function test() { - let discoverCount = 0 +export async function test () { + let discoveredNodes = 0 process.stdout.write('3.js\n') @@ -19,17 +18,16 @@ async function test() { proc.all.on('data', async (data) => { process.stdout.write(data) - const line = uint8ArrayToString(data) + const str = uint8ArrayToString(data) - // Discovered or Connected - if (line.includes(discoveredCopy)) { - discoverCount++ - } + str.split('\n').forEach(line => { + if (line.includes('discovered:')) { + discoveredNodes++ + } + }) }) - await pWaitFor(() => discoverCount === 4) + await pWaitFor(() => discoveredNodes > 3) proc.kill() } - -module.exports = test diff --git a/examples/discovery-mechanisms/test.js b/examples/discovery-mechanisms/test.js index d9faeb2e..79f41f94 100644 --- a/examples/discovery-mechanisms/test.js +++ b/examples/discovery-mechanisms/test.js @@ -1,13 +1,9 @@ -'use strict' +import { test as test1 } from './test-1.js' +import { test as test2 } from './test-2.js' +import { test as test3 } from './test-3.js' -const test1 = require('./test-1') -const test2 = require('./test-2') -const test3 = require('./test-3') - -async function test () { +export async function test () { await test1() await test2() await test3() } - -module.exports = test diff --git a/examples/echo/src/dialer.js b/examples/echo/src/dialer.js index 59387607..74a65087 100644 --- a/examples/echo/src/dialer.js +++ b/examples/echo/src/dialer.js @@ -1,18 +1,21 @@ -'use strict' /* eslint-disable no-console */ /* * Dialer Node */ -const PeerId = require('peer-id') -const createLibp2p = require('./libp2p') -const pipe = require('it-pipe') +import { createLibp2p } from './libp2p.js' +import { pipe } from 'it-pipe' +import idd from './id-d.js' +import idl from './id-l.js' +import { createFromJSON } from '@libp2p/peer-id-factory' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' async function run() { const [dialerId, listenerId] = await Promise.all([ - PeerId.createFromJSON(require('./id-d')), - PeerId.createFromJSON(require('./id-l')) + createFromJSON(idd), + createFromJSON(idl) ]) // Dialer @@ -24,14 +27,13 @@ async function run() { }) // Add peer to Dial (the listener) into the PeerStore - const listenerMultiaddr = '/ip4/127.0.0.1/tcp/10333/p2p/' + listenerId.toB58String() + const listenerMultiaddr = '/ip4/127.0.0.1/tcp/10333/p2p/' + listenerId.toString() // Start the dialer libp2p node await dialerNode.start() console.log('Dialer ready, listening on:') - dialerNode.multiaddrs.forEach((ma) => console.log(ma.toString() + - '/p2p/' + dialerId.toB58String())) + dialerNode.getMultiaddrs().forEach((ma) => console.log(ma.toString())) // Dial the listener node console.log('Dialing to peer:', listenerMultiaddr) @@ -41,7 +43,7 @@ async function run() { pipe( // Source data - ['hey'], + [uint8ArrayFromString('hey')], // Write to the stream, and pass its output to the next function stream, // Sink function @@ -49,7 +51,7 @@ async function run() { // For each chunk of data for await (const data of source) { // Output the data - console.log('received echo:', data.toString()) + console.log('received echo:', uint8ArrayToString(data)) } } ) diff --git a/examples/echo/src/id-d.json b/examples/echo/src/id-d.js 
similarity index 99% rename from examples/echo/src/id-d.json rename to examples/echo/src/id-d.js index 5716d74b..4c5f00de 100644 --- a/examples/echo/src/id-d.json +++ b/examples/echo/src/id-d.js @@ -1,4 +1,4 @@ -{ +export default { "id": "Qma3GsJmB47xYuyahPZPSadh1avvxfyYQwk8R3UnFrQ6aP", "privKey": "CAASpwkwggSjAgEAAoIBAQCaNSDOjPz6T8HZsf7LDpxiQRiN2OjeyIHUS05p8QWOr3EFUCFsC31R4moihE5HN+FxNalUyyFZU//yjf1pdnlMJqrVByJSMa+y2y4x2FucpoCAO97Tx+iWzwlZ2UXEUXM1Y81mhPbeWXy+wP2xElTgIER0Tsn/thoA0SD2u9wJuVvM7dB7cBcHYmqV6JH+KWCedRTum6O1BssqP/4Lbm2+rkrbZ4+oVRoU2DRLoFhKqwqLtylrbuj4XOI3XykMXV5+uQXz1JzubNOB9lsc6K+eRC+w8hhhDuFMgzkZ4qomCnx3uhO67KaICd8yqqBa6PJ/+fBM5Xk4hjyR40bwcf41AgMBAAECggEAZnrCJ6IYiLyyRdr9SbKXCNDb4YByGYPEi/HT1aHgIJfFE1PSMjxcdytxfyjP4JJpVtPjiT9JFVU2ddoYu5qJN6tGwjVwgJEWg1UXmPaAw1T/drjS94kVsAs82qICtFmwp52Apg3dBZ0Qwq/8qE1XbG7lLyohIbfCBiL0tiPYMfkcsN9gnFT/kFCX0LVs2pa9fHCRMY9rqCc4/rWJa1w8sMuQ23y4lDaxKF9OZVvOHFQkbBDrkquWHE4r55fchCz/rJklkPJUNENuncBRu0/2X+p4IKFD1DnttXNwb8j4LPiSlLro1T0hiUr5gO2QmdYwXFF63Q3mjQy0+5I4eNbjjQKBgQDZvZy3gUKS/nQNkYfq9za80uLbIj/cWbO+ZZjXCsj0fNIcQFJcKMBoA7DjJvu2S/lf86/41YHkPdmrLAEQAkJ+5BBNOycjYK9minTEjIMMmZDTXXugZ62wnU6F46uLkgEChTqEP57Y6xwwV+JaEDFEsW5N1eE9lEVX9nGIr4phMwKBgQC1TazLuEt1WBx/iUT83ita7obXqoKNzwsS/MWfY2innzYZKDOqeSYZzLtt9uTtp4X4uLyPbYs0qFYhXLsUYMoGHNN8+NdjoyxCjQRJRBkMtaNR0lc5lVDWl3bTuJovjFCgAr9uqJrmI5OHcCIk/cDpdWb3nWaMihVlePmiTcTy9wKBgQCU0u7c1jKkudqks4XM6a+2HAYGdUBk4cLjLhnrUWnNAcuyl5wzdX8dGPi8KZb+IKuQE8WBNJ2VXVj7kBYh1QmSJVunDflQSvNYCOaKuOeRoxzD+y9Wkca74qkbBmPn/6FFEb7PSZTO+tPHjyodGNgz9XpJJRjQuBk1aDJtlF3m1QKBgE5SAr5ym65SZOU3UGUIOKRsfDW4Q/OsqDUImvpywCgBICaX9lHDShFFHwau7FA52ScL7vDquoMB4UtCOtLfyQYA9995w9oYCCurrVlVIJkb8jSLcADBHw3EmqF1kq3NqJqm9TmBfoDCh52vdCCUufxgKh33kfBOSlXuf7B8dgMbAoGAZ3r0/mBQX6S+s5+xCETMTSNv7TQzxgtURIpVs+ZVr2cMhWhiv+n0Omab9X9Z50se8cWl5lkvx8vn3D/XHHIPrMF6qk7RAXtvReb+PeitNvm0odqjFv0J2qki6fDs0HKwq4kojAXI1Md8Th0eobNjsy21fEEJT7uKMJdovI/SErI=", "pubKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCaNSDOjPz6T8HZsf7LDpxiQRiN2OjeyIHUS05p8QWOr3EFUCFsC31R4moihE5HN+FxNalUyyFZU//yjf1pdnlMJqrVByJSMa+y2y4x2FucpoCAO97Tx+iWzwlZ2UXEUXM1Y81mhPbeWXy+wP2xElTgIER0Tsn/thoA0SD2u9wJuVvM7dB7cBcHYmqV6JH+KWCedRTum6O1BssqP/4Lbm2+rkrbZ4+oVRoU2DRLoFhKqwqLtylrbuj4XOI3XykMXV5+uQXz1JzubNOB9lsc6K+eRC+w8hhhDuFMgzkZ4qomCnx3uhO67KaICd8yqqBa6PJ/+fBM5Xk4hjyR40bwcf41AgMBAAE=" diff --git a/examples/chat/src/peer-id-listener.json b/examples/echo/src/id-l.js similarity index 99% rename from examples/chat/src/peer-id-listener.json rename to examples/echo/src/id-l.js index 7acb97c8..722f95eb 100644 --- a/examples/chat/src/peer-id-listener.json +++ b/examples/echo/src/id-l.js @@ -1,4 +1,4 @@ -{ +export default { "id": "QmcrQZ6RJdpYuGvZqD5QEHAv6qX4BrQLJLQPQUrTrzdcgm", "privKey": 
"CAASqAkwggSkAgEAAoIBAQDLZZcGcbe4urMBVlcHgN0fpBymY+xcr14ewvamG70QZODJ1h9sljlExZ7byLiqRB3SjGbfpZ1FweznwNxWtWpjHkQjTVXeoM4EEgDSNO/Cg7KNlU0EJvgPJXeEPycAZX9qASbVJ6EECQ40VR/7+SuSqsdL1hrmG1phpIju+D64gLyWpw9WEALfzMpH5I/KvdYDW3N4g6zOD2mZNp5y1gHeXINHWzMF596O72/6cxwyiXV1eJ000k1NVnUyrPjXtqWdVLRk5IU1LFpoQoXZU5X1hKj1a2qt/lZfH5eOrF/ramHcwhrYYw1txf8JHXWO/bbNnyemTHAvutZpTNrsWATfAgMBAAECggEAQj0obPnVyjxLFZFnsFLgMHDCv9Fk5V5bOYtmxfvcm50us6ye+T8HEYWGUa9RrGmYiLweuJD34gLgwyzE1RwptHPj3tdNsr4NubefOtXwixlWqdNIjKSgPlaGULQ8YF2tm/kaC2rnfifwz0w1qVqhPReO5fypL+0ShyANVD3WN0Fo2ugzrniCXHUpR2sHXSg6K+2+qWdveyjNWog34b7CgpV73Ln96BWae6ElU8PR5AWdMnRaA9ucA+/HWWJIWB3Fb4+6uwlxhu2L50Ckq1gwYZCtGw63q5L4CglmXMfIKnQAuEzazq9T4YxEkp+XDnVZAOgnQGUBYpetlgMmkkh9qQKBgQDvsEs0ThzFLgnhtC2Jy//ZOrOvIAKAZZf/mS08AqWH3L0/Rjm8ZYbLsRcoWU78sl8UFFwAQhMRDBP9G+RPojWVahBL/B7emdKKnFR1NfwKjFdDVaoX5uNvZEKSl9UubbC4WZJ65u/cd5jEnj+w3ir9G8n+P1gp/0yBz02nZXFgSwKBgQDZPQr4HBxZL7Kx7D49ormIlB7CCn2i7mT11Cppn5ifUTrp7DbFJ2t9e8UNk6tgvbENgCKXvXWsmflSo9gmMxeEOD40AgAkO8Pn2R4OYhrwd89dECiKM34HrVNBzGoB5+YsAno6zGvOzLKbNwMG++2iuNXqXTk4uV9GcI8OnU5ZPQKBgCZUGrKSiyc85XeiSGXwqUkjifhHNh8yH8xPwlwGUFIZimnD4RevZI7OEtXw8iCWpX2gg9XGuyXOuKORAkF5vvfVriV4e7c9Ad4Igbj8mQFWz92EpV6NHXGCpuKqRPzXrZrNOA9PPqwSs+s9IxI1dMpk1zhBCOguWx2m+NP79NVhAoGBAI6WSoTfrpu7ewbdkVzTWgQTdLzYNe6jmxDf2ZbKclrf7lNr/+cYIK2Ud5qZunsdBwFdgVcnu/02czeS42TvVBgs8mcgiQc/Uy7yi4/VROlhOnJTEMjlU2umkGc3zLzDgYiRd7jwRDLQmMrYKNyEr02HFKFn3w8kXSzW5I8rISnhAoGBANhchHVtJd3VMYvxNcQb909FiwTnT9kl9pkjhwivx+f8/K8pDfYCjYSBYCfPTM5Pskv5dXzOdnNuCj6Y2H/9m2SsObukBwF0z5Qijgu1DsxvADVIKZ4rzrGb4uSEmM6200qjJ/9U98fVM7rvOraakrhcf9gRwuspguJQnSO9cLj6", "pubKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLZZcGcbe4urMBVlcHgN0fpBymY+xcr14ewvamG70QZODJ1h9sljlExZ7byLiqRB3SjGbfpZ1FweznwNxWtWpjHkQjTVXeoM4EEgDSNO/Cg7KNlU0EJvgPJXeEPycAZX9qASbVJ6EECQ40VR/7+SuSqsdL1hrmG1phpIju+D64gLyWpw9WEALfzMpH5I/KvdYDW3N4g6zOD2mZNp5y1gHeXINHWzMF596O72/6cxwyiXV1eJ000k1NVnUyrPjXtqWdVLRk5IU1LFpoQoXZU5X1hKj1a2qt/lZfH5eOrF/ramHcwhrYYw1txf8JHXWO/bbNnyemTHAvutZpTNrsWATfAgMBAAE=" diff --git a/examples/echo/src/libp2p.js b/examples/echo/src/libp2p.js index 6bc1d729..b875da72 100644 --- a/examples/echo/src/libp2p.js +++ b/examples/echo/src/libp2p.js @@ -1,23 +1,23 @@ -'use strict' +import { TCP } from '@libp2p/tcp' +import { WebSockets } from '@libp2p/websockets' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import defaultsDeep from '@nodeutils/defaults-deep' +import { createLibp2p as createNode } from 'libp2p' -const TCP = require('libp2p-tcp') -const WS = require('libp2p-websockets') -const mplex = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') - -const defaultsDeep = require('@nodeutils/defaults-deep') -const libp2p = require('../../..') - -async function createLibp2p(_options) { +export async function createLibp2p(_options) { const defaults = { - modules: { - transport: [TCP, WS], - streamMuxer: [mplex], - connEncryption: [NOISE], - }, + transports: [ + new TCP(), + new WebSockets() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Noise() + ] } - return libp2p.create(defaultsDeep(_options, defaults)) + return createNode(defaultsDeep(_options, defaults)) } - -module.exports = createLibp2p diff --git a/examples/echo/src/listener.js b/examples/echo/src/listener.js index 1f814fc3..242c04ae 100644 --- a/examples/echo/src/listener.js +++ b/examples/echo/src/listener.js @@ -1,16 +1,16 @@ -'use strict' /* eslint-disable no-console */ /* * Listener Node */ -const PeerId = require('peer-id') -const 
createLibp2p = require('./libp2p') -const pipe = require('it-pipe') +import { createLibp2p } from './libp2p.js' +import { pipe } from 'it-pipe' +import { createFromJSON } from '@libp2p/peer-id-factory' +import idl from './id-l.js' async function run() { - const listenerId = await PeerId.createFromJSON(require('./id-l')) + const listenerId = await createFromJSON(idl) // Listener libp2p node const listenerNode = await createLibp2p({ @@ -21,8 +21,9 @@ async function run() { }) // Log a message when we receive a connection - listenerNode.connectionManager.on('peer:connect', (connection) => { - console.log('received dial to me from:', connection.remotePeer.toB58String()) + listenerNode.connectionManager.addEventListener('peer:connect', (evt) => { + const connection = evt.detail + console.log('received dial to me from:', connection.remotePeer.toString()) }) // Handle incoming connections for the protocol by piping from the stream @@ -33,8 +34,8 @@ async function run() { await listenerNode.start() console.log('Listener ready, listening on:') - listenerNode.multiaddrs.forEach((ma) => { - console.log(ma.toString() + '/p2p/' + listenerId.toB58String()) + listenerNode.getMultiaddrs().forEach((ma) => { + console.log(ma.toString()) }) } diff --git a/examples/echo/test.js b/examples/echo/test.js index 579d6091..927b8e98 100644 --- a/examples/echo/test.js +++ b/examples/echo/test.js @@ -1,9 +1,10 @@ -'use strict' +import path from 'path' +import execa from 'execa' +import pDefer from 'p-defer' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fileURLToPath } from 'url' -const path = require('path') -const execa = require('execa') -const pDefer = require('p-defer') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) function startProcess(name) { return execa('node', [path.join(__dirname, name)], { @@ -12,7 +13,7 @@ function startProcess(name) { }) } -async function test () { +export async function test () { const listenerReady = pDefer() const messageReceived = pDefer() @@ -57,5 +58,3 @@ async function test () { } }) } - -module.exports = test diff --git a/examples/libp2p-in-the-browser/.babelrc b/examples/libp2p-in-the-browser/.babelrc deleted file mode 100644 index 2145517d..00000000 --- a/examples/libp2p-in-the-browser/.babelrc +++ /dev/null @@ -1,3 +0,0 @@ -{ - "plugins": ["syntax-async-functions","transform-regenerator"] -} \ No newline at end of file diff --git a/examples/libp2p-in-the-browser/index.js b/examples/libp2p-in-the-browser/index.js index 397d1201..92213d9a 100644 --- a/examples/libp2p-in-the-browser/index.js +++ b/examples/libp2p-in-the-browser/index.js @@ -1,14 +1,15 @@ -import 'babel-polyfill' -import Libp2p from 'libp2p' -import Websockets from 'libp2p-websockets' -import WebRTCStar from 'libp2p-webrtc-star' -import { NOISE } from '@chainsafe/libp2p-noise' -import Mplex from 'libp2p-mplex' -import Bootstrap from 'libp2p-bootstrap' +import { createLibp2p } from 'libp2p' +import { WebSockets } from '@libp2p/websockets' +import { WebRTCStar } from '@libp2p/webrtc-star' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' +import { Bootstrap } from '@libp2p/bootstrap' document.addEventListener('DOMContentLoaded', async () => { + const webRtcStar = new WebRTCStar() + // Create our libp2p node - const libp2p = await Libp2p.create({ + const libp2p = await createLibp2p({ addresses: { // Add the signaling server address, along with our PeerId 
to our multiaddrs list // libp2p will automatically attempt to dial to the signaling server so that it can @@ -18,28 +19,24 @@ document.addEventListener('DOMContentLoaded', async () => { '/dns4/wrtc-star2.sjc.dwebops.pub/tcp/443/wss/p2p-webrtc-star' ] }, - modules: { - transport: [Websockets, WebRTCStar], - connEncryption: [NOISE], - streamMuxer: [Mplex], - peerDiscovery: [Bootstrap] - }, - config: { - peerDiscovery: { - // The `tag` property will be searched when creating the instance of your Peer Discovery service. - // The associated object, will be passed to the service when it is instantiated. - [Bootstrap.tag]: { - enabled: true, - list: [ - '/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN', - '/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb', - '/dnsaddr/bootstrap.libp2p.io/p2p/QmZa1sAxajnQjVM8WjWXoMbmPd7NsWhfKsPkErzpm9wGkp', - '/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa', - '/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt' - ] - } - } - } + transports: [ + new WebSockets(), + webRtcStar + ], + connectionEncryption: [new Noise()], + streamMuxers: [new Mplex()], + peerDiscovery: [ + webRtcStar.discovery, + new Bootstrap({ + list: [ + '/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN', + '/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb', + '/dnsaddr/bootstrap.libp2p.io/p2p/QmZa1sAxajnQjVM8WjWXoMbmPd7NsWhfKsPkErzpm9wGkp', + '/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa', + '/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt' + ] + }) + ] }) // UI elements @@ -54,23 +51,26 @@ document.addEventListener('DOMContentLoaded', async () => { } // Listen for new peers - libp2p.on('peer:discovery', (peerId) => { - log(`Found peer ${peerId.toB58String()}`) + libp2p.addEventListener('peer:discovery', (evt) => { + const peer = evt.detail + log(`Found peer ${peer.id.toString()}`) }) // Listen for new connections to peers - libp2p.connectionManager.on('peer:connect', (connection) => { - log(`Connected to ${connection.remotePeer.toB58String()}`) + libp2p.connectionManager.addEventListener('peer:connect', (evt) => { + const connection = evt.detail + log(`Connected to ${connection.remotePeer.toString()}`) }) // Listen for peers disconnecting - libp2p.connectionManager.on('peer:disconnect', (connection) => { - log(`Disconnected from ${connection.remotePeer.toB58String()}`) + libp2p.connectionManager.addEventListener('peer:disconnect', (evt) => { + const connection = evt.detail + log(`Disconnected from ${connection.remotePeer.toString()}`) }) await libp2p.start() status.innerText = 'libp2p started!' 
- log(`libp2p id is ${libp2p.peerId.toB58String()}`) + log(`libp2p id is ${libp2p.peerId.toString()}`) // Export libp2p to the window so you can play with the API window.libp2p = libp2p diff --git a/examples/libp2p-in-the-browser/package.json b/examples/libp2p-in-the-browser/package.json index f515deb4..6f330dc2 100644 --- a/examples/libp2p-in-the-browser/package.json +++ b/examples/libp2p-in-the-browser/package.json @@ -2,31 +2,24 @@ "name": "libp2p-in-browser", "version": "1.0.0", "description": "A libp2p node running in the browser", + "type": "module", "browserslist": [ "last 2 Chrome versions" ], "scripts": { "test": "echo \"Error: no test specified\" && exit 1", - "build": "parcel build index.html", - "start": "parcel index.html" + "start": "vite" }, - "keywords": [], - "author": "", "license": "ISC", "dependencies": { - "@chainsafe/libp2p-noise": "^5.0.2", - "libp2p": "../../", - "libp2p-bootstrap": "^0.14.0", - "libp2p-mplex": "^0.10.4", - "libp2p-webrtc-star": "^0.25.0", - "libp2p-websockets": "^0.16.1" + "@chainsafe/libp2p-noise": "^6.0.1", + "@libp2p/bootstrap": "^1.0.1", + "@libp2p/mplex": "^1.0.2", + "@libp2p/webrtc-star": "^1.0.6", + "@libp2p/websockets": "^1.0.3", + "libp2p": "../../" }, "devDependencies": { - "@babel/cli": "^7.13.10", - "@babel/core": "^7.13.0", - "babel-plugin-syntax-async-functions": "^6.13.0", - "babel-plugin-transform-regenerator": "^6.26.0", - "babel-polyfill": "^6.26.0", - "parcel": "^2.0.1" + "vite": "^2.8.6" } } diff --git a/examples/libp2p-in-the-browser/test.js b/examples/libp2p-in-the-browser/test.js index 97e65224..0b3a16a6 100644 --- a/examples/libp2p-in-the-browser/test.js +++ b/examples/libp2p-in-the-browser/test.js @@ -1,11 +1,14 @@ -'use strict' +import execa from 'execa' +import { chromium } from 'playwright' +import path from 'path' +import { fileURLToPath } from 'url' -const execa = require('execa') -const { chromium } = require('playwright'); +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function run() { - let url = '' - const proc = execa('parcel', ['./index.html'], { +export async function test () { + let url = 'http://localhost:3000' + + const proc = execa('vite', [], { preferLocal: true, localDir: __dirname, cwd: __dirname, @@ -16,11 +19,7 @@ async function run() { /**@type {string} */ const out = chunk.toString() - if (out.includes('Server running at')) { - url = out.split('Server running at ')[1] - } - - if (out.includes('Built in')) { + if (out.includes('ready in')) { try { const browser = await chromium.launch(); const page = await browser.newPage(); @@ -36,9 +35,8 @@ async function run() { '#output', { timeout: 5000 } ) - await browser.close(); - - } catch (/** @type {any} */ err) { + await browser.close() + } catch (err) { console.error(err) process.exit(1) } finally { @@ -46,7 +44,4 @@ async function run() { } } }) - } - -module.exports = run diff --git a/examples/package.json b/examples/package.json index bee67556..596714c0 100644 --- a/examples/package.json +++ b/examples/package.json @@ -2,18 +2,18 @@ "name": "libp2p-examples", "version": "1.0.0", "description": "Examples of how to use libp2p", + "type": "module", "scripts": { "test": "node ./test.js", "test:all": "node ./test-all.js" }, "license": "MIT", "dependencies": { - "@achingbrain/libp2p-gossipsub": "^0.12.2", + "@achingbrain/libp2p-gossipsub": "^0.13.5", + "@libp2p/pubsub-peer-discovery": "^5.0.1", "execa": "^2.1.0", "fs-extra": "^8.1.0", - "libp2p": "../src", - "libp2p-pubsub-peer-discovery": "^4.0.0", - "libp2p-relay-server": "^0.3.0", + 
"libp2p": "../", "p-defer": "^3.0.0", "uint8arrays": "^3.0.0", "which": "^2.0.1" diff --git a/examples/peer-and-content-routing/1.js b/examples/peer-and-content-routing/1.js index 16b2dfbc..12ecfd4b 100644 --- a/examples/peer-and-content-routing/1.js +++ b/examples/peer-and-content-routing/1.js @@ -1,30 +1,21 @@ /* eslint-disable no-console */ -'use strict' -const Libp2p = require('../../') -const TCP = require('libp2p-tcp') -const Mplex = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') -const KadDHT = require('libp2p-kad-dht') - -const delay = require('delay') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { KadDHT } from '@libp2p/kad-dht' +import delay from 'delay' const createNode = async () => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - streamMuxer: [Mplex], - connEncryption: [NOISE], - dht: KadDHT - }, - config: { - dht: { - enabled: true - } - } + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], + dht: new KadDHT() }) await node.start() @@ -38,8 +29,8 @@ const createNode = async () => { createNode() ]) - await node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs) - await node2.peerStore.addressBook.set(node3.peerId, node3.multiaddrs) + await node1.peerStore.addressBook.set(node2.peerId, node2.getMultiaddrs()) + await node2.peerStore.addressBook.set(node3.peerId, node3.getMultiaddrs()) await Promise.all([ node1.dial(node2.peerId), @@ -52,5 +43,5 @@ const createNode = async () => { const peer = await node1.peerRouting.findPeer(node3.peerId) console.log('Found it, multiaddrs are:') - peer.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${peer.id.toB58String()}`)) + peer.multiaddrs.forEach((ma) => console.log(ma.toString())) })(); diff --git a/examples/peer-and-content-routing/2.js b/examples/peer-and-content-routing/2.js index bcad5977..5df23466 100644 --- a/examples/peer-and-content-routing/2.js +++ b/examples/peer-and-content-routing/2.js @@ -1,32 +1,23 @@ /* eslint-disable no-console */ -'use strict' -const Libp2p = require('../../') -const TCP = require('libp2p-tcp') -const Mplex = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') -const { CID } = require('multiformats/cid') -const KadDHT = require('libp2p-kad-dht') - -const all = require('it-all') -const delay = require('delay') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { CID } from 'multiformats/cid' +import { KadDHT } from '@libp2p/kad-dht' +import all from 'it-all' +import delay from 'delay' const createNode = async () => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - streamMuxer: [Mplex], - connEncryption: [NOISE], - dht: KadDHT - }, - config: { - dht: { - enabled: true - } - } + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], + dht: new KadDHT() }) await node.start() @@ -40,8 +31,8 @@ const createNode = async () => { createNode() ]) - await node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs) - await node2.peerStore.addressBook.set(node3.peerId, node3.multiaddrs) + await 
node1.peerStore.addressBook.set(node2.peerId, node2.getMultiaddrs()) + await node2.peerStore.addressBook.set(node3.peerId, node3.getMultiaddrs()) await Promise.all([ node1.dial(node2.peerId), @@ -54,12 +45,12 @@ const createNode = async () => { const cid = CID.parse('QmTp9VkYvnHyrqKQuFPiuZkiX9gPcqj6x5LJ1rmWuSySnL') await node1.contentRouting.provide(cid) - console.log('Node %s is providing %s', node1.peerId.toB58String(), cid.toString()) + console.log('Node %s is providing %s', node1.peerId.toString(), cid.toString()) // wait for propagation await delay(300) const providers = await all(node3.contentRouting.findProviders(cid, { timeout: 3000 })) - console.log('Found provider:', providers[0].id.toB58String()) + console.log('Found provider:', providers[0].id.toString()) })(); diff --git a/examples/peer-and-content-routing/README.md b/examples/peer-and-content-routing/README.md index 373f10d7..ad523414 100644 --- a/examples/peer-and-content-routing/README.md +++ b/examples/peer-and-content-routing/README.md @@ -13,26 +13,24 @@ This example builds on top of the [Protocol and Stream Muxing](../protocol-and-s First, let's update our config to support Peer Routing and Content Routing. ```JavaScript -const Libp2p = require('libp2p') -const KadDHT = require('libp2p-kad-dht') +import { createLibp2p } from 'libp2p' +import { KadDHT } from '@libp2p/kad-dht' -const node = await Libp2p.create({ +const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [ TCP ], - streamMuxer: [ Mplex ], - connEncryption: [ NOISE ], - // we add the DHT module that will enable Peer and Content Routing - dht: KadDHT - }, - config: { - dht: { - // dht must be enabled - enabled: true - } - } + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connEncryption: [ + new Noise() + ], + // we add the DHT module that will enable Peer and Content Routing + dht: KadDHT }) ``` diff --git a/examples/peer-and-content-routing/test-1.js b/examples/peer-and-content-routing/test-1.js index 43d6c1eb..8a234f4d 100644 --- a/examples/peer-and-content-routing/test-1.js +++ b/examples/peer-and-content-routing/test-1.js @@ -1,14 +1,13 @@ -'use strict' +import path from 'path' +import { waitForOutput } from '../utils.js' +import { fileURLToPath } from 'url' -const path = require('path') -const { waitForOutput } = require('../utils') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function test () { +export async function test () { process.stdout.write('1.js\n') await waitForOutput('Found it, multiaddrs are:', 'node', [path.join(__dirname, '1.js')], { cwd: __dirname }) } - -module.exports = test diff --git a/examples/peer-and-content-routing/test-2.js b/examples/peer-and-content-routing/test-2.js index 76c492de..68b41f65 100644 --- a/examples/peer-and-content-routing/test-2.js +++ b/examples/peer-and-content-routing/test-2.js @@ -1,14 +1,13 @@ -'use strict' +import path from 'path' +import { waitForOutput } from '../utils.js' +import { fileURLToPath } from 'url' -const path = require('path') -const { waitForOutput } = require('../utils') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function test () { +export async function test () { process.stdout.write('2.js\n') await waitForOutput('Found provider:', 'node', [path.join(__dirname, '2.js')], { cwd: __dirname }) } - -module.exports = test diff --git a/examples/peer-and-content-routing/test.js b/examples/peer-and-content-routing/test.js index 1ccbda6d..622a7dfc 100644 --- 
a/examples/peer-and-content-routing/test.js +++ b/examples/peer-and-content-routing/test.js @@ -1,11 +1,7 @@ -'use strict' +import { test as test1 } from './test-1.js' +import { test as test2 } from './test-2.js' -const test1 = require('./test-1') -const test2 = require('./test-2') - -async function test() { +export async function test() { await test1() await test2() } - -module.exports = test diff --git a/examples/pnet/index.js b/examples/pnet/index.js index 090b9e07..1da827cf 100644 --- a/examples/pnet/index.js +++ b/examples/pnet/index.js @@ -1,10 +1,10 @@ /* eslint no-console: ["off"] */ -'use strict' -const { generate } = require('libp2p/src/pnet') -const privateLibp2pNode = require('./libp2p-node') - -const pipe = require('it-pipe') +import { generate } from 'libp2p/pnet/generate' +import { privateLibp2pNode } from './libp2p-node.js' +import { pipe } from 'it-pipe' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' // Create a Uint8Array and write the swarm key to it const swarmKey = new Uint8Array(95) @@ -29,7 +29,7 @@ generate(otherSwarmKey) console.log('nodes started...') // Add node 2 data to node1's PeerStore - await node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs) + await node1.peerStore.addressBook.set(node2.peerId, node2.getMultiaddrs()) await node1.dial(node2.peerId) node2.handle('/private', ({ stream }) => { @@ -37,7 +37,7 @@ generate(otherSwarmKey) stream, async function (source) { for await (const msg of source) { - console.log(msg.toString()) + console.log(uint8ArrayToString(msg)) } } ) @@ -46,7 +46,7 @@ generate(otherSwarmKey) const { stream } = await node1.dialProtocol(node2.peerId, '/private') await pipe( - ['This message is sent on a private network'], + [uint8ArrayFromString('This message is sent on a private network')], stream ) })() diff --git a/examples/pnet/libp2p-node.js b/examples/pnet/libp2p-node.js index 8c01fde9..662edb05 100644 --- a/examples/pnet/libp2p-node.js +++ b/examples/pnet/libp2p-node.js @@ -1,38 +1,31 @@ -'use strict' - -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') -const Protector = require('libp2p/src/pnet') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { PreSharedKeyConnectionProtector } from 'libp2p/pnet' /** * privateLibp2pNode returns a libp2p node function that will use the swarm * key with the given `swarmKey` to create the Protector - * - * @param {Uint8Array} swarmKey - * @returns {Promise} Returns a libp2pNode function for use in IPFS creation */ -const privateLibp2pNode = async (swarmKey) => { - const node = await Libp2p.create({ +export async function privateLibp2pNode (swarmKey) { + const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], // We're only using the TCP transport for this example - streamMuxer: [MPLEX], // We're only using mplex muxing - // Let's make sure to use identifying crypto in our pnet since the protector doesn't - // care about node identity, and only the presence of private keys - connEncryption: [NOISE], - // Leave peer discovery empty, we don't want to find peers. We could omit the property, but it's - // being left in for explicit readability. 
- // We should explicitly dial pnet peers, or use a custom discovery service for finding nodes in our pnet - peerDiscovery: [], - connProtector: new Protector(swarmKey) - } + transports: [new TCP()], // We're only using the TCP transport for this example + streamMuxers: [new Mplex()], // We're only using mplex muxing + // Let's make sure to use identifying crypto in our pnet since the protector doesn't + // care about node identity, and only the presence of private keys + connectionEncryption: [new Noise()], + // Leave peer discovery empty, we don't want to find peers. We could omit the property, but it's + // being left in for explicit readability. + // We should explicitly dial pnet peers, or use a custom discovery service for finding nodes in our pnet + peerDiscovery: [], + connectionProtector: new PreSharedKeyConnectionProtector({ + psk: swarmKey + }) }) return node } - -module.exports = privateLibp2pNode diff --git a/examples/pnet/test.js b/examples/pnet/test.js index 56519927..33696e95 100644 --- a/examples/pnet/test.js +++ b/examples/pnet/test.js @@ -1,13 +1,12 @@ -'use strict' +import path from 'path' +import { waitForOutput } from '../utils.js' +import { fileURLToPath } from 'url' -const path = require('path') -const { waitForOutput } = require('../utils') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function test () { +export async function test () { await waitForOutput('This message is sent on a private network', 'node', [path.join(__dirname, 'index.js')], { cwd: __dirname }) } -module.exports = test - diff --git a/examples/pnet/utils.js b/examples/pnet/utils.js index 4f03dc86..16d89de7 100644 --- a/examples/pnet/utils.js +++ b/examples/pnet/utils.js @@ -1,6 +1,6 @@ 'use strict' -const fs = require('fs') -const path = require('path') +const fs from 'fs') +import path from 'path' /** * mkdirp recursively creates needed folders for the given dir path diff --git a/examples/protocol-and-stream-muxing/1.js b/examples/protocol-and-stream-muxing/1.js index 3f71e0f5..364bffdb 100644 --- a/examples/protocol-and-stream-muxing/1.js +++ b/examples/protocol-and-stream-muxing/1.js @@ -1,22 +1,19 @@ -'use strict' - -const Libp2p = require('../../') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') - -const pipe = require('it-pipe') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { pipe } from 'it-pipe' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' const createNode = async () => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - } + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()] }) await node.start() @@ -31,7 +28,7 @@ const createNode = async () => { ]) // Add node's 2 data to the PeerStore - await node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs) + await node1.peerStore.addressBook.set(node2.peerId, node2.getMultiaddrs()) // exact matching node2.handle('/your-protocol', ({ stream }) => { @@ -39,7 +36,7 @@ const createNode = async () => { stream, async function (source) { for await (const msg of source) { - console.log(msg.toString()) + 
console.log(uint8ArrayToString(msg)) } } ) @@ -56,7 +53,7 @@ const createNode = async () => { stream, async function (source) { for await (const msg of source) { - console.log(msg.toString()) + console.log(uint8ArrayToString(msg)) } } ) @@ -65,7 +62,7 @@ const createNode = async () => { const { stream } = await node1.dialProtocol(node2.peerId, ['/your-protocol']) await pipe( - ['my own protocol, wow!'], + [uint8ArrayFromString('my own protocol, wow!')], stream ) diff --git a/examples/protocol-and-stream-muxing/2.js b/examples/protocol-and-stream-muxing/2.js index a7fdb175..44b0b1eb 100644 --- a/examples/protocol-and-stream-muxing/2.js +++ b/examples/protocol-and-stream-muxing/2.js @@ -1,22 +1,19 @@ -'use strict' - -const Libp2p = require('../../') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') - -const pipe = require('it-pipe') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { pipe } from 'it-pipe' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' const createNode = async () => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - } + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()] }) await node.start() @@ -31,14 +28,14 @@ const createNode = async () => { ]) // Add node's 2 data to the PeerStore - await node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs) + await node1.peerStore.addressBook.set(node2.peerId, node2.getMultiaddrs()) node2.handle(['/a', '/b'], ({ protocol, stream }) => { pipe( stream, async function (source) { for await (const msg of source) { - console.log(`from: ${protocol}, msg: ${msg.toString()}`) + console.log(`from: ${protocol}, msg: ${uint8ArrayToString(msg)}`) } } ) @@ -46,19 +43,19 @@ const createNode = async () => { const { stream: stream1 } = await node1.dialProtocol(node2.peerId, ['/a']) await pipe( - ['protocol (a)'], + [uint8ArrayFromString('protocol (a)')], stream1 ) const { stream: stream2 } = await node1.dialProtocol(node2.peerId, ['/b']) await pipe( - ['protocol (b)'], + [uint8ArrayFromString('protocol (b)')], stream2 ) const { stream: stream3 } = await node1.dialProtocol(node2.peerId, ['/b']) await pipe( - ['another stream on protocol (b)'], + [uint8ArrayFromString('another stream on protocol (b)')], stream3 ) })(); diff --git a/examples/protocol-and-stream-muxing/3.js b/examples/protocol-and-stream-muxing/3.js index c9a91483..a3687435 100644 --- a/examples/protocol-and-stream-muxing/3.js +++ b/examples/protocol-and-stream-muxing/3.js @@ -1,23 +1,21 @@ /* eslint-disable no-console */ -'use strict' -const Libp2p = require('../../') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') - -const pipe = require('it-pipe') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { pipe } from 'it-pipe' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' const createNode = async 
() => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] - } + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()] }) await node.start() @@ -32,14 +30,14 @@ const createNode = async () => { ]) // Add node's 2 data to the PeerStore - await node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs) + await node1.peerStore.addressBook.set(node2.peerId, node2.getMultiaddrs()) node1.handle('/node-1', ({ stream }) => { pipe( stream, async function (source) { for await (const msg of source) { - console.log(msg.toString()) + console.log(uint8ArrayToString(msg)) } } ) @@ -50,7 +48,7 @@ const createNode = async () => { stream, async function (source) { for await (const msg of source) { - console.log(msg.toString()) + console.log(uint8ArrayToString(msg)) } } ) @@ -58,13 +56,13 @@ const createNode = async () => { const { stream: stream1 } = await node1.dialProtocol(node2.peerId, ['/node-2']) await pipe( - ['from 1 to 2'], + [uint8ArrayFromString('from 1 to 2')], stream1 ) const { stream: stream2 } = await node2.dialProtocol(node1.peerId, ['/node-1']) await pipe( - ['from 2 to 1'], + [uint8ArrayFromString('from 2 to 1')], stream2 ) })(); diff --git a/examples/protocol-and-stream-muxing/README.md b/examples/protocol-and-stream-muxing/README.md index a4df1a8b..dae31048 100644 --- a/examples/protocol-and-stream-muxing/README.md +++ b/examples/protocol-and-stream-muxing/README.md @@ -11,9 +11,9 @@ Let's see _protocol multiplexing_ in action! You will need the following modules After creating the nodes, we need to tell libp2p which protocols to handle. ```JavaScript -const pipe = require('it-pipe') -const { map } = require('streaming-iterables') -const { toBuffer } = require('it-buffer') +import { pipe } from 'it-pipe' +const { map } from 'streaming-iterables') +const { toBuffer } from 'it-buffer') // ... const node1 = nodes[0] @@ -102,17 +102,19 @@ Stream multiplexing is an old concept, in fact it happens in many of the layers Currently, we have [libp2p-mplex](https://github.com/libp2p/js-libp2p-mplex) and pluging it in is as easy as adding a transport. Let's revisit our libp2p configuration. ```JavaScript -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' //... 
const createNode = () => { return Libp2p.create({ - modules: { - transport: [ TCP ], - streamMuxer: [ Mplex ] - } + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ] }) } ``` diff --git a/examples/protocol-and-stream-muxing/test-1.js b/examples/protocol-and-stream-muxing/test-1.js index 91f409d5..d11e42e4 100644 --- a/examples/protocol-and-stream-muxing/test-1.js +++ b/examples/protocol-and-stream-muxing/test-1.js @@ -1,14 +1,13 @@ -'use strict' +import path from 'path' +import { waitForOutput } from '../utils.js' +import { fileURLToPath } from 'url' -const path = require('path') -const { waitForOutput } = require('../utils') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function test () { +export async function test () { process.stdout.write('1.js\n') await waitForOutput('my own protocol, wow!', 'node', [path.join(__dirname, '1.js')], { cwd: __dirname }) } - -module.exports = test diff --git a/examples/protocol-and-stream-muxing/test-2.js b/examples/protocol-and-stream-muxing/test-2.js index 26b4b12e..cf6dc606 100644 --- a/examples/protocol-and-stream-muxing/test-2.js +++ b/examples/protocol-and-stream-muxing/test-2.js @@ -1,14 +1,13 @@ -'use strict' +import path from 'path' +import { waitForOutput } from '../utils.js' +import { fileURLToPath } from 'url' -const path = require('path') -const { waitForOutput } = require('../utils') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function test () { +export async function test () { process.stdout.write('2.js\n') await waitForOutput('another stream on protocol (b)', 'node', [path.join(__dirname, '2.js')], { cwd: __dirname }) } - -module.exports = test diff --git a/examples/protocol-and-stream-muxing/test-3.js b/examples/protocol-and-stream-muxing/test-3.js index 8724237c..bcd4aa15 100644 --- a/examples/protocol-and-stream-muxing/test-3.js +++ b/examples/protocol-and-stream-muxing/test-3.js @@ -1,14 +1,13 @@ -'use strict' +import path from 'path' +import { waitForOutput } from '../utils.js' +import { fileURLToPath } from 'url' -const path = require('path') -const { waitForOutput } = require('../utils') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function test () { +export async function test () { process.stdout.write('3.js\n') await waitForOutput('from 2 to 1', 'node', [path.join(__dirname, '3.js')], { cwd: __dirname }) } - -module.exports = test diff --git a/examples/protocol-and-stream-muxing/test.js b/examples/protocol-and-stream-muxing/test.js index 72fa27ee..8f209bdd 100644 --- a/examples/protocol-and-stream-muxing/test.js +++ b/examples/protocol-and-stream-muxing/test.js @@ -1,13 +1,9 @@ -'use strict' +import { test as test1 } from './test-1.js' +import { test as test2 } from './test-2.js' +import { test as test3 } from './test-3.js' -const test1 = require('./test-1') -const test2 = require('./test-2') -const test3 = require('./test-3') - -async function test() { +export async function test() { await test1() await test2() await test3() } - -module.exports = test diff --git a/examples/pubsub/1.js b/examples/pubsub/1.js index 20aa4970..53d7739e 100644 --- a/examples/pubsub/1.js +++ b/examples/pubsub/1.js @@ -1,25 +1,23 @@ /* eslint-disable no-console */ -'use strict' -const Libp2p = require('../../') -const TCP = require('libp2p-tcp') -const Mplex = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') -const Gossipsub = require('@achingbrain/libp2p-gossipsub') -const { fromString: uint8ArrayFromString } = 
require('uint8arrays/from-string') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { Gossipsub } from '@achingbrain/libp2p-gossipsub' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { CustomEvent } from '@libp2p/interfaces' const createNode = async () => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - streamMuxer: [Mplex], - connEncryption: [NOISE], - pubsub: Gossipsub - } + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], + pubsub: new Gossipsub() }) await node.start() @@ -35,22 +33,20 @@ const createNode = async () => { ]) // Add node's 2 data to the PeerStore - await node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs) + await node1.peerStore.addressBook.set(node2.peerId, node2.getMultiaddrs()) await node1.dial(node2.peerId) - node1.pubsub.on(topic, (msg) => { - console.log(`node1 received: ${uint8ArrayToString(msg.data)}`) + node1.pubsub.addEventListener(topic, (evt) => { + console.log(`node1 received: ${uint8ArrayToString(evt.detail.data)}`) }) - node1.pubsub.subscribe(topic) // Will not receive own published messages by default - node2.pubsub.on(topic, (msg) => { - console.log(`node2 received: ${uint8ArrayToString(msg.data)}`) + node2.pubsub.addEventListener(topic, (evt) => { + console.log(`node2 received: ${uint8ArrayToString(evt.detail.data)}`) }) - node2.pubsub.subscribe(topic) // node2 publishes "news" every second setInterval(() => { - node2.pubsub.publish(topic, uint8ArrayFromString('Bird bird bird, bird is the word!')) + node2.pubsub.dispatchEvent(new CustomEvent(topic, { detail: uint8ArrayFromString('Bird bird bird, bird is the word!') })) }, 1000) })() diff --git a/examples/pubsub/README.md b/examples/pubsub/README.md index 654dd76d..9dec1f97 100644 --- a/examples/pubsub/README.md +++ b/examples/pubsub/README.md @@ -21,28 +21,32 @@ Using PubSub is super simple, you only need to provide the implementation of you First, let's update our libp2p configuration with a pubsub implementation. ```JavaScript -const Libp2p = require('libp2p') -const Gossipsub = require('libp2p-gossipsub') +import { createLibp2p } from 'libp2p' +import { Gossipsub } from 'libp2p-gossipsub' -const node = await Libp2p.create({ +const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [ TCP ], - streamMuxer: [ Mplex ], - connEncryption: [ NOISE ], - // we add the Pubsub module we want - pubsub: Gossipsub - } + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Noise() + ], + // we add the Pubsub module we want + pubsub: new Gossipsub() }) ``` Once that is done, we only need to create a few libp2p nodes, connect them and everything is ready to start using pubsub. 
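For orientation, the converted `examples/pubsub/1.js` above shows the shape of the new event-based pubsub API. Below is a minimal sketch (not a complete program) of the same calls, assuming `node1` and `node2` have already been created and dialled as in that example, and that the `uint8arrays` helpers are imported under the aliases used there:

```JavaScript
import { CustomEvent } from '@libp2p/interfaces'

const topic = 'news'

// receiving: subscribe-style handler, mirroring the converted 1.js in this change set
node1.pubsub.addEventListener(topic, (evt) => {
  console.log(`node1 received: ${uint8ArrayToString(evt.detail.data)}`)
})

// sending: publish by dispatching a CustomEvent on the topic, as 1.js does
node2.pubsub.dispatchEvent(new CustomEvent(topic, { detail: uint8ArrayFromString('Bird bird bird, bird is the word!') }))
```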
```JavaScript -const { fromString } = require('uint8arrays/from-string') -const { toString } = require('uint8arrays/to-string') +import { fromString } from 'uint8arrays/from-string' +import { toString } from 'uint8arrays/to-string' const topic = 'news' const node1 = nodes[0] diff --git a/examples/pubsub/message-filtering/1.js b/examples/pubsub/message-filtering/1.js index 81a7830d..32665c12 100644 --- a/examples/pubsub/message-filtering/1.js +++ b/examples/pubsub/message-filtering/1.js @@ -1,25 +1,23 @@ /* eslint-disable no-console */ -'use strict' -const Libp2p = require('../../../') -const TCP = require('libp2p-tcp') -const Mplex = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') -const Gossipsub = require('@achingbrain/libp2p-gossipsub') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { Gossipsub } from '@achingbrain/libp2p-gossipsub' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { CustomEvent } from '@libp2p/interfaces' const createNode = async () => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - streamMuxer: [Mplex], - connEncryption: [NOISE], - pubsub: Gossipsub - } + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], + pubsub: new Gossipsub() }) await node.start() @@ -36,28 +34,26 @@ const createNode = async () => { ]) // node1 conect to node2 and node2 conect to node3 - await node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs) + await node1.peerStore.addressBook.set(node2.peerId, node2.getMultiaddrs()) await node1.dial(node2.peerId) - await node2.peerStore.addressBook.set(node3.peerId, node3.multiaddrs) + await node2.peerStore.addressBook.set(node3.peerId, node3.getMultiaddrs()) await node2.dial(node3.peerId) //subscribe - node1.pubsub.on(topic, (msg) => { + node1.pubsub.addEventListener(topic, (evt) => { // Will not receive own published messages by default - console.log(`node1 received: ${uint8ArrayToString(msg.data)}`) + console.log(`node1 received: ${uint8ArrayToString(evt.detail.data)}`) }) await node1.pubsub.subscribe(topic) - node2.pubsub.on(topic, (msg) => { - console.log(`node2 received: ${uint8ArrayToString(msg.data)}`) + node2.pubsub.addEventListener(topic, (evt) => { + console.log(`node2 received: ${uint8ArrayToString(evt.detail.data)}`) }) - await node2.pubsub.subscribe(topic) - node3.pubsub.on(topic, (msg) => { - console.log(`node3 received: ${uint8ArrayToString(msg.data)}`) + node3.pubsub.addEventListener(topic, (evt) => { + console.log(`node3 received: ${uint8ArrayToString(evt.detail.data)}`) }) - await node3.pubsub.subscribe(topic) const validateFruit = (msgTopic, msg) => { const fruit = uint8ArrayToString(msg.data) @@ -79,7 +75,7 @@ const createNode = async () => { // car is not a fruit ! 
setInterval(() => { console.log('############## fruit ' + myFruits[count] + ' ##############') - node1.pubsub.publish(topic, uint8ArrayFromString(myFruits[count])) + node1.pubsub.dispatchEvent(new CustomEvent(topic, { detail: uint8ArrayFromString(myFruits[count]) })) count++ if (count == myFruits.length) { count = 0 diff --git a/examples/pubsub/message-filtering/README.md b/examples/pubsub/message-filtering/README.md index eb554afa..9eecb873 100644 --- a/examples/pubsub/message-filtering/README.md +++ b/examples/pubsub/message-filtering/README.md @@ -7,19 +7,23 @@ To prevent undesired data from being propagated on the network, we can apply a f First, let's update our libp2p configuration with a pubsub implementation. ```JavaScript -const Libp2p = require('libp2p') -const Gossipsub = require('libp2p-gossipsub') +import { createLibp2p } from 'libp2p' +import { Gossipsub } from 'libp2p-gossipsub' -const node = await Libp2p.create({ +const node = await createLibp2p({ addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [ TCP ], - streamMuxer: [ Mplex ], - connEncryption: [ NOISE ], - pubsub: Gossipsub - } + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Noise() + ], + pubsub: new Gossipsub() }) ``` diff --git a/examples/pubsub/message-filtering/test.js b/examples/pubsub/message-filtering/test.js index fddcc5c8..4cb9fbaa 100644 --- a/examples/pubsub/message-filtering/test.js +++ b/examples/pubsub/message-filtering/test.js @@ -1,9 +1,10 @@ -'use strict' +import path from 'path' +import execa from 'execa' +import pDefer from 'p-defer' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fileURLToPath } from 'url' -const path = require('path') -const execa = require('execa') -const pDefer = require('p-defer') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) const stdout = [ { @@ -24,7 +25,7 @@ const stdout = [ }, ] -async function test () { +export async function test () { const defer = pDefer() let topicCount = 0 let topicMessageCount = 0 @@ -63,5 +64,3 @@ async function test () { await defer.promise proc.kill() } - -module.exports = test diff --git a/examples/pubsub/test-1.js b/examples/pubsub/test-1.js index ea968897..c818a573 100644 --- a/examples/pubsub/test-1.js +++ b/examples/pubsub/test-1.js @@ -1,11 +1,12 @@ -'use strict' +import path from 'path' +import execa from 'execa' +import pDefer from 'p-defer' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fileURLToPath } from 'url' -const path = require('path') -const execa = require('execa') -const pDefer = require('p-defer') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function test () { +export async function test () { const defer = pDefer() process.stdout.write('1.js\n') @@ -26,5 +27,3 @@ async function test () { await defer.promise proc.kill() } - -module.exports = test diff --git a/examples/pubsub/test.js b/examples/pubsub/test.js index 987c351a..2848f4b0 100644 --- a/examples/pubsub/test.js +++ b/examples/pubsub/test.js @@ -1,11 +1,7 @@ -'use strict' +import { test as test1 } from './test-1.js' +import { test as testMessageFiltering } from './message-filtering/test.js' -const test1 = require('./test-1') -const testMessageFiltering = require('./message-filtering/test') - -async function test() { +export async function 
test() { await test1() await testMessageFiltering() } - -module.exports = test diff --git a/examples/test-all.js b/examples/test-all.js index 3ee99e45..bce01ac7 100644 --- a/examples/test-all.js +++ b/examples/test-all.js @@ -1,4 +1,3 @@ -'use strict' process.on('unhandedRejection', (err) => { console.error(err) @@ -6,11 +5,14 @@ process.on('unhandedRejection', (err) => { process.exit(1) }) -const path = require('path') -const fs = require('fs') -const { +import path from 'path' +import fs from 'fs' +import { waitForOutput -} = require('./utils') +} from './utils.js' +import { fileURLToPath } from 'url' + +const __dirname = path.dirname(fileURLToPath(import.meta.url)) async function testAll () { for (const dir of fs.readdirSync(__dirname)) { @@ -24,7 +26,7 @@ async function testAll () { continue } - await waitForOutput('npm info ok', 'npm', ['test', '--', dir], { + await waitForOutput('npm info ok', 'npm', ['--loglevel', 'info', 'run', 'test', '--', dir], { cwd: __dirname }) } diff --git a/examples/test.js b/examples/test.js index 69f2be8b..5de99634 100644 --- a/examples/test.js +++ b/examples/test.js @@ -1,11 +1,12 @@ -'use strict' - process.env.NODE_ENV = 'test' process.env.CI = true // needed for some "clever" build tools -const fs = require('fs-extra') -const path = require('path') -const execa = require('execa') +import fs from 'fs-extra' +import path from 'path' +import execa from 'execa' +import { fileURLToPath } from 'url' + +const __dirname = path.dirname(fileURLToPath(import.meta.url)) const dir = path.join(__dirname, process.argv[2]) testExample(dir) @@ -53,7 +54,7 @@ async function build (dir) { return } - const pkg = require(pkgJson) + const pkg = JSON.parse(fs.readFileSync(pkgJson)) let build if (pkg.scripts.bundle) { @@ -88,7 +89,7 @@ async function runTest (dir) { return } - const test = require(testFile) + const { test } = await import(testFile) await test() } diff --git a/examples/transports/1.js b/examples/transports/1.js index 81d8a2bc..739acaa8 100644 --- a/examples/transports/1.js +++ b/examples/transports/1.js @@ -1,21 +1,24 @@ /* eslint-disable no-console */ -'use strict' -const Libp2p = require('../..') -const TCP = require('libp2p-tcp') -const { NOISE } = require('@chainsafe/libp2p-noise') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Noise } from '@chainsafe/libp2p-noise' const createNode = async () => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { // To signal the addresses we want to be available, we use // the multiaddr format, a self describable address - listen: ['/ip4/0.0.0.0/tcp/0'] + listen: [ + '/ip4/0.0.0.0/tcp/0' + ] }, - modules: { - transport: [TCP], - connEncryption: [NOISE] - } + transports: [ + new TCP() + ], + connectionEncryption: [ + new Noise() + ] }) await node.start() @@ -27,5 +30,5 @@ const createNode = async () => { console.log('node has started (true/false):', node.isStarted()) console.log('listening on:') - node.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${node.peerId.toB58String()}`)) + node.getMultiaddrs().forEach((ma) => console.log(ma.toString())) })(); diff --git a/examples/transports/2.js b/examples/transports/2.js index 2962a062..d157da11 100644 --- a/examples/transports/2.js +++ b/examples/transports/2.js @@ -1,26 +1,24 @@ /* eslint-disable no-console */ -'use strict' -const Libp2p = require('../..') -const TCP = require('libp2p-tcp') -const { NOISE } = require('@chainsafe/libp2p-noise') -const MPLEX = require('libp2p-mplex') - -const 
pipe = require('it-pipe') -const concat = require('it-concat') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { pipe } from 'it-pipe' +import toBuffer from 'it-to-buffer' const createNode = async () => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { // To signal the addresses we want to be available, we use // the multiaddr format, a self describable address listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - connEncryption: [NOISE], - streamMuxer: [MPLEX] - } + transports: [new TCP()], + connectionEncryption: [new Noise()], + streamMuxers: [new Mplex()] }) await node.start() @@ -29,7 +27,7 @@ const createNode = async () => { function printAddrs (node, number) { console.log('node %s is listening on:', number) - node.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${node.peerId.toB58String()}`)) + node.getMultiaddrs().forEach((ma) => console.log(ma.toString())) } ;(async () => { @@ -44,16 +42,16 @@ function printAddrs (node, number) { node2.handle('/print', async ({ stream }) => { const result = await pipe( stream, - concat + toBuffer ) - console.log(result.toString()) + console.log(uint8ArrayToString(result)) }) - await node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs) + await node1.peerStore.addressBook.set(node2.peerId, node2.getMultiaddrs()) const { stream } = await node1.dialProtocol(node2.peerId, '/print') await pipe( - ['Hello', ' ', 'p2p', ' ', 'world', '!'], + ['Hello', ' ', 'p2p', ' ', 'world', '!'].map(str => uint8ArrayFromString(str)), stream ) })(); diff --git a/examples/transports/3.js b/examples/transports/3.js index d9a83697..b1a233f8 100644 --- a/examples/transports/3.js +++ b/examples/transports/3.js @@ -1,28 +1,26 @@ /* eslint-disable no-console */ -'use strict' -const Libp2p = require('../..') -const TCP = require('libp2p-tcp') -const WebSockets = require('libp2p-websockets') -const { NOISE } = require('@chainsafe/libp2p-noise') -const MPLEX = require('libp2p-mplex') - -const pipe = require('it-pipe') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { WebSockets } from '@libp2p/websockets' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' +import { pipe } from 'it-pipe' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' const createNode = async (transports, addresses = []) => { if (!Array.isArray(addresses)) { addresses = [addresses] } - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { listen: addresses }, - modules: { - transport: transports, - connEncryption: [NOISE], - streamMuxer: [MPLEX] - } + transports: transports, + connectionEncryption: [new Noise()], + streamMuxers: [new Mplex()] }) await node.start() @@ -31,7 +29,7 @@ const createNode = async (transports, addresses = []) => { function printAddrs(node, number) { console.log('node %s is listening on:', number) - node.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${node.peerId.toB58String()}`)) + node.getMultiaddrs().forEach((ma) => console.log(ma.toString())) } function print ({ stream }) { @@ -39,7 +37,7 @@ function print ({ stream }) { stream, 
async function (source) { for await (const msg of source) { - console.log(msg.toString()) + console.log(uint8ArrayToString(msg)) } } ) @@ -47,9 +45,9 @@ function print ({ stream }) { ;(async () => { const [node1, node2, node3] = await Promise.all([ - createNode([TCP], '/ip4/0.0.0.0/tcp/0'), - createNode([TCP, WebSockets], ['/ip4/0.0.0.0/tcp/0', '/ip4/127.0.0.1/tcp/10000/ws']), - createNode([WebSockets], '/ip4/127.0.0.1/tcp/20000/ws') + createNode([new TCP()], '/ip4/0.0.0.0/tcp/0'), + createNode([new TCP(), new WebSockets()], ['/ip4/0.0.0.0/tcp/0', '/ip4/127.0.0.1/tcp/10000/ws']), + createNode([new WebSockets()], '/ip4/127.0.0.1/tcp/20000/ws') ]) printAddrs(node1, '1') @@ -60,28 +58,28 @@ function print ({ stream }) { node2.handle('/print', print) node3.handle('/print', print) - await node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs) - await node2.peerStore.addressBook.set(node3.peerId, node3.multiaddrs) - await node3.peerStore.addressBook.set(node1.peerId, node1.multiaddrs) + await node1.peerStore.addressBook.set(node2.peerId, node2.getMultiaddrs()) + await node2.peerStore.addressBook.set(node3.peerId, node3.getMultiaddrs()) + await node3.peerStore.addressBook.set(node1.peerId, node1.getMultiaddrs()) // node 1 (TCP) dials to node 2 (TCP+WebSockets) const { stream } = await node1.dialProtocol(node2.peerId, '/print') await pipe( - ['node 1 dialed to node 2 successfully'], + [uint8ArrayFromString('node 1 dialed to node 2 successfully')], stream ) // node 2 (TCP+WebSockets) dials to node 2 (WebSockets) const { stream: stream2 } = await node2.dialProtocol(node3.peerId, '/print') await pipe( - ['node 2 dialed to node 3 successfully'], + [uint8ArrayFromString('node 2 dialed to node 3 successfully')], stream2 ) // node 3 (listening WebSockets) can dial node 1 (TCP) try { await node3.dialProtocol(node1.peerId, '/print') - } catch (/** @type {any} */ err) { + } catch (err) { console.log('node 3 failed to dial to node 1 with:', err.message) } })(); diff --git a/examples/transports/4.js b/examples/transports/4.js index b46d1473..389217aa 100644 --- a/examples/transports/4.js +++ b/examples/transports/4.js @@ -1,17 +1,15 @@ /* eslint-disable no-console */ -'use strict' -const Libp2p = require('../..') -const TCP = require('libp2p-tcp') -const WebSockets = require('libp2p-websockets') -const { NOISE } = require('@chainsafe/libp2p-noise') -const MPLEX = require('libp2p-mplex') - -const fs = require('fs'); -const https = require('https'); -const pipe = require('it-pipe') - -const transportKey = WebSockets.prototype[Symbol.toStringTag]; +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { WebSockets } from '@libp2p/websockets' +import { Noise } from '@chainsafe/libp2p-noise' +import { Mplex } from '@libp2p/mplex' +import fs from 'fs' +import https from 'https' +import { pipe } from 'it-pipe' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' const httpServer = https.createServer({ cert: fs.readFileSync('./test_certs/cert.pem'), @@ -23,26 +21,25 @@ const createNode = async (addresses = []) => { addresses = [addresses] } - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { listen: addresses }, - modules: { - transport: [WebSockets], - connEncryption: [NOISE], - streamMuxer: [MPLEX] - }, - config: { - peerDiscovery: { - // Disable autoDial as it would fail because we are using a self-signed cert. 
- // `dialProtocol` does not fail because we pass `rejectUnauthorized: false`. - autoDial: false - }, - transport: { - [transportKey]: { - listenerOptions: { server: httpServer }, - }, - }, + transports: [ + new TCP(), + new WebSockets({ + server: httpServer, + websocket: { + rejectUnauthorized: false + } + }) + ], + connectionEncryption: [new Noise()], + streamMuxers: [new Mplex()], + connectionManager: { + // Disable autoDial as it would fail because we are using a self-signed cert. + // `dialProtocol` does not fail because we pass `rejectUnauthorized: false`. + autoDial: false } }) @@ -52,7 +49,7 @@ const createNode = async (addresses = []) => { function printAddrs(node, number) { console.log('node %s is listening on:', number) - node.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${node.peerId.toB58String()}`)) + node.getMultiaddrs().forEach((ma) => console.log(ma.toString())) } function print ({ stream }) { @@ -60,7 +57,7 @@ function print ({ stream }) { stream, async function (source) { for await (const msg of source) { - console.log(msg.toString()) + console.log(uint8ArrayToString(msg)) } } ) @@ -78,12 +75,12 @@ function print ({ stream }) { node1.handle('/print', print) node2.handle('/print', print) - const targetAddr = `${node1.multiaddrs[0]}/p2p/${node1.peerId.toB58String()}`; + const targetAddr = node1.getMultiaddrs()[0]; // node 2 (Secure WebSockets) dials to node 1 (Secure Websockets) - const { stream } = await node2.dialProtocol(targetAddr, '/print', { websocket: { rejectUnauthorized: false } }) + const { stream } = await node2.dialProtocol(targetAddr, '/print') await pipe( - ['node 2 dialed to node 1 successfully'], + [uint8ArrayFromString('node 2 dialed to node 1 successfully')], stream ) })(); diff --git a/examples/transports/README.md b/examples/transports/README.md index 03d46012..1d3f5d4f 100644 --- a/examples/transports/README.md +++ b/examples/transports/README.md @@ -21,23 +21,23 @@ Then, in your favorite text editor create a file with the `.js` extension. I've First thing is to create our own libp2p node! Insert: ```JavaScript -'use strict' - -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const { NOISE } = require('@chainsafe/libp2p-noise') +import { createLibp2p } from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Noise } from '@chainsafe/libp2p-noise' const createNode = async () => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { // To signal the addresses we want to be available, we use // the multiaddr format, a self describable address listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [ TCP ], - connEncryption: [ NOISE ] - } + transports: [ + new TCP() + ], + connectionEncryption: [ + new Noise() + ] }) await node.start() @@ -80,31 +80,29 @@ Now that we have our `createNode` function, let's create two nodes and make them For this step, we will need some more dependencies. ```bash -> npm install it-pipe it-concat libp2p-mplex +> npm install it-pipe it-to-buffer @libp2p/mplex ``` And we also need to import the modules on our .js file: ```js -const pipe = require('it-pipe') -const concat = require('it-concat') -const MPLEX = require('libp2p-mplex') +import { pipe } from 'it-pipe' +import toBuffer from 'it-to-buffer' +import { Mplex } from '@libp2p/mplex' ``` We are going to reuse the `createNode` function from step 1, but this time add a stream multiplexer from `libp2p-mplex`. 
```js const createNode = async () => { - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { // To signal the addresses we want to be available, we use // the multiaddr format, a self describable address listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - connEncryption: [NOISE], - streamMuxer: [MPLEX] - } + transports: [new TCP()], + connectionEncryption: [new Noise()], + streamMuxers: [new Mplex()] // <--- Add this line }) await node.start() @@ -135,7 +133,7 @@ Then add, node2.handle('/print', async ({ stream }) => { const result = await pipe( stream, - concat + toBuffer ) console.log(result.toString()) }) @@ -186,15 +184,13 @@ const createNode = async (transports, addresses = []) => { addresses = [addresses] } - const node = await Libp2p.create({ + const node = await createLibp2p({ addresses: { listen: addresses }, - modules: { - transport: transports, - connEncryption: [NOISE], - streamMuxer: [MPLEX] - } + transports: transports, + connectionEncryption: [new Noise()], + streamMuxers: [new Mplex()] }) await node.start() @@ -207,8 +203,8 @@ As a rule, a libp2p node will only be capable of using a transport if: a) it has Let's update our flow to create nodes and see how they behave when dialing to each other: ```JavaScript -const WebSockets = require('libp2p-websockets') -const TCP = require('libp2p-tcp') +import { WebSockets } from '@libp2p/websockets' +import { TCP } from '@libp2p/tcp' const [node1, node2, node3] = await Promise.all([ createNode([TCP], '/ip4/0.0.0.0/tcp/0'), diff --git a/examples/transports/test-1.js b/examples/transports/test-1.js index bcdaf57b..81c1721d 100644 --- a/examples/transports/test-1.js +++ b/examples/transports/test-1.js @@ -1,14 +1,13 @@ -'use strict' +import path from 'path' +import { waitForOutput } from '../utils.js' +import { fileURLToPath } from 'url' -const path = require('path') -const { waitForOutput } = require('../utils') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function test () { +export async function test () { process.stdout.write('1.js\n') await waitForOutput('/p2p/', 'node', [path.join(__dirname, '1.js')], { cwd: __dirname }) } - -module.exports = test diff --git a/examples/transports/test-2.js b/examples/transports/test-2.js index b383ac9e..4c971478 100644 --- a/examples/transports/test-2.js +++ b/examples/transports/test-2.js @@ -1,14 +1,13 @@ -'use strict' +import path from 'path' +import { waitForOutput } from '../utils.js' +import { fileURLToPath } from 'url' -const path = require('path') -const { waitForOutput } = require('../utils') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function test () { +export async function test () { process.stdout.write('2.js\n') await waitForOutput('Hello p2p world!', 'node', [path.join(__dirname, '2.js')], { cwd: __dirname }) } - -module.exports = test diff --git a/examples/transports/test-3.js b/examples/transports/test-3.js index 642ab045..acc1e27d 100644 --- a/examples/transports/test-3.js +++ b/examples/transports/test-3.js @@ -1,14 +1,13 @@ -'use strict' +import path from 'path' +import { waitForOutput } from '../utils.js' +import { fileURLToPath } from 'url' -const path = require('path') -const { waitForOutput } = require('../utils') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function test () { +export async function test () { process.stdout.write('3.js\n') await waitForOutput('node 3 failed to dial to node 1 with:', 'node', [path.join(__dirname, 
'3.js')], { cwd: __dirname }) } - -module.exports = test diff --git a/examples/transports/test-4.js b/examples/transports/test-4.js index 5186ff53..7a5953b0 100644 --- a/examples/transports/test-4.js +++ b/examples/transports/test-4.js @@ -1,14 +1,13 @@ -'use strict' +import path from 'path' +import { waitForOutput } from '../utils.js' +import { fileURLToPath } from 'url' -const path = require('path') -const { waitForOutput } = require('../utils') +const __dirname = path.dirname(fileURLToPath(import.meta.url)) -async function test () { +export async function test () { process.stdout.write('4.js\n') await waitForOutput('node 2 dialed to node 1 successfully', 'node', [path.join(__dirname, '4.js')], { cwd: __dirname }) } - -module.exports = test diff --git a/examples/transports/test.js b/examples/transports/test.js index 8ef5d0b5..1b0f8f94 100644 --- a/examples/transports/test.js +++ b/examples/transports/test.js @@ -1,15 +1,11 @@ -'use strict' +import { test as test1 } from './test-1.js' +import { test as test2 } from './test-2.js' +import { test as test3 } from './test-3.js' +import { test as test4 } from './test-4.js' -const test1 = require('./test-1') -const test2 = require('./test-2') -const test3 = require('./test-3') -const test4 = require('./test-4') - -async function test() { +export async function test() { await test1() await test2() await test3() await test4() } - -module.exports = test diff --git a/examples/utils.js b/examples/utils.js index 1f6e571c..914d91e9 100644 --- a/examples/utils.js +++ b/examples/utils.js @@ -1,15 +1,13 @@ -'use strict' - -const execa = require('execa') -const fs = require('fs-extra') -const which = require('which') +import execa from 'execa' +import fs from 'fs-extra' +import which from 'which' async function isExecutable (command) { try { await fs.access(command, fs.constants.X_OK) return true - } catch (/** @type {any} */ err) { + } catch (err) { if (err.code === 'ENOENT') { return isExecutable(await which(command)) } @@ -22,7 +20,7 @@ async function isExecutable (command) { } } -async function waitForOutput (expectedOutput, command, args = [], opts = {}) { +export async function waitForOutput (expectedOutput, command, args = [], opts = {}) { if (!await isExecutable(command)) { args.unshift(command) command = 'node' @@ -49,13 +47,9 @@ async function waitForOutput (expectedOutput, command, args = [], opts = {}) { try { await proc - } catch (/** @type {any} */ err) { + } catch (err) { if (!err.killed) { throw err } } } - -module.exports = { - waitForOutput -} diff --git a/examples/webrtc-direct/README.md b/examples/webrtc-direct/README.md index 3eb406a6..529316a8 100644 --- a/examples/webrtc-direct/README.md +++ b/examples/webrtc-direct/README.md @@ -9,7 +9,7 @@ When in the root folder of this example, type `node listener.js` in terminal. Yo incoming connections. Below is just an example of such address. In your case the suffix hash (`peerId`) will be different. ```bash -$ node listener.js +$ node listener.js Listening on: /ip4/127.0.0.1/tcp/9090/http/p2p-webrtc-direct/p2p/QmUKQCzEUhhhobcNSrXU5uzxTqbvF1BjMCGNGZzZU14Kgd ``` @@ -18,16 +18,15 @@ Listening on: Confirm that the above address is the same as the field `list` in `public/dialer.js`: ```js peerDiscovery: { - [Bootstrap.tag]: { - enabled: true, + new Bootstrap({ // paste the address into `list` list: ['/ip4/127.0.0.1/tcp/9090/http/p2p-webrtc-direct/p2p/QmUKQCzEUhhhobcNSrXU5uzxTqbvF1BjMCGNGZzZU14Kgd'] - } + }) } ``` ## 2. 
Run a browser libp2p dialer -When in the root folder of this example, type `npm run dev` in terminal. You should see an address where you can browse +When in the root folder of this example, type `npm start` in terminal. You should see an address where you can browse the running client. Open this address in your browser. In console logs you should see logs about successful connection with the node client. In the output of node client you should see a log message about successful connection as well. diff --git a/examples/webrtc-direct/dialer.js b/examples/webrtc-direct/dialer.js index b08720c2..d2357dfa 100644 --- a/examples/webrtc-direct/dialer.js +++ b/examples/webrtc-direct/dialer.js @@ -1,28 +1,21 @@ -import 'babel-polyfill' -const Libp2p = require('libp2p') -const WebRTCDirect = require('libp2p-webrtc-direct') -const Mplex = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') -const Bootstrap = require('libp2p-bootstrap') +import { createLibp2p } from 'libp2p' +import { WebRTCDirect } from '@achingbrain/webrtc-direct' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { Bootstrap } from '@libp2p/bootstrap' document.addEventListener('DOMContentLoaded', async () => { // use the same peer id as in `listener.js` to avoid copy-pasting of listener's peer id into `peerDiscovery` const hardcodedPeerId = '12D3KooWCuo3MdXfMgaqpLC5Houi1TRoFqgK9aoxok4NK5udMu8m' - const libp2p = await Libp2p.create({ - modules: { - transport: [WebRTCDirect], - streamMuxer: [Mplex], - connEncryption: [NOISE], - peerDiscovery: [Bootstrap] - }, - config: { - peerDiscovery: { - [Bootstrap.tag]: { - enabled: true, - list: [`/ip4/127.0.0.1/tcp/9090/http/p2p-webrtc-direct/p2p/${hardcodedPeerId}`] - } - } - } + const libp2p = await createLibp2p({ + transports: [new WebRTCDirect()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()], + peerDiscovery: [ + new Bootstrap({ + list: [`/ip4/127.0.0.1/tcp/9090/http/p2p-webrtc-direct/p2p/${hardcodedPeerId}`] + }) + ] }) const status = document.getElementById('status') @@ -36,22 +29,21 @@ document.addEventListener('DOMContentLoaded', async () => { } // Listen for new peers - libp2p.on('peer:discovery', (peerId) => { - log(`Found peer ${peerId.toB58String()}`) + libp2p.addEventListener('peer:discovery', (evt) => { + log(`Found peer ${evt.detail.id.toString()}`) }) // Listen for new connections to peers - libp2p.connectionManager.on('peer:connect', (connection) => { - log(`Connected to ${connection.remotePeer.toB58String()}`) + libp2p.connectionManager.addEventListener('peer:connect', (evt) => { + log(`Connected to ${evt.detail.remotePeer.toString()}`) }) // Listen for peers disconnecting - libp2p.connectionManager.on('peer:disconnect', (connection) => { - log(`Disconnected from ${connection.remotePeer.toB58String()}`) + libp2p.connectionManager.addEventListener('peer:disconnect', (evt) => { + log(`Disconnected from ${evt.detail.remotePeer.toString()}`) }) await libp2p.start() status.innerText = 'libp2p started!' 
- log(`libp2p id is ${libp2p.peerId.toB58String()}`) - + log(`libp2p id is ${libp2p.peerId.toString()}`) }) diff --git a/examples/webrtc-direct/listener.js b/examples/webrtc-direct/listener.js index b0b9cb43..e27cb66b 100644 --- a/examples/webrtc-direct/listener.js +++ b/examples/webrtc-direct/listener.js @@ -1,43 +1,34 @@ -const Libp2p = require('libp2p') -const Bootstrap = require('libp2p-bootstrap') -const WebRTCDirect = require('libp2p-webrtc-direct') -const Mplex = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') -const PeerId = require('peer-id') +import { createLibp2p } from 'libp2p' +import { WebRTCDirect } from '@achingbrain/webrtc-direct' +import { Mplex } from '@libp2p/mplex' +import { Noise } from '@chainsafe/libp2p-noise' +import { createFromJSON } from '@libp2p/peer-id-factory' +import wrtc from 'wrtc' ;(async () => { // hardcoded peer id to avoid copy-pasting of listener's peer id into the dialer's bootstrap list // generated with cmd `peer-id --type=ed25519` - const hardcodedPeerId = await PeerId.createFromJSON({ + const hardcodedPeerId = await createFromJSON({ "id": "12D3KooWCuo3MdXfMgaqpLC5Houi1TRoFqgK9aoxok4NK5udMu8m", "privKey": "CAESQAG6Ld7ev6nnD0FKPs033/j0eQpjWilhxnzJ2CCTqT0+LfcWoI2Vr+zdc1vwk7XAVdyoCa2nwUR3RJebPWsF1/I=", "pubKey": "CAESIC33FqCNla/s3XNb8JO1wFXcqAmtp8FEd0SXmz1rBdfy" }) - const node = await Libp2p.create({ + const node = await createLibp2p({ peerId: hardcodedPeerId, addresses: { listen: ['/ip4/127.0.0.1/tcp/9090/http/p2p-webrtc-direct'] }, - modules: { - transport: [WebRTCDirect], - streamMuxer: [Mplex], - connEncryption: [NOISE] - }, - config: { - peerDiscovery: { - [Bootstrap.tag]: { - enabled: false, - } - } - } + transports: [new WebRTCDirect({ wrtc })], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()] }) - node.connectionManager.on('peer:connect', (connection) => { - console.info(`Connected to ${connection.remotePeer.toB58String()}!`) + node.connectionManager.addEventListener('peer:connect', (evt) => { + console.info(`Connected to ${evt.detail.remotePeer.toString()}!`) }) await node.start() console.log('Listening on:') - node.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${node.peerId.toB58String()}`)) + node.getMultiaddrs().forEach((ma) => console.log(ma.toString())) })() diff --git a/examples/webrtc-direct/package.json b/examples/webrtc-direct/package.json index 6eb9b4b0..950112b1 100644 --- a/examples/webrtc-direct/package.json +++ b/examples/webrtc-direct/package.json @@ -2,32 +2,25 @@ "name": "webrtc-direct", "version": "0.0.1", "private": true, - "description": "", + "type": "module", + "browserslist": [ + "last 2 Chrome versions" + ], "scripts": { "test": "echo \"Error: no test specified\" && exit 1", - "build": "parcel build index.html", - "start": "parcel index.html" + "start": "vite" }, "license": "ISC", - "devDependencies": { - "@babel/cli": "^7.13.10", - "@babel/core": "^7.13.10", - "@mapbox/node-pre-gyp": "^1.0.8", - "babel-plugin-syntax-async-functions": "^6.13.0", - "babel-plugin-transform-regenerator": "^6.26.0", - "babel-polyfill": "^6.26.0", - "parcel": "^2.0.1", - "util": "^0.12.3" - }, "dependencies": { - "@chainsafe/libp2p-noise": "^5.0.2", + "@achingbrain/webrtc-direct": "^0.7.2", + "@chainsafe/libp2p-noise": "^6.0.1", + "@libp2p/bootstrap": "^1.0.1", + "@libp2p/mplex": "^1.0.2", "libp2p": "../../", - "libp2p-bootstrap": "^0.14.0", - "libp2p-mplex": "^0.10.4", - "libp2p-webrtc-direct": "^0.7.0", - "peer-id": "^0.16.0" + "wrtc": "^0.4.7" }, - "browser": { - "ipfs": 
"ipfs/dist/index.min.js" + "devDependencies": { + "@mapbox/node-pre-gyp": "^1.0.8", + "vite": "^2.8.6" } } diff --git a/examples/webrtc-direct/test.js b/examples/webrtc-direct/test.js index d6603f36..a658e841 100644 --- a/examples/webrtc-direct/test.js +++ b/examples/webrtc-direct/test.js @@ -1,10 +1,11 @@ -'use strict' +import path from 'path' +import execa from 'execa' +import pDefer from 'p-defer' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { chromium } from 'playwright' +import { fileURLToPath } from 'url' -const path = require('path') -const execa = require('execa') -const pDefer = require('p-defer') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') -const { chromium } = require('playwright'); +const __dirname = path.dirname(fileURLToPath(import.meta.url)) function startNode (name, args = []) { return execa('node', [path.join(__dirname, name), ...args], { @@ -13,8 +14,8 @@ function startNode (name, args = []) { }) } -function startBrowser (name, args = []) { - return execa('parcel', [path.join(__dirname, name), ...args], { +function startBrowser () { + return execa('vite', [], { preferLocal: true, localDir: __dirname, cwd: __dirname, @@ -22,7 +23,7 @@ function startBrowser (name, args = []) { }) } -async function test () { +export async function test () { // Step 1, listener process const listenerProcReady = pDefer() let listenerOutput = '' @@ -42,20 +43,14 @@ async function test () { // Step 2, dialer process process.stdout.write('dialer.js\n') - let dialerUrl = '' - const dialerProc = startBrowser('index.html') + let dialerUrl = 'http://localhost:3000' + const dialerProc = startBrowser() dialerProc.all.on('data', async (chunk) => { /**@type {string} */ const out = chunk.toString() - if (out.includes('Server running at')) { - dialerUrl = out.split('Server running at ')[1] - } - - - if (out.includes('Built in ')) { - + if (out.includes('ready in')) { try { const browser = await chromium.launch(); const page = await browser.newPage(); @@ -71,25 +66,14 @@ async function test () { '#output', { timeout: 10000 } ) - await browser.close(); - } catch (/** @type {any} */ err) { + await browser.close() + } catch (err) { console.error(err) process.exit(1) } finally { dialerProc.cancel() - listenerProc.kill() + listenerProc.cancel() } } }) - - await Promise.all([ - listenerProc, - dialerProc, - ]).catch((err) => { - if (err.signal !== 'SIGTERM') { - throw err - } - }) } - -module.exports = test diff --git a/package.json b/package.json index 54fdb30a..166aa402 100644 --- a/package.json +++ b/package.json @@ -2,126 +2,146 @@ "name": "libp2p", "version": "0.36.2", "description": "JavaScript implementation of libp2p, a modular peer to peer network stack", - "leadMaintainer": "Jacob Heun ", - "main": "src/index.js", - "types": "dist/src/index.d.ts", + "license": "Apache-2.0 OR MIT", + "homepage": "https://github.com/libp2p/js-libp2p#readme", + "repository": { + "type": "git", + "url": "git+https://github.com/libp2p/js-libp2p.git" + }, + "bugs": { + "url": "https://github.com/libp2p/js-libp2p/issues" + }, + "keywords": [ + "IPFS", + "libp2p", + "network", + "p2p", + "peer", + "peer-to-peer" + ], + "engines": { + "node": ">=16.0.0", + "npm": ">=7.0.0" + }, + "type": "module", + "types": "./dist/src/index.d.ts", "typesVersions": { "*": { + "*": [ + "*", + "dist/*", + "dist/src/*", + "dist/src/*/index" + ], "src/*": [ + "*", + "dist/*", "dist/src/*", "dist/src/*/index" ] } }, "files": [ - "dist", - "src" + "src", + "dist/src", + "!dist/test", + 
"!**/*.tsbuildinfo" ], - "scripts": { - "lint": "aegir lint", - "build": "aegir build", - "build:proto": "npm run build:proto:circuit && npm run build:proto:fetch && npm run build:proto:identify && npm run build:proto:plaintext && npm run build:proto:address-book && npm run build:proto:proto-book && npm run build:proto:peer && npm run build:proto:peer-record && npm run build:proto:envelope", - "build:proto:circuit": "pbjs -t static-module -w commonjs -r libp2p-circuit --force-number --no-verify --no-delimited --no-create --no-beautify --no-defaults --lint eslint-disable -o src/circuit/protocol/index.js ./src/circuit/protocol/index.proto", - "build:proto:fetch": "pbjs -t static-module -w commonjs -r libp2p-fetch --force-number --no-verify --no-delimited --no-create --no-beautify --no-defaults --lint eslint-disable -o src/fetch/proto.js ./src/fetch/proto.proto", - "build:proto:identify": "pbjs -t static-module -w commonjs -r libp2p-identify --force-number --no-verify --no-delimited --no-create --no-beautify --no-defaults --lint eslint-disable -o src/identify/message.js ./src/identify/message.proto", - "build:proto:plaintext": "pbjs -t static-module -w commonjs -r libp2p-plaintext --force-number --no-verify --no-delimited --no-create --no-beautify --no-defaults --lint eslint-disable -o src/insecure/proto.js ./src/insecure/proto.proto", - "build:proto:peer": "pbjs -t static-module -w commonjs -r libp2p-peer --force-number --no-verify --no-delimited --no-create --no-beautify --no-defaults --lint eslint-disable -o src/peer-store/pb/peer.js ./src/peer-store/pb/peer.proto", - "build:proto:peer-record": "pbjs -t static-module -w commonjs -r libp2p-peer-record --force-number --no-verify --no-delimited --no-create --no-beautify --no-defaults --lint eslint-disable -o src/record/peer-record/peer-record.js ./src/record/peer-record/peer-record.proto", - "build:proto:envelope": "pbjs -t static-module -w commonjs -r libp2p-envelope --force-number --no-verify --no-delimited --no-create --no-beautify --no-defaults --lint eslint-disable -o src/record/envelope/envelope.js ./src/record/envelope/envelope.proto", - "build:proto-types": "npm run build:proto-types:circuit && npm run build:proto-types:fetch && npm run build:proto-types:identify && npm run build:proto-types:plaintext && npm run build:proto-types:address-book && npm run build:proto-types:proto-book && npm run build:proto-types:peer && npm run build:proto-types:peer-record && npm run build:proto-types:envelope", - "build:proto-types:circuit": "pbts -o src/circuit/protocol/index.d.ts src/circuit/protocol/index.js", - "build:proto-types:fetch": "pbts -o src/fetch/proto.d.ts src/fetch/proto.js", - "build:proto-types:identify": "pbts -o src/identify/message.d.ts src/identify/message.js", - "build:proto-types:plaintext": "pbts -o src/insecure/proto.d.ts src/insecure/proto.js", - "build:proto-types:peer": "pbts -o src/peer-store/pb/peer.d.ts src/peer-store/pb/peer.js", - "build:proto-types:peer-record": "pbts -o src/record/peer-record/peer-record.d.ts src/record/peer-record/peer-record.js", - "build:proto-types:envelope": "pbts -o src/record/envelope/envelope.d.ts src/record/envelope/envelope.js", - "test": "aegir test", - "test:ts": "aegir build --no-bundle && npm run test --prefix test/ts-use", - "test:node": "aegir test -t node -f \"./test/**/*.{node,spec}.js\"", - "test:browser": "aegir test -t browser", - "test:examples": "cd examples && npm run test:all", - "test:interop": "LIBP2P_JS=$PWD npx aegir test -t node -f ./node_modules/libp2p-interop/test/*", 
- "prepare": "npm run build", - "coverage": "nyc --reporter=text --reporter=lcov npm run test:node" - }, - "repository": { - "type": "git", - "url": "https://github.com/libp2p/js-libp2p.git" - }, - "keywords": [ - "libp2p", - "network", - "p2p", - "peer", - "peer-to-peer", - "IPFS" - ], - "bugs": { - "url": "https://github.com/libp2p/js-libp2p/issues" - }, - "homepage": "https://libp2p.io", - "license": "MIT", - "engines": { - "node": ">=15.0.0" - }, - "browser": { - "nat-api": false + "exports": { + ".": { + "import": "./dist/src/index.js" + }, + "./insecure": { + "import": "./dist/src/insecure/index.js" + }, + "./pnet": { + "import": "./dist/src/pnet/index.js" + }, + "./pnet/generate": { + "import": "./dist/src/pnet/key-generator.js" + } }, "eslintConfig": { "extends": "ipfs", + "parserOptions": { + "sourceType": "module" + }, "ignorePatterns": [ "!.aegir.js", "test/ts-use", "*.d.ts" ] }, + "scripts": { + "lint": "aegir lint", + "build": "tsc", + "postbuild": "mkdirp dist/src/circuit/pb dist/src/fetch/pb dist/src/identify/pb dist/src/insecure/pb && cp src/circuit/pb/*.js src/circuit/pb/*.d.ts dist/src/circuit/pb && cp src/fetch/pb/*.js src/fetch/pb/*.d.ts dist/src/fetch/pb && cp src/identify/pb/*.js src/identify/pb/*.d.ts dist/src/identify/pb && cp src/insecure/pb/*.js src/insecure/pb/*.d.ts dist/src/insecure/pb", + "generate": "run-s generate:proto:* generate:proto-types:*", + "generate:proto:circuit": "pbjs -t static-module -w es6 -r libp2p-circuit --force-number --no-verify --no-delimited --no-create --no-beautify --no-defaults --lint eslint-disable -o src/circuit/protocol/index.js ./src/circuit/protocol/index.proto", + "generate:proto:fetch": "pbjs -t static-module -w es6 -r libp2p-fetch --force-number --no-verify --no-delimited --no-create --no-beautify --no-defaults --lint eslint-disable -o src/fetch/proto.js ./src/fetch/proto.proto", + "generate:proto:identify": "pbjs -t static-module -w es6 -r libp2p-identify --force-number --no-verify --no-delimited --no-create --no-beautify --no-defaults --lint eslint-disable -o src/identify/message.js ./src/identify/message.proto", + "generate:proto:plaintext": "pbjs -t static-module -w es6 -r libp2p-plaintext --force-number --no-verify --no-delimited --no-create --no-beautify --no-defaults --lint eslint-disable -o src/insecure/proto.js ./src/insecure/proto.proto", + "generate:proto-types:circuit": "pbts -o src/circuit/protocol/index.d.ts src/circuit/protocol/index.js", + "generate:proto-types:fetch": "pbts -o src/fetch/proto.d.ts src/fetch/proto.js", + "generate:proto-types:identify": "pbts -o src/identify/message.d.ts src/identify/message.js", + "generate:proto-types:plaintext": "pbts -o src/insecure/proto.d.ts src/insecure/proto.js", + "pretest": "npm run build", + "test": "aegir test", + "test:node": "npm run test -- -t node -f \"./dist/test/**/*.{node,spec}.js\" --cov", + "test:chrome": "npm run test -- -t browser -f \"./dist/test/**/*.spec.js\" --cov", + "test:chrome-webworker": "npm run test -- -t webworker -f \"./dist/test/**/*.spec.js\"", + "test:firefox": "npm run test -- -t browser -f \"./dist/test/**/*.spec.js\" -- --browser firefox", + "test:firefox-webworker": "npm run test -- -t webworker -f \"./dist/test/**/*.spec.js\" -- --browser firefox", + "test:examples": "cd examples && npm run test:all", + "test:interop": "npm run test -- -t node -f dist/test/interop.js" + }, "dependencies": { - "@vascosantos/moving-average": "^1.1.0", - "abortable-iterator": "^3.0.0", - "aggregate-error": "^3.1.0", + "@achingbrain/nat-port-mapper": 
"^1.0.0", + "@libp2p/connection": "^1.1.4", + "@libp2p/crypto": "^0.22.9", + "@libp2p/interfaces": "^1.3.17", + "@libp2p/multistream-select": "^1.0.3", + "@libp2p/peer-id": "^1.1.8", + "@libp2p/peer-id-factory": "^1.0.8", + "@libp2p/peer-store": "^1.0.6", + "@libp2p/utils": "^1.0.9", + "@multiformats/mafmt": "^11.0.2", + "@multiformats/multiaddr": "^10.1.8", + "abortable-iterator": "^4.0.2", + "aggregate-error": "^4.0.0", "any-signal": "^3.0.0", "bignumber.js": "^9.0.1", "class-is": "^1.1.0", "datastore-core": "^7.0.0", - "debug": "^4.3.1", - "err-code": "^3.0.0", - "es6-promisify": "^7.0.0", + "debug": "^4.3.3", + "err-code": "^3.0.1", "events": "^3.3.0", "hashlru": "^2.3.0", - "interface-datastore": "^6.0.2", - "it-all": "^1.0.4", - "it-buffer": "^0.1.2", - "it-drain": "^1.0.3", - "it-filter": "^1.0.1", - "it-first": "^1.0.4", + "interface-datastore": "^6.1.0", + "it-all": "^1.0.6", + "it-drain": "^1.0.5", + "it-filter": "^1.0.3", + "it-first": "^1.0.6", "it-foreach": "^0.1.1", - "it-handshake": "^2.0.0", - "it-length-prefixed": "^5.0.2", - "it-map": "^1.0.4", - "it-merge": "^1.0.0", - "it-pipe": "^1.1.0", + "it-handshake": "^3.0.1", + "it-length-prefixed": "^7.0.1", + "it-map": "^1.0.6", + "it-merge": "^1.0.3", + "it-pipe": "^2.0.3", "it-sort": "^1.0.1", - "it-take": "^1.0.0", - "libp2p-crypto": "^0.21.2", - "libp2p-interfaces": "^4.0.0", - "libp2p-utils": "^0.4.0", - "mafmt": "^10.0.0", + "it-stream-types": "^1.0.4", + "it-take": "^1.0.2", + "it-to-buffer": "^2.0.2", "merge-options": "^3.0.4", - "mortice": "^2.0.1", - "multiaddr": "^10.0.0", - "multiformats": "^9.0.0", - "multistream-select": "^3.0.0", + "mortice": "^3.0.0", + "multiformats": "^9.6.3", "mutable-proxy": "^1.0.0", - "nat-api": "^0.3.1", "node-forge": "^1.2.1", - "p-any": "^3.0.0", "p-fifo": "^1.0.0", - "p-retry": "^4.4.0", - "p-settle": "^4.1.1", - "peer-id": "^0.16.0", - "private-ip": "^2.1.0", - "protobufjs": "^6.10.2", + "p-retry": "^5.0.0", + "p-settle": "^5.0.0", + "private-ip": "^2.3.3", + "protobufjs": "^6.11.2", "retimer": "^3.0.0", "sanitize-filename": "^1.6.3", "set-delayed-interval": "^1.0.0", @@ -133,121 +153,49 @@ "xsalsa20": "^1.1.0" }, "devDependencies": { - "@chainsafe/libp2p-noise": "^5.0.0", + "@achingbrain/libp2p-gossipsub": "^0.13.5", + "@chainsafe/libp2p-noise": "^6.0.1", + "@libp2p/bootstrap": "^1.0.2", + "@libp2p/daemon-client": "^0.0.2", + "@libp2p/daemon-server": "^0.0.2", + "@libp2p/delegated-content-routing": "^1.0.2", + "@libp2p/delegated-peer-routing": "^1.0.2", + "@libp2p/floodsub": "^1.0.2", + "@libp2p/interface-compliance-tests": "^1.1.20", + "@libp2p/interop": "^0.0.3", + "@libp2p/kad-dht": "^1.0.3", + "@libp2p/mdns": "^1.0.3", + "@libp2p/mplex": "^1.0.1", + "@libp2p/tcp": "^1.0.6", + "@libp2p/tracked-map": "^1.0.4", + "@libp2p/webrtc-star": "^1.0.3", + "@libp2p/websockets": "^1.0.3", "@nodeutils/defaults-deep": "^1.1.0", - "@types/es6-promisify": "^6.0.0", - "@types/node": "^16.0.1", + "@types/node": "^16.11.26", "@types/node-forge": "^1.0.0", + "@types/p-fifo": "^1.0.0", "@types/varint": "^6.0.0", - "aegir": "^36.0.0", + "@types/xsalsa20": "^1.1.0", + "aegir": "^36.1.3", "buffer": "^6.0.3", + "cborg": "^1.8.1", "delay": "^5.0.0", - "into-stream": "^6.0.0", - "ipfs-http-client": "^54.0.2", - "it-concat": "^2.0.0", - "it-pair": "^1.0.0", - "it-pushable": "^1.4.0", - "libp2p": ".", - "libp2p-bootstrap": "^0.14.0", - "libp2p-delegated-content-routing": "^0.11.0", - "libp2p-delegated-peer-routing": "^0.11.1", - "libp2p-interfaces-compliance-tests": "^4.0.8", - "libp2p-interop": "^0.7.1", - 
"libp2p-kad-dht": "^0.28.6", - "libp2p-mdns": "^0.18.0", - "libp2p-mplex": "^0.10.4", - "libp2p-tcp": "^0.17.0", - "libp2p-webrtc-star": "^0.25.0", - "libp2p-websockets": "^0.16.0", + "go-libp2p": "^0.0.6", + "into-stream": "^7.0.0", + "ipfs-http-client": "^56.0.1", + "it-pair": "^2.0.2", + "it-pushable": "^2.0.1", "nock": "^13.0.3", - "p-defer": "^3.0.0", - "p-times": "^3.0.0", - "p-wait-for": "^3.2.0", + "npm-run-all": "^4.1.5", + "p-defer": "^4.0.0", + "p-event": "^5.0.1", + "p-times": "^4.0.0", + "p-wait-for": "^4.1.0", "rimraf": "^3.0.2", - "sinon": "^12.0.1", - "util": "^0.12.3" + "sinon": "^13.0.1", + "ts-sinon": "^2.0.2" }, - "contributors": [ - "Vasco Santos ", - "David Dias ", - "Jacob Heun ", - "Alex Potsides ", - "Alan Shaw ", - "Cayman ", - "Pedro Teixeira ", - "Friedel Ziegelmayer ", - "Maciej Krüger ", - "Hugo Dias ", - "dirkmc ", - "Volker Mische ", - "Chris Dostert ", - "zeim839 <50573884+zeim839@users.noreply.github.com>", - "Robert Kiel ", - "Richard Littauer ", - "a1300 ", - "Ryan Bell ", - "ᴠɪᴄᴛᴏʀ ʙᴊᴇʟᴋʜᴏʟᴍ ", - "Andrew Nesbitt ", - "Franck Royer ", - "Thomas Eizinger ", - "Vít Habada ", - "Giovanni T. Parra ", - "acolytec3 <17355484+acolytec3@users.noreply.github.com>", - "Alan Smithee ", - "Elven ", - "Samlior ", - "Didrik Nordström ", - "Aditya Bose <13054902+adbose@users.noreply.github.com>", - "TJKoury ", - "TheStarBoys <41286328+TheStarBoys@users.noreply.github.com>", - "Tiago Alves ", - "Tim Daubenschütz ", - "XiaoZhang ", - "Yusef Napora ", - "Zane Starr ", - "ebinks ", - "greenSnot ", - "isan_rivkin ", - "mayerwin ", - "mcclure ", - "patrickwoodhead <91056047+patrickwoodhead@users.noreply.github.com>", - "phillmac ", - "robertkiel ", - "shresthagrawal <34920931+shresthagrawal@users.noreply.github.com>", - "swedneck <40505480+swedneck@users.noreply.github.com>", - "tuyennhv ", - "Sönke Hahn ", - "Aleksei ", - "Bernd Strehl ", - "Chris Bratlien ", - "Cindy Wu ", - "Daijiro Wachi ", - "Diogo Silva ", - "Dmitriy Ryajov ", - "Ethan Lam ", - "Fei Liu ", - "Felipe Martins ", - "Florian-Merle ", - "Francis Gulotta ", - "Guy Sviry <32539816+guysv@users.noreply.github.com>", - "Henrique Dias ", - "Irakli Gozalishvili ", - "Joel Gustafson ", - "John Rees ", - "João Santos ", - "Julien Bouquillon ", - "Kevin Kwok ", - "Kevin Lacker ", - "Lars Gierth ", - "Leask Wong ", - "Marcin Tojek ", - "Marston Connell <34043723+TheMarstonConnell@users.noreply.github.com>", - "Michael Burns <5170+mburns@users.noreply.github.com>", - "Miguel Mota ", - "Nuno Nogueira ", - "Philipp Muens ", - "RasmusErik Voel Jensen ", - "Smite Chow ", - "Soeren " - ] + "browser": { + "nat-api": false + } } diff --git a/scripts/node-globals.js b/scripts/node-globals.js deleted file mode 100644 index cc0a4c9e..00000000 --- a/scripts/node-globals.js +++ /dev/null @@ -1,2 +0,0 @@ -// @ts-nocheck -export const { Buffer } = require('buffer') diff --git a/src/address-manager/index.js b/src/address-manager/index.js deleted file mode 100644 index 25de94b6..00000000 --- a/src/address-manager/index.js +++ /dev/null @@ -1,96 +0,0 @@ -'use strict' - -const { EventEmitter } = require('events') -const { Multiaddr } = require('multiaddr') -const PeerId = require('peer-id') - -/** - * @typedef {Object} AddressManagerOptions - * @property {string[]} [listen = []] - list of multiaddrs string representation to listen. - * @property {string[]} [announce = []] - list of multiaddrs string representation to announce. - */ - -/** - * @fires AddressManager#change:addresses Emitted when a addresses change. 
- */ -class AddressManager extends EventEmitter { - /** - * Responsible for managing the peer addresses. - * Peers can specify their listen and announce addresses. - * The listen addresses will be used by the libp2p transports to listen for new connections, - * while the announce addresses will be used for the peer addresses' to other peers in the network. - * - * @class - * @param {PeerId} peerId - The Peer ID of the node - * @param {object} [options] - * @param {Array} [options.listen = []] - list of multiaddrs string representation to listen. - * @param {Array} [options.announce = []] - list of multiaddrs string representation to announce. - */ - constructor (peerId, { listen = [], announce = [] } = {}) { - super() - - this.peerId = peerId - this.listen = new Set(listen.map(ma => ma.toString())) - this.announce = new Set(announce.map(ma => ma.toString())) - this.observed = new Set() - } - - /** - * Get peer listen multiaddrs. - * - * @returns {Multiaddr[]} - */ - getListenAddrs () { - return Array.from(this.listen).map((a) => new Multiaddr(a)) - } - - /** - * Get peer announcing multiaddrs. - * - * @returns {Multiaddr[]} - */ - getAnnounceAddrs () { - return Array.from(this.announce).map((a) => new Multiaddr(a)) - } - - /** - * Get observed multiaddrs. - * - * @returns {Array} - */ - getObservedAddrs () { - return Array.from(this.observed).map((a) => new Multiaddr(a)) - } - - /** - * Add peer observed addresses - * - * @param {string | Multiaddr} addr - */ - addObservedAddr (addr) { - let ma = new Multiaddr(addr) - const remotePeer = ma.getPeerId() - - // strip our peer id if it has been passed - if (remotePeer) { - const remotePeerId = PeerId.createFromB58String(remotePeer) - - // use same encoding for comparison - if (remotePeerId.equals(this.peerId)) { - ma = ma.decapsulate(new Multiaddr(`/p2p/${this.peerId}`)) - } - } - - const addrString = ma.toString() - - // do not trigger the change:addresses event if we already know about this address - if (this.observed.has(addrString)) { - return - } - - this.observed.add(addrString) - this.emit('change:addresses') - } -} - -module.exports = AddressManager diff --git a/src/address-manager/index.ts b/src/address-manager/index.ts new file mode 100644 index 00000000..d5ccdd02 --- /dev/null +++ b/src/address-manager/index.ts @@ -0,0 +1,129 @@ +import { AddressManagerEvents, CustomEvent, EventEmitter } from '@libp2p/interfaces' +import { Multiaddr } from '@multiformats/multiaddr' +import { peerIdFromString } from '@libp2p/peer-id' +import type { Components } from '@libp2p/interfaces/components' + +export interface AddressManagerInit { + announceFilter?: AddressFilter + + /** + * list of multiaddrs string representation to listen + */ + listen?: string[] + + /** + * list of multiaddrs string representation to announce + */ + announce?: string[] + + /** + * list of multiaddrs string representation to never announce + */ + noAnnounce?: string[] +} + +export interface AddressFilter { + (addrs: Multiaddr[]): Multiaddr[] +} + +const defaultAddressFilter = (addrs: Multiaddr[]): Multiaddr[] => addrs + +export class DefaultAddressManager extends EventEmitter { + private readonly components: Components + private readonly listen: Set + private readonly announce: Set + private readonly observed: Set + private readonly announceFilter: AddressFilter + + /** + * Responsible for managing the peer addresses. + * Peers can specify their listen and announce addresses. 
+ * The listen addresses will be used by the libp2p transports to listen for new connections, + * while the announce addresses will be used for the peer addresses' to other peers in the network. + */ + constructor (components: Components, init: AddressManagerInit) { + super() + + const { listen = [], announce = [] } = init + + this.components = components + this.listen = new Set(listen.map(ma => ma.toString())) + this.announce = new Set(announce.map(ma => ma.toString())) + this.observed = new Set() + this.announceFilter = init.announceFilter ?? defaultAddressFilter + } + + /** + * Get peer listen multiaddrs + */ + getListenAddrs (): Multiaddr[] { + return Array.from(this.listen).map((a) => new Multiaddr(a)) + } + + /** + * Get peer announcing multiaddrs + */ + getAnnounceAddrs (): Multiaddr[] { + return Array.from(this.announce).map((a) => new Multiaddr(a)) + } + + /** + * Get observed multiaddrs + */ + getObservedAddrs (): Multiaddr[] { + return Array.from(this.observed).map((a) => new Multiaddr(a)) + } + + /** + * Add peer observed addresses + */ + addObservedAddr (addr: string | Multiaddr): void { + let ma = new Multiaddr(addr) + const remotePeer = ma.getPeerId() + + // strip our peer id if it has been passed + if (remotePeer != null) { + const remotePeerId = peerIdFromString(remotePeer) + + // use same encoding for comparison + if (remotePeerId.equals(this.components.getPeerId())) { + ma = ma.decapsulate(new Multiaddr(`/p2p/${this.components.getPeerId().toString()}`)) + } + } + + const addrString = ma.toString() + + // do not trigger the change:addresses event if we already know about this address + if (this.observed.has(addrString)) { + return + } + + this.observed.add(addrString) + this.dispatchEvent(new CustomEvent('change:addresses')) + } + + getAddresses (): Multiaddr[] { + let addrs = this.getAnnounceAddrs().map(ma => ma.toString()) + + if (addrs.length === 0) { + // no configured announce addrs, add configured listen addresses + addrs = this.components.getTransportManager().getAddrs().map(ma => ma.toString()) + } + + addrs = addrs.concat(this.getObservedAddrs().map(ma => ma.toString())) + + // dedupe multiaddrs + const addrSet = new Set(addrs) + + // Create advertising list + return this.announceFilter(Array.from(addrSet) + .map(str => new Multiaddr(str))) + .map(ma => { + if (ma.getPeerId() === this.components.getPeerId().toString()) { + return ma + } + + return ma.encapsulate(`/p2p/${this.components.getPeerId().toString()}`) + }) + } +} diff --git a/src/circuit/README.md b/src/circuit/README.md index 680a76c2..cbf1dd1d 100644 --- a/src/circuit/README.md +++ b/src/circuit/README.md @@ -9,9 +9,9 @@ ## Table of Contents - [js-libp2p-circuit](#js-libp2p-circuit) + - [Table of Contents](#table-of-contents) - [Why?](#why) - [libp2p-circuit and IPFS](#libp2p-circuit-and-ipfs) - - [Table of Contents](#table-of-contents) - [Usage](#usage) - [API](#api) - [Implementation rational](#implementation-rational) @@ -37,22 +37,27 @@ Libp2p circuit configuration can be seen at [Setup with Relay](../../doc/CONFIGU Once you have a circuit relay node running, you can configure other nodes to use it as a relay as follows: ```js -const { Multiaddr } = require('multiaddr') -const Libp2p = require('libp2p') -const TCP = require('libp2p-tcp') -const MPLEX = require('libp2p-mplex') -const { NOISE } = require('libp2p-noise') +import { Multiaddr } from '@multiformats/multiaddr' +import Libp2p from 'libp2p' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { NOISE } from 
'@chainsafe/libp2p-noise' const relayAddr = ... -const node = await Libp2p.create({ +const node = await createLibp2p({ addresses: { listen: [new Multiaddr(`${relayAddr}/p2p-circuit`)] }, - modules: { - transport: [TCP], - streamMuxer: [MPLEX], - connEncryption: [NOISE] + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] }, config: { relay: { // Circuit Relay options (this config is part of libp2p core configurations) diff --git a/src/circuit/auto-relay.js b/src/circuit/auto-relay.js deleted file mode 100644 index e173ac03..00000000 --- a/src/circuit/auto-relay.js +++ /dev/null @@ -1,302 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:auto-relay'), { - error: debug('libp2p:auto-relay:err') -}) - -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') -const { Multiaddr } = require('multiaddr') -const all = require('it-all') - -const { relay: multicodec } = require('./multicodec') -const { canHop } = require('./circuit/hop') -const { namespaceToCid } = require('./utils') -const { - CIRCUIT_PROTO_CODE, - HOP_METADATA_KEY, - HOP_METADATA_VALUE, - RELAY_RENDEZVOUS_NS -} = require('./constants') - -/** - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - * @typedef {import('../peer-store/types').Address} Address - * @typedef {import('peer-id')} PeerId - */ - -/** - * @typedef {Object} AutoRelayProperties - * @property {import('../')} libp2p - * - * @typedef {Object} AutoRelayOptions - * @property {number} [maxListeners = 1] - maximum number of relays to listen. - * @property {(error: Error, msg?: string) => {}} [onError] - */ - -class AutoRelay { - /** - * Creates an instance of AutoRelay. - * - * @class - * @param {AutoRelayProperties & AutoRelayOptions} props - */ - constructor ({ libp2p, maxListeners = 1, onError }) { - this._libp2p = libp2p - this._peerId = libp2p.peerId - this._peerStore = libp2p.peerStore - this._connectionManager = libp2p.connectionManager - this._transportManager = libp2p.transportManager - this._addressSorter = libp2p.dialer.addressSorter - - this.maxListeners = maxListeners - - /** - * @type {Set} - */ - this._listenRelays = new Set() - - this._onProtocolChange = this._onProtocolChange.bind(this) - this._onPeerDisconnected = this._onPeerDisconnected.bind(this) - - this._peerStore.on('change:protocols', this._onProtocolChange) - this._connectionManager.on('peer:disconnect', this._onPeerDisconnected) - - /** - * @param {Error} error - * @param {string} [msg] - */ - this._onError = (error, msg) => { - log.error(msg || error) - onError && onError(error, msg) - } - } - - /** - * Check if a peer supports the relay protocol. - * If the protocol is not supported, check if it was supported before and remove it as a listen relay. - * If the protocol is supported, check if the peer supports **HOP** and add it as a listener if - * inside the threshold. 
- * - * @param {Object} props - * @param {PeerId} props.peerId - * @param {string[]} props.protocols - * @returns {Promise} - */ - async _onProtocolChange ({ peerId, protocols }) { - const id = peerId.toB58String() - - // Check if it has the protocol - const hasProtocol = protocols.find(protocol => protocol === multicodec) - - // If no protocol, check if we were keeping the peer before as a listenRelay - if (!hasProtocol && this._listenRelays.has(id)) { - await this._removeListenRelay(id) - return - } else if (!hasProtocol || this._listenRelays.has(id)) { - return - } - - // If protocol, check if can hop, store info in the metadataBook and listen on it - try { - const connection = this._connectionManager.get(peerId) - if (!connection) { - return - } - - // Do not hop on a relayed connection - if (connection.remoteAddr.protoCodes().includes(CIRCUIT_PROTO_CODE)) { - log(`relayed connection to ${id} will not be used to hop on`) - return - } - - const supportsHop = await canHop({ connection }) - - if (supportsHop) { - await this._peerStore.metadataBook.setValue(peerId, HOP_METADATA_KEY, uint8ArrayFromString(HOP_METADATA_VALUE)) - await this._addListenRelay(connection, id) - } - } catch (/** @type {any} */ err) { - this._onError(err) - } - } - - /** - * Peer disconnects. - * - * @param {Connection} connection - connection to the peer - */ - _onPeerDisconnected (connection) { - const peerId = connection.remotePeer - const id = peerId.toB58String() - - // Not listening on this relay - if (!this._listenRelays.has(id)) { - return - } - - this._removeListenRelay(id).catch(err => { - log.error(err) - }) - } - - /** - * Attempt to listen on the given relay connection. - * - * @private - * @param {Connection} connection - connection to the peer - * @param {string} id - peer identifier string - * @returns {Promise} - */ - async _addListenRelay (connection, id) { - try { - // Check if already listening on enough relays - if (this._listenRelays.size >= this.maxListeners) { - return - } - - // Get peer known addresses and sort them per public addresses first - const remoteAddrs = await this._peerStore.addressBook.getMultiaddrsForPeer( - connection.remotePeer, this._addressSorter - ) - - // Attempt to listen on relay - const result = await Promise.all( - remoteAddrs.map(async addr => { - try { - // Announce multiaddrs will update on listen success by TransportManager event being triggered - await this._transportManager.listen([new Multiaddr(`${addr.toString()}/p2p-circuit`)]) - return true - } catch (/** @type {any} */ err) { - this._onError(err) - } - - return false - }) - ) - - if (result.includes(true)) { - this._listenRelays.add(id) - } - } catch (/** @type {any} */ err) { - this._onError(err) - this._listenRelays.delete(id) - } - } - - /** - * Remove listen relay. - * - * @private - * @param {string} id - peer identifier string. - */ - async _removeListenRelay (id) { - if (this._listenRelays.delete(id)) { - // TODO: this should be responsibility of the connMgr - await this._listenOnAvailableHopRelays([id]) - } - } - - /** - * Try to listen on available hop relay connections. - * The following order will happen while we do not have enough relays. - * 1. Check the metadata store for known relays, try to listen on the ones we are already connected. - * 2. Dial and try to listen on the peers we know that support hop but are not connected. - * 3. Search the network. 
- * - * @param {string[]} [peersToIgnore] - */ - async _listenOnAvailableHopRelays (peersToIgnore = []) { - // TODO: The peer redial issue on disconnect should be handled by connection gating - // Check if already listening on enough relays - if (this._listenRelays.size >= this.maxListeners) { - return - } - - const knownHopsToDial = [] - const peers = await all(this._peerStore.getPeers()) - - // Check if we have known hop peers to use and attempt to listen on the already connected - for await (const { id, metadata } of peers) { - const idStr = id.toB58String() - - // Continue to next if listening on this or peer to ignore - if (this._listenRelays.has(idStr)) { - continue - } - - if (peersToIgnore.includes(idStr)) { - continue - } - - const supportsHop = metadata.get(HOP_METADATA_KEY) - - // Continue to next if it does not support Hop - if (!supportsHop || uint8ArrayToString(supportsHop) !== HOP_METADATA_VALUE) { - continue - } - - const connection = this._connectionManager.get(id) - - // If not connected, store for possible later use. - if (!connection) { - knownHopsToDial.push(id) - continue - } - - await this._addListenRelay(connection, idStr) - - // Check if already listening on enough relays - if (this._listenRelays.size >= this.maxListeners) { - return - } - } - - // Try to listen on known peers that are not connected - for (const peerId of knownHopsToDial) { - await this._tryToListenOnRelay(peerId) - - // Check if already listening on enough relays - if (this._listenRelays.size >= this.maxListeners) { - return - } - } - - // Try to find relays to hop on the network - try { - const cid = await namespaceToCid(RELAY_RENDEZVOUS_NS) - for await (const provider of this._libp2p.contentRouting.findProviders(cid)) { - if (!provider.multiaddrs.length) { - continue - } - - const peerId = provider.id - await this._peerStore.addressBook.add(peerId, provider.multiaddrs) - - await this._tryToListenOnRelay(peerId) - - // Check if already listening on enough relays - if (this._listenRelays.size >= this.maxListeners) { - return - } - } - } catch (/** @type {any} */ err) { - this._onError(err) - } - } - - /** - * @param {PeerId} peerId - */ - async _tryToListenOnRelay (peerId) { - try { - const connection = await this._libp2p.dial(peerId) - await this._addListenRelay(connection, peerId.toB58String()) - } catch (/** @type {any} */ err) { - this._onError(err, `could not connect and listen on known hop relay ${peerId.toB58String()}`) - } - } -} - -module.exports = AutoRelay diff --git a/src/circuit/auto-relay.ts b/src/circuit/auto-relay.ts new file mode 100644 index 00000000..8d84a8e9 --- /dev/null +++ b/src/circuit/auto-relay.ts @@ -0,0 +1,284 @@ +import { logger } from '@libp2p/logger' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { RELAY_CODEC } from './multicodec.js' +import { canHop } from './circuit/hop.js' +import { namespaceToCid } from './utils.js' +import { + CIRCUIT_PROTO_CODE, + HOP_METADATA_KEY, + HOP_METADATA_VALUE, + RELAY_RENDEZVOUS_NS +} from './constants.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { AddressSorter, PeerProtocolsChangeData } from '@libp2p/interfaces/peer-store' +import type { Connection } from '@libp2p/interfaces/connection' +import type { Components } from '@libp2p/interfaces/components' +import sort from 'it-sort' +import all from 'it-all' +import { pipe } from 'it-pipe' +import { publicAddressesFirst } from 
'@libp2p/utils/address-sort' + +const log = logger('libp2p:auto-relay') + +const noop = () => {} + +export interface AutoRelayInit { + addressSorter?: AddressSorter + maxListeners?: number + onError?: (error: Error, msg?: string) => void +} + +export class AutoRelay { + private readonly components: Components + private readonly addressSorter: AddressSorter + private readonly maxListeners: number + private readonly listenRelays: Set + private readonly onError: (error: Error, msg?: string) => void + + constructor (components: Components, init: AutoRelayInit) { + this.components = components + this.addressSorter = init.addressSorter ?? publicAddressesFirst + this.maxListeners = init.maxListeners ?? 1 + this.listenRelays = new Set() + this.onError = init.onError ?? noop + + this._onProtocolChange = this._onProtocolChange.bind(this) + this._onPeerDisconnected = this._onPeerDisconnected.bind(this) + + this.components.getPeerStore().addEventListener('change:protocols', (evt) => { + void this._onProtocolChange(evt).catch(err => { + log.error(err) + }) + }) + this.components.getConnectionManager().addEventListener('peer:disconnect', this._onPeerDisconnected) + } + + /** + * Check if a peer supports the relay protocol. + * If the protocol is not supported, check if it was supported before and remove it as a listen relay. + * If the protocol is supported, check if the peer supports **HOP** and add it as a listener if + * inside the threshold. + */ + async _onProtocolChange (evt: CustomEvent) { + const { + peerId, + protocols + } = evt.detail + const id = peerId.toString() + + // Check if it has the protocol + const hasProtocol = protocols.find(protocol => protocol === RELAY_CODEC) + + // If no protocol, check if we were keeping the peer before as a listenRelay + if (hasProtocol == null) { + if (this.listenRelays.has(id)) { + await this._removeListenRelay(id) + } + + return + } + + if (this.listenRelays.has(id)) { + return + } + + // If protocol, check if can hop, store info in the metadataBook and listen on it + try { + const connection = this.components.getConnectionManager().getConnection(peerId) + + if (connection == null) { + return + } + + // Do not hop on a relayed connection + if (connection.remoteAddr.protoCodes().includes(CIRCUIT_PROTO_CODE)) { + log(`relayed connection to ${id} will not be used to hop on`) + return + } + + const supportsHop = await canHop({ connection }) + + if (supportsHop) { + await this.components.getPeerStore().metadataBook.setValue(peerId, HOP_METADATA_KEY, uint8ArrayFromString(HOP_METADATA_VALUE)) + await this._addListenRelay(connection, id) + } + } catch (err: any) { + this.onError(err) + } + } + + /** + * Peer disconnects + */ + _onPeerDisconnected (evt: CustomEvent) { + const connection = evt.detail + const peerId = connection.remotePeer + const id = peerId.toString() + + // Not listening on this relay + if (!this.listenRelays.has(id)) { + return + } + + this._removeListenRelay(id).catch(err => { + log.error(err) + }) + } + + /** + * Attempt to listen on the given relay connection + */ + async _addListenRelay (connection: Connection, id: string): Promise { + try { + // Check if already listening on enough relays + if (this.listenRelays.size >= this.maxListeners) { + return + } + + // Get peer known addresses and sort them with public addresses first + const remoteAddrs = await pipe( + await this.components.getPeerStore().addressBook.get(connection.remotePeer), + (source) => sort(source, this.addressSorter), + async (source) => await all(source) + ) + + // Attempt 
to listen on relay + const result = await Promise.all( + remoteAddrs.map(async addr => { + try { + let multiaddr = addr.multiaddr + + if (multiaddr.getPeerId() == null) { + multiaddr = multiaddr.encapsulate(`/p2p/${connection.remotePeer.toString()}`) + } + + multiaddr = multiaddr.encapsulate('/p2p-circuit') + + // Announce multiaddrs will update on listen success by TransportManager event being triggered + await this.components.getTransportManager().listen([multiaddr]) + return true + } catch (err: any) { + log.error('error listening on circuit address', err) + this.onError(err) + } + + return false + }) + ) + + if (result.includes(true)) { + this.listenRelays.add(id) + } + } catch (err: any) { + this.onError(err) + this.listenRelays.delete(id) + } + } + + /** + * Remove listen relay + */ + async _removeListenRelay (id: string) { + if (this.listenRelays.delete(id)) { + // TODO: this should be responsibility of the connMgr + await this._listenOnAvailableHopRelays([id]) + } + } + + /** + * Try to listen on available hop relay connections. + * The following order will happen while we do not have enough relays. + * 1. Check the metadata store for known relays, try to listen on the ones we are already connected. + * 2. Dial and try to listen on the peers we know that support hop but are not connected. + * 3. Search the network. + */ + async _listenOnAvailableHopRelays (peersToIgnore: string[] = []) { + // TODO: The peer redial issue on disconnect should be handled by connection gating + // Check if already listening on enough relays + if (this.listenRelays.size >= this.maxListeners) { + return + } + + const knownHopsToDial = [] + const peers = await this.components.getPeerStore().all() + + // Check if we have known hop peers to use and attempt to listen on the already connected + for (const { id, metadata } of peers) { + const idStr = id.toString() + + // Continue to next if listening on this or peer to ignore + if (this.listenRelays.has(idStr)) { + continue + } + + if (peersToIgnore.includes(idStr)) { + continue + } + + const supportsHop = metadata.get(HOP_METADATA_KEY) + + // Continue to next if it does not support Hop + if ((supportsHop == null) || uint8ArrayToString(supportsHop) !== HOP_METADATA_VALUE) { + continue + } + + const connection = this.components.getConnectionManager().getConnection(id) + + // If not connected, store for possible later use. 
+ if (connection == null) { + knownHopsToDial.push(id) + continue + } + + await this._addListenRelay(connection, idStr) + + // Check if already listening on enough relays + if (this.listenRelays.size >= this.maxListeners) { + return + } + } + + // Try to listen on known peers that are not connected + for (const peerId of knownHopsToDial) { + await this._tryToListenOnRelay(peerId) + + // Check if already listening on enough relays + if (this.listenRelays.size >= this.maxListeners) { + return + } + } + + // Try to find relays to hop on the network + try { + const cid = await namespaceToCid(RELAY_RENDEZVOUS_NS) + for await (const provider of this.components.getContentRouting().findProviders(cid)) { + if (provider.multiaddrs.length === 0) { + continue + } + + const peerId = provider.id + await this.components.getPeerStore().addressBook.add(peerId, provider.multiaddrs) + + await this._tryToListenOnRelay(peerId) + + // Check if already listening on enough relays + if (this.listenRelays.size >= this.maxListeners) { + return + } + } + } catch (err: any) { + this.onError(err) + } + } + + async _tryToListenOnRelay (peerId: PeerId) { + try { + const connection = await this.components.getDialer().dial(peerId) + await this._addListenRelay(connection, peerId.toString()) + } catch (err: any) { + log.error('Could not use %p as relay', peerId, err) + this.onError(err, `could not connect and listen on known hop relay ${peerId.toString()}`) + } + } +} diff --git a/src/circuit/circuit/hop.js b/src/circuit/circuit/hop.js deleted file mode 100644 index 73d9b1f8..00000000 --- a/src/circuit/circuit/hop.js +++ /dev/null @@ -1,205 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:circuit:hop'), { - error: debug('libp2p:circuit:hop:err') -}) -const errCode = require('err-code') - -const PeerId = require('peer-id') -const { validateAddrs } = require('./utils') -const StreamHandler = require('./stream-handler') -const { CircuitRelay: CircuitPB } = require('../protocol') -const { pipe } = require('it-pipe') -const { codes: Errors } = require('../../errors') - -const { stop } = require('./stop') - -const multicodec = require('./../multicodec') - -/** - * @typedef {import('../protocol').ICircuitRelay} ICircuitRelay - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxedStream} MuxedStream - * @typedef {import('../transport')} Transport - */ - -/** - * @typedef {Object} HopRequest - * @property {Connection} connection - * @property {ICircuitRelay} request - * @property {StreamHandler} streamHandler - * @property {Transport} circuit - */ - -/** - * @param {HopRequest} options - * @returns {Promise} - */ -async function handleHop ({ - connection, - request, - streamHandler, - circuit -}) { - // Ensure hop is enabled - if (!circuit._options.hop.enabled) { - log('HOP request received but we are not acting as a relay') - return streamHandler.end({ - type: CircuitPB.Type.STATUS, - code: CircuitPB.Status.HOP_CANT_SPEAK_RELAY - }) - } - - // Validate the HOP request has the required input - try { - validateAddrs(request, streamHandler) - } catch (/** @type {any} */ err) { - return log.error('invalid hop request via peer %s', connection.remotePeer.toB58String(), err) - } - - if (!request.dstPeer) { - log('HOP request received but we do not receive a dstPeer') - return - } - - // Get the connection to the destination (stop) peer - const destinationPeer = new PeerId(request.dstPeer.id) - - 
const destinationConnection = circuit._connectionManager.get(destinationPeer) - if (!destinationConnection && !circuit._options.hop.active) { - log('HOP request received but we are not connected to the destination peer') - return streamHandler.end({ - type: CircuitPB.Type.STATUS, - code: CircuitPB.Status.HOP_NO_CONN_TO_DST - }) - } - - // TODO: Handle being an active relay - if (!destinationConnection) { - return - } - - // Handle the incoming HOP request by performing a STOP request - const stopRequest = { - type: CircuitPB.Type.STOP, - dstPeer: request.dstPeer, - srcPeer: request.srcPeer - } - - let destinationStream - try { - destinationStream = await stop({ - connection: destinationConnection, - request: stopRequest - }) - } catch (/** @type {any} */ err) { - return log.error(err) - } - - log('hop request from %s is valid', connection.remotePeer.toB58String()) - streamHandler.write({ - type: CircuitPB.Type.STATUS, - code: CircuitPB.Status.SUCCESS - }) - const sourceStream = streamHandler.rest() - - // Short circuit the two streams to create the relayed connection - return pipe( - sourceStream, - destinationStream, - sourceStream - ) -} - -/** - * Performs a HOP request to a relay peer, to request a connection to another - * peer. A new, virtual, connection will be created between the two via the relay. - * - * @param {object} options - * @param {Connection} options.connection - Connection to the relay - * @param {ICircuitRelay} options.request - * @returns {Promise} - */ -async function hop ({ - connection, - request -}) { - // Create a new stream to the relay - const { stream } = await connection.newStream([multicodec.relay]) - // Send the HOP request - const streamHandler = new StreamHandler({ stream }) - streamHandler.write(request) - - const response = await streamHandler.read() - - if (!response) { - throw errCode(new Error('HOP request had no response'), Errors.ERR_HOP_REQUEST_FAILED) - } - - if (response.code === CircuitPB.Status.SUCCESS) { - log('hop request was successful') - return streamHandler.rest() - } - - log('hop request failed with code %d, closing stream', response.code) - streamHandler.close() - throw errCode(new Error(`HOP request failed with code ${response.code}`), Errors.ERR_HOP_REQUEST_FAILED) -} - -/** - * Performs a CAN_HOP request to a relay peer, in order to understand its capabilities. - * - * @param {object} options - * @param {Connection} options.connection - Connection to the relay - * @returns {Promise} - */ -async function canHop ({ - connection -}) { - // Create a new stream to the relay - const { stream } = await connection.newStream([multicodec.relay]) - // Send the HOP request - const streamHandler = new StreamHandler({ stream }) - streamHandler.write({ - type: CircuitPB.Type.CAN_HOP - }) - - const response = await streamHandler.read() - await streamHandler.close() - - if (!response || response.code !== CircuitPB.Status.SUCCESS) { - return false - } - - return true -} - -/** - * Creates an unencoded CAN_HOP response based on the Circuits configuration - * - * @param {Object} options - * @param {Connection} options.connection - * @param {StreamHandler} options.streamHandler - * @param {Transport} options.circuit - * @private - */ -function handleCanHop ({ - connection, - streamHandler, - circuit -}) { - const canHop = circuit._options.hop.enabled - log('can hop (%s) request from %s', canHop, connection.remotePeer.toB58String()) - streamHandler.end({ - type: CircuitPB.Type.STATUS, - code: canHop ? 
CircuitPB.Status.SUCCESS : CircuitPB.Status.HOP_CANT_SPEAK_RELAY - }) -} - -module.exports = { - handleHop, - hop, - canHop, - handleCanHop -} diff --git a/src/circuit/circuit/hop.ts b/src/circuit/circuit/hop.ts new file mode 100644 index 00000000..5440813a --- /dev/null +++ b/src/circuit/circuit/hop.ts @@ -0,0 +1,211 @@ +import { logger } from '@libp2p/logger' +import errCode from 'err-code' +import { validateAddrs } from './utils.js' +import { StreamHandler } from './stream-handler.js' +import { CircuitRelay as CircuitPB, ICircuitRelay } from '../pb/index.js' +import { pipe } from 'it-pipe' +import { codes as Errors } from '../../errors.js' +import { stop } from './stop.js' +import { RELAY_CODEC } from '../multicodec.js' +import type { Connection } from '@libp2p/interfaces/connection' +import { peerIdFromBytes } from '@libp2p/peer-id' +import type { Duplex } from 'it-stream-types' +import type { Circuit } from '../transport.js' +import type { ConnectionManager } from '@libp2p/interfaces/registrar' + +const log = logger('libp2p:circuit:hop') + +export interface HopRequest { + connection: Connection + request: ICircuitRelay + streamHandler: StreamHandler + circuit: Circuit + connectionManager: ConnectionManager +} + +export async function handleHop (hopRequest: HopRequest) { + const { + connection, + request, + streamHandler, + circuit, + connectionManager + } = hopRequest + + // Ensure hop is enabled + if (!circuit.hopEnabled()) { + log('HOP request received but we are not acting as a relay') + return streamHandler.end({ + type: CircuitPB.Type.STATUS, + code: CircuitPB.Status.HOP_CANT_SPEAK_RELAY + }) + } + + // Validate the HOP request has the required input + try { + validateAddrs(request, streamHandler) + } catch (err: any) { + log.error('invalid hop request via peer %p %o', connection.remotePeer, err) + + return + } + + if (request.dstPeer == null) { + log('HOP request received but we do not receive a dstPeer') + return + } + + // Get the connection to the destination (stop) peer + const destinationPeer = peerIdFromBytes(request.dstPeer.id) + + const destinationConnection = connectionManager.getConnection(destinationPeer) + if (destinationConnection == null && !circuit.hopActive()) { + log('HOP request received but we are not connected to the destination peer') + return streamHandler.end({ + type: CircuitPB.Type.STATUS, + code: CircuitPB.Status.HOP_NO_CONN_TO_DST + }) + } + + // TODO: Handle being an active relay + if (destinationConnection == null) { + log('did not have connection to remote peer') + return streamHandler.end({ + type: CircuitPB.Type.STATUS, + code: CircuitPB.Status.HOP_NO_CONN_TO_DST + }) + } + + // Handle the incoming HOP request by performing a STOP request + const stopRequest = { + type: CircuitPB.Type.STOP, + dstPeer: request.dstPeer, + srcPeer: request.srcPeer + } + + let destinationStream: Duplex + try { + log('performing STOP request') + const result = await stop({ + connection: destinationConnection, + request: stopRequest + }) + + if (result == null) { + throw new Error('Could not stop') + } + + destinationStream = result + } catch (err: any) { + log.error(err) + + return + } + + log('hop request from %p is valid', connection.remotePeer) + streamHandler.write({ + type: CircuitPB.Type.STATUS, + code: CircuitPB.Status.SUCCESS + }) + const sourceStream = streamHandler.rest() + + log('creating related connections') + // Short circuit the two streams to create the relayed connection + return await pipe( + sourceStream, + destinationStream, + sourceStream + ) +} + 
+export interface HopConfig { + connection: Connection + request: ICircuitRelay +} + +/** + * Performs a HOP request to a relay peer, to request a connection to another + * peer. A new, virtual, connection will be created between the two via the relay. + */ +export async function hop (options: HopConfig): Promise> { + const { + connection, + request + } = options + + // Create a new stream to the relay + const { stream } = await connection.newStream(RELAY_CODEC) + // Send the HOP request + const streamHandler = new StreamHandler({ stream }) + streamHandler.write(request) + + const response = await streamHandler.read() + + if (response == null) { + throw errCode(new Error('HOP request had no response'), Errors.ERR_HOP_REQUEST_FAILED) + } + + if (response.code === CircuitPB.Status.SUCCESS) { + log('hop request was successful') + return streamHandler.rest() + } + + log('hop request failed with code %d, closing stream', response.code) + streamHandler.close() + + throw errCode(new Error(`HOP request failed with code ${response.code}`), Errors.ERR_HOP_REQUEST_FAILED) +} + +export interface CanHopOptions { + connection: Connection +} + +/** + * Performs a CAN_HOP request to a relay peer, in order to understand its capabilities + */ +export async function canHop (options: CanHopOptions) { + const { + connection + } = options + + // Create a new stream to the relay + const { stream } = await connection.newStream(RELAY_CODEC) + + // Send the HOP request + const streamHandler = new StreamHandler({ stream }) + streamHandler.write({ + type: CircuitPB.Type.CAN_HOP + }) + + const response = await streamHandler.read() + await streamHandler.close() + + if (response == null || response.code !== CircuitPB.Status.SUCCESS) { + return false + } + + return true +} + +export interface HandleCanHopOptions { + connection: Connection + streamHandler: StreamHandler + circuit: Circuit +} + +/** + * Creates an unencoded CAN_HOP response based on the Circuits configuration + */ +export function handleCanHop (options: HandleCanHopOptions) { + const { + connection, + streamHandler, + circuit + } = options + const canHop = circuit.hopEnabled() + log('can hop (%s) request from %p', canHop, connection.remotePeer) + streamHandler.end({ + type: CircuitPB.Type.STATUS, + code: canHop ? 
CircuitPB.Status.SUCCESS : CircuitPB.Status.HOP_CANT_SPEAK_RELAY + }) +} diff --git a/src/circuit/circuit/stop.js b/src/circuit/circuit/stop.js deleted file mode 100644 index 8efd00f8..00000000 --- a/src/circuit/circuit/stop.js +++ /dev/null @@ -1,81 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:circuit:stop'), { - error: debug('libp2p:circuit:stop:err') -}) - -const { CircuitRelay: CircuitPB } = require('../protocol') -const multicodec = require('../multicodec') -const StreamHandler = require('./stream-handler') -const { validateAddrs } = require('./utils') - -/** - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxedStream} MuxedStream - * @typedef {import('../protocol').ICircuitRelay} ICircuitRelay - */ - -/** - * Handles incoming STOP requests - * - * @private - * @param {Object} options - * @param {Connection} options.connection - * @param {ICircuitRelay} options.request - The CircuitRelay protobuf request (unencoded) - * @param {StreamHandler} options.streamHandler - * @returns {Promise|void} Resolves a duplex iterable - */ -module.exports.handleStop = function handleStop ({ - connection, - request, - streamHandler -}) { - // Validate the STOP request has the required input - try { - validateAddrs(request, streamHandler) - } catch (/** @type {any} */ err) { - return log.error('invalid stop request via peer %s', connection.remotePeer.toB58String(), err) - } - - // The request is valid - log('stop request is valid') - streamHandler.write({ - type: CircuitPB.Type.STATUS, - code: CircuitPB.Status.SUCCESS - }) - return streamHandler.rest() -} - -/** - * Creates a STOP request - * - * @private - * @param {Object} options - * @param {Connection} options.connection - * @param {ICircuitRelay} options.request - The CircuitRelay protobuf request (unencoded) - * @returns {Promise} Resolves a duplex iterable - */ -module.exports.stop = async function stop ({ - connection, - request -}) { - const { stream } = await connection.newStream([multicodec.relay]) - log('starting stop request to %s', connection.remotePeer.toB58String()) - const streamHandler = new StreamHandler({ stream }) - - streamHandler.write(request) - const response = await streamHandler.read() - - if (!response) { - return streamHandler.close() - } - - if (response.code === CircuitPB.Status.SUCCESS) { - log('stop request to %s was successful', connection.remotePeer.toB58String()) - return streamHandler.rest() - } - - log('stop request failed with code %d', response.code) - streamHandler.close() -} diff --git a/src/circuit/circuit/stop.ts b/src/circuit/circuit/stop.ts new file mode 100644 index 00000000..edef9672 --- /dev/null +++ b/src/circuit/circuit/stop.ts @@ -0,0 +1,78 @@ +import { logger } from '@libp2p/logger' +import { CircuitRelay as CircuitPB, ICircuitRelay } from '../pb/index.js' +import { RELAY_CODEC } from '../multicodec.js' +import { StreamHandler } from './stream-handler.js' +import { validateAddrs } from './utils.js' +import type { Connection } from '@libp2p/interfaces/connection' +import type { Duplex } from 'it-stream-types' + +const log = logger('libp2p:circuit:stop') + +export interface HandleStopOptions { + connection: Connection + request: ICircuitRelay + streamHandler: StreamHandler +} + +/** + * Handles incoming STOP requests + */ +export function handleStop (options: HandleStopOptions): Duplex | undefined { + const { + connection, + request, + streamHandler + } = 
options + + // Validate the STOP request has the required input + try { + validateAddrs(request, streamHandler) + } catch (err: any) { + log.error('invalid stop request via peer %p %o', connection.remotePeer, err) + return + } + + // The request is valid + log('stop request is valid') + streamHandler.write({ + type: CircuitPB.Type.STATUS, + code: CircuitPB.Status.SUCCESS + }) + + return streamHandler.rest() +} + +export interface StopOptions { + connection: Connection + request: ICircuitRelay +} + +/** + * Creates a STOP request + */ +export async function stop (options: StopOptions) { + const { + connection, + request + } = options + + const { stream } = await connection.newStream([RELAY_CODEC]) + log('starting stop request to %p', connection.remotePeer) + const streamHandler = new StreamHandler({ stream }) + + streamHandler.write(request) + const response = await streamHandler.read() + + if (response == null) { + streamHandler.close() + return + } + + if (response.code === CircuitPB.Status.SUCCESS) { + log('stop request to %p was successful', connection.remotePeer) + return streamHandler.rest() + } + + log('stop request failed with code %d', response.code) + streamHandler.close() +} diff --git a/src/circuit/circuit/stream-handler.js b/src/circuit/circuit/stream-handler.js deleted file mode 100644 index bbae7d68..00000000 --- a/src/circuit/circuit/stream-handler.js +++ /dev/null @@ -1,94 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:circuit:stream-handler'), { - error: debug('libp2p:circuit:stream-handler:err') -}) - -const lp = require('it-length-prefixed') -// @ts-ignore it-handshake does not export types -const handshake = require('it-handshake') -const { CircuitRelay } = require('../protocol') - -/** - * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxedStream} MuxedStream - * @typedef {import('../protocol').ICircuitRelay} ICircuitRelay - */ - -class StreamHandler { - /** - * Create a stream handler for connection - * - * @class - * @param {object} options - * @param {MuxedStream} options.stream - A duplex iterable - * @param {number} [options.maxLength = 4096] - max bytes length of message - */ - constructor ({ stream, maxLength = 4096 }) { - this.stream = stream - - this.shake = handshake(this.stream) - // @ts-ignore options are not optional - this.decoder = lp.decode.fromReader(this.shake.reader, { maxDataLength: maxLength }) - } - - /** - * Read and decode message - * - * @async - */ - async read () { - const msg = await this.decoder.next() - if (msg.value) { - const value = CircuitRelay.decode(msg.value.slice()) - log('read message type', value.type) - return value - } - - log('read received no value, closing stream') - // End the stream, we didn't get data - this.close() - } - - /** - * Encode and write array of buffers - * - * @param {ICircuitRelay} msg - An unencoded CircuitRelay protobuf message - * @returns {void} - */ - write (msg) { - log('write message type %s', msg.type) - // @ts-ignore lp.encode expects type type 'Buffer | BufferList', not 'Uint8Array' - this.shake.write(lp.encode.single(CircuitRelay.encode(msg).finish())) - } - - /** - * Return the handshake rest stream and invalidate handler - * - * @returns {*} A duplex iterable - */ - rest () { - this.shake.rest() - return this.shake.stream - } - - /** - * @param {ICircuitRelay} msg - An unencoded CircuitRelay protobuf message - */ - end (msg) { - this.write(msg) - this.close() - } - - /** - * Close the stream - * - * @returns {void} - */ - close () 
{ - log('closing the stream') - this.rest().sink([]) - } -} - -module.exports = StreamHandler diff --git a/src/circuit/circuit/stream-handler.ts b/src/circuit/circuit/stream-handler.ts new file mode 100644 index 00000000..8b55f36a --- /dev/null +++ b/src/circuit/circuit/stream-handler.ts @@ -0,0 +1,87 @@ +import { logger } from '@libp2p/logger' +import * as lp from 'it-length-prefixed' +import { Handshake, handshake } from 'it-handshake' +import { CircuitRelay, ICircuitRelay } from '../pb/index.js' +import type { Stream } from '@libp2p/interfaces/connection' +import type { Source } from 'it-stream-types' + +const log = logger('libp2p:circuit:stream-handler') + +export interface StreamHandlerOptions { + /** + * A duplex iterable + */ + stream: Stream + + /** + * max bytes length of message + */ + maxLength?: number +} + +export class StreamHandler { + private readonly stream: Stream + private readonly shake: Handshake + private readonly decoder: Source + + constructor (options: StreamHandlerOptions) { + const { stream, maxLength = 4096 } = options + + this.stream = stream + this.shake = handshake(this.stream) + this.decoder = lp.decode.fromReader(this.shake.reader, { maxDataLength: maxLength }) + } + + /** + * Read and decode message + */ + async read () { + // @ts-expect-error FIXME is a source, needs to be a generator + const msg = await this.decoder.next() + + if (msg.value != null) { + const value = CircuitRelay.decode(msg.value.slice()) + log('read message type', value.type) + return value + } + + log('read received no value, closing stream') + // End the stream, we didn't get data + this.close() + } + + /** + * Encode and write array of buffers + */ + write (msg: ICircuitRelay) { + log('write message type %s', msg.type) + // @ts-expect-error lp.encode expects type type 'Buffer | BufferList', not 'Uint8Array' + this.shake.write(lp.encode.single(CircuitRelay.encode(msg).finish())) + } + + /** + * Return the handshake rest stream and invalidate handler + */ + rest () { + this.shake.rest() + return this.shake.stream + } + + /** + * @param {ICircuitRelay} msg - An unencoded CircuitRelay protobuf message + */ + end (msg: ICircuitRelay) { + this.write(msg) + this.close() + } + + /** + * Close the stream + */ + close () { + log('closing the stream') + void this.rest().sink([]).catch(err => { + log.error(err) + }) + } +} diff --git a/src/circuit/circuit/utils.js b/src/circuit/circuit/utils.ts similarity index 50% rename from src/circuit/circuit/utils.js rename to src/circuit/circuit/utils.ts index 624d0ba4..b5fe34f2 100644 --- a/src/circuit/circuit/utils.js +++ b/src/circuit/circuit/utils.ts @@ -1,20 +1,11 @@ -'use strict' - -const { Multiaddr } = require('multiaddr') -const { CircuitRelay } = require('../protocol') - -/** - * @typedef {import('./stream-handler')} StreamHandler - * @typedef {import('../protocol').ICircuitRelay} ICircuitRelay - */ +import { Multiaddr } from '@multiformats/multiaddr' +import { CircuitRelay, ICircuitRelay } from '../pb/index.js' +import type { StreamHandler } from './stream-handler.js' /** * Write a response - * - * @param {StreamHandler} streamHandler - * @param {import('../protocol').CircuitRelay.Status} status */ -function writeResponse (streamHandler, status) { +function writeResponse (streamHandler: StreamHandler, status: CircuitRelay.Status) { streamHandler.write({ type: CircuitRelay.Type.STATUS, code: status @@ -23,18 +14,15 @@ function writeResponse (streamHandler, status) { /** * Validate incomming HOP/STOP message - * - * @param {ICircuitRelay} msg - A 
CircuitRelay unencoded protobuf message - * @param {StreamHandler} streamHandler */ -function validateAddrs (msg, streamHandler) { +export function validateAddrs (msg: ICircuitRelay, streamHandler: StreamHandler) { try { - if (msg.dstPeer && msg.dstPeer.addrs) { + if (msg.dstPeer?.addrs != null) { msg.dstPeer.addrs.forEach((addr) => { return new Multiaddr(addr) }) } - } catch (/** @type {any} */ err) { + } catch (err: any) { writeResponse(streamHandler, msg.type === CircuitRelay.Type.HOP ? CircuitRelay.Status.HOP_DST_MULTIADDR_INVALID : CircuitRelay.Status.STOP_DST_MULTIADDR_INVALID) @@ -42,19 +30,15 @@ function validateAddrs (msg, streamHandler) { } try { - if (msg.srcPeer && msg.srcPeer.addrs) { + if (msg.srcPeer?.addrs != null) { msg.srcPeer.addrs.forEach((addr) => { return new Multiaddr(addr) }) } - } catch (/** @type {any} */ err) { + } catch (err: any) { writeResponse(streamHandler, msg.type === CircuitRelay.Type.HOP ? CircuitRelay.Status.HOP_SRC_MULTIADDR_INVALID : CircuitRelay.Status.STOP_SRC_MULTIADDR_INVALID) throw err } } - -module.exports = { - validateAddrs -} diff --git a/src/circuit/constants.js b/src/circuit/constants.js deleted file mode 100644 index b4de629c..00000000 --- a/src/circuit/constants.js +++ /dev/null @@ -1,12 +0,0 @@ -'use strict' - -const minute = 60 * 1000 - -module.exports = { - ADVERTISE_BOOT_DELAY: 15 * minute, // Delay before HOP relay service is advertised on the network - ADVERTISE_TTL: 30 * minute, // Delay Between HOP relay service advertisements on the network - CIRCUIT_PROTO_CODE: 290, // Multicodec code - HOP_METADATA_KEY: 'hop_relay', // PeerStore metadaBook key for HOP relay service - HOP_METADATA_VALUE: 'true', // PeerStore metadaBook value for HOP relay service - RELAY_RENDEZVOUS_NS: '/libp2p/relay' // Relay HOP relay service namespace for discovery -} diff --git a/src/circuit/constants.ts b/src/circuit/constants.ts new file mode 100644 index 00000000..8405a0fa --- /dev/null +++ b/src/circuit/constants.ts @@ -0,0 +1,31 @@ +const minute = 60 * 1000 + +/** + * Delay before HOP relay service is advertised on the network + */ +export const ADVERTISE_BOOT_DELAY = 15 * minute + +/** + * Delay Between HOP relay service advertisements on the network + */ +export const ADVERTISE_TTL = 30 * minute + +/** + * Multicodec code + */ +export const CIRCUIT_PROTO_CODE = 290 + +/** + * PeerStore metadaBook key for HOP relay service + */ +export const HOP_METADATA_KEY = 'hop_relay' + +/** + * PeerStore metadaBook value for HOP relay service + */ +export const HOP_METADATA_VALUE = 'true' + +/** + * Relay HOP relay service namespace for discovery + */ +export const RELAY_RENDEZVOUS_NS = '/libp2p/relay' diff --git a/src/circuit/index.js b/src/circuit/index.js deleted file mode 100644 index 06d4107a..00000000 --- a/src/circuit/index.js +++ /dev/null @@ -1,102 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:relay'), { - error: debug('libp2p:relay:err') -}) -const { codes } = require('./../errors') -const { - setDelayedInterval, - clearDelayedInterval -// @ts-ignore set-delayed-interval does not export types -} = require('set-delayed-interval') - -const AutoRelay = require('./auto-relay') -const { namespaceToCid } = require('./utils') -const { - RELAY_RENDEZVOUS_NS -} = require('./constants') - -/** - * @typedef {import('../')} Libp2p - * - * @typedef {Object} RelayAdvertiseOptions - * @property {number} [bootDelay = ADVERTISE_BOOT_DELAY] - * @property {boolean} [enabled = true] - * @property {number} [ttl = 
ADVERTISE_TTL] - * - * @typedef {Object} HopOptions - * @property {boolean} [enabled = false] - * @property {boolean} [active = false] - * - * @typedef {Object} AutoRelayOptions - * @property {number} [maxListeners = 2] - maximum number of relays to listen. - * @property {boolean} [enabled = false] - */ - -class Relay { - /** - * Creates an instance of Relay. - * - * @class - * @param {Libp2p} libp2p - */ - constructor (libp2p) { - this._libp2p = libp2p - this._options = { - ...libp2p._config.relay - } - - // Create autoRelay if enabled - this._autoRelay = this._options.autoRelay.enabled && new AutoRelay({ libp2p, ...this._options.autoRelay }) - - this._advertiseService = this._advertiseService.bind(this) - } - - /** - * Start Relay service. - * - * @returns {void} - */ - start () { - // Advertise service if HOP enabled - const canHop = this._options.hop.enabled - - if (canHop && this._options.advertise.enabled) { - this._timeout = setDelayedInterval( - this._advertiseService, this._options.advertise.ttl, this._options.advertise.bootDelay - ) - } - } - - /** - * Stop Relay service. - * - * @returns {void} - */ - stop () { - clearDelayedInterval(this._timeout) - } - - /** - * Advertise hop relay service in the network. - * - * @returns {Promise} - */ - async _advertiseService () { - try { - const cid = await namespaceToCid(RELAY_RENDEZVOUS_NS) - await this._libp2p.contentRouting.provide(cid) - } catch (/** @type {any} */ err) { - if (err.code === codes.ERR_NO_ROUTERS_AVAILABLE) { - log.error('a content router, such as a DHT, must be provided in order to advertise the relay service', err) - // Stop the advertise - this.stop() - } else { - log.error(err) - } - } - } -} - -module.exports = Relay diff --git a/src/circuit/index.ts b/src/circuit/index.ts new file mode 100644 index 00000000..8bd66808 --- /dev/null +++ b/src/circuit/index.ts @@ -0,0 +1,120 @@ +import { logger } from '@libp2p/logger' +import { codes } from '../errors.js' +import { + setDelayedInterval, + clearDelayedInterval +// @ts-expect-error set-delayed-interval does not export types +} from 'set-delayed-interval' +import { AutoRelay } from './auto-relay.js' +import { namespaceToCid } from './utils.js' +import { + RELAY_RENDEZVOUS_NS +} from './constants.js' +import type { AddressSorter } from '@libp2p/interfaces/peer-store' +import type { Startable } from '@libp2p/interfaces' +import type { Components } from '@libp2p/interfaces/components' + +const log = logger('libp2p:relay') + +export interface RelayAdvertiseConfig { + bootDelay?: number + enabled?: boolean + ttl?: number +} + +export interface HopConfig { + enabled?: boolean + active?: boolean +} + +export interface AutoRelayConfig { + enabled?: boolean + + /** + * maximum number of relays to listen + */ + maxListeners: number +} + +export interface RelayInit { + addressSorter?: AddressSorter + maxListeners?: number + onError?: (error: Error, msg?: string) => void + hop: HopConfig + advertise: RelayAdvertiseConfig + autoRelay: AutoRelayConfig +} + +export class Relay implements Startable { + private readonly components: Components + private readonly init: RelayInit + // @ts-expect-error this field isn't used anywhere? + private readonly autoRelay?: AutoRelay + private timeout?: any + private started: boolean + + /** + * Creates an instance of Relay + */ + constructor (components: Components, init: RelayInit) { + this.components = components + // Create autoRelay if enabled + this.autoRelay = init.autoRelay?.enabled !== false + ? 
new AutoRelay(components, { + addressSorter: init.addressSorter, + ...init.autoRelay + }) + : undefined + + this.started = false + this.init = init + this._advertiseService = this._advertiseService.bind(this) + } + + isStarted () { + return this.started + } + + /** + * Start Relay service + */ + async start () { + // Advertise service if HOP enabled + if (this.init.hop.enabled !== false && this.init.advertise.enabled !== false) { + this.timeout = setDelayedInterval( + this._advertiseService, this.init.advertise.ttl, this.init.advertise.bootDelay + ) + } + + this.started = true + } + + /** + * Stop Relay service + */ + async stop () { + if (this.timeout != null) { + clearDelayedInterval(this.timeout) + } + + this.started = false + } + + /** + * Advertise hop relay service in the network. + */ + async _advertiseService () { + try { + const cid = await namespaceToCid(RELAY_RENDEZVOUS_NS) + await this.components.getContentRouting().provide(cid) + } catch (err: any) { + if (err.code === codes.ERR_NO_ROUTERS_AVAILABLE) { + log.error('a content router, such as a DHT, must be provided in order to advertise the relay service', err) + // Stop the advertise + await this.stop() + } else { + log.error(err) + } + } + } +} diff --git a/src/circuit/listener.js b/src/circuit/listener.ts similarity index 54% rename from src/circuit/listener.js rename to src/circuit/listener.ts index a3f11dc7..7de4fbf9 100644 --- a/src/circuit/listener.js +++ b/src/circuit/listener.ts @@ -1,33 +1,27 @@ -'use strict' +import { CustomEvent, EventEmitter } from '@libp2p/interfaces' +import type { ConnectionManager } from '@libp2p/interfaces/registrar' +import type { Dialer } from '@libp2p/interfaces/dialer' +import type { Listener } from '@libp2p/interfaces/transport' +import { Multiaddr } from '@multiformats/multiaddr' -const { EventEmitter } = require('events') -const { Multiaddr } = require('multiaddr') +export interface ListenerOptions { + dialer: Dialer + connectionManager: ConnectionManager +} -/** - * @typedef {import('libp2p-interfaces/src/transport/types').Listener} Listener - */ - -/** - * @param {import('../')} libp2p - * @returns {Listener} a transport listener - */ -module.exports = (libp2p) => { +export function createListener (options: ListenerOptions): Listener { const listeningAddrs = new Map() /** * Add swarm handler and listen for incoming connections - * - * @param {Multiaddr} addr - * @returns {Promise} */ - async function listen (addr) { - const addrString = String(addr).split('/p2p-circuit').find(a => a !== '') - - const relayConn = await libp2p.dial(new Multiaddr(addrString)) + async function listen (addr: Multiaddr): Promise { + const addrString = addr.toString().split('/p2p-circuit').find(a => a !== '') + const relayConn = await options.dialer.dial(new Multiaddr(addrString)) const relayedAddr = relayConn.remoteAddr.encapsulate('/p2p-circuit') - listeningAddrs.set(relayConn.remotePeer.toB58String(), relayedAddr) - listener.emit('listening') + listeningAddrs.set(relayConn.remotePeer.toString(), relayedAddr) + listener.dispatchEvent(new CustomEvent('listening')) } /** @@ -54,20 +48,20 @@ module.exports = (libp2p) => { return addrs } - /** @type Listener */ - const listener = Object.assign(new EventEmitter(), { - close: () => Promise.resolve(), + const listener: Listener = Object.assign(new EventEmitter(), { + close: async () => await Promise.resolve(), listen, getAddrs }) // Remove listeningAddrs when a peer disconnects - libp2p.connectionManager.on('peer:disconnect', (connection) => { - const deleted = 
listeningAddrs.delete(connection.remotePeer.toB58String()) + options.connectionManager.addEventListener('peer:disconnect', (evt) => { + const { detail: connection } = evt + const deleted = listeningAddrs.delete(connection.remotePeer.toString()) if (deleted) { // Announce listen addresses change - listener.emit('close') + listener.dispatchEvent(new CustomEvent('close')) } }) diff --git a/src/circuit/multicodec.js b/src/circuit/multicodec.js deleted file mode 100644 index bcdb9787..00000000 --- a/src/circuit/multicodec.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict' - -module.exports = { - relay: '/libp2p/circuit/relay/0.1.0' -} diff --git a/src/circuit/multicodec.ts b/src/circuit/multicodec.ts new file mode 100644 index 00000000..fcd28221 --- /dev/null +++ b/src/circuit/multicodec.ts @@ -0,0 +1,2 @@ + +export const RELAY_CODEC = '/libp2p/circuit/relay/0.1.0' diff --git a/src/circuit/protocol/index.d.ts b/src/circuit/pb/index.d.ts similarity index 100% rename from src/circuit/protocol/index.d.ts rename to src/circuit/pb/index.d.ts diff --git a/src/circuit/protocol/index.js b/src/circuit/pb/index.js similarity index 97% rename from src/circuit/protocol/index.js rename to src/circuit/pb/index.js index d929debc..2157d6de 100644 --- a/src/circuit/protocol/index.js +++ b/src/circuit/pb/index.js @@ -1,15 +1,13 @@ /*eslint-disable*/ -"use strict"; - -var $protobuf = require("protobufjs/minimal"); +import $protobuf from "protobufjs/minimal.js"; // Common aliases -var $Reader = $protobuf.Reader, $Writer = $protobuf.Writer, $util = $protobuf.util; +const $Reader = $protobuf.Reader, $Writer = $protobuf.Writer, $util = $protobuf.util; // Exported root namespace -var $root = $protobuf.roots["libp2p-circuit"] || ($protobuf.roots["libp2p-circuit"] = {}); +const $root = $protobuf.roots["libp2p-circuit"] || ($protobuf.roots["libp2p-circuit"] = {}); -$root.CircuitRelay = (function() { +export const CircuitRelay = $root.CircuitRelay = (() => { /** * Properties of a CircuitRelay. 
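The circuit listener above no longer extends Node's `EventEmitter`; it dispatches `CustomEvent`s, so consumers subscribe with `addEventListener` and read the payload from `evt.detail`. A minimal sketch of that shape (the `connectionManager` argument is assumed to come from a running node; it is not defined by this patch):

```js
// Sketch only: the old `connectionManager.on('peer:disconnect', connection => ...)`
// handler becomes an EventTarget-style listener with the connection on `evt.detail`.
function watchDisconnects (connectionManager) {
  connectionManager.addEventListener('peer:disconnect', (evt) => {
    const { detail: connection } = evt
    console.log('disconnected from', connection.remotePeer.toString())
  })
}
```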
@@ -305,7 +303,7 @@ $root.CircuitRelay = (function() { * @property {number} MALFORMED_MESSAGE=400 MALFORMED_MESSAGE value */ CircuitRelay.Status = (function() { - var valuesById = {}, values = Object.create(valuesById); + const valuesById = {}, values = Object.create(valuesById); values[valuesById[100] = "SUCCESS"] = 100; values[valuesById[220] = "HOP_SRC_ADDR_TOO_LONG"] = 220; values[valuesById[221] = "HOP_DST_ADDR_TOO_LONG"] = 221; @@ -335,7 +333,7 @@ $root.CircuitRelay = (function() { * @property {number} CAN_HOP=4 CAN_HOP value */ CircuitRelay.Type = (function() { - var valuesById = {}, values = Object.create(valuesById); + const valuesById = {}, values = Object.create(valuesById); values[valuesById[1] = "HOP"] = 1; values[valuesById[2] = "STOP"] = 2; values[valuesById[3] = "STATUS"] = 3; @@ -527,4 +525,4 @@ $root.CircuitRelay = (function() { return CircuitRelay; })(); -module.exports = $root; +export { $root as default }; diff --git a/src/circuit/protocol/index.proto b/src/circuit/pb/index.proto similarity index 100% rename from src/circuit/protocol/index.proto rename to src/circuit/pb/index.proto diff --git a/src/circuit/transport.js b/src/circuit/transport.js deleted file mode 100644 index 5d0ad50d..00000000 --- a/src/circuit/transport.js +++ /dev/null @@ -1,229 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:circuit'), { - error: debug('libp2p:circuit:err') -}) - -const errCode = require('err-code') -const mafmt = require('mafmt') -const { Multiaddr } = require('multiaddr') -const PeerId = require('peer-id') -const { CircuitRelay: CircuitPB } = require('./protocol') -const { codes } = require('../errors') - -const toConnection = require('libp2p-utils/src/stream-to-ma-conn') - -const { relay: multicodec } = require('./multicodec') -const createListener = require('./listener') -const { handleCanHop, handleHop, hop } = require('./circuit/hop') -const { handleStop } = require('./circuit/stop') -const StreamHandler = require('./circuit/stream-handler') - -const transportSymbol = Symbol.for('@libp2p/js-libp2p-circuit/circuit') - -/** - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxedStream} MuxedStream - */ - -class Circuit { - /** - * Creates an instance of the Circuit Transport. 
- * - * @class - * @param {object} options - * @param {import('../')} options.libp2p - * @param {import('../upgrader')} options.upgrader - */ - constructor ({ libp2p, upgrader }) { - this._dialer = libp2p.dialer - this._registrar = libp2p.registrar - this._connectionManager = libp2p.connectionManager - this._upgrader = upgrader - this._options = libp2p._config.relay - this._libp2p = libp2p - this.peerId = libp2p.peerId - - this._registrar.handle(multicodec, this._onProtocol.bind(this)) - } - - /** - * @param {Object} props - * @param {Connection} props.connection - * @param {MuxedStream} props.stream - */ - async _onProtocol ({ connection, stream }) { - /** @type {import('./circuit/stream-handler')} */ - const streamHandler = new StreamHandler({ stream }) - const request = await streamHandler.read() - - if (!request) { - return - } - - const circuit = this - let virtualConnection - - switch (request.type) { - case CircuitPB.Type.CAN_HOP: { - log('received CAN_HOP request from %s', connection.remotePeer.toB58String()) - await handleCanHop({ circuit, connection, streamHandler }) - break - } - case CircuitPB.Type.HOP: { - log('received HOP request from %s', connection.remotePeer.toB58String()) - virtualConnection = await handleHop({ - connection, - request, - streamHandler, - circuit - }) - break - } - case CircuitPB.Type.STOP: { - log('received STOP request from %s', connection.remotePeer.toB58String()) - virtualConnection = await handleStop({ - connection, - request, - streamHandler - }) - break - } - default: { - log('Request of type %s not supported', request.type) - } - } - - if (virtualConnection) { - // @ts-ignore dst peer will not be undefined - const remoteAddr = new Multiaddr(request.dstPeer.addrs[0]) - // @ts-ignore src peer will not be undefined - const localAddr = new Multiaddr(request.srcPeer.addrs[0]) - const maConn = toConnection({ - stream: virtualConnection, - remoteAddr, - localAddr - }) - const type = request.type === CircuitPB.Type.HOP ? 
'relay' : 'inbound' - log('new %s connection %s', type, maConn.remoteAddr) - - const conn = await this._upgrader.upgradeInbound(maConn) - log('%s connection %s upgraded', type, maConn.remoteAddr) - this.handler && this.handler(conn) - } - } - - /** - * Dial a peer over a relay - * - * @param {Multiaddr} ma - the multiaddr of the peer to dial - * @param {Object} options - dial options - * @param {AbortSignal} [options.signal] - An optional abort signal - * @returns {Promise} - the connection - */ - async dial (ma, options) { - // Check the multiaddr to see if it contains a relay and a destination peer - const addrs = ma.toString().split('/p2p-circuit') - const relayAddr = new Multiaddr(addrs[0]) - const destinationAddr = new Multiaddr(addrs[addrs.length - 1]) - const relayId = relayAddr.getPeerId() - const destinationId = destinationAddr.getPeerId() - - if (!relayId || !destinationId) { - const errMsg = 'Circuit relay dial failed as addresses did not have peer id' - log.error(errMsg) - throw errCode(new Error(errMsg), codes.ERR_RELAYED_DIAL) - } - - const relayPeer = PeerId.createFromB58String(relayId) - const destinationPeer = PeerId.createFromB58String(destinationId) - - let disconnectOnFailure = false - let relayConnection = this._connectionManager.get(relayPeer) - if (!relayConnection) { - relayConnection = await this._dialer.connectToPeer(relayAddr, options) - disconnectOnFailure = true - } - - try { - const virtualConnection = await hop({ - connection: relayConnection, - request: { - type: CircuitPB.Type.HOP, - srcPeer: { - id: this.peerId.toBytes(), - addrs: this._libp2p.multiaddrs.map(addr => addr.bytes) - }, - dstPeer: { - id: destinationPeer.toBytes(), - addrs: [new Multiaddr(destinationAddr).bytes] - } - } - }) - - const localAddr = relayAddr.encapsulate(`/p2p-circuit/p2p/${this.peerId.toB58String()}`) - const maConn = toConnection({ - stream: virtualConnection, - remoteAddr: ma, - localAddr - }) - log('new outbound connection %s', maConn.remoteAddr) - - return this._upgrader.upgradeOutbound(maConn) - } catch (/** @type {any} */ err) { - log.error('Circuit relay dial failed', err) - disconnectOnFailure && await relayConnection.close() - throw err - } - } - - /** - * Create a listener - * - * @param {any} options - * @param {Function} handler - * @returns {import('libp2p-interfaces/src/transport/types').Listener} - */ - createListener (options, handler) { - if (typeof options === 'function') { - handler = options - options = {} - } - - // Called on successful HOP and STOP requests - this.handler = handler - - return createListener(this._libp2p) - } - - /** - * Filter check for all Multiaddrs that this transport can dial on - * - * @param {Multiaddr[]} multiaddrs - * @returns {Multiaddr[]} - */ - filter (multiaddrs) { - multiaddrs = Array.isArray(multiaddrs) ? multiaddrs : [multiaddrs] - - return multiaddrs.filter((ma) => { - return mafmt.Circuit.matches(ma) - }) - } - - get [Symbol.toStringTag] () { - return 'Circuit' - } - - /** - * Checks if the given value is a Transport instance. 
- * - * @param {any} other - * @returns {other is Transport} - */ - static isTransport (other) { - return Boolean(other && other[transportSymbol]) - } -} - -module.exports = Circuit diff --git a/src/circuit/transport.ts b/src/circuit/transport.ts new file mode 100644 index 00000000..9722d576 --- /dev/null +++ b/src/circuit/transport.ts @@ -0,0 +1,216 @@ +import { logger } from '@libp2p/logger' +import errCode from 'err-code' +import * as mafmt from '@multiformats/mafmt' +import { Multiaddr } from '@multiformats/multiaddr' +import { CircuitRelay as CircuitPB } from './pb/index.js' +import { codes } from '../errors.js' +import { streamToMaConnection } from '@libp2p/utils/stream-to-ma-conn' +import { RELAY_CODEC } from './multicodec.js' +import { createListener } from './listener.js' +import { handleCanHop, handleHop, hop } from './circuit/hop.js' +import { handleStop } from './circuit/stop.js' +import { StreamHandler } from './circuit/stream-handler.js' +import { symbol } from '@libp2p/interfaces/transport' +import { peerIdFromString } from '@libp2p/peer-id' +import { Components, Initializable } from '@libp2p/interfaces/components' +import type { AbortOptions } from '@libp2p/interfaces' +import type { IncomingStreamData } from '@libp2p/interfaces/registrar' +import type { Listener, Transport, CreateListenerOptions, ConnectionHandler } from '@libp2p/interfaces/transport' +import type { Connection } from '@libp2p/interfaces/connection' + +const log = logger('libp2p:circuit') + +export class Circuit implements Transport, Initializable { + private handler?: ConnectionHandler + private components: Components = new Components() + + init (components: Components): void { + this.components = components + void this.components.getRegistrar().handle(RELAY_CODEC, (data) => { + void this._onProtocol(data).catch(err => { + log.error(err) + }) + }) + .catch(err => { + log.error(err) + }) + } + + hopEnabled () { + return true + } + + hopActive () { + return true + } + + get [symbol] (): true { + return true + } + + get [Symbol.toStringTag] () { + return this.constructor.name + } + + async _onProtocol (data: IncomingStreamData) { + const { connection, stream } = data + const streamHandler = new StreamHandler({ stream }) + const request = await streamHandler.read() + + if (request == null) { + log('request was invalid, could not read from stream') + streamHandler.write({ + type: CircuitPB.Type.STATUS, + code: CircuitPB.Status.MALFORMED_MESSAGE + }) + streamHandler.close() + return + } + + let virtualConnection + + switch (request.type) { + case CircuitPB.Type.CAN_HOP: { + log('received CAN_HOP request from %p', connection.remotePeer) + await handleCanHop({ circuit: this, connection, streamHandler }) + break + } + case CircuitPB.Type.HOP: { + log('received HOP request from %p', connection.remotePeer) + virtualConnection = await handleHop({ + connection, + request, + streamHandler, + circuit: this, + connectionManager: this.components.getConnectionManager() + }) + break + } + case CircuitPB.Type.STOP: { + log('received STOP request from %p', connection.remotePeer) + virtualConnection = await handleStop({ + connection, + request, + streamHandler + }) + break + } + default: { + log('Request of type %s not supported', request.type) + streamHandler.write({ + type: CircuitPB.Type.STATUS, + code: CircuitPB.Status.MALFORMED_MESSAGE + }) + streamHandler.close() + return + } + } + + if (virtualConnection != null) { + // @ts-expect-error dst peer will not be undefined + const remoteAddr = new 
Multiaddr(request.dstPeer.addrs[0]) + // @ts-expect-error dst peer will not be undefined + const localAddr = new Multiaddr(request.srcPeer.addrs[0]) + const maConn = streamToMaConnection({ + stream: virtualConnection, + remoteAddr, + localAddr + }) + const type = request.type === CircuitPB.Type.HOP ? 'relay' : 'inbound' + log('new %s connection %s', type, maConn.remoteAddr) + + const conn = await this.components.getUpgrader().upgradeInbound(maConn) + log('%s connection %s upgraded', type, maConn.remoteAddr) + + if (this.handler != null) { + this.handler(conn) + } + } + } + + /** + * Dial a peer over a relay + */ + async dial (ma: Multiaddr, options: AbortOptions = {}): Promise { + // Check the multiaddr to see if it contains a relay and a destination peer + const addrs = ma.toString().split('/p2p-circuit') + const relayAddr = new Multiaddr(addrs[0]) + const destinationAddr = new Multiaddr(addrs[addrs.length - 1]) + const relayId = relayAddr.getPeerId() + const destinationId = destinationAddr.getPeerId() + + if (relayId == null || destinationId == null) { + const errMsg = 'Circuit relay dial failed as addresses did not have peer id' + log.error(errMsg) + throw errCode(new Error(errMsg), codes.ERR_RELAYED_DIAL) + } + + const relayPeer = peerIdFromString(relayId) + const destinationPeer = peerIdFromString(destinationId) + + let disconnectOnFailure = false + let relayConnection = this.components.getConnectionManager().getConnection(relayPeer) + if (relayConnection == null) { + relayConnection = await this.components.getDialer().dial(relayAddr, options) + disconnectOnFailure = true + } + + try { + const virtualConnection = await hop({ + connection: relayConnection, + request: { + type: CircuitPB.Type.HOP, + srcPeer: { + id: this.components.getPeerId().toBytes(), + addrs: this.components.getAddressManager().getAddresses().map(addr => addr.bytes) + }, + dstPeer: { + id: destinationPeer.toBytes(), + addrs: [new Multiaddr(destinationAddr).bytes] + } + } + }) + + const localAddr = relayAddr.encapsulate(`/p2p-circuit/p2p/${this.components.getPeerId().toString()}`) + const maConn = streamToMaConnection({ + stream: virtualConnection, + remoteAddr: ma, + localAddr + }) + log('new outbound connection %s', maConn.remoteAddr) + + return await this.components.getUpgrader().upgradeOutbound(maConn) + } catch (err: any) { + log.error('Circuit relay dial failed', err) + disconnectOnFailure && await relayConnection.close() + throw err + } + } + + /** + * Create a listener + */ + createListener (options: CreateListenerOptions): Listener { + // Called on successful HOP and STOP requests + this.handler = options.handler + + return createListener({ + dialer: this.components.getDialer(), + connectionManager: this.components.getConnectionManager() + }) + } + + /** + * Filter check for all Multiaddrs that this transport can dial on + * + * @param {Multiaddr[]} multiaddrs + * @returns {Multiaddr[]} + */ + filter (multiaddrs: Multiaddr[]): Multiaddr[] { + multiaddrs = Array.isArray(multiaddrs) ? multiaddrs : [multiaddrs] + + return multiaddrs.filter((ma) => { + return mafmt.Circuit.matches(ma) + }) + } +} diff --git a/src/circuit/utils.js b/src/circuit/utils.js deleted file mode 100644 index 7f681a54..00000000 --- a/src/circuit/utils.js +++ /dev/null @@ -1,17 +0,0 @@ -'use strict' - -const { CID } = require('multiformats/cid') -const { sha256 } = require('multiformats/hashes/sha2') - -/** - * Convert a namespace string into a cid. 
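The rewritten `Circuit.dial()` keeps the old address convention: the relay's multiaddr comes before `/p2p-circuit`, the destination comes after it, and both sides must carry a peer id. A rough sketch of that split (the peer ids below are placeholders, not real identifiers):

```js
// Sketch only: how dial() interprets a circuit address before building the HOP request.
const circuitAddr = '/ip4/203.0.113.1/tcp/4001/p2p/<relay-peer-id>/p2p-circuit/p2p/<destination-peer-id>'

const parts = circuitAddr.split('/p2p-circuit')
const relayAddr = parts[0]                      // dialled first, unless a connection to the relay already exists
const destinationAddr = parts[parts.length - 1] // identifies the destination peer for the HOP request

console.log({ relayAddr, destinationAddr })
```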
- * - * @param {string} namespace - * @returns {Promise} - */ -module.exports.namespaceToCid = async (namespace) => { - const bytes = new TextEncoder().encode(namespace) - const hash = await sha256.digest(bytes) - - return CID.createV0(hash) -} diff --git a/src/circuit/utils.ts b/src/circuit/utils.ts new file mode 100644 index 00000000..eb3bcd6f --- /dev/null +++ b/src/circuit/utils.ts @@ -0,0 +1,12 @@ +import { CID } from 'multiformats/cid' +import { sha256 } from 'multiformats/hashes/sha2' + +/** + * Convert a namespace string into a cid + */ +export async function namespaceToCid (namespace: string): Promise { + const bytes = new TextEncoder().encode(namespace) + const hash = await sha256.digest(bytes) + + return CID.createV0(hash) +} diff --git a/src/config.js b/src/config.js deleted file mode 100644 index b2d3f604..00000000 --- a/src/config.js +++ /dev/null @@ -1,114 +0,0 @@ -'use strict' - -const mergeOptions = require('merge-options') -// @ts-ignore no types in multiaddr path -const { dnsaddrResolver } = require('multiaddr/src/resolvers') - -const Constants = require('./constants') -const { AGENT_VERSION } = require('./identify/consts') -const RelayConstants = require('./circuit/constants') - -const { publicAddressesFirst } = require('libp2p-utils/src/address-sort') -const { FaultTolerance } = require('./transport-manager') - -/** - * @typedef {import('multiaddr').Multiaddr} Multiaddr - * @typedef {import('./types').ConnectionGater} ConnectionGater - * @typedef {import('.').Libp2pOptions} Libp2pOptions - * @typedef {import('.').constructorOptions} constructorOptions - */ - -const DefaultConfig = { - addresses: { - listen: [], - announce: [], - noAnnounce: [], - announceFilter: (/** @type {Multiaddr[]} */ multiaddrs) => multiaddrs - }, - connectionManager: { - minConnections: 25 - }, - connectionGater: /** @type {ConnectionGater} */ {}, - transportManager: { - faultTolerance: FaultTolerance.FATAL_ALL - }, - dialer: { - maxParallelDials: Constants.MAX_PARALLEL_DIALS, - maxDialsPerPeer: Constants.MAX_PER_PEER_DIALS, - dialTimeout: Constants.DIAL_TIMEOUT, - resolvers: { - dnsaddr: dnsaddrResolver - }, - addressSorter: publicAddressesFirst - }, - host: { - agentVersion: AGENT_VERSION - }, - metrics: { - enabled: false - }, - peerStore: { - persistence: false, - threshold: 5 - }, - peerRouting: { - refreshManager: { - enabled: true, - interval: 6e5, - bootDelay: 10e3 - } - }, - config: { - protocolPrefix: 'ipfs', - dht: { - enabled: false, - kBucketSize: 20 - }, - nat: { - enabled: true, - ttl: 7200, - keepAlive: true, - gateway: null, - externalIp: null, - pmp: { - enabled: false - } - }, - peerDiscovery: { - autoDial: true - }, - pubsub: { - enabled: true - }, - relay: { - enabled: true, - advertise: { - bootDelay: RelayConstants.ADVERTISE_BOOT_DELAY, - enabled: false, - ttl: RelayConstants.ADVERTISE_TTL - }, - hop: { - enabled: false, - active: false - }, - autoRelay: { - enabled: false, - maxListeners: 2 - } - }, - transport: {} - } -} - -/** - * @param {Libp2pOptions} opts - * @returns {DefaultConfig & Libp2pOptions & constructorOptions} - */ -module.exports.validate = (opts) => { - /** @type {DefaultConfig & Libp2pOptions & constructorOptions} */ - const resultingOptions = mergeOptions(DefaultConfig, opts) - - if (resultingOptions.modules.transport.length < 1) throw new Error("'options.modules.transport' must contain at least 1 transport") - - return resultingOptions -} diff --git a/src/config.ts b/src/config.ts new file mode 100644 index 00000000..2720c8a9 --- /dev/null +++ 
b/src/config.ts @@ -0,0 +1,101 @@ +import mergeOptions from 'merge-options' +import { dnsaddrResolver } from '@multiformats/multiaddr/resolvers' +import * as Constants from './constants.js' +import { AGENT_VERSION } from './identify/consts.js' +import * as RelayConstants from './circuit/constants.js' +import { publicAddressesFirst } from '@libp2p/utils/address-sort' +import { FAULT_TOLERANCE } from './transport-manager.js' +import type { Multiaddr } from '@multiformats/multiaddr' +import type { Libp2pInit } from './index.js' +import { codes, messages } from './errors.js' +import errCode from 'err-code' +import type { RecursivePartial } from '@libp2p/interfaces' + +const DefaultConfig: Partial = { + addresses: { + listen: [], + announce: [], + noAnnounce: [], + announceFilter: (multiaddrs: Multiaddr[]) => multiaddrs + }, + connectionManager: { + maxConnections: 300, + minConnections: 50, + autoDialInterval: 10000, + autoDial: true + }, + connectionGater: {}, + transportManager: { + faultTolerance: FAULT_TOLERANCE.FATAL_ALL + }, + dialer: { + maxParallelDials: Constants.MAX_PARALLEL_DIALS, + maxDialsPerPeer: Constants.MAX_PER_PEER_DIALS, + dialTimeout: Constants.DIAL_TIMEOUT, + resolvers: { + dnsaddr: dnsaddrResolver + }, + addressSorter: publicAddressesFirst + }, + host: { + agentVersion: AGENT_VERSION + }, + metrics: { + enabled: false, + computeThrottleMaxQueueSize: 1000, + computeThrottleTimeout: 2000, + movingAverageIntervals: [ + 60 * 1000, // 1 minute + 5 * 60 * 1000, // 5 minutes + 15 * 60 * 1000 // 15 minutes + ], + maxOldPeersRetention: 50 + }, + peerRouting: { + refreshManager: { + enabled: true, + interval: 6e5, + bootDelay: 10e3 + } + }, + protocolPrefix: 'ipfs', + nat: { + enabled: true, + ttl: 7200, + keepAlive: true + }, + relay: { + enabled: true, + advertise: { + bootDelay: RelayConstants.ADVERTISE_BOOT_DELAY, + enabled: false, + ttl: RelayConstants.ADVERTISE_TTL + }, + hop: { + enabled: false, + active: false + }, + autoRelay: { + enabled: false, + maxListeners: 2 + } + } +} + +export function validateConfig (opts: RecursivePartial): Libp2pInit { + const resultingOptions: Libp2pInit = mergeOptions(DefaultConfig, opts) + + if (resultingOptions.transports == null || resultingOptions.transports.length < 1) { + throw errCode(new Error(messages.ERR_TRANSPORTS_REQUIRED), codes.ERR_TRANSPORTS_REQUIRED) + } + + if (resultingOptions.connectionEncryption == null || resultingOptions.connectionEncryption.length === 0) { + throw errCode(new Error(messages.CONN_ENCRYPTION_REQUIRED), codes.CONN_ENCRYPTION_REQUIRED) + } + + if (resultingOptions.connectionProtector === null && globalThis.process?.env?.LIBP2P_FORCE_PNET != null) { // eslint-disable-line no-undef + throw errCode(new Error(messages.ERR_PROTECTOR_REQUIRED), codes.ERR_PROTECTOR_REQUIRED) + } + + return resultingOptions +} diff --git a/src/connection-manager/auto-dialler.js b/src/connection-manager/auto-dialler.js deleted file mode 100644 index 1468c48e..00000000 --- a/src/connection-manager/auto-dialler.js +++ /dev/null @@ -1,132 +0,0 @@ -'use strict' - -const debug = require('debug') -const mergeOptions = require('merge-options') -// @ts-ignore retimer does not have types -const retimer = require('retimer') -const all = require('it-all') -const { pipe } = require('it-pipe') -const filter = require('it-filter') -const sort = require('it-sort') - -const log = Object.assign(debug('libp2p:connection-manager:auto-dialler'), { - error: debug('libp2p:connection-manager:auto-dialler:err') -}) - -const defaultOptions = { - enabled: 
true, - minConnections: 0, - autoDialInterval: 10000 -} - -/** - * @typedef {import('../index')} Libp2p - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - */ - -/** - * @typedef {Object} AutoDiallerOptions - * @property {boolean} [enabled = true] - Should preemptively guarantee connections are above the low watermark - * @property {number} [minConnections = 0] - The minimum number of connections to avoid pruning - * @property {number} [autoDialInterval = 10000] - How often, in milliseconds, it should preemptively guarantee connections are above the low watermark - */ - -class AutoDialler { - /** - * Proactively tries to connect to known peers stored in the PeerStore. - * It will keep the number of connections below the upper limit and sort - * the peers to connect based on wether we know their keys and protocols. - * - * @class - * @param {Libp2p} libp2p - * @param {AutoDiallerOptions} options - */ - constructor (libp2p, options = {}) { - this._options = mergeOptions.call({ ignoreUndefined: true }, defaultOptions, options) - this._libp2p = libp2p - this._running = false - this._autoDialTimeout = null - this._autoDial = this._autoDial.bind(this) - - log('options: %j', this._options) - } - - /** - * Starts the auto dialer - */ - async start () { - if (!this._options.enabled) { - log('not enabled') - return - } - - this._running = true - this._autoDial().catch(err => { - log.error('could start autodial', err) - }) - log('started') - } - - /** - * Stops the auto dialler - */ - async stop () { - if (!this._options.enabled) { - log('not enabled') - return - } - - this._running = false - this._autoDialTimeout && this._autoDialTimeout.clear() - log('stopped') - } - - async _autoDial () { - const minConnections = this._options.minConnections - - // Already has enough connections - if (this._libp2p.connections.size >= minConnections) { - this._autoDialTimeout = retimer(this._autoDial, this._options.autoDialInterval) - return - } - - // Sort peers on whether we know protocols of public keys for them - // TODO: assuming the `peerStore.getPeers()` order is stable this will mean - // we keep trying to connect to the same peers? 
- const peers = await pipe( - this._libp2p.peerStore.getPeers(), - (source) => filter(source, (peer) => !peer.id.equals(this._libp2p.peerId)), - (source) => sort(source, (a, b) => { - if (b.protocols && b.protocols.length && (!a.protocols || !a.protocols.length)) { - return 1 - } else if (b.id.pubKey && !a.id.pubKey) { - return 1 - } - return -1 - }), - (source) => all(source) - ) - - for (let i = 0; this._running && i < peers.length && this._libp2p.connections.size < minConnections; i++) { - const peer = peers[i] - - if (!this._libp2p.connectionManager.get(peer.id)) { - log('connecting to a peerStore stored peer %s', peer.id.toB58String()) - try { - await this._libp2p.dialer.connectToPeer(peer.id) - } catch (/** @type {any} */ err) { - log.error('could not connect to peerStore stored peer', err) - } - } - } - - // Connection Manager was stopped - if (!this._running) { - return - } - - this._autoDialTimeout = retimer(this._autoDial, this._options.autoDialInterval) - } -} - -module.exports = AutoDialler diff --git a/src/connection-manager/auto-dialler.ts b/src/connection-manager/auto-dialler.ts new file mode 100644 index 00000000..d14cbbe5 --- /dev/null +++ b/src/connection-manager/auto-dialler.ts @@ -0,0 +1,154 @@ +import { logger } from '@libp2p/logger' +import mergeOptions from 'merge-options' +// @ts-expect-error retimer does not have types +import retimer from 'retimer' +import all from 'it-all' +import { pipe } from 'it-pipe' +import filter from 'it-filter' +import sort from 'it-sort' +import type { Startable } from '@libp2p/interfaces' +import type { Components } from '@libp2p/interfaces/components' + +const log = logger('libp2p:connection-manager:auto-dialler') + +export interface AutoDiallerInit { + /** + * Should preemptively guarantee connections are above the low watermark + */ + enabled?: boolean + + /** + * The minimum number of connections to avoid pruning + */ + minConnections?: number + + /** + * How often, in milliseconds, it should preemptively guarantee connections are above the low watermark + */ + autoDialInterval?: number +} + +const defaultOptions: Partial = { + enabled: true, + minConnections: 0, + autoDialInterval: 10000 +} + +export class AutoDialler implements Startable { + private readonly components: Components + private readonly options: Required + private running: boolean + private autoDialTimeout?: ReturnType + + /** + * Proactively tries to connect to known peers stored in the PeerStore. + * It will keep the number of connections below the upper limit and sort + * the peers to connect based on wether we know their keys and protocols. 
+ */ + constructor (components: Components, init: AutoDiallerInit) { + this.components = components + this.options = mergeOptions.call({ ignoreUndefined: true }, defaultOptions, init) + this.running = false + this._autoDial = this._autoDial.bind(this) + + log('options: %j', this.options) + } + + isStarted () { + return this.running + } + + /** + * Starts the auto dialer + */ + async start () { + if (!this.options.enabled) { + log('not enabled') + return + } + + this.running = true + + void this._autoDial().catch(err => { + log.error('could start autodial', err) + }) + + log('started') + } + + /** + * Stops the auto dialler + */ + async stop () { + if (!this.options.enabled) { + log('not enabled') + return + } + + this.running = false + + if (this.autoDialTimeout != null) { + this.autoDialTimeout.clear() + } + + log('stopped') + } + + async _autoDial () { + if (this.autoDialTimeout != null) { + this.autoDialTimeout.clear() + } + + const minConnections = this.options.minConnections + + // Already has enough connections + if (this.components.getConnectionManager().getConnectionList().length >= minConnections) { + this.autoDialTimeout = retimer(this._autoDial, this.options.autoDialInterval) + + return + } + + // Sort peers on whether we know protocols or public keys for them + const allPeers = await this.components.getPeerStore().all() + + const peers = await pipe( + // shuffle the peers + allPeers.sort(() => Math.random() > 0.5 ? 1 : -1), + (source) => filter(source, (peer) => !peer.id.equals(this.components.getPeerId())), + (source) => sort(source, (a, b) => { + if (b.protocols.length > a.protocols.length) { + return 1 + } else if (b.id.publicKey != null && a.id.publicKey == null) { + return 1 + } + return -1 + }), + async (source) => await all(source) + ) + + for (let i = 0; this.running && i < peers.length && this.components.getConnectionManager().getConnectionList().length < minConnections; i++) { + // Connection Manager was stopped during async dial + if (!this.running) { + return + } + + const peer = peers[i] + + if (this.components.getConnectionManager().getConnection(peer.id) == null) { + log('connecting to a peerStore stored peer %p', peer.id) + try { + await this.components.getDialer().dial(peer.id) + } catch (err: any) { + log.error('could not connect to peerStore stored peer', err) + } + } + } + + // Connection Manager was stopped + if (!this.running) { + return + } + + this.autoDialTimeout = retimer(this._autoDial, this.options.autoDialInterval) + } +} diff --git a/src/connection-manager/index.js b/src/connection-manager/index.js deleted file mode 100644 index eeae1559..00000000 --- a/src/connection-manager/index.js +++ /dev/null @@ -1,374 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:connection-manager'), { - error: debug('libp2p:connection-manager:err') -}) - -const errcode = require('err-code') -const mergeOptions = require('merge-options') -const LatencyMonitor = require('./latency-monitor') -// @ts-ignore retimer does not have types -const retimer = require('retimer') - -const { EventEmitter } = require('events') -const trackedMap = require('../metrics/tracked-map') -const PeerId = require('peer-id') - -const { - codes: { ERR_INVALID_PARAMETERS } -} = require('../errors') - -const defaultOptions = { - maxConnections: Infinity, - minConnections: 0, - maxData: Infinity, - maxSentData: Infinity, - maxReceivedData: Infinity, - maxEventLoopDelay: Infinity, - pollInterval: 2000, - autoDialInterval: 10000, - movingAverageInterval: 
60000, - defaultPeerValue: 1 -} - -const METRICS_COMPONENT = 'connection-manager' -const METRICS_PEER_CONNECTIONS = 'peer-connections' -const METRICS_PEER_VALUES = 'peer-values' - -/** - * @typedef {import('../')} Libp2p - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - */ - -/** - * @typedef {Object} ConnectionManagerOptions - * @property {number} [maxConnections = Infinity] - The maximum number of connections allowed. - * @property {number} [minConnections = 0] - The minimum number of connections to avoid pruning. - * @property {number} [maxData = Infinity] - The max data (in and out), per average interval to allow. - * @property {number} [maxSentData = Infinity] - The max outgoing data, per average interval to allow. - * @property {number} [maxReceivedData = Infinity] - The max incoming data, per average interval to allow. - * @property {number} [maxEventLoopDelay = Infinity] - The upper limit the event loop can take to run. - * @property {number} [pollInterval = 2000] - How often, in milliseconds, metrics and latency should be checked. - * @property {number} [movingAverageInterval = 60000] - How often, in milliseconds, to compute averages. - * @property {number} [defaultPeerValue = 1] - The value of the peer. - * @property {boolean} [autoDial = true] - Should preemptively guarantee connections are above the low watermark. - * @property {number} [autoDialInterval = 10000] - How often, in milliseconds, it should preemptively guarantee connections are above the low watermark. - */ - -/** - * - * @fires ConnectionManager#peer:connect Emitted when a new peer is connected. - * @fires ConnectionManager#peer:disconnect Emitted when a peer is disconnected. - */ -class ConnectionManager extends EventEmitter { - /** - * Responsible for managing known connections. - * - * @class - * @param {Libp2p} libp2p - * @param {ConnectionManagerOptions} options - */ - constructor (libp2p, options = {}) { - super() - - this._libp2p = libp2p - this._peerId = libp2p.peerId.toB58String() - - this._options = mergeOptions.call({ ignoreUndefined: true }, defaultOptions, options) - if (this._options.maxConnections < this._options.minConnections) { - throw errcode(new Error('Connection Manager maxConnections must be greater than minConnections'), ERR_INVALID_PARAMETERS) - } - - log('options: %j', this._options) - - /** - * Map of peer identifiers to their peer value for pruning connections. - * - * @type {Map} - */ - this._peerValues = trackedMap({ - component: METRICS_COMPONENT, - metric: METRICS_PEER_VALUES, - metrics: this._libp2p.metrics - }) - - /** - * Map of connections per peer - * - * @type {Map} - */ - this.connections = trackedMap({ - component: METRICS_COMPONENT, - metric: METRICS_PEER_CONNECTIONS, - metrics: this._libp2p.metrics - }) - - this._started = false - this._timer = null - this._checkMetrics = this._checkMetrics.bind(this) - - this._latencyMonitor = new LatencyMonitor({ - latencyCheckIntervalMs: this._options.pollInterval, - dataEmitIntervalMs: this._options.pollInterval - }) - - // This emitter gets listened to a lot - this.setMaxListeners(Infinity) - } - - /** - * Get current number of open connections. - */ - get size () { - return Array.from(this.connections.values()) - .reduce((accumulator, value) => accumulator + value.length, 0) - } - - /** - * Starts the Connection Manager. If Metrics are not enabled on libp2p - * only event loop and connection limits will be monitored. 
- */ - start () { - if (this._libp2p.metrics) { - this._timer = this._timer || retimer(this._checkMetrics, this._options.pollInterval) - } - - // latency monitor - this._latencyMonitor.start() - this._onLatencyMeasure = this._onLatencyMeasure.bind(this) - this._latencyMonitor.on('data', this._onLatencyMeasure) - - this._started = true - log('started') - } - - /** - * Stops the Connection Manager - * - * @async - */ - async stop () { - this._timer && this._timer.clear() - - this._latencyMonitor.removeListener('data', this._onLatencyMeasure) - this._latencyMonitor.stop() - - this._started = false - await this._close() - log('stopped') - } - - /** - * Cleans up the connections - * - * @async - */ - async _close () { - // Close all connections we're tracking - const tasks = [] - for (const connectionList of this.connections.values()) { - for (const connection of connectionList) { - tasks.push(connection.close()) - } - } - - await Promise.all(tasks) - this.connections.clear() - } - - /** - * Sets the value of the given peer. Peers with lower values - * will be disconnected first. - * - * @param {PeerId} peerId - * @param {number} value - A number between 0 and 1 - * @returns {void} - */ - setPeerValue (peerId, value) { - if (value < 0 || value > 1) { - throw new Error('value should be a number between 0 and 1') - } - this._peerValues.set(peerId.toB58String(), value) - } - - /** - * Checks the libp2p metrics to determine if any values have exceeded - * the configured maximums. - * - * @private - */ - async _checkMetrics () { - if (this._libp2p.metrics) { - try { - const movingAverages = this._libp2p.metrics.global.movingAverages - // @ts-ignore moving averages object types - const received = movingAverages.dataReceived[this._options.movingAverageInterval].movingAverage() - await this._checkMaxLimit('maxReceivedData', received) - // @ts-ignore moving averages object types - const sent = movingAverages.dataSent[this._options.movingAverageInterval].movingAverage() - await this._checkMaxLimit('maxSentData', sent) - const total = received + sent - await this._checkMaxLimit('maxData', total) - log('metrics update', total) - } finally { - this._timer = retimer(this._checkMetrics, this._options.pollInterval) - } - } - } - - /** - * Tracks the incoming connection and check the connection limit - * - * @param {Connection} connection - */ - async onConnect (connection) { - if (!this._started) { - // This can happen when we are in the process of shutting down the node - await connection.close() - return - } - - const peerId = connection.remotePeer - const peerIdStr = peerId.toB58String() - const storedConn = this.connections.get(peerIdStr) - - this.emit('peer:connect', connection) - - if (storedConn) { - storedConn.push(connection) - } else { - this.connections.set(peerIdStr, [connection]) - } - - await this._libp2p.peerStore.keyBook.set(peerId, peerId.pubKey) - - if (!this._peerValues.has(peerIdStr)) { - this._peerValues.set(peerIdStr, this._options.defaultPeerValue) - } - - await this._checkMaxLimit('maxConnections', this.size) - } - - /** - * Removes the connection from tracking - * - * @param {Connection} connection - * @returns {void} - */ - onDisconnect (connection) { - if (!this._started) { - // This can happen when we are in the process of shutting down the node - return - } - - const peerId = connection.remotePeer.toB58String() - let storedConn = this.connections.get(peerId) - - if (storedConn && storedConn.length > 1) { - storedConn = storedConn.filter((conn) => conn.id !== connection.id) - 
this.connections.set(peerId, storedConn) - } else if (storedConn) { - this.connections.delete(peerId) - this._peerValues.delete(connection.remotePeer.toB58String()) - this.emit('peer:disconnect', connection) - - this._libp2p.metrics && this._libp2p.metrics.onPeerDisconnected(connection.remotePeer) - } - } - - /** - * Get a connection with a peer. - * - * @param {PeerId} peerId - * @returns {Connection|null} - */ - get (peerId) { - const connections = this.getAll(peerId) - if (connections.length) { - return connections[0] - } - return null - } - - /** - * Get all open connections with a peer. - * - * @param {PeerId} peerId - * @returns {Connection[]} - */ - getAll (peerId) { - if (!PeerId.isPeerId(peerId)) { - throw errcode(new Error('peerId must be an instance of peer-id'), ERR_INVALID_PARAMETERS) - } - - const id = peerId.toB58String() - const connections = this.connections.get(id) - - // Return all open connections - if (connections) { - return connections.filter(connection => connection.stat.status === 'open') - } - return [] - } - - /** - * If the event loop is slow, maybe close a connection - * - * @private - * @param {*} summary - The LatencyMonitor summary - */ - _onLatencyMeasure (summary) { - this._checkMaxLimit('maxEventLoopDelay', summary.avgMs) - .catch(err => { - log.error(err) - }) - } - - /** - * If the `value` of `name` has exceeded its limit, maybe close a connection - * - * @private - * @param {string} name - The name of the field to check limits for - * @param {number} value - The current value of the field - */ - async _checkMaxLimit (name, value) { - const limit = this._options[name] - log('checking limit of %s. current value: %d of %d', name, value, limit) - if (value > limit) { - log('%s: limit exceeded: %s, %d', this._peerId, name, value) - await this._maybeDisconnectOne() - } - } - - /** - * If we have more connections than our maximum, close a connection - * to the lowest valued peer. 
- * - * @private - */ - async _maybeDisconnectOne () { - if (this._options.minConnections < this.connections.size) { - const peerValues = Array.from(new Map([...this._peerValues.entries()].sort((a, b) => a[1] - b[1]))) - log('%s: sorted peer values: %j', this._peerId, peerValues) - const disconnectPeer = peerValues[0] - if (disconnectPeer) { - const peerId = disconnectPeer[0] - log('%s: lowest value peer is %s', this._peerId, peerId) - log('%s: closing a connection to %j', this._peerId, peerId) - for (const connections of this.connections.values()) { - if (connections[0].remotePeer.toB58String() === peerId) { - connections[0].close().catch(err => { - log.error(err) - }) - // TODO: should not need to invoke this manually - this.onDisconnect(connections[0]) - break - } - } - } - } - } -} - -module.exports = ConnectionManager diff --git a/src/connection-manager/index.ts b/src/connection-manager/index.ts new file mode 100644 index 00000000..ad165d87 --- /dev/null +++ b/src/connection-manager/index.ts @@ -0,0 +1,422 @@ +import { logger } from '@libp2p/logger' +import errCode from 'err-code' +import mergeOptions from 'merge-options' +import { LatencyMonitor, SummaryObject } from './latency-monitor.js' +// @ts-expect-error retimer does not have types +import retimer from 'retimer' +import { CustomEvent, EventEmitter, Startable } from '@libp2p/interfaces' +import { trackedMap } from '@libp2p/tracked-map' +import { codes } from '../errors.js' +import { isPeerId, PeerId } from '@libp2p/interfaces/peer-id' +// @ts-expect-error setMaxListeners is missing from the node 16 types +import { setMaxListeners } from 'events' +import type { Connection } from '@libp2p/interfaces/connection' +import type { ConnectionManager } from '@libp2p/interfaces/registrar' +import type { Components } from '@libp2p/interfaces/components' +import * as STATUS from '@libp2p/interfaces/connection/status' + +const log = logger('libp2p:connection-manager') + +const defaultOptions: Partial = { + maxConnections: Infinity, + minConnections: 0, + maxData: Infinity, + maxSentData: Infinity, + maxReceivedData: Infinity, + maxEventLoopDelay: Infinity, + pollInterval: 2000, + autoDialInterval: 10000, + movingAverageInterval: 60000, + defaultPeerValue: 1 +} + +const METRICS_COMPONENT = 'connection-manager' +const METRICS_PEER_CONNECTIONS = 'peer-connections' +const METRICS_PEER_VALUES = 'peer-values' + +export interface ConnectionManagerEvents { + 'peer:connect': CustomEvent + 'peer:disconnect': CustomEvent +} + +export interface ConnectionManagerInit { + /** + * The maximum number of connections allowed + */ + maxConnections?: number + + /** + * The minimum number of connections to avoid pruning + */ + minConnections?: number + + /** + * The max data (in and out), per average interval to allow + */ + maxData?: number + + /** + * The max outgoing data, per average interval to allow + */ + maxSentData?: number + + /** + * The max incoming data, per average interval to allow + */ + maxReceivedData?: number + + /** + * The upper limit the event loop can take to run + */ + maxEventLoopDelay?: number + + /** + * How often, in milliseconds, metrics and latency should be checked + */ + pollInterval?: number + + /** + * How often, in milliseconds, to compute averages + */ + movingAverageInterval?: number + + /** + * The value of the peer + */ + defaultPeerValue?: number + + /** + * Should preemptively guarantee connections are above the low watermark + */ + autoDial?: boolean + + /** + * How often, in milliseconds, it should preemptively 
guarantee connections are above the low watermark + */ + autoDialInterval?: number +} + +/** + * Responsible for managing known connections. + */ +export class DefaultConnectionManager extends EventEmitter implements ConnectionManager, Startable { + private readonly components: Components + private readonly init: Required + private readonly peerValues: Map + private readonly connections: Map + private started: boolean + private timer?: ReturnType + private readonly latencyMonitor: LatencyMonitor + + constructor (components: Components, init: ConnectionManagerInit = {}) { + super() + + this.components = components + this.init = mergeOptions.call({ ignoreUndefined: true }, defaultOptions, init) + + if (this.init.maxConnections < this.init.minConnections) { + throw errCode(new Error('Connection Manager maxConnections must be greater than minConnections'), codes.ERR_INVALID_PARAMETERS) + } + + log('options: %o', this.init) + + /** + * Map of peer identifiers to their peer value for pruning connections. + * + * @type {Map} + */ + this.peerValues = trackedMap({ + component: METRICS_COMPONENT, + metric: METRICS_PEER_VALUES, + metrics: this.components.getMetrics() + }) + + /** + * Map of connections per peer + */ + this.connections = trackedMap({ + component: METRICS_COMPONENT, + metric: METRICS_PEER_CONNECTIONS, + metrics: this.components.getMetrics() + }) + + this.started = false + this._checkMetrics = this._checkMetrics.bind(this) + + this.latencyMonitor = new LatencyMonitor({ + latencyCheckIntervalMs: init.pollInterval, + dataEmitIntervalMs: init.pollInterval + }) + + try { + // This emitter gets listened to a lot + setMaxListeners?.(Infinity, this) + } catch {} + + this.components.getUpgrader().addEventListener('connection', (evt) => { + void this.onConnect(evt).catch(err => { + log.error(err) + }) + }) + this.components.getUpgrader().addEventListener('connectionEnd', this.onDisconnect.bind(this)) + } + + isStarted () { + return this.started + } + + /** + * Starts the Connection Manager. If Metrics are not enabled on libp2p + * only event loop and connection limits will be monitored. + */ + async start () { + if (this.components.getMetrics() != null) { + this.timer = this.timer ?? retimer(this._checkMetrics, this.init.pollInterval) + } + + // latency monitor + this.latencyMonitor.start() + this._onLatencyMeasure = this._onLatencyMeasure.bind(this) + this.latencyMonitor.addEventListener('data', this._onLatencyMeasure) + + this.started = true + log('started') + } + + /** + * Stops the Connection Manager + */ + async stop () { + this.timer?.clear() + + this.latencyMonitor.removeEventListener('data', this._onLatencyMeasure) + this.latencyMonitor.stop() + + this.started = false + await this._close() + log('stopped') + } + + /** + * Cleans up the connections + */ + async _close () { + // Close all connections we're tracking + const tasks = [] + for (const connectionList of this.connections.values()) { + for (const connection of connectionList) { + tasks.push(connection.close()) + } + } + + log('closing %d connections', tasks.length) + await Promise.all(tasks) + this.connections.clear() + } + + /** + * Sets the value of the given peer. Peers with lower values + * will be disconnected first. + */ + setPeerValue (peerId: PeerId, value: number) { + if (value < 0 || value > 1) { + throw new Error('value should be a number between 0 and 1') + } + + this.peerValues.set(peerId.toString(), value) + } + + /** + * Checks the libp2p metrics to determine if any values have exceeded + * the configured maximums. 
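Two call-site differences in the rewritten connection manager are worth noting: the old `size` getter is replaced by `getConnectionList().length`, and `setPeerValue()` still takes a weight between 0 and 1 that decides which peer gets pruned first. A small sketch, assuming `connectionManager` and `peerId` come from a running node:

```js
// Sketch only: new accessor names on the rewritten connection manager.
function connectionStats (connectionManager, peerId) {
  // the `.size` property is gone – count tracked connections via getConnectionList()
  const openConnections = connectionManager.getConnectionList().length

  // peers can still be weighted between 0 and 1; lower-valued peers are pruned first
  connectionManager.setPeerValue(peerId, 0.1)

  return openConnections
}
```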
+ * + * @private + */ + async _checkMetrics () { + const metrics = this.components.getMetrics() + + if (metrics != null) { + try { + const movingAverages = metrics.getGlobal().getMovingAverages() + const received = movingAverages.dataReceived[this.init.movingAverageInterval].movingAverage + await this._checkMaxLimit('maxReceivedData', received) + const sent = movingAverages.dataSent[this.init.movingAverageInterval].movingAverage + await this._checkMaxLimit('maxSentData', sent) + const total = received + sent + await this._checkMaxLimit('maxData', total) + log('metrics update', total) + } finally { + this.timer = retimer(this._checkMetrics, this.init.pollInterval) + } + } + } + + /** + * Tracks the incoming connection and check the connection limit + */ + async onConnect (evt: CustomEvent) { + const { detail: connection } = evt + + if (!this.started) { + // This can happen when we are in the process of shutting down the node + await connection.close() + return + } + + const peerId = connection.remotePeer + const peerIdStr = peerId.toString() + const storedConns = this.connections.get(peerIdStr) + + this.dispatchEvent(new CustomEvent('peer:connect', { detail: connection })) + + if (storedConns != null) { + storedConns.push(connection) + } else { + this.connections.set(peerIdStr, [connection]) + } + + if (peerId.publicKey != null) { + await this.components.getPeerStore().keyBook.set(peerId, peerId.publicKey) + } + + if (!this.peerValues.has(peerIdStr)) { + this.peerValues.set(peerIdStr, this.init.defaultPeerValue) + } + + await this._checkMaxLimit('maxConnections', this.getConnectionList().length) + } + + /** + * Removes the connection from tracking + */ + onDisconnect (evt: CustomEvent) { + const { detail: connection } = evt + + if (!this.started) { + // This can happen when we are in the process of shutting down the node + return + } + + const peerId = connection.remotePeer.toString() + let storedConn = this.connections.get(peerId) + + if (storedConn != null && storedConn.length > 1) { + storedConn = storedConn.filter((conn) => conn.id !== connection.id) + this.connections.set(peerId, storedConn) + } else if (storedConn != null) { + this.connections.delete(peerId) + this.peerValues.delete(connection.remotePeer.toString()) + this.dispatchEvent(new CustomEvent('peer:disconnect', { detail: connection })) + + this.components.getMetrics()?.onPeerDisconnected(connection.remotePeer) + } + } + + getConnectionMap (): Map { + return this.connections + } + + getConnectionList (): Connection[] { + let output: Connection[] = [] + + for (const connections of this.connections.values()) { + output = output.concat(connections) + } + + return output + } + + getConnections (peerId: PeerId): Connection[] { + return this.connections.get(peerId.toString()) ?? 
[] + } + + /** + * Get a connection with a peer + */ + getConnection (peerId: PeerId): Connection | undefined { + const connections = this.getAll(peerId) + + if (connections.length > 0) { + return connections[0] + } + + return undefined + } + + /** + * Get all open connections with a peer + */ + getAll (peerId: PeerId): Connection[] { + if (!isPeerId(peerId)) { + throw errCode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) + } + + const id = peerId.toString() + const connections = this.connections.get(id) + + // Return all open connections + if (connections != null) { + return connections.filter(connection => connection.stat.status === STATUS.OPEN) + } + + return [] + } + + /** + * If the event loop is slow, maybe close a connection + */ + _onLatencyMeasure (evt: CustomEvent) { + const { detail: summary } = evt + + this._checkMaxLimit('maxEventLoopDelay', summary.avgMs) + .catch(err => { + log.error(err) + }) + } + + /** + * If the `value` of `name` has exceeded its limit, maybe close a connection + */ + async _checkMaxLimit (name: keyof ConnectionManagerInit, value: number) { + const limit = this.init[name] + log.trace('checking limit of %s. current value: %d of %d', name, value, limit) + if (value > limit) { + log('%s: limit exceeded: %p, %d', this.components.getPeerId(), name, value) + await this._maybeDisconnectOne() + } + } + + /** + * If we have more connections than our maximum, close a connection + * to the lowest valued peer. + */ + async _maybeDisconnectOne () { + if (this.init.minConnections < this.connections.size) { + const peerValues = Array.from(new Map([...this.peerValues.entries()].sort((a, b) => a[1] - b[1]))) + + log('%p: sorted peer values: %j', this.components.getPeerId(), peerValues) + const disconnectPeer = peerValues[0] + + if (disconnectPeer != null) { + const peerId = disconnectPeer[0] + log('%p: lowest value peer is %s', this.components.getPeerId(), peerId) + log('%p: closing a connection to %j', this.components.getPeerId(), peerId) + + for (const connections of this.connections.values()) { + if (connections[0].remotePeer.toString() === peerId) { + void connections[0].close() + .catch(err => { + log.error(err) + }) + + // TODO: should not need to invoke this manually + this.onDisconnect(new CustomEvent('connectionEnd', { + detail: connections[0] + })) + break + } + } + } + } + } +} diff --git a/src/connection-manager/latency-monitor.js b/src/connection-manager/latency-monitor.js deleted file mode 100644 index 374794c8..00000000 --- a/src/connection-manager/latency-monitor.js +++ /dev/null @@ -1,264 +0,0 @@ -// @ts-nocheck -'use strict' - -/** - * This code is based on `latency-monitor` (https://github.com/mlucool/latency-monitor) by `mlucool` (https://github.com/mlucool), available under Apache License 2.0 (https://github.com/mlucool/latency-monitor/blob/master/LICENSE) - */ - -const { EventEmitter } = require('events') -const VisibilityChangeEmitter = require('./visibility-change-emitter') -const debug = require('debug')('latency-monitor:LatencyMonitor') - -/** - * @typedef {Object} SummaryObject - * @property {number} events How many events were called - * @property {number} minMS What was the min time for a cb to be called - * @property {number} maxMS What was the max time for a cb to be called - * @property {number} avgMs What was the average time for a cb to be called - * @property {number} lengthMs How long this interval was in ms - * - * @typedef {Object} LatencyMonitorOptions - * @property {number} 
[latencyCheckIntervalMs=500] - How often to add a latency check event (ms) - * @property {number} [dataEmitIntervalMs=5000] - How often to summarize latency check events. null or 0 disables event firing - * @property {Function} [asyncTestFn] - What cb-style async function to use - * @property {number} [latencyRandomPercentage=5] - What percent (+/-) of latencyCheckIntervalMs should we randomly use? This helps avoid alignment to other events. - */ - -/** - * A class to monitor latency of any async function which works in a browser or node. This works by periodically calling - * the asyncTestFn and timing how long it takes the callback to be called. It can also periodically emit stats about this. - * This can be disabled and stats can be pulled via setting dataEmitIntervalMs = 0. - * - * @extends {EventEmitter} - * - * The default implementation is an event loop latency monitor. This works by firing periodic events into the event loop - * and timing how long it takes to get back. - * - * @example - * const monitor = new LatencyMonitor(); - * monitor.on('data', (summary) => console.log('Event Loop Latency: %O', summary)); - * - * @example - * const monitor = new LatencyMonitor({latencyCheckIntervalMs: 1000, dataEmitIntervalMs: 60000, asyncTestFn:ping}); - * monitor.on('data', (summary) => console.log('Ping Pong Latency: %O', summary)); - */ -class LatencyMonitor extends EventEmitter { - /** - * @class - * @param {LatencyMonitorOptions} [options] - */ - constructor ({ latencyCheckIntervalMs, dataEmitIntervalMs, asyncTestFn, latencyRandomPercentage } = {}) { - super() - const that = this - - // 0 isn't valid here, so its ok to use || - that.latencyCheckIntervalMs = latencyCheckIntervalMs || 500 // 0.5s - that.latencyRandomPercentage = latencyRandomPercentage || 10 - that._latecyCheckMultiply = 2 * (that.latencyRandomPercentage / 100.0) * that.latencyCheckIntervalMs - that._latecyCheckSubtract = that._latecyCheckMultiply / 2 - - that.dataEmitIntervalMs = (dataEmitIntervalMs === null || dataEmitIntervalMs === 0) - ? 
undefined - : dataEmitIntervalMs || 5 * 1000 // 5s - debug('latencyCheckIntervalMs: %s dataEmitIntervalMs: %s', - that.latencyCheckIntervalMs, that.dataEmitIntervalMs) - if (that.dataEmitIntervalMs) { - debug('Expecting ~%s events per summary', that.latencyCheckIntervalMs / that.dataEmitIntervalMs) - } else { - debug('Not emitting summaries') - } - - that.asyncTestFn = asyncTestFn // If there is no asyncFn, we measure latency - } - - start () { - // If process: use high resolution timer - if (globalThis.process && globalThis.process.hrtime) { // eslint-disable-line no-undef - debug('Using process.hrtime for timing') - this.now = globalThis.process.hrtime // eslint-disable-line no-undef - this.getDeltaMS = (startTime) => { - const hrtime = this.now(startTime) - return (hrtime[0] * 1000) + (hrtime[1] / 1000000) - } - // Let's try for a timer that only monotonically increases - } else if (typeof window !== 'undefined' && window.performance && window.performance.now) { - debug('Using performance.now for timing') - this.now = window.performance.now.bind(window.performance) - this.getDeltaMS = (startTime) => Math.round(this.now() - startTime) - } else { - debug('Using Date.now for timing') - this.now = Date.now - this.getDeltaMS = (startTime) => this.now() - startTime - } - - this._latencyData = this._initLatencyData() - - // We check for isBrowser because of browsers set max rates of timeouts when a page is hidden, - // so we fall back to another library - // See: http://stackoverflow.com/questions/6032429/chrome-timeouts-interval-suspended-in-background-tabs - if (isBrowser()) { - this._visibilityChangeEmitter = new VisibilityChangeEmitter() - - this._visibilityChangeEmitter.on('visibilityChange', (pageInFocus) => { - if (pageInFocus) { - this._startTimers() - } else { - this._emitSummary() - this._stopTimers() - } - }) - } - - if (!this._visibilityChangeEmitter || this._visibilityChangeEmitter.isVisible()) { - this._startTimers() - } - } - - stop () { - this._stopTimers() - } - - /** - * Start internal timers - * - * @private - */ - _startTimers () { - // Timer already started, ignore this - if (this._checkLatencyID) { - return - } - this._checkLatency() - if (this.dataEmitIntervalMs) { - this._emitIntervalID = setInterval(() => this._emitSummary(), this.dataEmitIntervalMs) - if (typeof this._emitIntervalID.unref === 'function') { - this._emitIntervalID.unref() // Doesn't block exit - } - } - } - - /** - * Stop internal timers - * - * @private - */ - _stopTimers () { - if (this._checkLatencyID) { - clearTimeout(this._checkLatencyID) - this._checkLatencyID = undefined - } - if (this._emitIntervalID) { - clearInterval(this._emitIntervalID) - this._emitIntervalID = undefined - } - } - - /** - * Emit summary only if there were events. It might not have any events if it was forced via a page hidden/show - * - * @private - */ - _emitSummary () { - const summary = this.getSummary() - if (summary.events > 0) { - this.emit('data', summary) - } - } - - /** - * Calling this function will end the collection period. If a timing event was already fired and somewhere in the queue, - * it will not count for this time period - * - * @returns {SummaryObject} - */ - getSummary () { - // We might want to adjust for the number of expected events - // Example: first 1 event it comes back, then such a long blocker that the next emit check comes - // Then this fires - looks like no latency!! 
- const latency = { - events: this._latencyData.events, - minMs: this._latencyData.minMs, - maxMs: this._latencyData.maxMs, - avgMs: this._latencyData.events - ? this._latencyData.totalMs / this._latencyData.events - : Number.POSITIVE_INFINITY, - lengthMs: this.getDeltaMS(this._latencyData.startTime) - } - this._latencyData = this._initLatencyData() // Clear - - debug('Summary: %O', latency) - return latency - } - - /** - * Randomly calls an async fn every roughly latencyCheckIntervalMs (plus some randomness). If no async fn is found, - * it will simply report on event loop latency. - * - * @private - */ - _checkLatency () { - const that = this - // Randomness is needed to avoid alignment by accident to regular things in the event loop - const randomness = (Math.random() * that._latecyCheckMultiply) - that._latecyCheckSubtract - - // We use this to ensure that in case some overlap somehow, we don't take the wrong startTime/offset - const localData = { - deltaOffset: Math.ceil(that.latencyCheckIntervalMs + randomness), - startTime: that.now() - } - - const cb = () => { - // We are already stopped, ignore this datapoint - if (!this._checkLatencyID) { - return - } - const deltaMS = that.getDeltaMS(localData.startTime) - localData.deltaOffset - that._checkLatency() // Start again ASAP - - // Add the data point. If this gets complex, refactor it - that._latencyData.events++ - that._latencyData.minMs = Math.min(that._latencyData.minMs, deltaMS) - that._latencyData.maxMs = Math.max(that._latencyData.maxMs, deltaMS) - that._latencyData.totalMs += deltaMS - debug('MS: %s Data: %O', deltaMS, that._latencyData) - } - debug('localData: %O', localData) - - this._checkLatencyID = setTimeout(() => { - // This gets rid of including event loop - if (that.asyncTestFn) { - // Clear timing related things - localData.deltaOffset = 0 - localData.startTime = that.now() - that.asyncTestFn(cb) - } else { - // setTimeout is not more accurate than 1ms, so this will ensure positive numbers. Add 1 to emitted data to remove. - // This is not the best, but for now it'll be just fine. This isn't meant to be sub ms accurate. - localData.deltaOffset -= 1 - // If there is no function to test, we mean check latency which is a special case that is really cb => cb() - // We avoid that for the few extra function all overheads. 
Also, we want to keep the timers different - cb() - } - }, localData.deltaOffset) - - if (typeof this._checkLatencyID.unref === 'function') { - this._checkLatencyID.unref() // Doesn't block exit - } - } - - _initLatencyData () { - return { - startTime: this.now(), - minMs: Number.POSITIVE_INFINITY, - maxMs: Number.NEGATIVE_INFINITY, - events: 0, - totalMs: 0 - } - } -} - -function isBrowser () { - return typeof window !== 'undefined' -} - -module.exports = LatencyMonitor diff --git a/src/connection-manager/latency-monitor.ts b/src/connection-manager/latency-monitor.ts new file mode 100644 index 00000000..bb7ce626 --- /dev/null +++ b/src/connection-manager/latency-monitor.ts @@ -0,0 +1,319 @@ +/** + * This code is based on `latency-monitor` (https://github.com/mlucool/latency-monitor) by `mlucool` (https://github.com/mlucool), available under Apache License 2.0 (https://github.com/mlucool/latency-monitor/blob/master/LICENSE) + */ + +import { CustomEvent, EventEmitter } from '@libp2p/interfaces' +import { VisibilityChangeEmitter } from './visibility-change-emitter.js' +import { logger } from '@libp2p/logger' + +const log = logger('libp2p:connection-manager:latency-monitor') + +export interface LatencyMonitorEvents { + 'data': CustomEvent +} + +export interface LatencyMonitorInit { + /** + * How often to add a latency check event (ms) + */ + latencyCheckIntervalMs?: number + + /** + * How often to summarize latency check events. null or 0 disables event firing + */ + dataEmitIntervalMs?: number + + /** + * What cb-style async function to use + */ + asyncTestFn?: (cb: () => void) => void + + /** + * What percent (+/-) of latencyCheckIntervalMs should we randomly use? This helps avoid alignment to other events. + */ + latencyRandomPercentage?: number +} + +export interface SummaryObject { + /** + * How many events were called + */ + events: number + + /** + * What was the min time for a cb to be called + */ + minMs: number + + /** + * What was the max time for a cb to be called + */ + maxMs: number + + /** + * What was the average time for a cb to be called + */ + avgMs: number + + /** + * How long this interval was in ms + */ + lengthMs: number +} + +interface LatencyData { + startTime: number + events: number + minMs: number + maxMs: number + totalMs: number +} + +/** + * A class to monitor latency of any async function which works in a browser or node. This works by periodically calling + * the asyncTestFn and timing how long it takes the callback to be called. It can also periodically emit stats about this. + * This can be disabled and stats can be pulled via setting dataEmitIntervalMs = 0. + * + * @extends {EventEmitter} + * + * The default implementation is an event loop latency monitor. This works by firing periodic events into the event loop + * and timing how long it takes to get back. 
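> Editor's note: after this conversion the class extends the `@libp2p/interfaces` `EventEmitter`, so the periodic summary is delivered as a `CustomEvent` whose `detail` carries the `SummaryObject` defined above. A minimal usage sketch (not part of the patch; the import path and option values are illustrative):

```js
import { LatencyMonitor } from './latency-monitor.js'

// check event-loop latency every second, summarise once a minute
const monitor = new LatencyMonitor({
  latencyCheckIntervalMs: 1000,
  dataEmitIntervalMs: 60000
})

// the summary arrives as a CustomEvent, so it is read from `evt.detail`
// rather than being passed directly to the listener
monitor.addEventListener('data', (evt) => {
  const { events, avgMs, maxMs } = evt.detail
  console.log('event loop latency: %d events, avg %d ms, max %d ms', events, avgMs, maxMs)
})

monitor.start()
```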
+ * + * @example + * const monitor = new LatencyMonitor(); + * monitor.on('data', (summary) => console.log('Event Loop Latency: %O', summary)); + * + * @example + * const monitor = new LatencyMonitor({latencyCheckIntervalMs: 1000, dataEmitIntervalMs: 60000, asyncTestFn:ping}); + * monitor.on('data', (summary) => console.log('Ping Pong Latency: %O', summary)); + */ +export class LatencyMonitor extends EventEmitter { + private readonly latencyCheckIntervalMs: number + private readonly latencyRandomPercentage: number + private readonly latencyCheckMultiply: number + private readonly latencyCheckSubtract: number + private readonly dataEmitIntervalMs?: number + private readonly asyncTestFn?: (cb: () => void) => void + + private readonly now: (num?: any) => any + private readonly getDeltaMS: (num: number) => number + private visibilityChangeEmitter?: VisibilityChangeEmitter + private latencyData: LatencyData + private checkLatencyID?: NodeJS.Timeout + private emitIntervalID?: NodeJS.Timeout + + constructor (init: LatencyMonitorInit = {}) { + super() + + const { latencyCheckIntervalMs, dataEmitIntervalMs, asyncTestFn, latencyRandomPercentage } = init + + // 0 isn't valid here, so its ok to use || + this.latencyCheckIntervalMs = latencyCheckIntervalMs ?? 500 // 0.5s + this.latencyRandomPercentage = latencyRandomPercentage ?? 10 + this.latencyCheckMultiply = 2 * (this.latencyRandomPercentage / 100.0) * this.latencyCheckIntervalMs + this.latencyCheckSubtract = this.latencyCheckMultiply / 2 + + this.dataEmitIntervalMs = (dataEmitIntervalMs === null || dataEmitIntervalMs === 0) + ? undefined + : dataEmitIntervalMs ?? 5 * 1000 // 5s + log('latencyCheckIntervalMs: %s dataEmitIntervalMs: %s', + this.latencyCheckIntervalMs, this.dataEmitIntervalMs) + if (this.dataEmitIntervalMs != null) { + log('Expecting ~%s events per summary', this.latencyCheckIntervalMs / this.dataEmitIntervalMs) + } else { + log('Not emitting summaries') + } + + this.asyncTestFn = asyncTestFn // If there is no asyncFn, we measure latency + + // If process: use high resolution timer + if (globalThis.process?.hrtime != null) { + log('Using process.hrtime for timing') + this.now = globalThis.process.hrtime // eslint-disable-line no-undef + this.getDeltaMS = (startTime) => { + const hrtime = this.now(startTime) + return (hrtime[0] * 1000) + (hrtime[1] / 1000000) + } + // Let's try for a timer that only monotonically increases + } else if (typeof window !== 'undefined' && window.performance?.now != null) { + log('Using performance.now for timing') + this.now = window.performance.now.bind(window.performance) + this.getDeltaMS = (startTime) => Math.round(this.now() - startTime) + } else { + log('Using Date.now for timing') + this.now = Date.now + this.getDeltaMS = (startTime) => this.now() - startTime + } + + this.latencyData = this.initLatencyData() + } + + start () { + // We check for isBrowser because of browsers set max rates of timeouts when a page is hidden, + // so we fall back to another library + // See: http://stackoverflow.com/questions/6032429/chrome-timeouts-interval-suspended-in-background-tabs + if (isBrowser()) { + this.visibilityChangeEmitter = new VisibilityChangeEmitter() + + this.visibilityChangeEmitter.addEventListener('visibilityChange', (evt) => { + const { detail: pageInFocus } = evt + + if (pageInFocus) { + this._startTimers() + } else { + this._emitSummary() + this._stopTimers() + } + }) + } + + if (this.visibilityChangeEmitter?.isVisible() === true) { + this._startTimers() + } + } + + stop () { + 
this._stopTimers() + } + + /** + * Start internal timers + * + * @private + */ + _startTimers () { + // Timer already started, ignore this + if (this.checkLatencyID != null) { + return + } + + this.checkLatency() + + if (this.dataEmitIntervalMs != null) { + this.emitIntervalID = setInterval(() => this._emitSummary(), this.dataEmitIntervalMs) + if (typeof this.emitIntervalID.unref === 'function') { + this.emitIntervalID.unref() // Doesn't block exit + } + } + } + + /** + * Stop internal timers + * + * @private + */ + _stopTimers () { + if (this.checkLatencyID != null) { + clearTimeout(this.checkLatencyID) + this.checkLatencyID = undefined + } + if (this.emitIntervalID != null) { + clearInterval(this.emitIntervalID) + this.emitIntervalID = undefined + } + } + + /** + * Emit summary only if there were events. It might not have any events if it was forced via a page hidden/show + * + * @private + */ + _emitSummary () { + const summary = this.getSummary() + if (summary.events > 0) { + this.dispatchEvent(new CustomEvent('data', { + detail: summary + })) + } + } + + /** + * Calling this function will end the collection period. If a timing event was already fired and somewhere in the queue, + * it will not count for this time period + */ + getSummary (): SummaryObject { + // We might want to adjust for the number of expected events + // Example: first 1 event it comes back, then such a long blocker that the next emit check comes + // Then this fires - looks like no latency!! + const latency = { + events: this.latencyData.events, + minMs: this.latencyData.minMs, + maxMs: this.latencyData.maxMs, + avgMs: this.latencyData.events > 0 + ? this.latencyData.totalMs / this.latencyData.events + : Number.POSITIVE_INFINITY, + lengthMs: this.getDeltaMS(this.latencyData.startTime) + } + this.latencyData = this.initLatencyData() // Clear + + log.trace('Summary: %O', latency) + return latency + } + + /** + * Randomly calls an async fn every roughly latencyCheckIntervalMs (plus some randomness). If no async fn is found, + * it will simply report on event loop latency. + */ + checkLatency () { + // Randomness is needed to avoid alignment by accident to regular things in the event loop + const randomness = (Math.random() * this.latencyCheckMultiply) - this.latencyCheckSubtract + + // We use this to ensure that in case some overlap somehow, we don't take the wrong startTime/offset + const localData = { + deltaOffset: Math.ceil(this.latencyCheckIntervalMs + randomness), + startTime: this.now() + } + + const cb = () => { + // We are already stopped, ignore this datapoint + if (this.checkLatencyID == null) { + return + } + const deltaMS = this.getDeltaMS(localData.startTime) - localData.deltaOffset + this.checkLatency() // Start again ASAP + + // Add the data point. If this gets complex, refactor it + this.latencyData.events++ + this.latencyData.minMs = Math.min(this.latencyData.minMs, deltaMS) + this.latencyData.maxMs = Math.max(this.latencyData.maxMs, deltaMS) + this.latencyData.totalMs += deltaMS + log.trace('MS: %s Data: %O', deltaMS, this.latencyData) + } + log.trace('localData: %O', localData) + + this.checkLatencyID = setTimeout(() => { + // This gets rid of including event loop + if (this.asyncTestFn != null) { + // Clear timing related things + localData.deltaOffset = 0 + localData.startTime = this.now() + this.asyncTestFn(cb) + } else { + // setTimeout is not more accurate than 1ms, so this will ensure positive numbers. Add 1 to emitted data to remove. 
+ // This is not the best, but for now it'll be just fine. This isn't meant to be sub ms accurate. + localData.deltaOffset -= 1 + // If there is no function to test, we mean check latency which is a special case that is really cb => cb() + // We avoid that for the few extra function all overheads. Also, we want to keep the timers different + cb() + } + }, localData.deltaOffset) + + if (typeof this.checkLatencyID.unref === 'function') { + this.checkLatencyID.unref() // Doesn't block exit + } + } + + initLatencyData (): LatencyData { + return { + startTime: this.now(), + minMs: Number.POSITIVE_INFINITY, + maxMs: Number.NEGATIVE_INFINITY, + events: 0, + totalMs: 0 + } + } +} + +function isBrowser () { + return typeof globalThis.window !== 'undefined' +} diff --git a/src/connection-manager/visibility-change-emitter.js b/src/connection-manager/visibility-change-emitter.ts similarity index 53% rename from src/connection-manager/visibility-change-emitter.js rename to src/connection-manager/visibility-change-emitter.ts index ebe5e7d0..0eddbf39 100644 --- a/src/connection-manager/visibility-change-emitter.js +++ b/src/connection-manager/visibility-change-emitter.ts @@ -1,14 +1,17 @@ -// @ts-nocheck -/* global document */ - /** * This code is based on `latency-monitor` (https://github.com/mlucool/latency-monitor) by `mlucool` (https://github.com/mlucool), available under Apache License 2.0 (https://github.com/mlucool/latency-monitor/blob/master/LICENSE) */ -'use strict' -const { EventEmitter } = require('events') +import { CustomEvent, EventEmitter } from '@libp2p/interfaces' +import { logger } from '@libp2p/logger' -const debug = require('debug')('latency-monitor:VisibilityChangeEmitter') +const log = logger('libp2p:connection-manager:latency-monitor:visibility-change-emitter') + +interface VisibilityChangeEmitterEvents { + 'visibilityChange': CustomEvent +} + +type Hidden = 'hidden' | 'mozHidden' | 'msHidden' | 'webkitHidden' /** * Listen to page visibility change events (i.e. when the page is focused / blurred) by an event emitter. @@ -32,20 +35,20 @@ const debug = require('debug')('latency-monitor:VisibilityChangeEmitter') * // To access the visibility state directly, call: * console.log('Am I focused now? ' + myVisibilityEmitter.isVisible()); */ -class VisibilityChangeEmitter extends EventEmitter { - /** - * Creates a VisibilityChangeEmitter - * - * @class - */ +export class VisibilityChangeEmitter extends EventEmitter { + private hidden: Hidden + private visibilityChange: string + constructor () { super() - if (typeof document === 'undefined') { - debug('This is not a browser, no "document" found. 
Stopping.') - return + + this.hidden = 'hidden' + this.visibilityChange = 'visibilityChange' + + if (globalThis.document != null) { + this._initializeVisibilityVarNames() + this._addVisibilityChangeListener() } - this._initializeVisibilityVarNames() - this._addVisibilityChangeListener() } /** @@ -58,23 +61,28 @@ class VisibilityChangeEmitter extends EventEmitter { * @private */ _initializeVisibilityVarNames () { - let hidden - let visibilityChange - if (typeof document.hidden !== 'undefined') { // Opera 12.10 and Firefox 18 and later support + let hidden: Hidden = 'hidden' + let visibilityChange = 'visibilitychange' + + if (typeof globalThis.document.hidden !== 'undefined') { // Opera 12.10 and Firefox 18 and later support hidden = 'hidden' visibilityChange = 'visibilitychange' - } else if (typeof document.mozHidden !== 'undefined') { + // @ts-expect-error mozHidden is a non-standard field name + } else if (typeof globalThis.document.mozHidden !== 'undefined') { hidden = 'mozHidden' visibilityChange = 'mozvisibilitychange' - } else if (typeof document.msHidden !== 'undefined') { + // @ts-expect-error msHidden is a non-standard field name + } else if (typeof globalThis.document.msHidden !== 'undefined') { hidden = 'msHidden' visibilityChange = 'msvisibilitychange' - } else if (typeof document.webkitHidden !== 'undefined') { + // @ts-expect-error webkitHidden is a non-standard field name + } else if (typeof globalThis.document.webkitHidden !== 'undefined') { hidden = 'webkitHidden' visibilityChange = 'webkitvisibilitychange' } - this._hidden = hidden - this._visibilityChange = visibilityChange + + this.hidden = hidden + this.visibilityChange = visibilityChange } /** @@ -84,27 +92,27 @@ class VisibilityChangeEmitter extends EventEmitter { * @private */ _addVisibilityChangeListener () { - if (typeof document.addEventListener === 'undefined' || - typeof document[this._hidden] === 'undefined') { - debug('Checking page visibility requires a browser that supports the Page Visibility API.') + // @ts-expect-error cannot index document object with string key + if (typeof globalThis.document.addEventListener === 'undefined' || typeof document[this.hidden] === 'undefined') { + log('Checking page visibility requires a browser that supports the Page Visibility API.') } else { // Handle page visibility change - document.addEventListener(this._visibilityChange, this._handleVisibilityChange.bind(this), false) + globalThis.document.addEventListener(this.visibilityChange, this._handleVisibilityChange.bind(this), false) } } /** * The function returns ```true``` if the page is visible or ```false``` if the page is not visible and * ```undefined``` if the page visibility API is not supported by the browser. - * - * @returns {boolean | void} whether the page is now visible or not (undefined is unknown) */ isVisible () { - if (this._hidden === undefined || document[this._hidden] === undefined) { + // @ts-expect-error cannot index document object with string key + if (this.hidden === undefined || document[this.hidden] === undefined) { return undefined } - return !document[this._hidden] + // @ts-expect-error cannot index document object with string key + return document[this.hidden] == null } /** @@ -115,11 +123,13 @@ class VisibilityChangeEmitter extends EventEmitter { * @private */ _handleVisibilityChange () { - const visible = !document[this._hidden] - debug(visible ? 
'Page Visible' : 'Page Hidden') + // @ts-expect-error cannot index document object with string key + const visible = globalThis.document[this.hidden] === false + log(visible ? 'Page Visible' : 'Page Hidden') + // Emit the event - this.emit('visibilityChange', visible) + this.dispatchEvent(new CustomEvent('visibilityChange', { + detail: visible + })) } } - -module.exports = VisibilityChangeEmitter diff --git a/src/constants.js b/src/constants.js deleted file mode 100644 index 76a2abd8..00000000 --- a/src/constants.js +++ /dev/null @@ -1,18 +0,0 @@ -'use strict' - -module.exports = { - DIAL_TIMEOUT: 30e3, // How long in ms a dial attempt is allowed to take - MAX_PARALLEL_DIALS: 100, // Maximum allowed concurrent dials - MAX_PER_PEER_DIALS: 4, // Allowed parallel dials per DialRequest - MAX_ADDRS_TO_DIAL: 25, // Maximum number of allowed addresses to attempt to dial - METRICS: { - computeThrottleMaxQueueSize: 1000, - computeThrottleTimeout: 2000, - movingAverageIntervals: [ - 60 * 1000, // 1 minute - 5 * 60 * 1000, // 5 minutes - 15 * 60 * 1000 // 15 minutes - ], - maxOldPeersRetention: 50 - } -} diff --git a/src/constants.ts b/src/constants.ts new file mode 100644 index 00000000..fcb7e943 --- /dev/null +++ b/src/constants.ts @@ -0,0 +1,31 @@ + +/** + * How long in ms a dial attempt is allowed to take + */ +export const DIAL_TIMEOUT = 30e3 + +/** + * Maximum allowed concurrent dials + */ +export const MAX_PARALLEL_DIALS = 100 + +/** + * Allowed parallel dials per DialRequest + */ +export const MAX_PER_PEER_DIALS = 4 + +/** + * Maximum number of allowed addresses to attempt to dial + */ +export const MAX_ADDRS_TO_DIAL = 25 + +export const METRICS = { + computeThrottleMaxQueueSize: 1000, + computeThrottleTimeout: 2000, + movingAverageIntervals: [ + 60 * 1000, // 1 minute + 5 * 60 * 1000, // 5 minutes + 15 * 60 * 1000 // 15 minutes + ], + maxOldPeersRetention: 50 +} diff --git a/src/content-routing/index.js b/src/content-routing/index.js deleted file mode 100644 index df04225d..00000000 --- a/src/content-routing/index.js +++ /dev/null @@ -1,163 +0,0 @@ -'use strict' - -const errCode = require('err-code') -const { messages, codes } = require('../errors') -const { - storeAddresses, - uniquePeers, - requirePeers, - maybeLimitSource -} = require('./utils') -const drain = require('it-drain') -const merge = require('it-merge') -const { pipe } = require('it-pipe') -const { DHTContentRouting } = require('../dht/dht-content-routing') - -/** - * @typedef {import('peer-id')} PeerId - * @typedef {import('multiaddr').Multiaddr} Multiaddr - * @typedef {import('multiformats/cid').CID} CID - * @typedef {import('libp2p-interfaces/src/content-routing/types').ContentRouting} ContentRoutingModule - */ - -/** - * @typedef {Object} GetData - * @property {PeerId} from - * @property {Uint8Array} val - */ - -class ContentRouting { - /** - * @class - * @param {import('..')} libp2p - */ - constructor (libp2p) { - this.libp2p = libp2p - /** @type {ContentRoutingModule[]} */ - this.routers = libp2p._modules.contentRouting || [] - this.dht = libp2p._dht - - // If we have the dht, add it to the available content routers - if (this.dht && libp2p._config.dht.enabled) { - this.routers.push(new DHTContentRouting(this.dht)) - } - } - - /** - * Iterates over all content routers in parallel to find providers of the given key. 
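> Editor's note: the old `constants.js` exported a single object via `module.exports`; the replacement `constants.ts` above uses named exports, so call sites now import only the values they need. A minimal, hypothetical consumer (the `.js`-suffixed ESM import specifier follows the style used throughout the converted sources):

```js
// named exports replace the single `module.exports` object of the old constants.js
import { DIAL_TIMEOUT, MAX_PARALLEL_DIALS } from './constants.js'

console.log('dials time out after %d ms, up to %d in parallel', DIAL_TIMEOUT, MAX_PARALLEL_DIALS)
```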
- * - * @param {CID} key - The CID key of the content to find - * @param {object} [options] - * @param {number} [options.timeout] - How long the query should run - * @param {number} [options.maxNumProviders] - maximum number of providers to find - * @returns {AsyncIterable<{ id: PeerId, multiaddrs: Multiaddr[] }>} - */ - async * findProviders (key, options = {}) { - if (!this.routers.length) { - throw errCode(new Error('No content this.routers available'), codes.ERR_NO_ROUTERS_AVAILABLE) - } - - yield * pipe( - merge( - ...this.routers.map(router => router.findProviders(key, options)) - ), - (source) => storeAddresses(source, this.libp2p.peerStore), - (source) => uniquePeers(source), - (source) => maybeLimitSource(source, options.maxNumProviders), - (source) => requirePeers(source) - ) - } - - /** - * Iterates over all content routers in parallel to notify it is - * a provider of the given key. - * - * @param {CID} key - The CID key of the content to find - * @returns {Promise} - */ - async provide (key) { - if (!this.routers.length) { - throw errCode(new Error('No content routers available'), codes.ERR_NO_ROUTERS_AVAILABLE) - } - - await Promise.all(this.routers.map((router) => router.provide(key))) - } - - /** - * Store the given key/value pair in the DHT. - * - * @param {Uint8Array} key - * @param {Uint8Array} value - * @param {Object} [options] - put options - * @param {number} [options.minPeers] - minimum number of peers required to successfully put - * @returns {Promise} - */ - async put (key, value, options) { - if (!this.libp2p.isStarted() || !this.dht.isStarted) { - throw errCode(new Error(messages.NOT_STARTED_YET), codes.DHT_NOT_STARTED) - } - - await drain(this.dht.put(key, value, options)) - } - - /** - * Get the value to the given key. - * Times out after 1 minute by default. - * - * @param {Uint8Array} key - * @param {Object} [options] - get options - * @param {number} [options.timeout] - optional timeout (default: 60000) - * @returns {Promise} - */ - async get (key, options) { - if (!this.libp2p.isStarted() || !this.dht.isStarted) { - throw errCode(new Error(messages.NOT_STARTED_YET), codes.DHT_NOT_STARTED) - } - - for await (const event of this.dht.get(key, options)) { - if (event.name === 'VALUE') { - return { from: event.peerId, val: event.value } - } - } - - throw errCode(new Error(messages.NOT_FOUND), codes.ERR_NOT_FOUND) - } - - /** - * Get the `n` values to the given key without sorting. 
- * - * @param {Uint8Array} key - * @param {number} nVals - * @param {Object} [options] - get options - * @param {number} [options.timeout] - optional timeout (default: 60000) - */ - async * getMany (key, nVals, options) { // eslint-disable-line require-await - if (!this.libp2p.isStarted() || !this.dht.isStarted) { - throw errCode(new Error(messages.NOT_STARTED_YET), codes.DHT_NOT_STARTED) - } - - if (!nVals) { - return - } - - let gotValues = 0 - - for await (const event of this.dht.get(key, options)) { - if (event.name === 'VALUE') { - yield { from: event.peerId, val: event.value } - - gotValues++ - - if (gotValues === nVals) { - break - } - } - } - - if (gotValues === 0) { - throw errCode(new Error(messages.NOT_FOUND), codes.ERR_NOT_FOUND) - } - } -} - -module.exports = ContentRouting diff --git a/src/content-routing/index.ts b/src/content-routing/index.ts new file mode 100644 index 00000000..a52450c9 --- /dev/null +++ b/src/content-routing/index.ts @@ -0,0 +1,143 @@ +import errCode from 'err-code' +import { messages, codes } from '../errors.js' +import { + storeAddresses, + uniquePeers, + requirePeers +} from './utils.js' +import drain from 'it-drain' +import merge from 'it-merge' +import { pipe } from 'it-pipe' +import type { ContentRouting } from '@libp2p/interfaces/content-routing' +import type { AbortOptions, Startable } from '@libp2p/interfaces' +import type { CID } from 'multiformats/cid' +import type { Components } from '@libp2p/interfaces/components' + +export interface CompoundContentRoutingInit { + routers: ContentRouting[] +} + +export class CompoundContentRouting implements ContentRouting, Startable { + private readonly routers: ContentRouting[] + private started: boolean + private readonly components: Components + + constructor (components: Components, init: CompoundContentRoutingInit) { + this.routers = init.routers ?? [] + this.started = false + this.components = components + } + + isStarted () { + return this.started + } + + async start () { + this.started = true + } + + async stop () { + this.started = false + } + + /** + * Iterates over all content routers in parallel to find providers of the given key + */ + async * findProviders (key: CID, options: AbortOptions = {}) { + if (this.routers.length === 0) { + throw errCode(new Error('No content this.routers available'), codes.ERR_NO_ROUTERS_AVAILABLE) + } + + yield * pipe( + merge( + ...this.routers.map(router => router.findProviders(key, options)) + ), + (source) => storeAddresses(source, this.components.getPeerStore()), + (source) => uniquePeers(source), + (source) => requirePeers(source) + ) + } + + /** + * Iterates over all content routers in parallel to notify it is + * a provider of the given key + */ + async provide (key: CID, options: AbortOptions = {}) { + if (this.routers.length === 0) { + throw errCode(new Error('No content routers available'), codes.ERR_NO_ROUTERS_AVAILABLE) + } + + await Promise.all(this.routers.map(async (router) => await router.provide(key, options))) + } + + /** + * Store the given key/value pair in the available content routings + */ + async put (key: Uint8Array, value: Uint8Array, options?: AbortOptions) { + if (!this.isStarted()) { + throw errCode(new Error(messages.NOT_STARTED_YET), codes.DHT_NOT_STARTED) + } + + const dht = this.components.getDHT() + + if (dht != null) { + await drain(dht.put(key, value, options)) + } + } + + /** + * Get the value to the given key. + * Times out after 1 minute by default. 
+ */ + async get (key: Uint8Array, options?: AbortOptions): Promise { + if (!this.isStarted()) { + throw errCode(new Error(messages.NOT_STARTED_YET), codes.DHT_NOT_STARTED) + } + + const dht = this.components.getDHT() + + if (dht != null) { + for await (const event of dht.get(key, options)) { + if (event.name === 'VALUE') { + return event.value + } + } + } + + throw errCode(new Error(messages.NOT_FOUND), codes.ERR_NOT_FOUND) + } + + /** + * Get the `n` values to the given key without sorting + */ + async * getMany (key: Uint8Array, nVals: number, options: AbortOptions) { // eslint-disable-line require-await + if (!this.isStarted()) { + throw errCode(new Error(messages.NOT_STARTED_YET), codes.DHT_NOT_STARTED) + } + + if (nVals == null || nVals === 0) { + return + } + + let gotValues = 0 + const dht = this.components.getDHT() + + if (dht != null) { + for await (const event of dht.get(key, options)) { + if (event.name === 'VALUE') { + yield { from: event.from, val: event.value } + + gotValues++ + + if (gotValues === nVals) { + break + } + } + } + } + + if (gotValues === 0) { + throw errCode(new Error(messages.NOT_FOUND), codes.ERR_NOT_FOUND) + } + } +} diff --git a/src/content-routing/utils.js b/src/content-routing/utils.js deleted file mode 100644 index adcf8f2c..00000000 --- a/src/content-routing/utils.js +++ /dev/null @@ -1,89 +0,0 @@ -'use strict' - -const errCode = require('err-code') -const filter = require('it-filter') -const map = require('it-map') -const take = require('it-take') - -/** - * @typedef {import('peer-id')} PeerId - * @typedef {import('multiaddr').Multiaddr} Multiaddr - */ - -/** - * Store the multiaddrs from every peer in the passed peer store - * - * @param {AsyncIterable<{ id: PeerId, multiaddrs: Multiaddr[] }>} source - * @param {import('../peer-store/types').PeerStore} peerStore - */ -async function * storeAddresses (source, peerStore) { - yield * map(source, async (peer) => { - // ensure we have the addresses for a given peer - await peerStore.addressBook.add(peer.id, peer.multiaddrs) - - return peer - }) -} - -/** - * Filter peers by unique peer id - * - * @param {AsyncIterable<{ id: PeerId, multiaddrs: Multiaddr[] }>} source - */ -function uniquePeers (source) { - /** @type Set */ - const seen = new Set() - - return filter(source, (peer) => { - // dedupe by peer id - if (seen.has(peer.id.toString())) { - return false - } - - seen.add(peer.id.toString()) - - return true - }) -} - -/** - * Require at least `min` peers to be yielded from `source` - * - * @param {AsyncIterable<{ id: PeerId, multiaddrs: Multiaddr[] }>} source - * @param {number} min - */ -async function * requirePeers (source, min = 1) { - let seen = 0 - - for await (const peer of source) { - seen++ - - yield peer - } - - if (seen < min) { - throw errCode(new Error('not found'), 'NOT_FOUND') - } -} - -/** - * If `max` is passed, only take that number of peers from the source - * otherwise take all the peers - * - * @param {AsyncIterable<{ id: PeerId, multiaddrs: Multiaddr[] }>} source - * @param {number} [max] - */ -function maybeLimitSource (source, max) { - if (max) { - return take(source, max) - } - - return source -} - -module.exports = { - storeAddresses, - uniquePeers, - requirePeers, - maybeLimitSource -} diff --git a/src/content-routing/utils.ts b/src/content-routing/utils.ts new file mode 100644 index 00000000..a4ca4d15 --- /dev/null +++ b/src/content-routing/utils.ts @@ -0,0 +1,54 @@ +import errCode from 'err-code' +import filter from 'it-filter' +import map from 'it-map' +import type { 
Source } from 'it-stream-types' +import type { PeerInfo } from '@libp2p/interfaces/peer-info' +import type { PeerStore } from '@libp2p/interfaces/peer-store' + +/** + * Store the multiaddrs from every peer in the passed peer store + */ +export async function * storeAddresses (source: Source, peerStore: PeerStore) { + yield * map(source, async (peer) => { + // ensure we have the addresses for a given peer + await peerStore.addressBook.add(peer.id, peer.multiaddrs) + + return peer + }) +} + +/** + * Filter peers by unique peer id + */ +export function uniquePeers (source: Source) { + /** @type Set */ + const seen = new Set() + + return filter(source, (peer) => { + // dedupe by peer id + if (seen.has(peer.id.toString())) { + return false + } + + seen.add(peer.id.toString()) + + return true + }) +} + +/** + * Require at least `min` peers to be yielded from `source` + */ +export async function * requirePeers (source: Source, min: number = 1) { + let seen = 0 + + for await (const peer of source) { + seen++ + + yield peer + } + + if (seen < min) { + throw errCode(new Error('not found'), 'NOT_FOUND') + } +} diff --git a/src/dht/dht-content-routing.js b/src/dht/dht-content-routing.js deleted file mode 100644 index ead668f3..00000000 --- a/src/dht/dht-content-routing.js +++ /dev/null @@ -1,44 +0,0 @@ -'use strict' - -const drain = require('it-drain') - -/** - * @typedef {import('peer-id')} PeerId - * @typedef {import('libp2p-interfaces/src/content-routing/types').ContentRouting} ContentRoutingModule - * @typedef {import('multiformats/cid').CID} CID - */ - -/** - * Wrapper class to convert events into returned values - * - * @implements {ContentRoutingModule} - */ -class DHTContentRouting { - /** - * @param {import('libp2p-kad-dht').DHT} dht - */ - constructor (dht) { - this._dht = dht - } - - /** - * @param {CID} cid - */ - async provide (cid) { - await drain(this._dht.provide(cid)) - } - - /** - * @param {CID} cid - * @param {*} options - */ - async * findProviders (cid, options) { - for await (const event of this._dht.findProviders(cid, options)) { - if (event.name === 'PROVIDER') { - yield * event.providers - } - } - } -} - -module.exports = { DHTContentRouting } diff --git a/src/dht/dht-content-routing.ts b/src/dht/dht-content-routing.ts new file mode 100644 index 00000000..12552d47 --- /dev/null +++ b/src/dht/dht-content-routing.ts @@ -0,0 +1,43 @@ +import drain from 'it-drain' +import errCode from 'err-code' +import type { DHT } from '@libp2p/interfaces/dht' +import type { ContentRouting } from '@libp2p/interfaces/content-routing' +import type { CID } from 'multiformats/cid' +import type { AbortOptions } from '@libp2p/interfaces' + +/** + * Wrapper class to convert events into returned values + */ +export class DHTContentRouting implements ContentRouting { + private readonly dht: DHT + + constructor (dht: DHT) { + this.dht = dht + } + + async provide (cid: CID) { + await drain(this.dht.provide(cid)) + } + + async * findProviders (cid: CID, options: AbortOptions = {}) { + for await (const event of this.dht.findProviders(cid, options)) { + if (event.name === 'PROVIDER') { + yield * event.providers + } + } + } + + async put (key: Uint8Array, value: Uint8Array, options?: AbortOptions): Promise { + await drain(this.dht.put(key, value, options)) + } + + async get (key: Uint8Array, options?: AbortOptions): Promise { + for await (const event of this.dht.get(key, options)) { + if (event.name === 'VALUE') { + return event.value + } + } + + throw errCode(new Error('Not found'), 'ERR_NOT_FOUND') + } +} 
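> Editor's note: taken together, the new files above give the routing stack its shape: `DHTContentRouting` adapts the event-based DHT API to the `ContentRouting` interface, and `CompoundContentRouting` fans queries out over every configured router. A sketch of how they might be wired up (hypothetical: `components`, `dht` and `cid` stand in for an initialised `Components` instance, a DHT implementation and a CID, none of which are shown in this patch):

```js
import { DHTContentRouting } from './dht/dht-content-routing.js'
import { CompoundContentRouting } from './content-routing/index.js'

// adapt the DHT to ContentRouting and combine it with any other routers
const contentRouting = new CompoundContentRouting(components, {
  routers: [new DHTContentRouting(dht)]
})

await contentRouting.start()

// advertise a CID, then look up who else provides it
await contentRouting.provide(cid)

for await (const provider of contentRouting.findProviders(cid)) {
  console.log('found provider %s', provider.id.toString())
}
```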
diff --git a/src/dht/dht-peer-routing.js b/src/dht/dht-peer-routing.js deleted file mode 100644 index 762abc80..00000000 --- a/src/dht/dht-peer-routing.js +++ /dev/null @@ -1,51 +0,0 @@ -'use strict' - -const errCode = require('err-code') -const { messages, codes } = require('../errors') - -/** - * @typedef {import('peer-id')} PeerId - * @typedef {import('libp2p-interfaces/src/peer-routing/types').PeerRouting} PeerRoutingModule - */ - -/** - * Wrapper class to convert events into returned values - * - * @implements {PeerRoutingModule} - */ -class DHTPeerRouting { - /** - * @param {import('libp2p-kad-dht').DHT} dht - */ - constructor (dht) { - this._dht = dht - } - - /** - * @param {PeerId} peerId - * @param {any} options - */ - async findPeer (peerId, options = {}) { - for await (const event of this._dht.findPeer(peerId, options)) { - if (event.name === 'FINAL_PEER') { - return event.peer - } - } - - throw errCode(new Error(messages.NOT_FOUND), codes.ERR_NOT_FOUND) - } - - /** - * @param {Uint8Array} key - * @param {any} options - */ - async * getClosestPeers (key, options = {}) { - for await (const event of this._dht.getClosestPeers(key, options)) { - if (event.name === 'PEER_RESPONSE') { - yield * event.closer - } - } - } -} - -module.exports = { DHTPeerRouting } diff --git a/src/dht/dht-peer-routing.ts b/src/dht/dht-peer-routing.ts new file mode 100644 index 00000000..45950cde --- /dev/null +++ b/src/dht/dht-peer-routing.ts @@ -0,0 +1,35 @@ +import errCode from 'err-code' +import { messages, codes } from '../errors.js' +import type { PeerRouting } from '@libp2p/interfaces/peer-routing' +import type { DHT } from '@libp2p/interfaces/dht' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { AbortOptions } from '@libp2p/interfaces' + +/** + * Wrapper class to convert events into returned values + */ +export class DHTPeerRouting implements PeerRouting { + private readonly dht: DHT + + constructor (dht: DHT) { + this.dht = dht + } + + async findPeer (peerId: PeerId, options: AbortOptions = {}) { + for await (const event of this.dht.findPeer(peerId, options)) { + if (event.name === 'FINAL_PEER') { + return event.peer + } + } + + throw errCode(new Error(messages.NOT_FOUND), codes.ERR_NOT_FOUND) + } + + async * getClosestPeers (key: Uint8Array, options: AbortOptions = {}) { + for await (const event of this.dht.getClosestPeers(key, options)) { + if (event.name === 'PEER_RESPONSE') { + yield * event.closer + } + } + } +} diff --git a/src/dialer/auto-dialer.ts b/src/dialer/auto-dialer.ts new file mode 100644 index 00000000..5674a94d --- /dev/null +++ b/src/dialer/auto-dialer.ts @@ -0,0 +1,40 @@ +import type { PeerInfo } from '@libp2p/interfaces/peer-info' +import { logger } from '@libp2p/logger' +import type { Components } from '@libp2p/interfaces/components' + +const log = logger('libp2p:dialer:auto-dialer') + +export interface AutoDialerInit { + enabled: boolean + minConnections: number +} + +export class AutoDialer { + private readonly components: Components + private readonly enabled: boolean + private readonly minConnections: number + + constructor (components: Components, init: AutoDialerInit) { + this.components = components + this.enabled = init.enabled + this.minConnections = init.minConnections + } + + public handle (evt: CustomEvent) { + const { detail: peer } = evt + + // If auto dialing is on and we have no connection to the peer, check if we should dial + if (this.enabled && this.components.getConnectionManager().getConnection(peer.id) == null) { + const 
minConnections = this.minConnections ?? 0 + + if (minConnections > this.components.getConnectionManager().getConnectionList().length) { + log('auto-dialing discovered peer %p', peer.id) + + void this.components.getDialer().dial(peer.id) + .catch(err => { + log.error('could not connect to discovered peer %p with %o', peer.id, err) + }) + } + } + } +} diff --git a/src/dialer/dial-request.js b/src/dialer/dial-request.ts similarity index 51% rename from src/dialer/dial-request.js rename to src/dialer/dial-request.ts index 810cdfee..23c007a4 100644 --- a/src/dialer/dial-request.js +++ b/src/dialer/dial-request.ts @@ -1,91 +1,102 @@ -'use strict' +import errCode from 'err-code' +import { anySignal } from 'any-signal' +import FIFO from 'p-fifo' +// @ts-expect-error setMaxListeners is missing from the node 16 types +import { setMaxListeners } from 'events' +import { codes } from '../errors.js' +import { logger } from '@libp2p/logger' +import type { Multiaddr } from '@multiformats/multiaddr' +import type { Connection } from '@libp2p/interfaces/connection' +import type { AbortOptions } from '@libp2p/interfaces' +import type { DefaultDialer } from './index.js' -const errCode = require('err-code') -const { anySignal } = require('any-signal') -// @ts-ignore p-fifo does not export types -const FIFO = require('p-fifo') -const pAny = require('p-any') -// @ts-expect-error setMaxListeners is missing from the types -const { setMaxListeners } = require('events') -const { codes } = require('../errors') +const log = logger('libp2p:dialer:dial-request') -/** - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - * @typedef {import('./')} Dialer - * @typedef {import('multiaddr').Multiaddr} Multiaddr - */ +export interface DialAction { + (m: Multiaddr, options: AbortOptions): Promise +} -/** - * @typedef {Object} DialOptions - * @property {AbortSignal} signal - * - * @typedef {Object} DialRequestOptions - * @property {Multiaddr[]} addrs - * @property {(m: Multiaddr, options: DialOptions) => Promise} dialAction - * @property {Dialer} dialer - */ +export interface DialRequestOptions { + addrs: Multiaddr[] + dialAction: DialAction + dialer: DefaultDialer +} + +export class DialRequest { + private readonly addrs: Multiaddr[] + private readonly dialer: DefaultDialer + private readonly dialAction: DialAction -class DialRequest { /** * Manages running the `dialAction` on multiple provided `addrs` in parallel * up to a maximum determined by the number of tokens returned * from `dialer.getTokens`. Once a DialRequest is created, it can be * started using `DialRequest.run(options)`. Once a single dial has succeeded, * all other dials in the request will be cancelled. 
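> Editor's note: the class documentation above describes the token-bounded fan-out. Roughly, the dialer drives it as sketched below (a simplified view of what `_createPendingDial` in `dialer/index.ts` does; `addrs`, `dialer`, `transportManager` and `signal` are assumed to exist):

```js
const dialRequest = new DialRequest({
  addrs,        // every Multiaddr known for one peer
  dialer,       // supplies getTokens()/releaseToken() to cap parallel attempts
  dialAction: async (addr, options) => await transportManager.dial(addr, options)
})

// resolves with the first Connection that upgrades successfully;
// the remaining in-flight dials are aborted
const connection = await dialRequest.run({ signal })
```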
- * - * @class - * @param {DialRequestOptions} options */ - constructor ({ - addrs, - dialAction, - dialer - }) { + constructor (options: DialRequestOptions) { + const { + addrs, + dialAction, + dialer + } = options + this.addrs = addrs this.dialer = dialer this.dialAction = dialAction } - /** - * @async - * @param {object} [options] - * @param {AbortSignal} [options.signal] - An AbortController signal - * @returns {Promise} - */ - async run (options = {}) { + async run (options: AbortOptions = {}): Promise { const tokens = this.dialer.getTokens(this.addrs.length) + // If no tokens are available, throw if (tokens.length < 1) { throw errCode(new Error('No dial tokens available'), codes.ERR_NO_DIAL_TOKENS) } - const tokenHolder = new FIFO() - tokens.forEach(token => tokenHolder.push(token)) + const tokenHolder = new FIFO() + + for (const token of tokens) { + void tokenHolder.push(token).catch(err => { + log.error(err) + }) + } + const dialAbortControllers = this.addrs.map(() => { const controller = new AbortController() try { // fails on node < 15.4 - setMaxListeners && setMaxListeners(Infinity, controller.signal) + setMaxListeners?.(Infinity, controller.signal) } catch {} return controller }) + + if (options.signal != null) { + try { + // fails on node < 15.4 + setMaxListeners?.(Infinity, options.signal) + } catch {} + } + let completedDials = 0 try { - return await pAny(this.addrs.map(async (addr, i) => { + return await Promise.any(this.addrs.map(async (addr, i) => { const token = await tokenHolder.shift() // get token let conn try { const signal = dialAbortControllers[i].signal - conn = await this.dialAction(addr, { ...options, signal: options.signal ? anySignal([signal, options.signal]) : signal }) + conn = await this.dialAction(addr, { ...options, signal: (options.signal != null) ? 
anySignal([signal, options.signal]) : signal }) // Remove the successful AbortController so it is not aborted dialAbortControllers.splice(i, 1) } finally { completedDials++ // If we have more or equal dials remaining than tokens, recycle the token, otherwise release it if (this.addrs.length - completedDials >= tokens.length) { - tokenHolder.push(token) + void tokenHolder.push(token).catch(err => { + log.error(err) + }) } else { this.dialer.releaseToken(tokens.splice(tokens.indexOf(token), 1)[0]) } @@ -99,5 +110,3 @@ class DialRequest { } } } - -module.exports = DialRequest diff --git a/src/dialer/index.js b/src/dialer/index.js deleted file mode 100644 index f2e82cd3..00000000 --- a/src/dialer/index.js +++ /dev/null @@ -1,376 +0,0 @@ -'use strict' - -const debug = require('debug') -const all = require('it-all') -const filter = require('it-filter') -const { pipe } = require('it-pipe') -const log = Object.assign(debug('libp2p:dialer'), { - error: debug('libp2p:dialer:err') -}) -const errCode = require('err-code') -const { Multiaddr } = require('multiaddr') -const { TimeoutController } = require('timeout-abort-controller') -const { AbortError } = require('abortable-iterator') -const { anySignal } = require('any-signal') -// @ts-expect-error setMaxListeners is missing from the types -const { setMaxListeners } = require('events') -const DialRequest = require('./dial-request') -const { publicAddressesFirst } = require('libp2p-utils/src/address-sort') -const getPeer = require('../get-peer') -const trackedMap = require('../metrics/tracked-map') -const { codes } = require('../errors') -const { - DIAL_TIMEOUT, - MAX_PARALLEL_DIALS, - MAX_PER_PEER_DIALS, - MAX_ADDRS_TO_DIAL -} = require('../constants') - -const METRICS_COMPONENT = 'dialler' -const METRICS_PENDING_DIALS = 'pending-dials' -const METRICS_PENDING_DIAL_TARGETS = 'pending-dial-targets' - -/** - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - * @typedef {import('peer-id')} PeerId - * @typedef {import('../peer-store/types').PeerStore} PeerStore - * @typedef {import('../peer-store/types').Address} Address - * @typedef {import('../transport-manager')} TransportManager - * @typedef {import('../types').ConnectionGater} ConnectionGater - */ - -/** - * @typedef {Object} DialerProperties - * @property {PeerStore} peerStore - * @property {TransportManager} transportManager - * @property {ConnectionGater} connectionGater - * - * @typedef {(addr:Multiaddr) => Promise} Resolver - * - * @typedef {Object} DialerOptions - * @property {(addresses: Address[]) => Address[]} [options.addressSorter = publicAddressesFirst] - Sort the known addresses of a peer before trying to dial. - * @property {number} [maxParallelDials = MAX_PARALLEL_DIALS] - Number of max concurrent dials. - * @property {number} [maxAddrsToDial = MAX_ADDRS_TO_DIAL] - Number of max addresses to dial for a given peer. - * @property {number} [maxDialsPerPeer = MAX_PER_PEER_DIALS] - Number of max concurrent dials per peer. - * @property {number} [dialTimeout = DIAL_TIMEOUT] - How long a dial attempt is allowed to take. 
- * @property {Record} [resolvers = {}] - multiaddr resolvers to use when dialing - * @property {import('../metrics')} [metrics] - * - * @typedef DialTarget - * @property {string} id - * @property {Multiaddr[]} addrs - * - * @typedef PendingDial - * @property {import('./dial-request')} dialRequest - * @property {import('timeout-abort-controller').TimeoutController} controller - * @property {Promise} promise - * @property {function():void} destroy - */ - -class Dialer { - /** - * @class - * @param {DialerProperties & DialerOptions} options - */ - constructor ({ - transportManager, - peerStore, - connectionGater, - addressSorter = publicAddressesFirst, - maxParallelDials = MAX_PARALLEL_DIALS, - maxAddrsToDial = MAX_ADDRS_TO_DIAL, - dialTimeout = DIAL_TIMEOUT, - maxDialsPerPeer = MAX_PER_PEER_DIALS, - resolvers = {}, - metrics - }) { - this.connectionGater = connectionGater - this.transportManager = transportManager - this.peerStore = peerStore - this.addressSorter = addressSorter - this.maxParallelDials = maxParallelDials - this.maxAddrsToDial = maxAddrsToDial - this.timeout = dialTimeout - this.maxDialsPerPeer = maxDialsPerPeer - this.tokens = [...new Array(maxParallelDials)].map((_, index) => index) - - /** @type {Map} */ - this._pendingDials = trackedMap({ - component: METRICS_COMPONENT, - metric: METRICS_PENDING_DIALS, - metrics - }) - - /** @type {Map void, reject: (err: Error) => void}>} */ - this._pendingDialTargets = trackedMap({ - component: METRICS_COMPONENT, - metric: METRICS_PENDING_DIAL_TARGETS, - metrics - }) - - for (const [key, value] of Object.entries(resolvers)) { - Multiaddr.resolvers.set(key, value) - } - } - - /** - * Clears any pending dials - */ - destroy () { - for (const dial of this._pendingDials.values()) { - try { - dial.controller.abort() - } catch (/** @type {any} */ err) { - log.error(err) - } - } - this._pendingDials.clear() - - for (const pendingTarget of this._pendingDialTargets.values()) { - pendingTarget.reject(new AbortError('Dialer was destroyed')) - } - this._pendingDialTargets.clear() - } - - /** - * Connects to a given `peer` by dialing all of its known addresses. - * The dial to the first address that is successfully able to upgrade a connection - * will be used. - * - * @param {PeerId|Multiaddr|string} peer - The peer to dial - * @param {object} [options] - * @param {AbortSignal} [options.signal] - An AbortController signal - * @returns {Promise} - */ - async connectToPeer (peer, options = {}) { - const { id } = getPeer(peer) - - if (await this.connectionGater.denyDialPeer(id)) { - throw errCode(new Error('The dial request is blocked by gater.allowDialPeer'), codes.ERR_PEER_DIAL_INTERCEPTED) - } - - const dialTarget = await this._createCancellableDialTarget(peer) - - if (!dialTarget.addrs.length) { - throw errCode(new Error('The dial request has no valid addresses'), codes.ERR_NO_VALID_ADDRESSES) - } - const pendingDial = this._pendingDials.get(dialTarget.id) || this._createPendingDial(dialTarget, options) - - try { - const connection = await pendingDial.promise - log('dial succeeded to %s', dialTarget.id) - return connection - } catch (/** @type {any} */ err) { - // Error is a timeout - if (pendingDial.controller.signal.aborted) { - err.code = codes.ERR_TIMEOUT - } - log.error(err) - throw err - } finally { - pendingDial.destroy() - } - } - - /** - * Connects to a given `peer` by dialing all of its known addresses. - * The dial to the first address that is successfully able to upgrade a connection - * will be used. 
- * - * @param {PeerId|Multiaddr|string} peer - The peer to dial - * @returns {Promise} - */ - async _createCancellableDialTarget (peer) { - // Make dial target promise cancellable - const id = `${(parseInt(String(Math.random() * 1e9), 10)).toString() + Date.now()}` - const cancellablePromise = new Promise((resolve, reject) => { - this._pendingDialTargets.set(id, { resolve, reject }) - }) - - try { - const dialTarget = await Promise.race([ - this._createDialTarget(peer), - cancellablePromise - ]) - - return dialTarget - } finally { - this._pendingDialTargets.delete(id) - } - } - - /** - * Creates a DialTarget. The DialTarget is used to create and track - * the DialRequest to a given peer. - * If a multiaddr is received it should be the first address attempted. - * Multiaddrs not supported by the available transports will be filtered out. - * - * @private - * @param {PeerId|Multiaddr|string} peer - A PeerId or Multiaddr - * @returns {Promise} - */ - async _createDialTarget (peer) { - const { id, multiaddrs } = getPeer(peer) - - if (multiaddrs) { - await this.peerStore.addressBook.add(id, multiaddrs) - } - - let knownAddrs = await pipe( - await this.peerStore.addressBook.getMultiaddrsForPeer(id, this.addressSorter), - (source) => filter(source, async (multiaddr) => { - return !(await this.connectionGater.denyDialMultiaddr(id, multiaddr)) - }), - (source) => all(source) - ) - - // If received a multiaddr to dial, it should be the first to use - // But, if we know other multiaddrs for the peer, we should try them too. - if (Multiaddr.isMultiaddr(peer)) { - knownAddrs = knownAddrs.filter((addr) => !peer.equals(addr)) - knownAddrs.unshift(peer) - } - - /** @type {Multiaddr[]} */ - const addrs = [] - for (const a of knownAddrs) { - const resolvedAddrs = await this._resolve(a) - resolvedAddrs.forEach(ra => addrs.push(ra)) - } - - // Multiaddrs not supported by the available transports will be filtered out. 
- const supportedAddrs = addrs.filter(a => this.transportManager.transportForMultiaddr(a)) - - if (supportedAddrs.length > this.maxAddrsToDial) { - await this.peerStore.delete(id) - throw errCode(new Error('dial with more addresses than allowed'), codes.ERR_TOO_MANY_ADDRESSES) - } - - return { - id: id.toB58String(), - addrs: supportedAddrs - } - } - - /** - * Creates a PendingDial that wraps the underlying DialRequest - * - * @private - * @param {DialTarget} dialTarget - * @param {object} [options] - * @param {AbortSignal} [options.signal] - An AbortController signal - * @returns {PendingDial} - */ - _createPendingDial (dialTarget, options = {}) { - /** - * @param {Multiaddr} addr - * @param {{ signal: { aborted: any; }; }} options - */ - const dialAction = (addr, options) => { - if (options.signal.aborted) throw errCode(new Error('already aborted'), codes.ERR_ALREADY_ABORTED) - return this.transportManager.dial(addr, options) - } - - const dialRequest = new DialRequest({ - addrs: dialTarget.addrs, - dialAction, - dialer: this - }) - - // Combine the timeout signal and options.signal, if provided - const timeoutController = new TimeoutController(this.timeout) - - const signals = [timeoutController.signal] - options.signal && signals.push(options.signal) - const signal = anySignal(signals) - - // this signal will potentially be used while dialing lots of - // peers so prevent MaxListenersExceededWarning appearing in the console - try { - // fails on node < 15.4 - setMaxListeners && setMaxListeners(Infinity, signal) - } catch {} - - const pendingDial = { - dialRequest, - controller: timeoutController, - promise: dialRequest.run({ ...options, signal }), - destroy: () => { - timeoutController.clear() - this._pendingDials.delete(dialTarget.id) - } - } - this._pendingDials.set(dialTarget.id, pendingDial) - - return pendingDial - } - - /** - * @param {number} num - */ - getTokens (num) { - const total = Math.min(num, this.maxDialsPerPeer, this.tokens.length) - const tokens = this.tokens.splice(0, total) - log('%d tokens request, returning %d, %d remaining', num, total, this.tokens.length) - return tokens - } - - /** - * @param {number} token - */ - releaseToken (token) { - // Guard against duplicate releases - if (this.tokens.indexOf(token) > -1) return - log('token %d released', token) - this.tokens.push(token) - } - - /** - * Resolve multiaddr recursively. - * - * @param {Multiaddr} ma - * @returns {Promise} - */ - async _resolve (ma) { - // TODO: recursive logic should live in multiaddr once dns4/dns6 support is in place - // Now only supporting resolve for dnsaddr - const resolvableProto = ma.protoNames().includes('dnsaddr') - - // Multiaddr is not resolvable? End recursion! - if (!resolvableProto) { - return [ma] - } - - const resolvedMultiaddrs = await this._resolveRecord(ma) - const recursiveMultiaddrs = await Promise.all(resolvedMultiaddrs.map((nm) => { - return this._resolve(nm) - })) - - const addrs = recursiveMultiaddrs.flat() - return addrs.reduce((array, newM) => { - if (!array.find(m => m.equals(newM))) { - array.push(newM) - } - return array - }, /** @type {Multiaddr[]} */([])) - } - - /** - * Resolve a given multiaddr. 
If this fails, an empty array will be returned - * - * @param {Multiaddr} ma - * @returns {Promise} - */ - async _resolveRecord (ma) { - try { - ma = new Multiaddr(ma.toString()) // Use current multiaddr module - const multiaddrs = await ma.resolve() - return multiaddrs - } catch (_) { - log.error(`multiaddr ${ma} could not be resolved`) - return [] - } - } -} - -module.exports = Dialer diff --git a/src/dialer/index.ts b/src/dialer/index.ts new file mode 100644 index 00000000..30dc731e --- /dev/null +++ b/src/dialer/index.ts @@ -0,0 +1,374 @@ +import { logger } from '@libp2p/logger' +import all from 'it-all' +import filter from 'it-filter' +import { pipe } from 'it-pipe' +import errCode from 'err-code' +import { Multiaddr } from '@multiformats/multiaddr' +import { TimeoutController } from 'timeout-abort-controller' +import { AbortError } from '@libp2p/interfaces/errors' +import { anySignal } from 'any-signal' +// @ts-expect-error setMaxListeners is missing from the node 16 types +import { setMaxListeners } from 'events' +import { DialAction, DialRequest } from './dial-request.js' +import { publicAddressesFirst } from '@libp2p/utils/address-sort' +import { trackedMap } from '@libp2p/tracked-map' +import { codes } from '../errors.js' +import { + DIAL_TIMEOUT, + MAX_PARALLEL_DIALS, + MAX_PER_PEER_DIALS, + MAX_ADDRS_TO_DIAL +} from '../constants.js' +import type { Connection } from '@libp2p/interfaces/connection' +import type { AbortOptions, Startable } from '@libp2p/interfaces' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { getPeer } from '../get-peer.js' +import sort from 'it-sort' +import type { Components } from '@libp2p/interfaces/components' +import type { Dialer, DialerInit } from '@libp2p/interfaces/dialer' +import map from 'it-map' +import type { AddressSorter } from '@libp2p/interfaces/peer-store' + +const log = logger('libp2p:dialer') + +const METRICS_COMPONENT = 'dialler' +const METRICS_PENDING_DIALS = 'pending-dials' +const METRICS_PENDING_DIAL_TARGETS = 'pending-dial-targets' + +export interface DialTarget { + id: string + addrs: Multiaddr[] +} + +export interface PendingDial { + dialRequest: DialRequest + controller: TimeoutController + promise: Promise + destroy: () => void +} + +export interface PendingDialTarget { + resolve: (value: any) => void + reject: (err: Error) => void +} + +export class DefaultDialer implements Dialer, Startable { + private readonly components: Components + private readonly addressSorter: AddressSorter + private readonly maxAddrsToDial: number + private readonly timeout: number + private readonly maxDialsPerPeer: number + public tokens: number[] + public pendingDials: Map + public pendingDialTargets: Map + private started: boolean + + constructor (components: Components, init: DialerInit = {}) { + this.components = components + this.started = false + this.addressSorter = init.addressSorter ?? publicAddressesFirst + this.maxAddrsToDial = init.maxAddrsToDial ?? MAX_ADDRS_TO_DIAL + this.timeout = init.dialTimeout ?? DIAL_TIMEOUT + this.maxDialsPerPeer = init.maxDialsPerPeer ?? MAX_PER_PEER_DIALS + this.tokens = [...new Array(init.maxParallelDials ?? MAX_PARALLEL_DIALS)].map((_, index) => index) + this.pendingDials = trackedMap({ + component: METRICS_COMPONENT, + metric: METRICS_PENDING_DIALS, + metrics: init.metrics + }) + this.pendingDialTargets = trackedMap({ + component: METRICS_COMPONENT, + metric: METRICS_PENDING_DIAL_TARGETS, + metrics: init.metrics + }) + + for (const [key, value] of Object.entries(init.resolvers ?? 
{})) { + Multiaddr.resolvers.set(key, value) + } + } + + isStarted () { + return this.started + } + + async start () { + this.started = true + } + + /** + * Clears any pending dials + */ + async stop () { + this.started = false + + for (const dial of this.pendingDials.values()) { + try { + dial.controller.abort() + } catch (err: any) { + log.error(err) + } + } + this.pendingDials.clear() + + for (const pendingTarget of this.pendingDialTargets.values()) { + pendingTarget.reject(new AbortError('Dialer was destroyed')) + } + this.pendingDialTargets.clear() + } + + /** + * Connects to a given `peer` by dialing all of its known addresses. + * The dial to the first address that is successfully able to upgrade a connection + * will be used. + */ + async dial (peer: PeerId | Multiaddr, options: AbortOptions = {}): Promise { + const { id, multiaddrs } = getPeer(peer) + + if (this.components.getPeerId().equals(id)) { + throw errCode(new Error('Tried to dial self'), codes.ERR_DIALED_SELF) + } + + log('check multiaddrs %p', id) + + if (multiaddrs != null && multiaddrs.length > 0) { + log('storing multiaddrs %p', id, multiaddrs) + await this.components.getPeerStore().addressBook.add(id, multiaddrs) + } + + if (await this.components.getConnectionGater().denyDialPeer(id)) { + throw errCode(new Error('The dial request is blocked by gater.allowDialPeer'), codes.ERR_PEER_DIAL_INTERCEPTED) + } + + log('dial to %p', id) + + const existingConnection = this.components.getConnectionManager().getConnection(id) + + if (existingConnection != null) { + log('had an existing connection to %p', id) + + return existingConnection + } + + log('creating dial target for %p', id) + + const dialTarget = await this._createCancellableDialTarget(id) + + if (dialTarget.addrs.length === 0) { + throw errCode(new Error('The dial request has no valid addresses'), codes.ERR_NO_VALID_ADDRESSES) + } + + const pendingDial = this.pendingDials.get(dialTarget.id) ?? this._createPendingDial(dialTarget, options) + + try { + const connection = await pendingDial.promise + log('dial succeeded to %s', dialTarget.id) + return connection + } catch (err: any) { + log('dial failed to %s', dialTarget.id, err) + // Error is a timeout + if (pendingDial.controller.signal.aborted) { + err.code = codes.ERR_TIMEOUT + } + log.error(err) + throw err + } finally { + pendingDial.destroy() + } + } + + async dialProtocol (peer: PeerId | Multiaddr, protocols: string | string[], options: AbortOptions = {}) { + if (protocols == null) { + throw errCode(new Error('no protocols were provided to open a stream'), codes.ERR_INVALID_PROTOCOLS_FOR_STREAM) + } + + protocols = Array.isArray(protocols) ? protocols : [protocols] + + if (protocols.length === 0) { + throw errCode(new Error('no protocols were provided to open a stream'), codes.ERR_INVALID_PROTOCOLS_FOR_STREAM) + } + + const connection = await this.dial(peer, options) + + return await connection.newStream(protocols) + } + + /** + * Connects to a given `peer` by dialing all of its known addresses. + * The dial to the first address that is successfully able to upgrade a connection + * will be used. 
+ */ + async _createCancellableDialTarget (peer: PeerId): Promise { + // Make dial target promise cancellable + const id = `${(parseInt(String(Math.random() * 1e9), 10)).toString()}${Date.now()}` + const cancellablePromise = new Promise((resolve, reject) => { + this.pendingDialTargets.set(id, { resolve, reject }) + }) + + try { + const dialTarget = await Promise.race([ + this._createDialTarget(peer), + cancellablePromise + ]) + + return dialTarget + } finally { + this.pendingDialTargets.delete(id) + } + } + + /** + * Creates a DialTarget. The DialTarget is used to create and track + * the DialRequest to a given peer. + * If a multiaddr is received it should be the first address attempted. + * Multiaddrs not supported by the available transports will be filtered out. + */ + async _createDialTarget (peer: PeerId): Promise { + const knownAddrs = await pipe( + await this.components.getPeerStore().addressBook.get(peer), + (source) => filter(source, async (address) => { + return !(await this.components.getConnectionGater().denyDialMultiaddr(peer, address.multiaddr)) + }), + (source) => sort(source, this.addressSorter), + (source) => map(source, (address) => { + const ma = address.multiaddr + + if (peer.toString() === ma.getPeerId()) { + return ma + } + + return ma.encapsulate(`/p2p/${peer.toString()}`) + }), + async (source) => await all(source) + ) + + const addrs: Multiaddr[] = [] + for (const a of knownAddrs) { + const resolvedAddrs = await this._resolve(a) + + log('resolved %s to %s', a, resolvedAddrs) + + resolvedAddrs.forEach(ra => addrs.push(ra)) + } + + // Multiaddrs not supported by the available transports will be filtered out. + const supportedAddrs = addrs.filter(a => this.components.getTransportManager().transportForMultiaddr(a)) + + if (supportedAddrs.length > this.maxAddrsToDial) { + await this.components.getPeerStore().delete(peer) + throw errCode(new Error('dial with more addresses than allowed'), codes.ERR_TOO_MANY_ADDRESSES) + } + + return { + id: peer.toString(), + addrs: supportedAddrs + } + } + + /** + * Creates a PendingDial that wraps the underlying DialRequest + */ + _createPendingDial (dialTarget: DialTarget, options: AbortOptions = {}): PendingDial { + /** + * @param {Multiaddr} addr + * @param {{ signal: { aborted: any; }; }} options + */ + const dialAction: DialAction = async (addr, options = {}) => { + if (options.signal?.aborted === true) { + throw errCode(new Error('already aborted'), codes.ERR_ALREADY_ABORTED) + } + + return await this.components.getTransportManager().dial(addr, options) + } + + const dialRequest = new DialRequest({ + addrs: dialTarget.addrs, + dialAction, + dialer: this + }) + + // Combine the timeout signal and options.signal, if provided + const timeoutController = new TimeoutController(this.timeout) + + const signals = [timeoutController.signal] + ;(options.signal != null) && signals.push(options.signal) + const signal = anySignal(signals) + + // this signal will potentially be used while dialing lots of + // peers so prevent MaxListenersExceededWarning appearing in the console + try { + // fails on node < 15.4 + setMaxListeners?.(Infinity, signal) + } catch {} + + const pendingDial = { + dialRequest, + controller: timeoutController, + promise: dialRequest.run({ ...options, signal }), + destroy: () => { + timeoutController.clear() + this.pendingDials.delete(dialTarget.id) + } + } + this.pendingDials.set(dialTarget.id, pendingDial) + + return pendingDial + } + + getTokens (num: number) { + const total = Math.min(num, this.maxDialsPerPeer, 
this.tokens.length) + const tokens = this.tokens.splice(0, total) + log('%d tokens request, returning %d, %d remaining', num, total, this.tokens.length) + return tokens + } + + releaseToken (token: number) { + // Guard against duplicate releases + if (this.tokens.includes(token)) { + return + } + + log('token %d released', token) + this.tokens.push(token) + } + + /** + * Resolve multiaddr recursively + */ + async _resolve (ma: Multiaddr): Promise { + // TODO: recursive logic should live in multiaddr once dns4/dns6 support is in place + // Now only supporting resolve for dnsaddr + const resolvableProto = ma.protoNames().includes('dnsaddr') + + // Multiaddr is not resolvable? End recursion! + if (!resolvableProto) { + return [ma] + } + + const resolvedMultiaddrs = await this._resolveRecord(ma) + const recursiveMultiaddrs = await Promise.all(resolvedMultiaddrs.map(async (nm) => { + return await this._resolve(nm) + })) + + const addrs = recursiveMultiaddrs.flat() + return addrs.reduce((array, newM) => { + if (array.find(m => m.equals(newM)) == null) { + array.push(newM) + } + return array + }, ([])) + } + + /** + * Resolve a given multiaddr. If this fails, an empty array will be returned + */ + async _resolveRecord (ma: Multiaddr): Promise { + try { + ma = new Multiaddr(ma.toString()) // Use current multiaddr module + const multiaddrs = await ma.resolve() + return multiaddrs + } catch (err) { + log.error(`multiaddr ${ma.toString()} could not be resolved`, err) + return [] + } + } +} diff --git a/src/errors.js b/src/errors.js deleted file mode 100644 index 4c34ae51..00000000 --- a/src/errors.js +++ /dev/null @@ -1,66 +0,0 @@ -'use strict' - -exports.messages = { - NOT_STARTED_YET: 'The libp2p node is not started yet', - DHT_DISABLED: 'DHT is not available', - CONN_ENCRYPTION_REQUIRED: 'At least one connection encryption module is required', - NOT_FOUND: 'Not found' -} - -exports.codes = { - DHT_DISABLED: 'ERR_DHT_DISABLED', - PUBSUB_NOT_STARTED: 'ERR_PUBSUB_NOT_STARTED', - DHT_NOT_STARTED: 'ERR_DHT_NOT_STARTED', - CONN_ENCRYPTION_REQUIRED: 'ERR_CONN_ENCRYPTION_REQUIRED', - ERR_PEER_DIAL_INTERCEPTED: 'ERR_PEER_DIAL_INTERCEPTED', - ERR_CONNECTION_INTERCEPTED: 'ERR_CONNECTION_INTERCEPTED', - ERR_INVALID_PROTOCOLS_FOR_STREAM: 'ERR_INVALID_PROTOCOLS_FOR_STREAM', - ERR_CONNECTION_ENDED: 'ERR_CONNECTION_ENDED', - ERR_CONNECTION_FAILED: 'ERR_CONNECTION_FAILED', - ERR_NODE_NOT_STARTED: 'ERR_NODE_NOT_STARTED', - ERR_ALREADY_ABORTED: 'ERR_ALREADY_ABORTED', - ERR_TOO_MANY_ADDRESSES: 'ERR_TOO_MANY_ADDRESSES', - ERR_NO_VALID_ADDRESSES: 'ERR_NO_VALID_ADDRESSES', - ERR_RELAYED_DIAL: 'ERR_RELAYED_DIAL', - ERR_DIALED_SELF: 'ERR_DIALED_SELF', - ERR_DISCOVERED_SELF: 'ERR_DISCOVERED_SELF', - ERR_DUPLICATE_TRANSPORT: 'ERR_DUPLICATE_TRANSPORT', - ERR_ENCRYPTION_FAILED: 'ERR_ENCRYPTION_FAILED', - ERR_HOP_REQUEST_FAILED: 'ERR_HOP_REQUEST_FAILED', - ERR_INVALID_KEY: 'ERR_INVALID_KEY', - ERR_INVALID_MESSAGE: 'ERR_INVALID_MESSAGE', - ERR_INVALID_PARAMETERS: 'ERR_INVALID_PARAMETERS', - ERR_INVALID_PEER: 'ERR_INVALID_PEER', - ERR_MUXER_UNAVAILABLE: 'ERR_MUXER_UNAVAILABLE', - ERR_NOT_FOUND: 'ERR_NOT_FOUND', - ERR_TIMEOUT: 'ERR_TIMEOUT', - ERR_TRANSPORT_UNAVAILABLE: 'ERR_TRANSPORT_UNAVAILABLE', - ERR_TRANSPORT_DIAL_FAILED: 'ERR_TRANSPORT_DIAL_FAILED', - ERR_UNSUPPORTED_PROTOCOL: 'ERR_UNSUPPORTED_PROTOCOL', - ERR_INVALID_MULTIADDR: 'ERR_INVALID_MULTIADDR', - ERR_SIGNATURE_NOT_VALID: 'ERR_SIGNATURE_NOT_VALID', - ERR_FIND_SELF: 'ERR_FIND_SELF', - ERR_NO_ROUTERS_AVAILABLE: 'ERR_NO_ROUTERS_AVAILABLE', - 
ERR_CONNECTION_NOT_MULTIPLEXED: 'ERR_CONNECTION_NOT_MULTIPLEXED', - ERR_NO_DIAL_TOKENS: 'ERR_NO_DIAL_TOKENS', - ERR_KEYCHAIN_REQUIRED: 'ERR_KEYCHAIN_REQUIRED', - ERR_INVALID_CMS: 'ERR_INVALID_CMS', - ERR_MISSING_KEYS: 'ERR_MISSING_KEYS', - ERR_NO_KEY: 'ERR_NO_KEY', - ERR_INVALID_KEY_NAME: 'ERR_INVALID_KEY_NAME', - ERR_INVALID_KEY_TYPE: 'ERR_INVALID_KEY_TYPE', - ERR_KEY_ALREADY_EXISTS: 'ERR_KEY_ALREADY_EXISTS', - ERR_INVALID_KEY_SIZE: 'ERR_INVALID_KEY_SIZE', - ERR_KEY_NOT_FOUND: 'ERR_KEY_NOT_FOUND', - ERR_OLD_KEY_NAME_INVALID: 'ERR_OLD_KEY_NAME_INVALID', - ERR_NEW_KEY_NAME_INVALID: 'ERR_NEW_KEY_NAME_INVALID', - ERR_PASSWORD_REQUIRED: 'ERR_PASSWORD_REQUIRED', - ERR_PEM_REQUIRED: 'ERR_PEM_REQUIRED', - ERR_CANNOT_READ_KEY: 'ERR_CANNOT_READ_KEY', - ERR_MISSING_PRIVATE_KEY: 'ERR_MISSING_PRIVATE_KEY', - ERR_INVALID_OLD_PASS_TYPE: 'ERR_INVALID_OLD_PASS_TYPE', - ERR_INVALID_NEW_PASS_TYPE: 'ERR_INVALID_NEW_PASS_TYPE', - ERR_INVALID_PASS_LENGTH: 'ERR_INVALID_PASS_LENGTH', - ERR_NOT_IMPLEMENTED: 'ERR_NOT_IMPLEMENTED', - ERR_WRONG_PING_ACK: 'ERR_WRONG_PING_ACK' -} diff --git a/src/errors.ts b/src/errors.ts new file mode 100644 index 00000000..8b1eff04 --- /dev/null +++ b/src/errors.ts @@ -0,0 +1,71 @@ +export enum messages { + NOT_STARTED_YET = 'The libp2p node is not started yet', + DHT_DISABLED = 'DHT is not available', + CONN_ENCRYPTION_REQUIRED = 'At least one connection encryption module is required', + ERR_TRANSPORTS_REQUIRED = 'At least one transport module is required', + ERR_PROTECTOR_REQUIRED = 'Private network is enforced, but no protector was provided', + NOT_FOUND = 'Not found' +} + +export enum codes { + DHT_DISABLED = 'ERR_DHT_DISABLED', + PUBSUB_NOT_STARTED = 'ERR_PUBSUB_NOT_STARTED', + DHT_NOT_STARTED = 'ERR_DHT_NOT_STARTED', + CONN_ENCRYPTION_REQUIRED = 'ERR_CONN_ENCRYPTION_REQUIRED', + ERR_TRANSPORTS_REQUIRED = 'ERR_TRANSPORTS_REQUIRED', + ERR_PROTECTOR_REQUIRED = 'ERR_PROTECTOR_REQUIRED', + ERR_PEER_DIAL_INTERCEPTED = 'ERR_PEER_DIAL_INTERCEPTED', + ERR_CONNECTION_INTERCEPTED = 'ERR_CONNECTION_INTERCEPTED', + ERR_INVALID_PROTOCOLS_FOR_STREAM = 'ERR_INVALID_PROTOCOLS_FOR_STREAM', + ERR_CONNECTION_ENDED = 'ERR_CONNECTION_ENDED', + ERR_CONNECTION_FAILED = 'ERR_CONNECTION_FAILED', + ERR_NODE_NOT_STARTED = 'ERR_NODE_NOT_STARTED', + ERR_ALREADY_ABORTED = 'ERR_ALREADY_ABORTED', + ERR_TOO_MANY_ADDRESSES = 'ERR_TOO_MANY_ADDRESSES', + ERR_NO_VALID_ADDRESSES = 'ERR_NO_VALID_ADDRESSES', + ERR_RELAYED_DIAL = 'ERR_RELAYED_DIAL', + ERR_DIALED_SELF = 'ERR_DIALED_SELF', + ERR_DISCOVERED_SELF = 'ERR_DISCOVERED_SELF', + ERR_DUPLICATE_TRANSPORT = 'ERR_DUPLICATE_TRANSPORT', + ERR_ENCRYPTION_FAILED = 'ERR_ENCRYPTION_FAILED', + ERR_HOP_REQUEST_FAILED = 'ERR_HOP_REQUEST_FAILED', + ERR_INVALID_KEY = 'ERR_INVALID_KEY', + ERR_INVALID_MESSAGE = 'ERR_INVALID_MESSAGE', + ERR_INVALID_PARAMETERS = 'ERR_INVALID_PARAMETERS', + ERR_INVALID_PEER = 'ERR_INVALID_PEER', + ERR_MUXER_UNAVAILABLE = 'ERR_MUXER_UNAVAILABLE', + ERR_NOT_FOUND = 'ERR_NOT_FOUND', + ERR_TIMEOUT = 'ERR_TIMEOUT', + ERR_TRANSPORT_UNAVAILABLE = 'ERR_TRANSPORT_UNAVAILABLE', + ERR_TRANSPORT_DIAL_FAILED = 'ERR_TRANSPORT_DIAL_FAILED', + ERR_UNSUPPORTED_PROTOCOL = 'ERR_UNSUPPORTED_PROTOCOL', + ERR_PROTOCOL_HANDLER_ALREADY_REGISTERED = 'ERR_PROTOCOL_HANDLER_ALREADY_REGISTERED', + ERR_INVALID_MULTIADDR = 'ERR_INVALID_MULTIADDR', + ERR_SIGNATURE_NOT_VALID = 'ERR_SIGNATURE_NOT_VALID', + ERR_FIND_SELF = 'ERR_FIND_SELF', + ERR_NO_ROUTERS_AVAILABLE = 'ERR_NO_ROUTERS_AVAILABLE', + ERR_CONNECTION_NOT_MULTIPLEXED = 'ERR_CONNECTION_NOT_MULTIPLEXED', + 
ERR_NO_DIAL_TOKENS = 'ERR_NO_DIAL_TOKENS', + ERR_KEYCHAIN_REQUIRED = 'ERR_KEYCHAIN_REQUIRED', + ERR_INVALID_CMS = 'ERR_INVALID_CMS', + ERR_MISSING_KEYS = 'ERR_MISSING_KEYS', + ERR_NO_KEY = 'ERR_NO_KEY', + ERR_INVALID_KEY_NAME = 'ERR_INVALID_KEY_NAME', + ERR_INVALID_KEY_TYPE = 'ERR_INVALID_KEY_TYPE', + ERR_KEY_ALREADY_EXISTS = 'ERR_KEY_ALREADY_EXISTS', + ERR_INVALID_KEY_SIZE = 'ERR_INVALID_KEY_SIZE', + ERR_KEY_NOT_FOUND = 'ERR_KEY_NOT_FOUND', + ERR_OLD_KEY_NAME_INVALID = 'ERR_OLD_KEY_NAME_INVALID', + ERR_NEW_KEY_NAME_INVALID = 'ERR_NEW_KEY_NAME_INVALID', + ERR_PASSWORD_REQUIRED = 'ERR_PASSWORD_REQUIRED', + ERR_PEM_REQUIRED = 'ERR_PEM_REQUIRED', + ERR_CANNOT_READ_KEY = 'ERR_CANNOT_READ_KEY', + ERR_MISSING_PRIVATE_KEY = 'ERR_MISSING_PRIVATE_KEY', + ERR_MISSING_PUBLIC_KEY = 'ERR_MISSING_PUBLIC_KEY', + ERR_INVALID_OLD_PASS_TYPE = 'ERR_INVALID_OLD_PASS_TYPE', + ERR_INVALID_NEW_PASS_TYPE = 'ERR_INVALID_NEW_PASS_TYPE', + ERR_INVALID_PASS_LENGTH = 'ERR_INVALID_PASS_LENGTH', + ERR_NOT_IMPLEMENTED = 'ERR_NOT_IMPLEMENTED', + ERR_WRONG_PING_ACK = 'ERR_WRONG_PING_ACK', + ERR_INVALID_RECORD = 'ERR_INVALID_RECORD' +} diff --git a/src/fetch/README.md b/src/fetch/README.md index 7ea9997a..0ab7e877 100644 --- a/src/fetch/README.md +++ b/src/fetch/README.md @@ -12,7 +12,7 @@ The fetch protocol is a simple protocol for requesting a value corresponding to ## Usage ```javascript -const Libp2p = require('libp2p') +import { createLibp2p } from 'libp2p' /** * Given a key (as a string) returns a value (as a Uint8Array), or null if the key isn't found. diff --git a/src/fetch/constants.js b/src/fetch/constants.js deleted file mode 100644 index 2c1044e5..00000000 --- a/src/fetch/constants.js +++ /dev/null @@ -1,6 +0,0 @@ -'use strict' - -module.exports = { - // https://github.com/libp2p/specs/tree/master/fetch#wire-protocol - PROTOCOL: '/libp2p/fetch/0.0.1' -} diff --git a/src/fetch/constants.ts b/src/fetch/constants.ts new file mode 100644 index 00000000..c9c425d6 --- /dev/null +++ b/src/fetch/constants.ts @@ -0,0 +1,3 @@ + +// https://github.com/libp2p/specs/tree/master/fetch#wire-protocol +export const PROTOCOL = '/libp2p/fetch/0.0.1' diff --git a/src/fetch/index.js b/src/fetch/index.ts similarity index 52% rename from src/fetch/index.js rename to src/fetch/index.ts index ef8c37f1..65f906db 100644 --- a/src/fetch/index.js +++ b/src/fetch/index.ts @@ -1,24 +1,30 @@ -'use strict' +import { logger } from '@libp2p/logger' +import errCode from 'err-code' +import { codes } from '../errors.js' +import * as lp from 'it-length-prefixed' +import { FetchRequest, FetchResponse } from './pb/proto.js' +import { handshake } from 'it-handshake' +import { PROTOCOL } from './constants.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { Startable } from '@libp2p/interfaces' +import type { Stream } from '@libp2p/interfaces/connection' +import type { IncomingStreamData } from '@libp2p/interfaces/registrar' +import type { Components } from '@libp2p/interfaces/components' -const debug = require('debug') -const log = Object.assign(debug('libp2p:fetch'), { - error: debug('libp2p:fetch:err') -}) -const errCode = require('err-code') -const { codes } = require('../errors') -const lp = require('it-length-prefixed') -const { FetchRequest, FetchResponse } = require('./proto') -// @ts-ignore it-handshake does not export types -const handshake = require('it-handshake') -const { PROTOCOL } = require('./constants') +const log = logger('libp2p:fetch') -/** - * @typedef {import('../')} Libp2p - * @typedef 
{import('multiaddr').Multiaddr} Multiaddr - * @typedef {import('peer-id')} PeerId - * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxedStream} MuxedStream - * @typedef {(key: string) => Promise} LookupFunction - */ +export interface FetchInit { + protocolPrefix: string +} + +export interface HandleMessageOptions { + stream: Stream + protocol: string +} + +export interface LookupFunction { + (key: string): Promise +} /** * A simple libp2p protocol for requesting a value corresponding to a key from a peer. @@ -26,36 +32,54 @@ const { PROTOCOL } = require('./constants') * a given key. Each lookup function must act on a distinct part of the overall key space, defined * by a fixed prefix that all keys that should be routed to that lookup function will start with. */ -class FetchProtocol { - /** - * @param {Libp2p} libp2p - */ - constructor (libp2p) { - this._lookupFunctions = new Map() // Maps key prefix to value lookup function - this._libp2p = libp2p +export class FetchService implements Startable { + private readonly components: Components + private readonly lookupFunctions: Map + private readonly protocol: string + private started: boolean + + constructor (components: Components, init: FetchInit) { + this.started = false + this.components = components + this.protocol = PROTOCOL + this.lookupFunctions = new Map() // Maps key prefix to value lookup function this.handleMessage = this.handleMessage.bind(this) } - /** - * Sends a request to fetch the value associated with the given key from the given peer. - * - * @param {PeerId|Multiaddr} peer - * @param {string} key - * @returns {Promise} - */ - async fetch (peer, key) { - // @ts-ignore multiaddr might not have toB58String - log('dialing %s to %s', this._protocol, peer.toB58String ? peer.toB58String() : peer) + async start () { + await this.components.getRegistrar().handle(this.protocol, (data) => { + void this.handleMessage(data).catch(err => { + log.error(err) + }) + }) + this.started = true + } - const connection = await this._libp2p.dial(peer) - const { stream } = await connection.newStream(FetchProtocol.PROTOCOL) + async stop () { + await this.components.getRegistrar().unhandle(this.protocol) + this.started = false + } + + isStarted () { + return this.started + } + + /** + * Sends a request to fetch the value associated with the given key from the given peer + */ + async fetch (peer: PeerId, key: string): Promise { + log('dialing %s to %p', this.protocol, peer) + + const connection = await this.components.getDialer().dial(peer) + const { stream } = await connection.newStream([this.protocol]) const shake = handshake(stream) // send message const request = new FetchRequest({ identifier: key }) - shake.write(lp.encode.single(FetchRequest.encode(request).finish())) + shake.write(lp.encode.single(FetchRequest.encode(request).finish()).slice()) // read response + // @ts-expect-error fromReader returns a Source which has no .next method const response = FetchResponse.decode((await lp.decode.fromReader(shake.reader).next()).value.slice()) switch (response.status) { case (FetchResponse.StatusCode.OK): { @@ -78,21 +102,18 @@ class FetchProtocol { * Invoked when a fetch request is received. Reads the request message off the given stream and * responds based on looking up the key in the request via the lookup callback that corresponds * to the key's prefix. 
- * - * @param {object} options - * @param {MuxedStream} options.stream - * @param {string} options.protocol */ - async handleMessage (options) { - const { stream } = options + async handleMessage (data: IncomingStreamData) { + const { stream } = data const shake = handshake(stream) + // @ts-expect-error fromReader returns a Source which has no .next method const request = FetchRequest.decode((await lp.decode.fromReader(shake.reader).next()).value.slice()) let response const lookup = this._getLookupFunction(request.identifier) - if (lookup) { + if (lookup != null) { const data = await lookup(request.identifier) - if (data) { + if (data != null) { response = new FetchResponse({ status: FetchResponse.StatusCode.OK, data }) } else { response = new FetchResponse({ status: FetchResponse.StatusCode.NOT_FOUND }) @@ -102,58 +123,46 @@ class FetchProtocol { response = new FetchResponse({ status: FetchResponse.StatusCode.ERROR, data: errmsg }) } - shake.write(lp.encode.single(FetchResponse.encode(response).finish())) + shake.write(lp.encode.single(FetchResponse.encode(response).finish()).slice()) } /** * Given a key, finds the appropriate function for looking up its corresponding value, based on * the key's prefix. - * - * @param {string} key */ - _getLookupFunction (key) { - for (const prefix of this._lookupFunctions.keys()) { + _getLookupFunction (key: string) { + for (const prefix of this.lookupFunctions.keys()) { if (key.startsWith(prefix)) { - return this._lookupFunctions.get(prefix) + return this.lookupFunctions.get(prefix) } } - return null } /** * Registers a new lookup callback that can map keys to values, for a given set of keys that - * share the same prefix. - * - * @param {string} prefix - * @param {LookupFunction} lookup + * share the same prefix */ - registerLookupFunction (prefix, lookup) { - if (this._lookupFunctions.has(prefix)) { + registerLookupFunction (prefix: string, lookup: LookupFunction) { + if (this.lookupFunctions.has(prefix)) { throw errCode(new Error("Fetch protocol handler for key prefix '" + prefix + "' already registered"), codes.ERR_KEY_ALREADY_EXISTS) } - this._lookupFunctions.set(prefix, lookup) + + this.lookupFunctions.set(prefix, lookup) } /** * Registers a new lookup callback that can map keys to values, for a given set of keys that * share the same prefix. 
- * - * @param {string} prefix - * @param {LookupFunction} [lookup] */ - unregisterLookupFunction (prefix, lookup) { + unregisterLookupFunction (prefix: string, lookup?: LookupFunction) { if (lookup != null) { - const existingLookup = this._lookupFunctions.get(prefix) + const existingLookup = this.lookupFunctions.get(prefix) if (existingLookup !== lookup) { return } } - this._lookupFunctions.delete(prefix) + this.lookupFunctions.delete(prefix) } } - -FetchProtocol.PROTOCOL = PROTOCOL - -exports = module.exports = FetchProtocol diff --git a/src/fetch/proto.d.ts b/src/fetch/pb/proto.d.ts similarity index 100% rename from src/fetch/proto.d.ts rename to src/fetch/pb/proto.d.ts diff --git a/src/fetch/proto.js b/src/fetch/pb/proto.js similarity index 95% rename from src/fetch/proto.js rename to src/fetch/pb/proto.js index f7de2b1d..b77d6375 100644 --- a/src/fetch/proto.js +++ b/src/fetch/pb/proto.js @@ -1,15 +1,13 @@ /*eslint-disable*/ -"use strict"; - -var $protobuf = require("protobufjs/minimal"); +import $protobuf from "protobufjs/minimal.js"; // Common aliases -var $Reader = $protobuf.Reader, $Writer = $protobuf.Writer, $util = $protobuf.util; +const $Reader = $protobuf.Reader, $Writer = $protobuf.Writer, $util = $protobuf.util; // Exported root namespace -var $root = $protobuf.roots["libp2p-fetch"] || ($protobuf.roots["libp2p-fetch"] = {}); +const $root = $protobuf.roots["libp2p-fetch"] || ($protobuf.roots["libp2p-fetch"] = {}); -$root.FetchRequest = (function() { +export const FetchRequest = $root.FetchRequest = (() => { /** * Properties of a FetchRequest. @@ -141,7 +139,7 @@ $root.FetchRequest = (function() { return FetchRequest; })(); -$root.FetchResponse = (function() { +export const FetchResponse = $root.FetchResponse = (() => { /** * Properties of a FetchResponse. @@ -320,7 +318,7 @@ $root.FetchResponse = (function() { * @property {number} ERROR=2 ERROR value */ FetchResponse.StatusCode = (function() { - var valuesById = {}, values = Object.create(valuesById); + const valuesById = {}, values = Object.create(valuesById); values[valuesById[0] = "OK"] = 0; values[valuesById[1] = "NOT_FOUND"] = 1; values[valuesById[2] = "ERROR"] = 2; @@ -330,4 +328,4 @@ $root.FetchResponse = (function() { return FetchResponse; })(); -module.exports = $root; +export { $root as default }; diff --git a/src/fetch/proto.proto b/src/fetch/pb/proto.proto similarity index 100% rename from src/fetch/proto.proto rename to src/fetch/pb/proto.proto diff --git a/src/get-peer.js b/src/get-peer.js deleted file mode 100644 index afad64df..00000000 --- a/src/get-peer.js +++ /dev/null @@ -1,49 +0,0 @@ -'use strict' - -const PeerId = require('peer-id') -const { Multiaddr } = require('multiaddr') -const errCode = require('err-code') - -const { codes } = require('./errors') - -/** - * Converts the given `peer` to a `Peer` object. - * If a multiaddr is received, the addressBook is updated. 
- * - * @param {PeerId|Multiaddr|string} peer - * @returns {{ id: PeerId, multiaddrs: Multiaddr[]|undefined }} - */ -function getPeer (peer) { - if (typeof peer === 'string') { - peer = new Multiaddr(peer) - } - - let addr - if (Multiaddr.isMultiaddr(peer)) { - addr = peer - const idStr = peer.getPeerId() - - if (!idStr) { - throw errCode( - new Error(`${peer} does not have a valid peer type`), - codes.ERR_INVALID_MULTIADDR - ) - } - - try { - peer = PeerId.createFromB58String(idStr) - } catch (/** @type {any} */ err) { - throw errCode( - new Error(`${peer} is not a valid peer type`), - codes.ERR_INVALID_MULTIADDR - ) - } - } - - return { - id: peer, - multiaddrs: addr ? [addr] : undefined - } -} - -module.exports = getPeer diff --git a/src/get-peer.ts b/src/get-peer.ts new file mode 100644 index 00000000..bc4129d1 --- /dev/null +++ b/src/get-peer.ts @@ -0,0 +1,57 @@ +import { peerIdFromString } from '@libp2p/peer-id' +import { Multiaddr } from '@multiformats/multiaddr' +import errCode from 'err-code' +import { codes } from './errors.js' +import { isPeerId } from '@libp2p/interfaces/peer-id' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { PeerInfo } from '@libp2p/interfaces/peer-info' + +function peerIdFromMultiaddr (ma: Multiaddr) { + const idStr = ma.getPeerId() + + if (idStr == null) { + throw errCode( + new Error(`${ma.toString()} does not have a valid peer type`), + codes.ERR_INVALID_MULTIADDR + ) + } + + try { + return peerIdFromString(idStr) + } catch (err: any) { + throw errCode( + new Error(`${ma.toString()} is not a valid peer type`), + codes.ERR_INVALID_MULTIADDR + ) + } +} + +/** + * Converts the given `peer` to a `Peer` object. + */ +export function getPeer (peer: PeerId | Multiaddr | string): PeerInfo { + if (isPeerId(peer)) { + return { + id: peer, + multiaddrs: [], + protocols: [] + } + } + + if (typeof peer === 'string') { + peer = new Multiaddr(peer) + } + + let addr + + if (Multiaddr.isMultiaddr(peer)) { + addr = peer + peer = peerIdFromMultiaddr(peer) + } + + return { + id: peer, + multiaddrs: addr != null ? 
[addr] : [], + protocols: [] + } +} diff --git a/src/identify/consts.js b/src/identify/consts.js deleted file mode 100644 index 7c2484aa..00000000 --- a/src/identify/consts.js +++ /dev/null @@ -1,15 +0,0 @@ -'use strict' - -// @ts-ignore file not listed within the file list of projects -const libp2pVersion = require('../../package.json').version - -module.exports.PROTOCOL_VERSION = 'ipfs/0.1.0' // deprecated -module.exports.AGENT_VERSION = `js-libp2p/${libp2pVersion}` -module.exports.MULTICODEC_IDENTIFY = '/ipfs/id/1.0.0' // deprecated -module.exports.MULTICODEC_IDENTIFY_PUSH = '/ipfs/id/push/1.0.0' // deprecated - -module.exports.IDENTIFY_PROTOCOL_VERSION = '0.1.0' -module.exports.MULTICODEC_IDENTIFY_PROTOCOL_NAME = 'id' -module.exports.MULTICODEC_IDENTIFY_PUSH_PROTOCOL_NAME = 'id/push' -module.exports.MULTICODEC_IDENTIFY_PROTOCOL_VERSION = '1.0.0' -module.exports.MULTICODEC_IDENTIFY_PUSH_PROTOCOL_VERSION = '1.0.0' diff --git a/src/identify/consts.ts b/src/identify/consts.ts new file mode 100644 index 00000000..adb38720 --- /dev/null +++ b/src/identify/consts.ts @@ -0,0 +1,13 @@ + +import { version } from '../version.js' + +export const PROTOCOL_VERSION = 'ipfs/0.1.0' // deprecated +export const AGENT_VERSION = `js-libp2p/${version}` +export const MULTICODEC_IDENTIFY = '/ipfs/id/1.0.0' // deprecated +export const MULTICODEC_IDENTIFY_PUSH = '/ipfs/id/push/1.0.0' // deprecated + +export const IDENTIFY_PROTOCOL_VERSION = '0.1.0' +export const MULTICODEC_IDENTIFY_PROTOCOL_NAME = 'id' +export const MULTICODEC_IDENTIFY_PUSH_PROTOCOL_NAME = 'id/push' +export const MULTICODEC_IDENTIFY_PROTOCOL_VERSION = '1.0.0' +export const MULTICODEC_IDENTIFY_PUSH_PROTOCOL_VERSION = '1.0.0' diff --git a/src/identify/index.js b/src/identify/index.js deleted file mode 100644 index 16d23a66..00000000 --- a/src/identify/index.js +++ /dev/null @@ -1,384 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:identify'), { - error: debug('libp2p:identify:err') -}) -const errCode = require('err-code') -const lp = require('it-length-prefixed') -const { pipe } = require('it-pipe') -const { collect, take, consume } = require('streaming-iterables') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') - -const PeerId = require('peer-id') -const { Multiaddr } = require('multiaddr') -// @ts-ignore it-buffer does not have types -const { toBuffer } = require('it-buffer') - -const Message = require('./message') - -const Envelope = require('../record/envelope') -const PeerRecord = require('../record/peer-record') - -const { - MULTICODEC_IDENTIFY, - MULTICODEC_IDENTIFY_PUSH, - IDENTIFY_PROTOCOL_VERSION, - MULTICODEC_IDENTIFY_PROTOCOL_NAME, - MULTICODEC_IDENTIFY_PUSH_PROTOCOL_NAME, - MULTICODEC_IDENTIFY_PROTOCOL_VERSION, - MULTICODEC_IDENTIFY_PUSH_PROTOCOL_VERSION -} = require('./consts') - -const { codes } = require('../errors') - -/** - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxedStream} MuxedStream - */ - -/** - * @typedef {Object} HostProperties - * @property {string} agentVersion - */ - -class IdentifyService { - /** - * @param {import('../')} libp2p - */ - static getProtocolStr (libp2p) { - return { - identifyProtocolStr: `/${libp2p._config.protocolPrefix}/${MULTICODEC_IDENTIFY_PROTOCOL_NAME}/${MULTICODEC_IDENTIFY_PROTOCOL_VERSION}`, - identifyPushProtocolStr: 
`/${libp2p._config.protocolPrefix}/${MULTICODEC_IDENTIFY_PUSH_PROTOCOL_NAME}/${MULTICODEC_IDENTIFY_PUSH_PROTOCOL_VERSION}` - } - } - - /** - * @class - * @param {Object} options - * @param {import('../')} options.libp2p - */ - constructor ({ libp2p }) { - this._libp2p = libp2p - this.peerStore = libp2p.peerStore - this.addressManager = libp2p.addressManager - this.connectionManager = libp2p.connectionManager - this.peerId = libp2p.peerId - - this.handleMessage = this.handleMessage.bind(this) - - const protocolStr = IdentifyService.getProtocolStr(libp2p) - this.identifyProtocolStr = protocolStr.identifyProtocolStr - this.identifyPushProtocolStr = protocolStr.identifyPushProtocolStr - - // Store self host metadata - this._host = { - protocolVersion: `${libp2p._config.protocolPrefix}/${IDENTIFY_PROTOCOL_VERSION}`, - ...libp2p._options.host - } - - // When a new connection happens, trigger identify - this.connectionManager.on('peer:connect', (connection) => { - this.identify(connection).catch(log.error) - }) - - // When self multiaddrs change, trigger identify-push - this.peerStore.on('change:multiaddrs', ({ peerId }) => { - if (peerId.toString() === this.peerId.toString()) { - this.pushToPeerStore().catch(err => log.error(err)) - } - }) - - // When self protocols change, trigger identify-push - this.peerStore.on('change:protocols', ({ peerId }) => { - if (peerId.toString() === this.peerId.toString()) { - this.pushToPeerStore().catch(err => log.error(err)) - } - }) - } - - async start () { - await this.peerStore.metadataBook.setValue(this.peerId, 'AgentVersion', uint8ArrayFromString(this._host.agentVersion)) - await this.peerStore.metadataBook.setValue(this.peerId, 'ProtocolVersion', uint8ArrayFromString(this._host.protocolVersion)) - } - - async stop () { - - } - - /** - * Send an Identify Push update to the list of connections - * - * @param {Connection[]} connections - * @returns {Promise} - */ - async push (connections) { - const signedPeerRecord = await this.peerStore.addressBook.getRawEnvelope(this.peerId) - const listenAddrs = this._libp2p.multiaddrs.map((ma) => ma.bytes) - const protocols = await this.peerStore.protoBook.get(this.peerId) - - const pushes = connections.map(async connection => { - try { - const { stream } = await connection.newStream(this.identifyPushProtocolStr) - - await pipe( - [Message.Identify.encode({ - listenAddrs, - signedPeerRecord, - protocols - }).finish()], - lp.encode(), - stream, - consume - ) - } catch (/** @type {any} */ err) { - // Just log errors - log.error('could not push identify update to peer', err) - } - }) - - return Promise.all(pushes) - } - - /** - * Calls `push` for all peers in the `peerStore` that are connected - */ - async pushToPeerStore () { - // Do not try to push if libp2p node is not running - if (!this._libp2p.isStarted()) { - return - } - - const connections = [] - let connection - for await (const peer of this.peerStore.getPeers()) { - if (peer.protocols.includes(this.identifyPushProtocolStr) && (connection = this.connectionManager.get(peer.id))) { - connections.push(connection) - } - } - - await this.push(connections) - } - - /** - * Requests the `Identify` message from peer associated with the given `connection`. - * If the identified peer does not match the `PeerId` associated with the connection, - * an error will be thrown. 
- * - * @async - * @param {Connection} connection - * @returns {Promise} - */ - async identify (connection) { - const { stream } = await connection.newStream(this.identifyProtocolStr) - const [data] = await pipe( - [], - stream, - lp.decode(), - take(1), - toBuffer, - collect - ) - - if (!data) { - throw errCode(new Error('No data could be retrieved'), codes.ERR_CONNECTION_ENDED) - } - - let message - try { - message = Message.Identify.decode(data) - } catch (/** @type {any} */ err) { - throw errCode(err, codes.ERR_INVALID_MESSAGE) - } - - const { - publicKey, - listenAddrs, - protocols, - observedAddr, - signedPeerRecord - } = message - - const id = await PeerId.createFromPubKey(publicKey) - - if (connection.remotePeer.toB58String() !== id.toB58String()) { - throw errCode(new Error('identified peer does not match the expected peer'), codes.ERR_INVALID_PEER) - } - - // Get the observedAddr if there is one - const cleanObservedAddr = IdentifyService.getCleanMultiaddr(observedAddr) - - try { - const envelope = await Envelope.openAndCertify(signedPeerRecord, PeerRecord.DOMAIN) - if (await this.peerStore.addressBook.consumePeerRecord(envelope)) { - await this.peerStore.protoBook.set(id, protocols) - await this.peerStore.metadataBook.setValue(id, 'AgentVersion', uint8ArrayFromString(message.agentVersion)) - await this.peerStore.metadataBook.setValue(id, 'ProtocolVersion', uint8ArrayFromString(message.protocolVersion)) - return - } - } catch (/** @type {any} */ err) { - log('received invalid envelope, discard it and fallback to listenAddrs is available', err) - } - - // LEGACY: Update peers data in PeerStore - try { - await this.peerStore.addressBook.set(id, listenAddrs.map((addr) => new Multiaddr(addr))) - } catch (/** @type {any} */ err) { - log.error('received invalid addrs', err) - } - - await this.peerStore.protoBook.set(id, protocols) - await this.peerStore.metadataBook.setValue(id, 'AgentVersion', uint8ArrayFromString(message.agentVersion)) - await this.peerStore.metadataBook.setValue(id, 'ProtocolVersion', uint8ArrayFromString(message.protocolVersion)) - - // TODO: Add and score our observed addr - log('received observed address of %s', cleanObservedAddr) - // this.addressManager.addObservedAddr(observedAddr) - } - - /** - * A handler to register with Libp2p to process identify messages. 
- * - * @param {Object} options - * @param {Connection} options.connection - * @param {MuxedStream} options.stream - * @param {string} options.protocol - * @returns {Promise|undefined} - */ - handleMessage ({ connection, stream, protocol }) { - switch (protocol) { - case this.identifyProtocolStr: - return this._handleIdentify({ connection, stream }) - case this.identifyPushProtocolStr: - return this._handlePush({ connection, stream }) - default: - log.error('cannot handle unknown protocol %s', protocol) - } - } - - /** - * Sends the `Identify` response with the Signed Peer Record - * to the requesting peer over the given `connection` - * - * @private - * @param {Object} options - * @param {MuxedStream} options.stream - * @param {Connection} options.connection - * @returns {Promise} - */ - async _handleIdentify ({ connection, stream }) { - try { - let publicKey = new Uint8Array(0) - if (this.peerId.pubKey) { - publicKey = this.peerId.pubKey.bytes - } - - const signedPeerRecord = await this.peerStore.addressBook.getRawEnvelope(this.peerId) - const protocols = await this.peerStore.protoBook.get(this.peerId) - - const message = Message.Identify.encode({ - protocolVersion: this._host.protocolVersion, - agentVersion: this._host.agentVersion, - publicKey, - listenAddrs: this._libp2p.multiaddrs.map((ma) => ma.bytes), - signedPeerRecord, - observedAddr: connection.remoteAddr.bytes, - protocols - }).finish() - - await pipe( - [message], - lp.encode(), - stream, - consume - ) - } catch (/** @type {any} */ err) { - log.error('could not respond to identify request', err) - } - } - - /** - * Reads the Identify Push message from the given `connection` - * - * @private - * @param {object} options - * @param {MuxedStream} options.stream - * @param {Connection} options.connection - * @returns {Promise} - */ - async _handlePush ({ connection, stream }) { - let message - try { - const [data] = await pipe( - [], - stream, - lp.decode(), - take(1), - toBuffer, - collect - ) - message = Message.Identify.decode(data) - } catch (/** @type {any} */ err) { - return log.error('received invalid message', err) - } - - const id = connection.remotePeer - - try { - const envelope = await Envelope.openAndCertify(message.signedPeerRecord, PeerRecord.DOMAIN) - if (await this.peerStore.addressBook.consumePeerRecord(envelope)) { - await this.peerStore.protoBook.set(id, message.protocols) - return - } - } catch (/** @type {any} */ err) { - log('received invalid envelope, discard it and fallback to listenAddrs is available', err) - } - - // LEGACY: Update peers data in PeerStore - try { - await this.peerStore.addressBook.set(id, - message.listenAddrs.map((addr) => new Multiaddr(addr))) - } catch (/** @type {any} */ err) { - log.error('received invalid addrs', err) - } - - // Update the protocols - try { - await this.peerStore.protoBook.set(id, message.protocols) - } catch (/** @type {any} */ err) { - log.error('received invalid protocols', err) - } - } - - /** - * Takes the `addr` and converts it to a Multiaddr if possible - * - * @param {Uint8Array | string} addr - * @returns {Multiaddr|null} - */ - static getCleanMultiaddr (addr) { - if (addr && addr.length > 0) { - try { - return new Multiaddr(addr) - } catch (_) { - return null - } - } - return null - } -} - -/** - * The protocols the IdentifyService supports - * - * @property multicodecs - */ -const multicodecs = { - IDENTIFY: MULTICODEC_IDENTIFY, - IDENTIFY_PUSH: MULTICODEC_IDENTIFY_PUSH -} - -IdentifyService.multicodecs = multicodecs -IdentifyService.Messsage = Message - 
-module.exports = IdentifyService diff --git a/src/identify/index.ts b/src/identify/index.ts new file mode 100644 index 00000000..bfd3b033 --- /dev/null +++ b/src/identify/index.ts @@ -0,0 +1,445 @@ +import { logger } from '@libp2p/logger' +import errCode from 'err-code' +import * as lp from 'it-length-prefixed' +import { pipe } from 'it-pipe' +import all from 'it-all' +import take from 'it-take' +import drain from 'it-drain' +import first from 'it-first' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { Multiaddr, protocols } from '@multiformats/multiaddr' +import Message from './pb/message.js' +import { RecordEnvelope, PeerRecord } from '@libp2p/peer-record' +import { + MULTICODEC_IDENTIFY, + MULTICODEC_IDENTIFY_PUSH, + IDENTIFY_PROTOCOL_VERSION, + MULTICODEC_IDENTIFY_PROTOCOL_NAME, + MULTICODEC_IDENTIFY_PUSH_PROTOCOL_NAME, + MULTICODEC_IDENTIFY_PROTOCOL_VERSION, + MULTICODEC_IDENTIFY_PUSH_PROTOCOL_VERSION +} from './consts.js' +import { codes } from '../errors.js' +import type { IncomingStreamData } from '@libp2p/interfaces/registrar' +import type { Connection } from '@libp2p/interfaces/connection' +import type { Startable } from '@libp2p/interfaces' +import { peerIdFromKeys, peerIdFromString } from '@libp2p/peer-id' +import type { Components } from '@libp2p/interfaces/components' + +const log = logger('libp2p:identify') + +export interface HostProperties { + agentVersion: string +} + +export interface IdentifyServiceInit { + protocolPrefix: string + host: HostProperties +} + +export class IdentifyService implements Startable { + private readonly components: Components + private readonly identifyProtocolStr: string + private readonly identifyPushProtocolStr: string + private readonly host: { + protocolVersion: string + agentVersion: string + } + + private started: boolean + + constructor (components: Components, init: IdentifyServiceInit) { + this.components = components + this.started = false + + this.handleMessage = this.handleMessage.bind(this) + + this.identifyProtocolStr = `/${init.protocolPrefix}/${MULTICODEC_IDENTIFY_PROTOCOL_NAME}/${MULTICODEC_IDENTIFY_PROTOCOL_VERSION}` + this.identifyPushProtocolStr = `/${init.protocolPrefix}/${MULTICODEC_IDENTIFY_PUSH_PROTOCOL_NAME}/${MULTICODEC_IDENTIFY_PUSH_PROTOCOL_VERSION}` + + // Store self host metadata + this.host = { + protocolVersion: `${init.protocolPrefix}/${IDENTIFY_PROTOCOL_VERSION}`, + ...init.host + } + + // When a new connection happens, trigger identify + this.components.getConnectionManager().addEventListener('peer:connect', (evt) => { + const connection = evt.detail + this.identify(connection).catch(log.error) + }) + + // When self multiaddrs change, trigger identify-push + this.components.getPeerStore().addEventListener('change:multiaddrs', (evt) => { + const { peerId } = evt.detail + + if (this.components.getPeerId().equals(peerId)) { + void this.pushToPeerStore().catch(err => log.error(err)) + } + }) + + // When self protocols change, trigger identify-push + this.components.getPeerStore().addEventListener('change:protocols', (evt) => { + const { peerId } = evt.detail + + if (this.components.getPeerId().equals(peerId)) { + void this.pushToPeerStore().catch(err => log.error(err)) + } + }) + } + + isStarted () { + return this.started + } + + async start () { + if (this.started) { + return + } + + await this.components.getPeerStore().metadataBook.setValue(this.components.getPeerId(), 'AgentVersion', uint8ArrayFromString(this.host.agentVersion)) + await 
this.components.getPeerStore().metadataBook.setValue(this.components.getPeerId(), 'ProtocolVersion', uint8ArrayFromString(this.host.protocolVersion)) + + await this.components.getRegistrar().handle([ + this.identifyProtocolStr, + this.identifyPushProtocolStr + ], (data) => { + void this.handleMessage(data)?.catch(err => { + log.error(err) + }) + }) + + this.started = true + } + + async stop () { + await this.components.getRegistrar().unhandle(this.identifyProtocolStr) + await this.components.getRegistrar().unhandle(this.identifyPushProtocolStr) + + this.started = false + } + + /** + * Send an Identify Push update to the list of connections + */ + async push (connections: Connection[]): Promise { + const signedPeerRecord = await this.components.getPeerStore().addressBook.getRawEnvelope(this.components.getPeerId()) + const listenAddrs = this.components.getAddressManager().getAddresses().map((ma) => ma.bytes) + const protocols = await this.components.getPeerStore().protoBook.get(this.components.getPeerId()) + + const pushes = connections.map(async connection => { + try { + const { stream } = await connection.newStream([this.identifyPushProtocolStr]) + + await pipe( + [Message.Identify.encode({ + listenAddrs, + signedPeerRecord, + protocols + }).finish()], + lp.encode(), + stream, + drain + ) + } catch (err: any) { + // Just log errors + log.error('could not push identify update to peer', err) + } + }) + + await Promise.all(pushes) + } + + /** + * Calls `push` on all peer connections + */ + async pushToPeerStore () { + // Do not try to push if we are not running + if (!this.isStarted()) { + return + } + + const connections: Connection[] = [] + + for (const [peerIdStr, conns] of this.components.getConnectionManager().getConnectionMap().entries()) { + const peerId = peerIdFromString(peerIdStr) + const peer = await this.components.getPeerStore().get(peerId) + + if (!peer.protocols.includes(this.identifyPushProtocolStr)) { + continue + } + + connections.push(...conns) + } + + await this.push(connections) + } + + /** + * Requests the `Identify` message from peer associated with the given `connection`. + * If the identified peer does not match the `PeerId` associated with the connection, + * an error will be thrown. 
+ */ + async identify (connection: Connection): Promise { + const { stream } = await connection.newStream([this.identifyProtocolStr]) + const [data] = await pipe( + [], + stream, + lp.decode(), + (source) => take(source, 1), + async (source) => await all(source) + ) + + if (data == null) { + throw errCode(new Error('No data could be retrieved'), codes.ERR_CONNECTION_ENDED) + } + + let message + try { + message = Message.Identify.decode(data) + } catch (err: any) { + throw errCode(err, codes.ERR_INVALID_MESSAGE) + } + + const { + publicKey, + listenAddrs, + protocols, + observedAddr, + signedPeerRecord, + agentVersion, + protocolVersion + } = message + + if (publicKey == null) { + throw errCode(new Error('public key was missing from identify message'), codes.ERR_MISSING_PUBLIC_KEY) + } + + const id = await peerIdFromKeys(publicKey) + + if (!connection.remotePeer.equals(id)) { + throw errCode(new Error('identified peer does not match the expected peer'), codes.ERR_INVALID_PEER) + } + + if (this.components.getPeerId().equals(id)) { + throw errCode(new Error('identified peer is our own peer id?'), codes.ERR_INVALID_PEER) + } + + // Get the observedAddr if there is one + const cleanObservedAddr = IdentifyService.getCleanMultiaddr(observedAddr) + + if (signedPeerRecord != null) { + log('received signed peer record from %p', id) + + try { + const envelope = await RecordEnvelope.openAndCertify(signedPeerRecord, PeerRecord.DOMAIN) + + if (!envelope.peerId.equals(id)) { + throw errCode(new Error('identified peer does not match the expected peer'), codes.ERR_INVALID_PEER) + } + + if (await this.components.getPeerStore().addressBook.consumePeerRecord(envelope)) { + await this.components.getPeerStore().protoBook.set(id, protocols) + + if (agentVersion != null) { + await this.components.getPeerStore().metadataBook.setValue(id, 'AgentVersion', uint8ArrayFromString(agentVersion)) + } + + if (protocolVersion != null) { + await this.components.getPeerStore().metadataBook.setValue(id, 'ProtocolVersion', uint8ArrayFromString(protocolVersion)) + } + + log('identify completed for peer %p and protocols %o', id, protocols) + + return + } + } catch (err: any) { + log('received invalid envelope, discard it and fallback to listenAddrs is available', err) + } + } else { + log('no signed peer record received from %p', id) + } + + log('falling back to legacy addresses from %p', id) + + // LEGACY: Update peers data in PeerStore + try { + await this.components.getPeerStore().addressBook.set(id, listenAddrs.map((addr) => new Multiaddr(addr))) + } catch (err: any) { + log.error('received invalid addrs', err) + } + + await this.components.getPeerStore().protoBook.set(id, protocols) + + if (agentVersion != null) { + await this.components.getPeerStore().metadataBook.setValue(id, 'AgentVersion', uint8ArrayFromString(agentVersion)) + } + + if (protocolVersion != null) { + await this.components.getPeerStore().metadataBook.setValue(id, 'ProtocolVersion', uint8ArrayFromString(protocolVersion)) + } + + log('identify completed for peer %p and protocols %o', id, protocols) + + // TODO: Add and score our observed addr + log('received observed address of %s', cleanObservedAddr?.toString()) + // this.components.getAddressManager().addObservedAddr(observedAddr) + } + + /** + * A handler to register with Libp2p to process identify messages + */ + handleMessage (data: IncomingStreamData) { + const { protocol } = data + + switch (protocol) { + case this.identifyProtocolStr: + return this._handleIdentify(data) + case 
this.identifyPushProtocolStr: + return this._handlePush(data) + default: + log.error('cannot handle unknown protocol %s', protocol) + } + } + + /** + * Sends the `Identify` response with the Signed Peer Record + * to the requesting peer over the given `connection` + */ + async _handleIdentify (data: IncomingStreamData) { + const { connection, stream } = data + try { + const publicKey = this.components.getPeerId().publicKey ?? new Uint8Array(0) + const peerData = await this.components.getPeerStore().get(this.components.getPeerId()) + const multiaddrs = this.components.getAddressManager().getAddresses().map(ma => ma.decapsulateCode(protocols('p2p').code)) + let signedPeerRecord = peerData.peerRecordEnvelope + + if (multiaddrs.length > 0 && signedPeerRecord == null) { + const peerRecord = new PeerRecord({ + peerId: this.components.getPeerId(), + multiaddrs + }) + + const envelope = await RecordEnvelope.seal(peerRecord, this.components.getPeerId()) + await this.components.getPeerStore().addressBook.consumePeerRecord(envelope) + signedPeerRecord = envelope.marshal() + } + + const message = Message.Identify.encode({ + protocolVersion: this.host.protocolVersion, + agentVersion: this.host.agentVersion, + publicKey, + listenAddrs: multiaddrs.map(addr => addr.bytes), + signedPeerRecord, + observedAddr: connection.remoteAddr.bytes, + protocols: peerData.protocols + }).finish() + + await pipe( + [message], + lp.encode(), + stream, + drain + ) + } catch (err: any) { + log.error('could not respond to identify request', err) + } + } + + /** + * Reads the Identify Push message from the given `connection` + */ + async _handlePush (data: IncomingStreamData) { + const { connection, stream } = data + + let message + try { + const data = await pipe( + [], + stream, + lp.decode(), + async (source) => await first(source) + ) + + if (data != null) { + message = Message.Identify.decode(data) + } + } catch (err: any) { + return log.error('received invalid message', err) + } + + if (message == null) { + return log.error('received invalid message') + } + + const id = connection.remotePeer + + if (this.components.getPeerId().equals(id)) { + log('received push from ourselves?') + return + } + + log('received push from %p', id) + + if (message.signedPeerRecord != null) { + log('received signedPeerRecord in push') + + try { + const envelope = await RecordEnvelope.openAndCertify(message.signedPeerRecord, PeerRecord.DOMAIN) + + if (await this.components.getPeerStore().addressBook.consumePeerRecord(envelope)) { + log('consumed signedPeerRecord sent in push') + + await this.components.getPeerStore().protoBook.set(id, message.protocols) + return + } else { + log('failed to consume signedPeerRecord sent in push') + } + } catch (err: any) { + log('received invalid envelope, discard it and fallback to listenAddrs is available', err) + } + } else { + log('did not receive signedPeerRecord in push') + } + + // LEGACY: Update peers data in PeerStore + try { + await this.components.getPeerStore().addressBook.set(id, + message.listenAddrs.map((addr) => new Multiaddr(addr))) + } catch (err: any) { + log.error('received invalid addrs', err) + } + + // Update the protocols + try { + await this.components.getPeerStore().protoBook.set(id, message.protocols) + } catch (err: any) { + log.error('received invalid protocols', err) + } + + log('handled push from %p', id) + } + + /** + * Takes the `addr` and converts it to a Multiaddr if possible + */ + static getCleanMultiaddr (addr: Uint8Array | string | null | undefined) { + if (addr != null 
&& addr.length > 0) { + try { + return new Multiaddr(addr) + } catch { + + } + } + } +} + +/** + * The protocols the IdentifyService supports + */ +export const multicodecs = { + IDENTIFY: MULTICODEC_IDENTIFY, + IDENTIFY_PUSH: MULTICODEC_IDENTIFY_PUSH +} + +export { Message } diff --git a/src/identify/message.d.ts b/src/identify/pb/message.d.ts similarity index 80% rename from src/identify/message.d.ts rename to src/identify/pb/message.d.ts index ba49c586..561dbc55 100644 --- a/src/identify/message.d.ts +++ b/src/identify/pb/message.d.ts @@ -34,25 +34,40 @@ export class Identify implements IIdentify { constructor(p?: IIdentify); /** Identify protocolVersion. */ - public protocolVersion: string; + public protocolVersion?: (string|null); /** Identify agentVersion. */ - public agentVersion: string; + public agentVersion?: (string|null); /** Identify publicKey. */ - public publicKey: Uint8Array; + public publicKey?: (Uint8Array|null); /** Identify listenAddrs. */ public listenAddrs: Uint8Array[]; /** Identify observedAddr. */ - public observedAddr: Uint8Array; + public observedAddr?: (Uint8Array|null); /** Identify protocols. */ public protocols: string[]; /** Identify signedPeerRecord. */ - public signedPeerRecord: Uint8Array; + public signedPeerRecord?: (Uint8Array|null); + + /** Identify _protocolVersion. */ + public _protocolVersion?: "protocolVersion"; + + /** Identify _agentVersion. */ + public _agentVersion?: "agentVersion"; + + /** Identify _publicKey. */ + public _publicKey?: "publicKey"; + + /** Identify _observedAddr. */ + public _observedAddr?: "observedAddr"; + + /** Identify _signedPeerRecord. */ + public _signedPeerRecord?: "signedPeerRecord"; /** * Encodes the specified Identify message. Does not implicitly {@link Identify.verify|verify} messages. diff --git a/src/identify/message.js b/src/identify/pb/message.js similarity index 77% rename from src/identify/message.js rename to src/identify/pb/message.js index f4f04feb..80764cad 100644 --- a/src/identify/message.js +++ b/src/identify/pb/message.js @@ -1,15 +1,13 @@ /*eslint-disable*/ -"use strict"; - -var $protobuf = require("protobufjs/minimal"); +import $protobuf from "protobufjs/minimal.js"; // Common aliases -var $Reader = $protobuf.Reader, $Writer = $protobuf.Writer, $util = $protobuf.util; +const $Reader = $protobuf.Reader, $Writer = $protobuf.Writer, $util = $protobuf.util; // Exported root namespace -var $root = $protobuf.roots["libp2p-identify"] || ($protobuf.roots["libp2p-identify"] = {}); +const $root = $protobuf.roots["libp2p-identify"] || ($protobuf.roots["libp2p-identify"] = {}); -$root.Identify = (function() { +export const Identify = $root.Identify = (() => { /** * Properties of an Identify. @@ -43,27 +41,27 @@ $root.Identify = (function() { /** * Identify protocolVersion. - * @member {string} protocolVersion + * @member {string|null|undefined} protocolVersion * @memberof Identify * @instance */ - Identify.prototype.protocolVersion = ""; + Identify.prototype.protocolVersion = null; /** * Identify agentVersion. - * @member {string} agentVersion + * @member {string|null|undefined} agentVersion * @memberof Identify * @instance */ - Identify.prototype.agentVersion = ""; + Identify.prototype.agentVersion = null; /** * Identify publicKey. - * @member {Uint8Array} publicKey + * @member {Uint8Array|null|undefined} publicKey * @memberof Identify * @instance */ - Identify.prototype.publicKey = $util.newBuffer([]); + Identify.prototype.publicKey = null; /** * Identify listenAddrs. 
@@ -75,11 +73,11 @@ $root.Identify = (function() { /** * Identify observedAddr. - * @member {Uint8Array} observedAddr + * @member {Uint8Array|null|undefined} observedAddr * @memberof Identify * @instance */ - Identify.prototype.observedAddr = $util.newBuffer([]); + Identify.prototype.observedAddr = null; /** * Identify protocols. @@ -91,11 +89,69 @@ $root.Identify = (function() { /** * Identify signedPeerRecord. - * @member {Uint8Array} signedPeerRecord + * @member {Uint8Array|null|undefined} signedPeerRecord * @memberof Identify * @instance */ - Identify.prototype.signedPeerRecord = $util.newBuffer([]); + Identify.prototype.signedPeerRecord = null; + + // OneOf field names bound to virtual getters and setters + let $oneOfFields; + + /** + * Identify _protocolVersion. + * @member {"protocolVersion"|undefined} _protocolVersion + * @memberof Identify + * @instance + */ + Object.defineProperty(Identify.prototype, "_protocolVersion", { + get: $util.oneOfGetter($oneOfFields = ["protocolVersion"]), + set: $util.oneOfSetter($oneOfFields) + }); + + /** + * Identify _agentVersion. + * @member {"agentVersion"|undefined} _agentVersion + * @memberof Identify + * @instance + */ + Object.defineProperty(Identify.prototype, "_agentVersion", { + get: $util.oneOfGetter($oneOfFields = ["agentVersion"]), + set: $util.oneOfSetter($oneOfFields) + }); + + /** + * Identify _publicKey. + * @member {"publicKey"|undefined} _publicKey + * @memberof Identify + * @instance + */ + Object.defineProperty(Identify.prototype, "_publicKey", { + get: $util.oneOfGetter($oneOfFields = ["publicKey"]), + set: $util.oneOfSetter($oneOfFields) + }); + + /** + * Identify _observedAddr. + * @member {"observedAddr"|undefined} _observedAddr + * @memberof Identify + * @instance + */ + Object.defineProperty(Identify.prototype, "_observedAddr", { + get: $util.oneOfGetter($oneOfFields = ["observedAddr"]), + set: $util.oneOfSetter($oneOfFields) + }); + + /** + * Identify _signedPeerRecord. + * @member {"signedPeerRecord"|undefined} _signedPeerRecord + * @memberof Identify + * @instance + */ + Object.defineProperty(Identify.prototype, "_signedPeerRecord", { + get: $util.oneOfGetter($oneOfFields = ["signedPeerRecord"]), + set: $util.oneOfSetter($oneOfFields) + }); /** * Encodes the specified Identify message. Does not implicitly {@link Identify.verify|verify} messages. @@ -256,33 +312,10 @@ $root.Identify = (function() { d.listenAddrs = []; d.protocols = []; } - if (o.defaults) { - if (o.bytes === String) - d.publicKey = ""; - else { - d.publicKey = []; - if (o.bytes !== Array) - d.publicKey = $util.newBuffer(d.publicKey); - } - if (o.bytes === String) - d.observedAddr = ""; - else { - d.observedAddr = []; - if (o.bytes !== Array) - d.observedAddr = $util.newBuffer(d.observedAddr); - } - d.protocolVersion = ""; - d.agentVersion = ""; - if (o.bytes === String) - d.signedPeerRecord = ""; - else { - d.signedPeerRecord = []; - if (o.bytes !== Array) - d.signedPeerRecord = $util.newBuffer(d.signedPeerRecord); - } - } if (m.publicKey != null && m.hasOwnProperty("publicKey")) { d.publicKey = o.bytes === String ? $util.base64.encode(m.publicKey, 0, m.publicKey.length) : o.bytes === Array ? Array.prototype.slice.call(m.publicKey) : m.publicKey; + if (o.oneofs) + d._publicKey = "publicKey"; } if (m.listenAddrs && m.listenAddrs.length) { d.listenAddrs = []; @@ -298,15 +331,23 @@ $root.Identify = (function() { } if (m.observedAddr != null && m.hasOwnProperty("observedAddr")) { d.observedAddr = o.bytes === String ? 
$util.base64.encode(m.observedAddr, 0, m.observedAddr.length) : o.bytes === Array ? Array.prototype.slice.call(m.observedAddr) : m.observedAddr; + if (o.oneofs) + d._observedAddr = "observedAddr"; } if (m.protocolVersion != null && m.hasOwnProperty("protocolVersion")) { d.protocolVersion = m.protocolVersion; + if (o.oneofs) + d._protocolVersion = "protocolVersion"; } if (m.agentVersion != null && m.hasOwnProperty("agentVersion")) { d.agentVersion = m.agentVersion; + if (o.oneofs) + d._agentVersion = "agentVersion"; } if (m.signedPeerRecord != null && m.hasOwnProperty("signedPeerRecord")) { d.signedPeerRecord = o.bytes === String ? $util.base64.encode(m.signedPeerRecord, 0, m.signedPeerRecord.length) : o.bytes === Array ? Array.prototype.slice.call(m.signedPeerRecord) : m.signedPeerRecord; + if (o.oneofs) + d._signedPeerRecord = "signedPeerRecord"; } return d; }; @@ -325,4 +366,4 @@ $root.Identify = (function() { return Identify; })(); -module.exports = $root; +export { $root as default }; diff --git a/src/identify/message.proto b/src/identify/pb/message.proto similarity index 100% rename from src/identify/message.proto rename to src/identify/pb/message.proto diff --git a/src/index.js b/src/index.js deleted file mode 100644 index f5e0f6c0..00000000 --- a/src/index.js +++ /dev/null @@ -1,813 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p'), { - error: debug('libp2p:err') -}) -const { EventEmitter } = require('events') - -const errCode = require('err-code') -const PeerId = require('peer-id') -const { Multiaddr } = require('multiaddr') -const { MemoryDatastore } = require('datastore-core/memory') -const PeerRouting = require('./peer-routing') -const ContentRouting = require('./content-routing') -const getPeer = require('./get-peer') -const { validate: validateConfig } = require('./config') -const { codes, messages } = require('./errors') - -const AddressManager = require('./address-manager') -const ConnectionManager = require('./connection-manager') -const AutoDialler = require('./connection-manager/auto-dialler') -const Circuit = require('./circuit/transport') -const Relay = require('./circuit') -const Dialer = require('./dialer') -const Keychain = require('./keychain') -const Metrics = require('./metrics') -const TransportManager = require('./transport-manager') -const Upgrader = require('./upgrader') -const PeerStore = require('./peer-store') -const PubsubAdapter = require('./pubsub-adapter') -const Registrar = require('./registrar') -const IdentifyService = require('./identify') -const FetchService = require('./fetch') -const PingService = require('./ping') -const NatManager = require('./nat-manager') -const { updateSelfPeerRecord } = require('./record/utils') - -/** - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxedStream} MuxedStream - * @typedef {import('libp2p-interfaces/src/transport/types').TransportFactory} TransportFactory - * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxerFactory} MuxerFactory - * @typedef {import('libp2p-interfaces/src/content-routing/types').ContentRouting} ContentRoutingModule - * @typedef {import('libp2p-interfaces/src/peer-discovery/types').PeerDiscoveryFactory} PeerDiscoveryFactory - * @typedef {import('libp2p-interfaces/src/peer-routing/types').PeerRouting} PeerRoutingModule - * @typedef {import('libp2p-interfaces/src/crypto/types').Crypto} Crypto - * @typedef 
{import('libp2p-interfaces/src/pubsub')} Pubsub - * @typedef {import('libp2p-interfaces/src/pubsub').PubsubOptions} PubsubOptions - * @typedef {import('interface-datastore').Datastore} Datastore - * @typedef {import('./pnet')} Protector - * @typedef {import('./types').ConnectionGater} ConnectionGater - * @typedef {Object} PersistentPeerStoreOptions - * @property {number} [threshold] - */ - -/** - * @typedef {Object} HandlerProps - * @property {Connection} connection - * @property {MuxedStream} stream - * @property {string} protocol - * - * @typedef {Object} DhtOptions - * @property {boolean} [enabled = false] - * @property {number} [kBucketSize = 20] - * @property {boolean} [clientMode] - * @property {import('libp2p-interfaces/src/types').DhtSelectors} [selectors] - * @property {import('libp2p-interfaces/src/types').DhtValidators} [validators] - * - * @typedef {Object} KeychainOptions - * @property {Datastore} [datastore] - * - * @typedef {Object} PeerStoreOptions - * @property {boolean} persistence - * - * @typedef {Object} PubsubLocalOptions - * @property {boolean} enabled - * - * @typedef {Object} MetricsOptions - * @property {boolean} enabled - * - * @typedef {Object} RelayOptions - * @property {boolean} [enabled = true] - * @property {import('./circuit').RelayAdvertiseOptions} [advertise] - * @property {import('./circuit').HopOptions} [hop] - * @property {import('./circuit').AutoRelayOptions} [autoRelay] - * - * @typedef {Object} Libp2pConfig - * @property {DhtOptions} [dht] dht module options - * @property {import('./nat-manager').NatManagerOptions} [nat] - * @property {Record} [peerDiscovery] - * @property {PubsubLocalOptions & PubsubOptions} [pubsub] pubsub module options - * @property {RelayOptions} [relay] - * @property {Record} [transport] transport options indexed by transport key - * - * @typedef {Object} Libp2pModules - * @property {TransportFactory[]} transport - * @property {MuxerFactory[]} streamMuxer - * @property {Crypto[]} connEncryption - * @property {PeerDiscoveryFactory[]} [peerDiscovery] - * @property {PeerRoutingModule[]} [peerRouting] - * @property {ContentRoutingModule[]} [contentRouting] - * @property {Object} [dht] - * @property {{new(...args: any[]): Pubsub}} [pubsub] - * @property {Protector} [connProtector] - * - * @typedef {Object} Libp2pOptions - * @property {Libp2pModules} modules libp2p modules to use - * @property {import('./address-manager').AddressManagerOptions} [addresses] - * @property {import('./connection-manager').ConnectionManagerOptions} [connectionManager] - * @property {Partial} [connectionGater] - * @property {Datastore} [datastore] - * @property {import('./dialer').DialerOptions} [dialer] - * @property {import('./identify/index').HostProperties} [host] libp2p host - * @property {KeychainOptions & import('./keychain/index').KeychainOptions} [keychain] - * @property {MetricsOptions & import('./metrics').MetricsOptions} [metrics] - * @property {import('./peer-routing').PeerRoutingOptions} [peerRouting] - * @property {PeerStoreOptions} [peerStore] - * @property {import('./transport-manager').TransportManagerOptions} [transportManager] - * @property {Libp2pConfig} [config] - * - * @typedef {Object} constructorOptions - * @property {PeerId} peerId - * - * @typedef {Object} CreateOptions - * @property {PeerId} [peerId] - * - * @extends {EventEmitter} - * @fires Libp2p#error Emitted when an error occurs - * @fires Libp2p#peer:discovery Emitted when a peer is discovered - */ -class Libp2p extends EventEmitter { - /** - * Like `new Libp2p(options)` 
except it will create a `PeerId` - * instance if one is not provided in options. - * - * @param {Libp2pOptions & CreateOptions} options - Libp2p configuration options - * @returns {Promise} - */ - static async create (options) { - if (options.peerId) { - // @ts-ignore 'Libp2pOptions & CreateOptions' is not assignable to 'Libp2pOptions & constructorOptions' - return new Libp2p(options) - } - - const peerId = await PeerId.create() - - options.peerId = peerId - // @ts-ignore 'Libp2pOptions & CreateOptions' is not assignable to 'Libp2pOptions & constructorOptions' - return new Libp2p(options) - } - - /** - * Libp2p node. - * - * @class - * @param {Libp2pOptions & constructorOptions} _options - */ - constructor (_options) { - super() - // validateConfig will ensure the config is correct, - // and add default values where appropriate - this._options = validateConfig(_options) - - /** @type {PeerId} */ - this.peerId = this._options.peerId - this.datastore = this._options.datastore - - // Create Metrics - if (this._options.metrics.enabled) { - const metrics = new Metrics({ - ...this._options.metrics - }) - - this.metrics = metrics - } - - /** @type {ConnectionGater} */ - this.connectionGater = { - denyDialPeer: async () => Promise.resolve(false), - denyDialMultiaddr: async () => Promise.resolve(false), - denyInboundConnection: async () => Promise.resolve(false), - denyOutboundConnection: async () => Promise.resolve(false), - denyInboundEncryptedConnection: async () => Promise.resolve(false), - denyOutboundEncryptedConnection: async () => Promise.resolve(false), - denyInboundUpgradedConnection: async () => Promise.resolve(false), - denyOutboundUpgradedConnection: async () => Promise.resolve(false), - filterMultiaddrForPeer: async () => Promise.resolve(true), - ...this._options.connectionGater - } - - /** @type {import('./peer-store/types').PeerStore} */ - this.peerStore = new PeerStore({ - peerId: this.peerId, - datastore: (this.datastore && this._options.peerStore.persistence) ? 
this.datastore : new MemoryDatastore(), - addressFilter: this.connectionGater.filterMultiaddrForPeer - }) - - // Addresses {listen, announce, noAnnounce} - this.addresses = this._options.addresses - this.addressManager = new AddressManager(this.peerId, this._options.addresses) - - // when addresses change, update our peer record - this.addressManager.on('change:addresses', () => { - updateSelfPeerRecord(this).catch(err => { - log.error('Error updating self peer record', err) - }) - }) - - this._modules = this._options.modules - this._config = this._options.config - this._transport = [] // Transport instances/references - this._discovery = new Map() // Discovery service instances/references - - // Create the Connection Manager - this.connectionManager = new ConnectionManager(this, { - ...this._options.connectionManager - }) - this._autodialler = new AutoDialler(this, { - enabled: this._config.peerDiscovery.autoDial, - minConnections: this._options.connectionManager.minConnections, - autoDialInterval: this._options.connectionManager.autoDialInterval - }) - - // Create keychain - if (this._options.keychain && this._options.keychain.datastore) { - log('creating keychain') - - const keychainOpts = Keychain.generateOptions() - - this.keychain = new Keychain(this._options.keychain.datastore, { - ...keychainOpts, - ...this._options.keychain - }) - - log('keychain constructed') - } - - // Setup the Upgrader - this.upgrader = new Upgrader({ - connectionGater: this.connectionGater, - localPeer: this.peerId, - metrics: this.metrics, - onConnection: (connection) => this.connectionManager.onConnect(connection), - onConnectionEnd: (connection) => this.connectionManager.onDisconnect(connection) - }) - - // Setup the transport manager - this.transportManager = new TransportManager({ - libp2p: this, - upgrader: this.upgrader, - faultTolerance: this._options.transportManager.faultTolerance - }) - - // Create the Nat Manager - this.natManager = new NatManager({ - peerId: this.peerId, - addressManager: this.addressManager, - transportManager: this.transportManager, - // @ts-ignore Nat typedef is not understood as Object - ...this._options.config.nat - }) - - // Create the Registrar - this.registrar = new Registrar({ - peerStore: this.peerStore, - connectionManager: this.connectionManager - }) - - this.handle = this.handle.bind(this) - this.registrar.handle = this.handle - - // Attach crypto channels - if (!this._modules.connEncryption || !this._modules.connEncryption.length) { - throw errCode(new Error(messages.CONN_ENCRYPTION_REQUIRED), codes.CONN_ENCRYPTION_REQUIRED) - } - const cryptos = this._modules.connEncryption - cryptos.forEach((crypto) => { - this.upgrader.cryptos.set(crypto.protocol, crypto) - }) - - this.dialer = new Dialer({ - transportManager: this.transportManager, - connectionGater: this.connectionGater, - peerStore: this.peerStore, - metrics: this.metrics, - ...this._options.dialer - }) - - this._modules.transport.forEach((Transport) => { - const key = Transport.prototype[Symbol.toStringTag] - const transportOptions = this._config.transport[key] - this.transportManager.add(key, Transport, transportOptions) - }) - - if (this._config.relay.enabled) { - // @ts-ignore Circuit prototype - this.transportManager.add(Circuit.prototype[Symbol.toStringTag], Circuit) - this.relay = new Relay(this) - } - - // Attach stream multiplexers - if (this._modules.streamMuxer) { - const muxers = this._modules.streamMuxer - muxers.forEach((muxer) => { - this.upgrader.muxers.set(muxer.multicodec, muxer) - }) - - // 
Add the identify service since we can multiplex - this.identifyService = new IdentifyService({ libp2p: this }) - } - - // Attach private network protector - if (this._modules.connProtector) { - this.upgrader.protector = this._modules.connProtector - } else if (globalThis.process !== undefined && globalThis.process.env && globalThis.process.env.LIBP2P_FORCE_PNET) { // eslint-disable-line no-undef - throw new Error('Private network is enforced, but no protector was provided') - } - - // dht provided components (peerRouting, contentRouting, dht) - if (this._modules.dht) { - const DHT = this._modules.dht - // @ts-ignore TODO: types need fixing - DHT is an `object` which has no `create` method - this._dht = DHT.create({ - libp2p: this, - ...this._config.dht - }) - } - - // Create pubsub if provided - if (this._modules.pubsub) { - const Pubsub = this._modules.pubsub - // using pubsub adapter with *DEPRECATED* handlers functionality - /** @type {Pubsub} */ - this.pubsub = PubsubAdapter(Pubsub, this, this._config.pubsub) - } - - // Attach remaining APIs - // peer and content routing will automatically get modules from _modules and _dht - this.peerRouting = new PeerRouting(this) - this.contentRouting = new ContentRouting(this) - - this._onDiscoveryPeer = this._onDiscoveryPeer.bind(this) - - this.fetchService = new FetchService(this) - this.pingService = new PingService(this) - } - - /** - * Overrides EventEmitter.emit to conditionally emit errors - * if there is a handler. If not, errors will be logged. - * - * @param {string} eventName - * @param {...any} args - * @returns {boolean} - */ - emit (eventName, ...args) { - // TODO: do we still need this? - // @ts-ignore _events does not exist in libp2p - if (eventName === 'error' && !this._events.error) { - log.error(args) - return false - } else { - return super.emit(eventName, ...args) - } - } - - /** - * Starts the libp2p node and all its subsystems - * - * @returns {Promise} - */ - async start () { - log('libp2p is starting') - - if (this.identifyService) { - await this.handle(Object.values(IdentifyService.getProtocolStr(this)), this.identifyService.handleMessage) - } - - if (this.fetchService) { - await this.handle(FetchService.PROTOCOL, this.fetchService.handleMessage) - } - - if (this.pingService) { - await this.handle(PingService.getProtocolStr(this), this.pingService.handleMessage) - } - - try { - await this._onStarting() - await this._onDidStart() - log('libp2p has started') - } catch (/** @type {any} */ err) { - this.emit('error', err) - log.error('An error occurred starting libp2p', err) - await this.stop() - throw err - } - } - - /** - * Stop the libp2p node by closing its listeners and open connections - * - * @async - * @returns {Promise} - */ - async stop () { - log('libp2p is stopping') - - try { - this._isStarted = false - - if (this.identifyService) { - await this.identifyService.stop() - } - - this.relay && this.relay.stop() - this.peerRouting.stop() - await this._autodialler.stop() - await (this._dht && this._dht.stop()) - - for (const service of this._discovery.values()) { - service.removeListener('peer', this._onDiscoveryPeer) - } - - await Promise.all(Array.from(this._discovery.values(), s => s.stop())) - - this._discovery = new Map() - - await this.connectionManager.stop() - - await Promise.all([ - this.pubsub && this.pubsub.stop(), - this.metrics && this.metrics.stop() - ]) - - await this.natManager.stop() - await this.transportManager.close() - - await this.unhandle(FetchService.PROTOCOL) - await 
this.unhandle(PingService.getProtocolStr(this)) - - this.dialer.destroy() - } catch (/** @type {any} */ err) { - if (err) { - log.error(err) - this.emit('error', err) - } - } - log('libp2p has stopped') - } - - /** - * Load keychain keys from the datastore. - * Imports the private key as 'self', if needed. - * - * @async - * @returns {Promise} - */ - async loadKeychain () { - if (!this.keychain) { - return - } - - try { - await this.keychain.findKeyByName('self') - } catch (/** @type {any} */ err) { - await this.keychain.importPeer('self', this.peerId) - } - } - - isStarted () { - return this._isStarted - } - - /** - * Gets a Map of the current connections. The keys are the stringified - * `PeerId` of the peer. The value is an array of Connections to that peer. - * - * @returns {Map} - */ - get connections () { - return this.connectionManager.connections - } - - /** - * Dials to the provided peer. If successful, the known metadata of the - * peer will be added to the nodes `peerStore` - * - * @param {PeerId|Multiaddr|string} peer - The peer to dial - * @param {object} [options] - * @param {AbortSignal} [options.signal] - * @returns {Promise} - */ - dial (peer, options) { - return this._dial(peer, options) - } - - /** - * Dials to the provided peer and tries to handshake with the given protocols in order. - * If successful, the known metadata of the peer will be added to the nodes `peerStore`, - * and the `MuxedStream` will be returned together with the successful negotiated protocol. - * - * @async - * @param {PeerId|Multiaddr|string} peer - The peer to dial - * @param {string[]|string} protocols - * @param {object} [options] - * @param {AbortSignal} [options.signal] - */ - async dialProtocol (peer, protocols, options) { - if (!protocols || !protocols.length) { - throw errCode(new Error('no protocols were provided to open a stream'), codes.ERR_INVALID_PROTOCOLS_FOR_STREAM) - } - - const connection = await this._dial(peer, options) - return connection.newStream(protocols) - } - - /** - * @async - * @param {PeerId|Multiaddr|string} peer - The peer to dial - * @param {object} [options] - * @returns {Promise} - */ - async _dial (peer, options) { - const { id, multiaddrs } = getPeer(peer) - - if (id.equals(this.peerId)) { - throw errCode(new Error('Cannot dial self'), codes.ERR_DIALED_SELF) - } - - let connection = this.connectionManager.get(id) - - if (!connection) { - connection = await this.dialer.connectToPeer(peer, options) - } else if (multiaddrs) { - await this.peerStore.addressBook.add(id, multiaddrs) - } - - return connection - } - - /** - * Get a deduplicated list of peer advertising multiaddrs by concatenating - * the listen addresses used by transports with any configured - * announce addresses as well as observed addresses reported by peers. - * - * If Announce addrs are specified, configured listen addresses will be - * ignored though observed addresses will still be included. 
- * - * @returns {Multiaddr[]} - */ - get multiaddrs () { - let addrs = this.addressManager.getAnnounceAddrs().map(ma => ma.toString()) - - if (!addrs.length) { - // no configured announce addrs, add configured listen addresses - addrs = this.transportManager.getAddrs().map(ma => ma.toString()) - } - - addrs = addrs.concat(this.addressManager.getObservedAddrs().map(ma => ma.toString())) - - const announceFilter = this._options.addresses.announceFilter - - // dedupe multiaddrs - const addrSet = new Set(addrs) - - // Create advertising list - return announceFilter(Array.from(addrSet).map(str => new Multiaddr(str))) - } - - /** - * Disconnects all connections to the given `peer` - * - * @param {PeerId|Multiaddr|string} peer - the peer to close connections to - * @returns {Promise} - */ - async hangUp (peer) { - const { id } = getPeer(peer) - - const connections = this.connectionManager.connections.get(id.toB58String()) - - if (!connections) { - return - } - - await Promise.all( - connections.map(connection => { - return connection.close() - }) - ) - } - - /** - * Sends a request to fetch the value associated with the given key from the given peer. - * - * @param {PeerId|Multiaddr} peer - * @param {string} key - * @returns {Promise} - */ - fetch (peer, key) { - return this.fetchService.fetch(peer, key) - } - - /** - * Pings the given peer in order to obtain the operation latency. - * - * @param {PeerId|Multiaddr|string} peer - The peer to ping - * @returns {Promise} - */ - ping (peer) { - const { id, multiaddrs } = getPeer(peer) - - // If received multiaddr, ping it - if (multiaddrs) { - return this.pingService.ping(multiaddrs[0]) - } - - return this.pingService.ping(id) - } - - /** - * Registers the `handler` for each protocol - * - * @param {string[]|string} protocols - * @param {(props: HandlerProps) => void} handler - */ - async handle (protocols, handler) { - protocols = Array.isArray(protocols) ? protocols : [protocols] - protocols.forEach(protocol => { - this.upgrader.protocols.set(protocol, handler) - }) - - // Add new protocols to self protocols in the Protobook - await this.peerStore.protoBook.add(this.peerId, protocols) - } - - /** - * Removes the handler for each protocol. The protocol - * will no longer be supported on streams. - * - * @param {string[]|string} protocols - */ - async unhandle (protocols) { - protocols = Array.isArray(protocols) ? 
protocols : [protocols] - protocols.forEach(protocol => { - this.upgrader.protocols.delete(protocol) - }) - - // Remove protocols from self protocols in the Protobook - await this.peerStore.protoBook.remove(this.peerId, protocols) - } - - async _onStarting () { - // Listen on the provided transports for the provided addresses - const addrs = this.addressManager.getListenAddrs() - await this.transportManager.listen(addrs) - - // Manage your NATs - this.natManager.start() - - if (this._config.pubsub.enabled) { - this.pubsub && await this.pubsub.start() - } - - // DHT subsystem - if (this._config.dht.enabled) { - this._dht && await this._dht.start() - - // TODO: this should be modified once random-walk is used as - // the other discovery modules - this._dht.on('peer', this._onDiscoveryPeer) - } - - // Start metrics if present - this.metrics && this.metrics.start() - - if (this.identifyService) { - await this.identifyService.start() - } - } - - /** - * Called when libp2p has started and before it returns - * - * @private - */ - async _onDidStart () { - this._isStarted = true - - this.peerStore.on('peer', peerId => { - this.emit('peer:discovery', peerId) - this._maybeConnect(peerId).catch(err => { - log.error(err) - }) - }) - - // Once we start, emit any peers we may have already discovered - // TODO: this should be removed, as we already discovered these peers in the past - for await (const peer of this.peerStore.getPeers()) { - this.emit('peer:discovery', peer.id) - } - - this.connectionManager.start() - await this._autodialler.start() - - // Peer discovery - await this._setupPeerDiscovery() - - // Relay - this.relay && this.relay.start() - - this.peerRouting.start() - } - - /** - * Called whenever peer discovery services emit `peer` events. - * Known peers may be emitted. - * - * @private - * @param {{ id: PeerId, multiaddrs: Multiaddr[], protocols: string[] }} peer - */ - _onDiscoveryPeer (peer) { - if (peer.id.toB58String() === this.peerId.toB58String()) { - log.error(new Error(codes.ERR_DISCOVERED_SELF)) - return - } - - peer.multiaddrs && this.peerStore.addressBook.add(peer.id, peer.multiaddrs).catch(err => log.error(err)) - peer.protocols && this.peerStore.protoBook.set(peer.id, peer.protocols).catch(err => log.error(err)) - } - - /** - * Will dial to the given `peerId` if the current number of - * connected peers is less than the configured `ConnectionManager` - * minConnections. 
- * - * @private - * @param {PeerId} peerId - */ - async _maybeConnect (peerId) { - // If auto dialing is on and we have no connection to the peer, check if we should dial - if (this._config.peerDiscovery.autoDial === true && !this.connectionManager.get(peerId)) { - const minConnections = this._options.connectionManager.minConnections || 0 - if (minConnections > this.connectionManager.size) { - log('connecting to discovered peer %s', peerId.toB58String()) - try { - await this.dialer.connectToPeer(peerId) - } catch (/** @type {any} */ err) { - log.error(`could not connect to discovered peer ${peerId.toB58String()} with ${err}`) - } - } - } - } - - /** - * Initializes and starts peer discovery services - * - * @async - * @private - */ - async _setupPeerDiscovery () { - /** - * @param {PeerDiscoveryFactory} DiscoveryService - */ - const setupService = (DiscoveryService) => { - let config = { - enabled: true // on by default - } - - if (DiscoveryService.tag && - this._config.peerDiscovery && - this._config.peerDiscovery[DiscoveryService.tag]) { - // @ts-ignore PeerDiscovery not understood as an Object for spread - config = { ...config, ...this._config.peerDiscovery[DiscoveryService.tag] } - } - - if (config.enabled && - !this._discovery.has(DiscoveryService.tag)) { // not already added - let discoveryService - - if (typeof DiscoveryService === 'function') { - // @ts-ignore DiscoveryService has no constructor type inferred - discoveryService = new DiscoveryService(Object.assign({}, config, { - peerId: this.peerId, - libp2p: this - })) - } else { - discoveryService = DiscoveryService - } - - discoveryService.on('peer', this._onDiscoveryPeer) - this._discovery.set(DiscoveryService.tag, discoveryService) - } - } - - // Discovery modules - for (const DiscoveryService of this._modules.peerDiscovery || []) { - setupService(DiscoveryService) - } - - // Transport modules with discovery - for (const Transport of this.transportManager.getTransports()) { - // @ts-ignore Transport interface does not include discovery - if (Transport.discovery) { - // @ts-ignore Transport interface does not include discovery - setupService(Transport.discovery) - } - } - - await Promise.all(Array.from(this._discovery.values(), d => d.start())) - } -} - -module.exports = Libp2p diff --git a/src/index.ts b/src/index.ts new file mode 100644 index 00000000..0f063df9 --- /dev/null +++ b/src/index.ts @@ -0,0 +1,234 @@ +import { createLibp2pNode } from './libp2p.js' +import type { AbortOptions, EventEmitter, RecursivePartial, Startable } from '@libp2p/interfaces' +import type { Multiaddr } from '@multiformats/multiaddr' +import type { FAULT_TOLERANCE } from './transport-manager.js' +import type { HostProperties } from './identify/index.js' +import type { DualDHT } from '@libp2p/interfaces/dht' +import type { Datastore } from 'interface-datastore' +import type { PeerStore, PeerStoreInit } from '@libp2p/interfaces/peer-store' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { AutoRelayConfig, RelayAdvertiseConfig } from './circuit/index.js' +import type { PeerDiscovery } from '@libp2p/interfaces/peer-discovery' +import type { Connection, ConnectionGater, ConnectionProtector, ProtocolStream } from '@libp2p/interfaces/connection' +import type { Transport } from '@libp2p/interfaces/transport' +import type { StreamMuxerFactory } from '@libp2p/interfaces/stream-muxer' +import type { ConnectionEncrypter } from '@libp2p/interfaces/connection-encrypter' +import type { PeerRouting } from '@libp2p/interfaces/peer-routing' 
+import type { ContentRouting } from '@libp2p/interfaces/content-routing' +import type { PubSub } from '@libp2p/interfaces/pubsub' +import type { ConnectionManager, StreamHandler } from '@libp2p/interfaces/registrar' +import type { MetricsInit } from '@libp2p/interfaces/metrics' +import type { PeerInfo } from '@libp2p/interfaces/peer-info' +import type { DialerInit } from '@libp2p/interfaces/dialer' +import type { KeyChain } from './keychain/index.js' + +export interface PersistentPeerStoreOptions { + threshold?: number +} + +export interface DEKConfig { + keyLength: number + iterationCount: number + salt: string + hash: string +} + +export interface KeychainConfig { + pass?: string + dek?: DEKConfig +} + +export interface MetricsConfig { + enabled?: boolean +} + +export interface HopConfig { + enabled?: boolean + active?: boolean +} + +export interface RelayConfig { + enabled: boolean + advertise: RelayAdvertiseConfig + hop: HopConfig + autoRelay: AutoRelayConfig +} + +export interface NatManagerConfig { + enabled: boolean + externalAddress?: string + localAddress?: string + description?: string + ttl?: number + keepAlive: boolean + gateway?: string +} + +export interface AddressesConfig { + listen: string[] + announce: string[] + noAnnounce: string[] + announceFilter: (multiaddrs: Multiaddr[]) => Multiaddr[] +} + +export interface ConnectionManagerConfig { + /** + * If true, try to connect to all discovered peers up to the connection manager limit + */ + autoDial?: boolean + + /** + * The maximum number of connections to keep open + */ + maxConnections: number + + /** + * The minimum number of connections to keep open + */ + minConnections: number + + /** + * How long to wait between attempting to keep our number of concurrent connections + * above minConnections + */ + autoDialInterval: number +} + +export interface TransportManagerConfig { + faultTolerance?: FAULT_TOLERANCE +} + +export interface PeerStoreConfig { + persistence?: boolean + threshold?: number +} + +export interface PeerRoutingConfig { + refreshManager: RefreshManagerConfig +} + +export interface RefreshManagerConfig { + enabled?: boolean + interval: number + bootDelay: number +} + +export interface Libp2pInit { + peerId: PeerId + host: HostProperties + addresses: AddressesConfig + connectionManager: ConnectionManagerConfig + connectionGater: Partial + transportManager: TransportManagerConfig + datastore: Datastore + dialer: DialerInit + metrics: MetricsInit + peerStore: PeerStoreInit + peerRouting: PeerRoutingConfig + keychain: KeychainConfig + protocolPrefix: string + nat: NatManagerConfig + relay: RelayConfig + + transports: Transport[] + streamMuxers?: StreamMuxerFactory[] + connectionEncryption?: ConnectionEncrypter[] + peerDiscovery?: PeerDiscovery[] + peerRouters?: PeerRouting[] + contentRouters?: ContentRouting[] + dht?: DualDHT + pubsub?: PubSub + connectionProtector?: ConnectionProtector +} + +export interface Libp2pEvents { + 'peer:discovery': CustomEvent +} + +export interface Libp2p extends Startable, EventEmitter { + peerId: PeerId + peerStore: PeerStore + peerRouting: PeerRouting + contentRouting: ContentRouting + keychain: KeyChain + connectionManager: ConnectionManager + + pubsub?: PubSub + dht?: DualDHT + + /** + * Load keychain keys from the datastore. + * Imports the private key as 'self', if needed. 
+ */ + loadKeychain: () => Promise + + /** + * Get a deduplicated list of peer advertising multiaddrs by concatenating + * the listen addresses used by transports with any configured + * announce addresses as well as observed addresses reported by peers. + * + * If Announce addrs are specified, configured listen addresses will be + * ignored though observed addresses will still be included. + */ + getMultiaddrs: () => Multiaddr[] + + /** + * Return a list of all connections this node has open, optionally filtering + * by a PeerId + */ + getConnections: (peerId?: PeerId) => Connection[] + + /** + * Return a list of all peers we currently have a connection open to + */ + getPeers: () => PeerId[] + + /** + * Dials to the provided peer. If successful, the known metadata of the + * peer will be added to the nodes `peerStore` + */ + dial: (peer: PeerId | Multiaddr, options?: AbortOptions) => Promise + + /** + * Dials to the provided peer and tries to handshake with the given protocols in order. + * If successful, the known metadata of the peer will be added to the nodes `peerStore`, + * and the `MuxedStream` will be returned together with the successful negotiated protocol. + */ + dialProtocol: (peer: PeerId | Multiaddr, protocols: string | string[], options?: AbortOptions) => Promise + + /** + * Disconnects all connections to the given `peer` + */ + hangUp: (peer: PeerId | Multiaddr | string) => Promise + + /** + * Registers the `handler` for each protocol + */ + handle: (protocol: string | string[], handler: StreamHandler) => Promise + + /** + * Removes the handler for each protocol. The protocol + * will no longer be supported on streams. + */ + unhandle: (protocols: string[] | string) => Promise + + /** + * Pings the given peer in order to obtain the operation latency + */ + ping: (peer: Multiaddr |PeerId) => Promise + + /** + * Sends a request to fetch the value associated with the given key from the given peer. + */ + fetch: (peer: PeerId | Multiaddr | string, key: string) => Promise +} + +export type Libp2pOptions = RecursivePartial + +/** + * Returns a new instance of the Libp2p interface, generating a new PeerId + * if one is not passed as part of the options. 
+ */ +export async function createLibp2p (options: Libp2pOptions): Promise { + return await createLibp2pNode(options) +} diff --git a/src/insecure/index.ts b/src/insecure/index.ts new file mode 100644 index 00000000..3c941bef --- /dev/null +++ b/src/insecure/index.ts @@ -0,0 +1,97 @@ +import { logger } from '@libp2p/logger' +import { handshake } from 'it-handshake' +import * as lp from 'it-length-prefixed' +import { UnexpectedPeerError, InvalidCryptoExchangeError } from '@libp2p/interfaces/connection-encrypter/errors' +import { Exchange, IExchange, KeyType } from './pb/proto.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { peerIdFromBytes, peerIdFromKeys } from '@libp2p/peer-id' +import type { ConnectionEncrypter, SecuredConnection } from '@libp2p/interfaces/connection-encrypter' +import type { Duplex } from 'it-stream-types' + +const log = logger('libp2p:plaintext') +const PROTOCOL = '/plaintext/2.0.0' + +function lpEncodeExchange (exchange: IExchange) { + const pb = Exchange.encode(exchange).finish() + + return lp.encode.single(pb) +} + +/** + * Encrypt connection + */ +async function encrypt (localId: PeerId, conn: Duplex, remoteId?: PeerId): Promise { + const shake = handshake(conn) + + let type = KeyType.RSA + + if (localId.type === 'Ed25519') { + type = KeyType.Ed25519 + } else if (localId.type === 'secp256k1') { + type = KeyType.Secp256k1 + } + + // Encode the public key and write it to the remote peer + shake.write( + lpEncodeExchange({ + id: localId.toBytes(), + pubkey: { + Type: type, + Data: localId.publicKey + } + }).slice() + ) + + log('write pubkey exchange to peer %p', remoteId) + + // Get the Exchange message + // @ts-expect-error needs to be generator + const response = (await lp.decode.fromReader(shake.reader).next()).value + const id = Exchange.decode(response.slice()) + log('read pubkey exchange from peer %p', remoteId) + + let peerId + try { + if (id.pubkey.Data.length === 0) { + throw new Error('Public key data too short') + } + + if (id.id == null) { + throw new Error('Remote id missing') + } + + peerId = await peerIdFromKeys(id.pubkey.Data) + + if (!peerId.equals(peerIdFromBytes(id.id))) { + throw new Error('Public key did not match id') + } + } catch (err: any) { + log.error(err) + throw new InvalidCryptoExchangeError('Remote did not provide its public key') + } + + if (remoteId != null && !peerId.equals(remoteId)) { + throw new UnexpectedPeerError() + } + + log('plaintext key exchange completed successfully with peer %p', peerId) + + shake.rest() + return { + conn: shake.stream, + remotePeer: peerId, + remoteEarlyData: new Uint8Array() + } +} + +export class Plaintext implements ConnectionEncrypter { + public protocol: string = PROTOCOL + + async secureInbound (localId: PeerId, conn: Duplex, remoteId?: PeerId): Promise { + return await encrypt(localId, conn, remoteId) + } + + async secureOutbound (localId: PeerId, conn: Duplex, remoteId: PeerId): Promise { + return await encrypt(localId, conn, remoteId) + } +} diff --git a/src/insecure/proto.d.ts b/src/insecure/pb/proto.d.ts similarity index 96% rename from src/insecure/proto.d.ts rename to src/insecure/pb/proto.d.ts index a4fbac06..191c8669 100644 --- a/src/insecure/proto.d.ts +++ b/src/insecure/pb/proto.d.ts @@ -19,11 +19,17 @@ export class Exchange implements IExchange { constructor(p?: IExchange); /** Exchange id. */ - public id: Uint8Array; + public id?: (Uint8Array|null); /** Exchange pubkey. */ public pubkey?: (IPublicKey|null); + /** Exchange _id. 
*/ + public _id?: "id"; + + /** Exchange _pubkey. */ + public _pubkey?: "pubkey"; + /** * Encodes the specified Exchange message. Does not implicitly {@link Exchange.verify|verify} messages. * @param m Exchange message or plain object to encode diff --git a/src/insecure/proto.js b/src/insecure/pb/proto.js similarity index 88% rename from src/insecure/proto.js rename to src/insecure/pb/proto.js index ab43d4a9..54fa39cf 100644 --- a/src/insecure/proto.js +++ b/src/insecure/pb/proto.js @@ -1,15 +1,13 @@ /*eslint-disable*/ -"use strict"; - -var $protobuf = require("protobufjs/minimal"); +import $protobuf from "protobufjs/minimal.js"; // Common aliases -var $Reader = $protobuf.Reader, $Writer = $protobuf.Writer, $util = $protobuf.util; +const $Reader = $protobuf.Reader, $Writer = $protobuf.Writer, $util = $protobuf.util; // Exported root namespace -var $root = $protobuf.roots["libp2p-plaintext"] || ($protobuf.roots["libp2p-plaintext"] = {}); +const $root = $protobuf.roots["libp2p-plaintext"] || ($protobuf.roots["libp2p-plaintext"] = {}); -$root.Exchange = (function() { +export const Exchange = $root.Exchange = (() => { /** * Properties of an Exchange. @@ -36,11 +34,11 @@ $root.Exchange = (function() { /** * Exchange id. - * @member {Uint8Array} id + * @member {Uint8Array|null|undefined} id * @memberof Exchange * @instance */ - Exchange.prototype.id = $util.newBuffer([]); + Exchange.prototype.id = null; /** * Exchange pubkey. @@ -50,6 +48,31 @@ $root.Exchange = (function() { */ Exchange.prototype.pubkey = null; + // OneOf field names bound to virtual getters and setters + let $oneOfFields; + + /** + * Exchange _id. + * @member {"id"|undefined} _id + * @memberof Exchange + * @instance + */ + Object.defineProperty(Exchange.prototype, "_id", { + get: $util.oneOfGetter($oneOfFields = ["id"]), + set: $util.oneOfSetter($oneOfFields) + }); + + /** + * Exchange _pubkey. + * @member {"pubkey"|undefined} _pubkey + * @memberof Exchange + * @instance + */ + Object.defineProperty(Exchange.prototype, "_pubkey", { + get: $util.oneOfGetter($oneOfFields = ["pubkey"]), + set: $util.oneOfSetter($oneOfFields) + }); + /** * Encodes the specified Exchange message. Does not implicitly {@link Exchange.verify|verify} messages. * @function encode @@ -140,21 +163,15 @@ $root.Exchange = (function() { if (!o) o = {}; var d = {}; - if (o.defaults) { - if (o.bytes === String) - d.id = ""; - else { - d.id = []; - if (o.bytes !== Array) - d.id = $util.newBuffer(d.id); - } - d.pubkey = null; - } if (m.id != null && m.hasOwnProperty("id")) { d.id = o.bytes === String ? $util.base64.encode(m.id, 0, m.id.length) : o.bytes === Array ? Array.prototype.slice.call(m.id) : m.id; + if (o.oneofs) + d._id = "id"; } if (m.pubkey != null && m.hasOwnProperty("pubkey")) { d.pubkey = $root.PublicKey.toObject(m.pubkey, o); + if (o.oneofs) + d._pubkey = "pubkey"; } return d; }; @@ -182,8 +199,8 @@ $root.Exchange = (function() { * @property {number} Secp256k1=2 Secp256k1 value * @property {number} ECDSA=3 ECDSA value */ -$root.KeyType = (function() { - var valuesById = {}, values = Object.create(valuesById); +export const KeyType = $root.KeyType = (() => { + const valuesById = {}, values = Object.create(valuesById); values[valuesById[0] = "RSA"] = 0; values[valuesById[1] = "Ed25519"] = 1; values[valuesById[2] = "Secp256k1"] = 2; @@ -191,7 +208,7 @@ $root.KeyType = (function() { return values; })(); -$root.PublicKey = (function() { +export const PublicKey = $root.PublicKey = (() => { /** * Properties of a PublicKey. 
@@ -368,4 +385,4 @@ $root.PublicKey = (function() { return PublicKey; })(); -module.exports = $root; +export { $root as default }; diff --git a/src/insecure/proto.proto b/src/insecure/pb/proto.proto similarity index 100% rename from src/insecure/proto.proto rename to src/insecure/pb/proto.proto diff --git a/src/insecure/plaintext.js b/src/insecure/plaintext.js deleted file mode 100644 index 99921e53..00000000 --- a/src/insecure/plaintext.js +++ /dev/null @@ -1,95 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:plaintext'), { - error: debug('libp2p:plaintext:err') -}) -// @ts-ignore it-handshake do not export types -const handshake = require('it-handshake') -const lp = require('it-length-prefixed') -const PeerId = require('peer-id') -const { UnexpectedPeerError, InvalidCryptoExchangeError } = require('libp2p-interfaces/src/crypto/errors') - -const { Exchange, KeyType } = require('./proto') -const protocol = '/plaintext/2.0.0' - -/** - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - */ - -/** - * @param {import('./proto').IExchange} exchange - */ -function lpEncodeExchange (exchange) { - const pb = Exchange.encode(exchange).finish() - // @ts-ignore TODO: Uint8Array not assignable to Buffer - return lp.encode.single(pb) -} - -/** - * Encrypt connection. - * - * @param {PeerId} localId - * @param {Connection} conn - * @param {PeerId} [remoteId] - */ -async function encrypt (localId, conn, remoteId) { - const shake = handshake(conn) - - // Encode the public key and write it to the remote peer - shake.write(lpEncodeExchange({ - id: localId.toBytes(), - pubkey: { - Type: KeyType.RSA, // TODO: dont hard code - Data: localId.marshalPubKey() - } - })) - - log('write pubkey exchange to peer %j', remoteId) - - // Get the Exchange message - const response = (await lp.decode.fromReader(shake.reader).next()).value - const id = Exchange.decode(response.slice()) - log('read pubkey exchange from peer %j', remoteId) - - let peerId - try { - peerId = await PeerId.createFromPubKey(id.pubkey.Data) - } catch (/** @type {any} */ err) { - log.error(err) - throw new InvalidCryptoExchangeError('Remote did not provide its public key') - } - - if (remoteId && !peerId.equals(remoteId)) { - throw new UnexpectedPeerError() - } - - log('plaintext key exchange completed successfully with peer %j', peerId) - - shake.rest() - return { - conn: shake.stream, - remotePeer: peerId - } -} - -module.exports = - { - protocol, - /** - * @param {PeerId} localId - * @param {Connection} conn - * @param {PeerId | undefined} remoteId - */ - secureInbound: (localId, conn, remoteId) => { - return encrypt(localId, conn, remoteId) - }, - /** - * @param {PeerId} localId - * @param {Connection} conn - * @param {PeerId | undefined} remoteId - */ - secureOutbound: (localId, conn, remoteId) => { - return encrypt(localId, conn, remoteId) - } - } diff --git a/src/keychain/cms.js b/src/keychain/cms.ts similarity index 50% rename from src/keychain/cms.js rename to src/keychain/cms.ts index f929cb4e..8a26c338 100644 --- a/src/keychain/cms.js +++ b/src/keychain/cms.ts @@ -1,18 +1,18 @@ -'use strict' +import 'node-forge/lib/pkcs7.js' +import 'node-forge/lib/pbe.js' +// @ts-expect-error types are missing +import forge from 'node-forge/lib/forge.js' +import { certificateForKey, findAsync } from './util.js' +import errCode from 'err-code' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 
'uint8arrays/to-string' +import { codes } from '../errors.js' +import { logger } from '@libp2p/logger' +import type { KeyChain } from './index.js' -// @ts-ignore node-forge types not exported -require('node-forge/lib/pkcs7') -// @ts-ignore node-forge types not exported -require('node-forge/lib/pbe') -// @ts-ignore node-forge types not exported -const forge = require('node-forge/lib/forge') -const { certificateForKey, findAsync } = require('./util') -const errcode = require('err-code') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') -const { codes } = require('../errors') +const log = logger('libp2p:keychain:cms') -const privates = new WeakMap() +const privates = new WeakMap() /** * Cryptographic Message Syntax (aka PKCS #7) @@ -23,16 +23,15 @@ const privates = new WeakMap() * * See RFC 5652 for all the details. */ -class CMS { +export class CMS { + private readonly keychain: KeyChain + /** * Creates a new instance with a keychain - * - * @param {import('./index')} keychain - the available keys - * @param {string} dek */ - constructor (keychain, dek) { - if (!keychain) { - throw errcode(new Error('keychain is required'), codes.ERR_KEYCHAIN_REQUIRED) + constructor (keychain: KeyChain, dek: string) { + if (keychain == null) { + throw errCode(new Error('keychain is required'), codes.ERR_KEYCHAIN_REQUIRED) } this.keychain = keychain @@ -43,20 +42,21 @@ class CMS { * Creates some protected data. * * The output Uint8Array contains the PKCS #7 message in DER. - * - * @param {string} name - The local key name. - * @param {Uint8Array} plain - The data to encrypt. - * @returns {Promise} */ - async encrypt (name, plain) { + async encrypt (name: string, plain: Uint8Array): Promise { if (!(plain instanceof Uint8Array)) { - throw errcode(new Error('Plain data must be a Uint8Array'), codes.ERR_INVALID_PARAMETERS) + throw errCode(new Error('Plain data must be a Uint8Array'), codes.ERR_INVALID_PARAMETERS) } const key = await this.keychain.findKeyByName(name) - const pem = await this.keychain._getPrivateKey(name) - /** @type {string} */ - const dek = privates.get(this).dek + const pem = await this.keychain.getPrivateKey(name) + const cached = privates.get(this) + + if (cached == null) { + throw errCode(new Error('dek missing'), codes.ERR_INVALID_PARAMETERS) + } + + const dek = cached.dek const privateKey = forge.pki.decryptRsaPrivateKey(pem, dek) const certificate = await certificateForKey(key, privateKey) @@ -76,71 +76,75 @@ class CMS { * * The keychain must contain one of the keys used to encrypt the data. If none of the keys * exists, an Error is returned with the property 'missingKeys'. It is array of key ids. - * - * @param {Uint8Array} cmsData - The CMS encrypted data to decrypt. 
- * @returns {Promise} */ - async decrypt (cmsData) { + async decrypt (cmsData: Uint8Array): Promise { if (!(cmsData instanceof Uint8Array)) { - throw errcode(new Error('CMS data is required'), codes.ERR_INVALID_PARAMETERS) + throw errCode(new Error('CMS data is required'), codes.ERR_INVALID_PARAMETERS) } - let cms + let cms: any try { const buf = forge.util.createBuffer(uint8ArrayToString(cmsData, 'ascii')) const obj = forge.asn1.fromDer(buf) - // @ts-ignore not defined + cms = forge.pkcs7.messageFromAsn1(obj) - } catch (/** @type {any} */ err) { - throw errcode(new Error('Invalid CMS: ' + err.message), codes.ERR_INVALID_CMS) + } catch (err: any) { + log.error(err) + throw errCode(new Error('Invalid CMS'), codes.ERR_INVALID_CMS) } // Find a recipient whose key we hold. We only deal with recipient certs // issued by ipfs (O=ipfs). - const recipients = cms.recipients - // @ts-ignore cms types not defined + const recipients: any = cms.recipients + // @ts-expect-error cms types not defined .filter(r => r.issuer.find(a => a.shortName === 'O' && a.value === 'ipfs')) - // @ts-ignore cms types not defined + // @ts-expect-error cms types not defined .filter(r => r.issuer.find(a => a.shortName === 'CN')) - // @ts-ignore cms types not defined + // @ts-expect-error cms types not defined .map(r => { return { recipient: r, - // @ts-ignore cms types not defined + // @ts-expect-error cms types not defined keyId: r.issuer.find(a => a.shortName === 'CN').value } }) - const r = await findAsync(recipients, async (recipient) => { + const r = await findAsync(recipients, async (recipient: any) => { try { const key = await this.keychain.findKeyById(recipient.keyId) - if (key) return true - } catch (/** @type {any} */ err) { + if (key != null) { + return true + } + } catch (err: any) { return false } return false }) - if (!r) { - // @ts-ignore cms types not defined - const missingKeys = recipients.map(r => r.keyId) - throw errcode(new Error('Decryption needs one of the key(s): ' + missingKeys.join(', ')), codes.ERR_MISSING_KEYS, { + if (r == null) { + // @ts-expect-error cms types not defined + const missingKeys: string[] = recipients.map(r => r.keyId) + throw errCode(new Error(`Decryption needs one of the key(s): ${missingKeys.join(', ')}`), codes.ERR_MISSING_KEYS, { missingKeys }) } const key = await this.keychain.findKeyById(r.keyId) - if (!key) { - throw errcode(new Error('No key available to decrypto'), codes.ERR_NO_KEY) + if (key == null) { + throw errCode(new Error('No key available to decrypt'), codes.ERR_NO_KEY) } - const pem = await this.keychain._getPrivateKey(key.name) - const dek = privates.get(this).dek + const pem = await this.keychain.getPrivateKey(key.name) + const cached = privates.get(this) + + if (cached == null) { + throw errCode(new Error('dek missing'), codes.ERR_INVALID_PARAMETERS) + } + + const dek = cached.dek const privateKey = forge.pki.decryptRsaPrivateKey(pem, dek) cms.decrypt(r.recipient, privateKey) return uint8ArrayFromString(cms.content.getBytes(), 'ascii') } } - -module.exports = CMS diff --git a/src/keychain/index.js b/src/keychain/index.js deleted file mode 100644 index f6a17ab6..00000000 --- a/src/keychain/index.js +++ /dev/null @@ -1,561 +0,0 @@ -/* eslint max-nested-callbacks: ["error", 5] */ -'use strict' -const debug = require('debug') -const log = Object.assign(debug('libp2p:keychain'), { - error: debug('libp2p:keychain:err') -}) -const sanitize = require('sanitize-filename') -const mergeOptions = require('merge-options') -const crypto = require('libp2p-crypto') -const 
{ Key } = require('interface-datastore/key') -const CMS = require('./cms') -const errcode = require('err-code') -const { codes } = require('../errors') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') - -// @ts-ignore node-forge sha512 types not exported -require('node-forge/lib/sha512') - -/** - * @typedef {import('peer-id')} PeerId - * @typedef {import('interface-datastore').Datastore} Datastore - */ - -/** - * @typedef {Object} DekOptions - * @property {string} hash - * @property {string} salt - * @property {number} iterationCount - * @property {number} keyLength - * - * @typedef {Object} KeychainOptions - * @property {string} [pass] - * @property {DekOptions} [dek] - */ - -/** - * Information about a key. - * - * @typedef {Object} KeyInfo - * @property {string} id - The universally unique key id. - * @property {string} name - The local key name. - */ - -const keyPrefix = '/pkcs8/' -const infoPrefix = '/info/' -const privates = new WeakMap() - -// NIST SP 800-132 -const NIST = { - minKeyLength: 112 / 8, - minSaltLength: 128 / 8, - minIterationCount: 1000 -} - -const defaultOptions = { - // See https://cryptosense.com/parametesr-choice-for-pbkdf2/ - dek: { - keyLength: 512 / 8, - iterationCount: 10000, - salt: 'you should override this value with a crypto secure random number', - hash: 'sha2-512' - } -} - -/** - * @param {string} name - */ -function validateKeyName (name) { - if (!name) return false - if (typeof name !== 'string') return false - return name === sanitize(name.trim()) -} - -/** - * Throws an error after a delay - * - * This assumes than an error indicates that the keychain is under attack. Delay returning an - * error to make brute force attacks harder. - * - * @param {string|Error} err - The error - * @returns {Promise} - * @private - */ -async function throwDelayed (err) { - const min = 200 - const max = 1000 - const delay = Math.random() * (max - min) + min - - await new Promise(resolve => setTimeout(resolve, delay)) - throw err -} - -/** - * Converts a key name into a datastore name. - * - * @param {string} name - * @returns {Key} - * @private - */ -function DsName (name) { - return new Key(keyPrefix + name) -} - -/** - * Converts a key name into a datastore info name. - * - * @param {string} name - * @returns {Key} - * @private - */ -function DsInfoName (name) { - return new Key(infoPrefix + name) -} - -/** - * Manages the lifecycle of a key. Keys are encrypted at rest using PKCS #8. - * - * A key in the store has two entries - * - '/info/*key-name*', contains the KeyInfo for the key - * - '/pkcs8/*key-name*', contains the PKCS #8 for the key - * - */ -class Keychain { - /** - * Creates a new instance of a key chain. - * - * @param {Datastore} store - where the key are. 
- * @param {KeychainOptions} options - * @class - */ - constructor (store, options) { - if (!store) { - throw new Error('store is required') - } - this.store = store - - this.opts = mergeOptions(defaultOptions, options) - - // Enforce NIST SP 800-132 - if (this.opts.pass && this.opts.pass.length < 20) { - throw new Error('pass must be least 20 characters') - } - if (this.opts.dek.keyLength < NIST.minKeyLength) { - throw new Error(`dek.keyLength must be least ${NIST.minKeyLength} bytes`) - } - if (this.opts.dek.salt.length < NIST.minSaltLength) { - throw new Error(`dek.saltLength must be least ${NIST.minSaltLength} bytes`) - } - if (this.opts.dek.iterationCount < NIST.minIterationCount) { - throw new Error(`dek.iterationCount must be least ${NIST.minIterationCount}`) - } - - const dek = this.opts.pass - ? crypto.pbkdf2( - this.opts.pass, - this.opts.dek.salt, - this.opts.dek.iterationCount, - this.opts.dek.keyLength, - this.opts.dek.hash) - : '' - - privates.set(this, { dek }) - } - - /** - * Gets an object that can encrypt/decrypt protected data - * using the Cryptographic Message Syntax (CMS). - * - * CMS describes an encapsulation syntax for data protection. It - * is used to digitally sign, digest, authenticate, or encrypt - * arbitrary message content. - * - * @returns {CMS} - */ - get cms () { - return new CMS(this, privates.get(this).dek) - } - - /** - * Generates the options for a keychain. A random salt is produced. - * - * @returns {Object} - */ - static generateOptions () { - const options = Object.assign({}, defaultOptions) - const saltLength = Math.ceil(NIST.minSaltLength / 3) * 3 // no base64 padding - options.dek.salt = uint8ArrayToString(crypto.randomBytes(saltLength), 'base64') - return options - } - - /** - * Gets an object that can encrypt/decrypt protected data. - * The default options for a keychain. - * - * @returns {Object} - */ - static get options () { - return defaultOptions - } - - /** - * Create a new key. - * - * @param {string} name - The local key name; cannot already exist. - * @param {string} type - One of the key types; 'rsa'. - * @param {number} [size = 2048] - The key size in bits. Used for rsa keys only. 
- * @returns {Promise} - */ - async createKey (name, type, size = 2048) { - const self = this - - if (!validateKeyName(name) || name === 'self') { - return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME)) - } - - if (typeof type !== 'string') { - return throwDelayed(errcode(new Error(`Invalid key type '${type}'`), codes.ERR_INVALID_KEY_TYPE)) - } - - const dsname = DsName(name) - const exists = await self.store.has(dsname) - if (exists) return throwDelayed(errcode(new Error(`Key '${name}' already exists`), codes.ERR_KEY_ALREADY_EXISTS)) - - switch (type.toLowerCase()) { - case 'rsa': - if (!Number.isSafeInteger(size) || size < 2048) { - return throwDelayed(errcode(new Error(`Invalid RSA key size ${size}`), codes.ERR_INVALID_KEY_SIZE)) - } - break - default: - break - } - - let keyInfo - try { - // @ts-ignore Differences between several crypto return types need to be fixed in libp2p-crypto - const keypair = await crypto.keys.generateKeyPair(type, size) - const kid = await keypair.id() - /** @type {string} */ - const dek = privates.get(this).dek - const pem = await keypair.export(dek) - keyInfo = { - name: name, - id: kid - } - const batch = self.store.batch() - batch.put(dsname, uint8ArrayFromString(pem)) - batch.put(DsInfoName(name), uint8ArrayFromString(JSON.stringify(keyInfo))) - - await batch.commit() - } catch (/** @type {any} */ err) { - return throwDelayed(err) - } - - return keyInfo - } - - /** - * List all the keys. - * - * @returns {Promise} - */ - async listKeys () { - const self = this - const query = { - prefix: infoPrefix - } - - const info = [] - for await (const value of self.store.query(query)) { - info.push(JSON.parse(uint8ArrayToString(value.value))) - } - - return info - } - - /** - * Find a key by it's id. - * - * @param {string} id - The universally unique key identifier. - * @returns {Promise} - */ - async findKeyById (id) { - try { - const keys = await this.listKeys() - return keys.find((k) => k.id === id) - } catch (/** @type {any} */ err) { - return throwDelayed(err) - } - } - - /** - * Find a key by it's name. - * - * @param {string} name - The local key name. - * @returns {Promise} - */ - async findKeyByName (name) { - if (!validateKeyName(name)) { - return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME)) - } - - const dsname = DsInfoName(name) - try { - const res = await this.store.get(dsname) - return JSON.parse(uint8ArrayToString(res)) - } catch (/** @type {any} */ err) { - return throwDelayed(errcode(new Error(`Key '${name}' does not exist. ${err.message}`), codes.ERR_KEY_NOT_FOUND)) - } - } - - /** - * Remove an existing key. - * - * @param {string} name - The local key name; must already exist. - * @returns {Promise} - */ - async removeKey (name) { - const self = this - if (!validateKeyName(name) || name === 'self') { - return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME)) - } - const dsname = DsName(name) - const keyInfo = await self.findKeyByName(name) - const batch = self.store.batch() - batch.delete(dsname) - batch.delete(DsInfoName(name)) - await batch.commit() - return keyInfo - } - - /** - * Rename a key - * - * @param {string} oldName - The old local key name; must already exist. - * @param {string} newName - The new local key name; must not already exist. 
- * @returns {Promise} - */ - async renameKey (oldName, newName) { - const self = this - if (!validateKeyName(oldName) || oldName === 'self') { - return throwDelayed(errcode(new Error(`Invalid old key name '${oldName}'`), codes.ERR_OLD_KEY_NAME_INVALID)) - } - if (!validateKeyName(newName) || newName === 'self') { - return throwDelayed(errcode(new Error(`Invalid new key name '${newName}'`), codes.ERR_NEW_KEY_NAME_INVALID)) - } - const oldDsname = DsName(oldName) - const newDsname = DsName(newName) - const oldInfoName = DsInfoName(oldName) - const newInfoName = DsInfoName(newName) - - const exists = await self.store.has(newDsname) - if (exists) return throwDelayed(errcode(new Error(`Key '${newName}' already exists`), codes.ERR_KEY_ALREADY_EXISTS)) - - try { - const pem = await self.store.get(oldDsname) - const res = await self.store.get(oldInfoName) - - const keyInfo = JSON.parse(uint8ArrayToString(res)) - keyInfo.name = newName - const batch = self.store.batch() - batch.put(newDsname, pem) - batch.put(newInfoName, uint8ArrayFromString(JSON.stringify(keyInfo))) - batch.delete(oldDsname) - batch.delete(oldInfoName) - await batch.commit() - return keyInfo - } catch (/** @type {any} */ err) { - return throwDelayed(err) - } - } - - /** - * Export an existing key as a PEM encrypted PKCS #8 string - * - * @param {string} name - The local key name; must already exist. - * @param {string} password - The password - * @returns {Promise} - */ - async exportKey (name, password) { - if (!validateKeyName(name)) { - return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME)) - } - if (!password) { - return throwDelayed(errcode(new Error('Password is required'), codes.ERR_PASSWORD_REQUIRED)) - } - - const dsname = DsName(name) - try { - const res = await this.store.get(dsname) - const pem = uint8ArrayToString(res) - /** @type {string} */ - const dek = privates.get(this).dek - const privateKey = await crypto.keys.import(pem, dek) - return privateKey.export(password) - } catch (/** @type {any} */ err) { - return throwDelayed(err) - } - } - - /** - * Import a new key from a PEM encoded PKCS #8 string - * - * @param {string} name - The local key name; must not already exist. - * @param {string} pem - The PEM encoded PKCS #8 string - * @param {string} password - The password. 
- * @returns {Promise} - */ - async importKey (name, pem, password) { - const self = this - if (!validateKeyName(name) || name === 'self') { - return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME)) - } - if (!pem) { - return throwDelayed(errcode(new Error('PEM encoded key is required'), codes.ERR_PEM_REQUIRED)) - } - const dsname = DsName(name) - const exists = await self.store.has(dsname) - if (exists) return throwDelayed(errcode(new Error(`Key '${name}' already exists`), codes.ERR_KEY_ALREADY_EXISTS)) - - let privateKey - try { - privateKey = await crypto.keys.import(pem, password) - } catch (/** @type {any} */ err) { - return throwDelayed(errcode(new Error('Cannot read the key, most likely the password is wrong'), codes.ERR_CANNOT_READ_KEY)) - } - - let kid - try { - kid = await privateKey.id() - /** @type {string} */ - const dek = privates.get(this).dek - pem = await privateKey.export(dek) - } catch (/** @type {any} */ err) { - return throwDelayed(err) - } - - const keyInfo = { - name: name, - id: kid - } - const batch = self.store.batch() - batch.put(dsname, uint8ArrayFromString(pem)) - batch.put(DsInfoName(name), uint8ArrayFromString(JSON.stringify(keyInfo))) - await batch.commit() - - return keyInfo - } - - /** - * Import a peer key - * - * @param {string} name - The local key name; must not already exist. - * @param {PeerId} peer - The PEM encoded PKCS #8 string - * @returns {Promise} - */ - async importPeer (name, peer) { - const self = this - if (!validateKeyName(name)) { - return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME)) - } - if (!peer || !peer.privKey) { - return throwDelayed(errcode(new Error('Peer.privKey is required'), codes.ERR_MISSING_PRIVATE_KEY)) - } - - const privateKey = peer.privKey - const dsname = DsName(name) - const exists = await self.store.has(dsname) - if (exists) return throwDelayed(errcode(new Error(`Key '${name}' already exists`), codes.ERR_KEY_ALREADY_EXISTS)) - - try { - const kid = await privateKey.id() - /** @type {string} */ - const dek = privates.get(this).dek - const pem = await privateKey.export(dek) - const keyInfo = { - name: name, - id: kid - } - const batch = self.store.batch() - batch.put(dsname, uint8ArrayFromString(pem)) - batch.put(DsInfoName(name), uint8ArrayFromString(JSON.stringify(keyInfo))) - await batch.commit() - return keyInfo - } catch (/** @type {any} */ err) { - return throwDelayed(err) - } - } - - /** - * Gets the private key as PEM encoded PKCS #8 string. - * - * @param {string} name - * @returns {Promise} - */ - async _getPrivateKey (name) { - if (!validateKeyName(name)) { - return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME)) - } - - try { - const dsname = DsName(name) - const res = await this.store.get(dsname) - return uint8ArrayToString(res) - } catch (/** @type {any} */ err) { - return throwDelayed(errcode(new Error(`Key '${name}' does not exist. 
${err.message}`), codes.ERR_KEY_NOT_FOUND)) - } - } - - /** - * Rotate keychain password and re-encrypt all assosciated keys - * - * @param {string} oldPass - The old local keychain password - * @param {string} newPass - The new local keychain password - */ - async rotateKeychainPass (oldPass, newPass) { - if (typeof oldPass !== 'string') { - return throwDelayed(errcode(new Error(`Invalid old pass type '${typeof oldPass}'`), codes.ERR_INVALID_OLD_PASS_TYPE)) - } - if (typeof newPass !== 'string') { - return throwDelayed(errcode(new Error(`Invalid new pass type '${typeof newPass}'`), codes.ERR_INVALID_NEW_PASS_TYPE)) - } - if (newPass.length < 20) { - return throwDelayed(errcode(new Error(`Invalid pass length ${newPass.length}`), codes.ERR_INVALID_PASS_LENGTH)) - } - log('recreating keychain') - const oldDek = privates.get(this).dek - this.opts.pass = newPass - const newDek = newPass - ? crypto.pbkdf2( - newPass, - this.opts.dek.salt, - this.opts.dek.iterationCount, - this.opts.dek.keyLength, - this.opts.dek.hash) - : '' - privates.set(this, { dek: newDek }) - const keys = await this.listKeys() - for (const key of keys) { - const res = await this.store.get(DsName(key.name)) - const pem = uint8ArrayToString(res) - const privateKey = await crypto.keys.import(pem, oldDek) - const password = newDek.toString() - const keyAsPEM = await privateKey.export(password) - - // Update stored key - const batch = this.store.batch() - const keyInfo = { - name: key.name, - id: key.id - } - batch.put(DsName(key.name), uint8ArrayFromString(keyAsPEM)) - batch.put(DsInfoName(key.name), uint8ArrayFromString(JSON.stringify(keyInfo))) - await batch.commit() - } - log('keychain reconstructed') - } -} - -module.exports = Keychain diff --git a/src/keychain/index.ts b/src/keychain/index.ts new file mode 100644 index 00000000..b95cfb8a --- /dev/null +++ b/src/keychain/index.ts @@ -0,0 +1,588 @@ +/* eslint max-nested-callbacks: ["error", 5] */ + +import { logger } from '@libp2p/logger' +import sanitize from 'sanitize-filename' +import mergeOptions from 'merge-options' +import { Key } from 'interface-datastore/key' +import { CMS } from './cms.js' +import errCode from 'err-code' +import { codes } from '../errors.js' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import 'node-forge/lib/sha512.js' +import { generateKeyPair, importKey, unmarshalPrivateKey } from '@libp2p/crypto/keys' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { Components } from '@libp2p/interfaces/components' +import { pbkdf2, randomBytes } from '@libp2p/crypto' + +const log = logger('libp2p:keychain') + +export interface DekOptions { + hash: string + salt: string + iterationCount: number + keyLength: number +} + +export interface KeyChainInit { + pass?: string + dek?: DekOptions +} + +/** + * Information about a key. + */ +export interface KeyInfo { + /** + * The universally unique key id + */ + id: string + + /** + * The local key name. 
+ */ + name: string +} + +const keyPrefix = '/pkcs8/' +const infoPrefix = '/info/' +const privates = new WeakMap() + +// NIST SP 800-132 +const NIST = { + minKeyLength: 112 / 8, + minSaltLength: 128 / 8, + minIterationCount: 1000 +} + +const defaultOptions = { + // See https://cryptosense.com/parametesr-choice-for-pbkdf2/ + dek: { + keyLength: 512 / 8, + iterationCount: 10000, + salt: 'you should override this value with a crypto secure random number', + hash: 'sha2-512' + } +} + +function validateKeyName (name: string) { + if (name == null) { + return false + } + if (typeof name !== 'string') { + return false + } + return name === sanitize(name.trim()) && name.length > 0 +} + +/** + * Throws an error after a delay + * + * This assumes than an error indicates that the keychain is under attack. Delay returning an + * error to make brute force attacks harder. + */ +async function randomDelay () { + const min = 200 + const max = 1000 + const delay = Math.random() * (max - min) + min + + await new Promise(resolve => setTimeout(resolve, delay)) +} + +/** + * Converts a key name into a datastore name + */ +function DsName (name: string) { + return new Key(keyPrefix + name) +} + +/** + * Converts a key name into a datastore info name + */ +function DsInfoName (name: string) { + return new Key(infoPrefix + name) +} + +/** + * Manages the lifecycle of a key. Keys are encrypted at rest using PKCS #8. + * + * A key in the store has two entries + * - '/info/*key-name*', contains the KeyInfo for the key + * - '/pkcs8/*key-name*', contains the PKCS #8 for the key + * + */ +export class KeyChain { + private readonly components: Components + private init: KeyChainInit + + /** + * Creates a new instance of a key chain + */ + constructor (components: Components, init: KeyChainInit) { + this.components = components + this.init = mergeOptions(defaultOptions, init) + + // Enforce NIST SP 800-132 + if (this.init.pass != null && this.init.pass?.length < 20) { + throw new Error('pass must be least 20 characters') + } + if (this.init.dek?.keyLength != null && this.init.dek.keyLength < NIST.minKeyLength) { + throw new Error(`dek.keyLength must be least ${NIST.minKeyLength} bytes`) + } + if (this.init.dek?.salt?.length != null && this.init.dek.salt.length < NIST.minSaltLength) { + throw new Error(`dek.saltLength must be least ${NIST.minSaltLength} bytes`) + } + if (this.init.dek?.iterationCount != null && this.init.dek.iterationCount < NIST.minIterationCount) { + throw new Error(`dek.iterationCount must be least ${NIST.minIterationCount}`) + } + + const dek = this.init.pass != null && this.init.dek?.salt != null + ? pbkdf2( + this.init.pass, + this.init.dek?.salt, + this.init.dek?.iterationCount, + this.init.dek?.keyLength, + this.init.dek?.hash) + : '' + + privates.set(this, { dek }) + } + + /** + * Gets an object that can encrypt/decrypt protected data + * using the Cryptographic Message Syntax (CMS). + * + * CMS describes an encapsulation syntax for data protection. It + * is used to digitally sign, digest, authenticate, or encrypt + * arbitrary message content + */ + get cms () { + const cached = privates.get(this) + + if (cached == null) { + throw errCode(new Error('dek missing'), codes.ERR_INVALID_PARAMETERS) + } + + const dek = cached.dek + + return new CMS(this, dek) + } + + /** + * Generates the options for a keychain. A random salt is produced. 
+ * + * @returns {Object} + */ + static generateOptions (): KeyChainInit { + const options = Object.assign({}, defaultOptions) + const saltLength = Math.ceil(NIST.minSaltLength / 3) * 3 // no base64 padding + options.dek.salt = uint8ArrayToString(randomBytes(saltLength), 'base64') + return options + } + + /** + * Gets an object that can encrypt/decrypt protected data. + * The default options for a keychain. + * + * @returns {Object} + */ + static get options () { + return defaultOptions + } + + /** + * Create a new key. + * + * @param {string} name - The local key name; cannot already exist. + * @param {string} type - One of the key types; 'rsa'. + * @param {number} [size = 2048] - The key size in bits. Used for rsa keys only + */ + async createKey (name: string, type: 'RSA' | 'Ed25519', size = 2048): Promise { + if (!validateKeyName(name) || name === 'self') { + await randomDelay() + throw errCode(new Error('Invalid key name'), codes.ERR_INVALID_KEY_NAME) + } + + if (typeof type !== 'string') { + await randomDelay() + throw errCode(new Error('Invalid key type'), codes.ERR_INVALID_KEY_TYPE) + } + + const dsname = DsName(name) + const exists = await this.components.getDatastore().has(dsname) + if (exists) { + await randomDelay() + throw errCode(new Error('Key name already exists'), codes.ERR_KEY_ALREADY_EXISTS) + } + + switch (type.toLowerCase()) { + case 'rsa': + if (!Number.isSafeInteger(size) || size < 2048) { + await randomDelay() + throw errCode(new Error('Invalid RSA key size'), codes.ERR_INVALID_KEY_SIZE) + } + break + default: + break + } + + let keyInfo + try { + const keypair = await generateKeyPair(type, size) + const kid = await keypair.id() + const cached = privates.get(this) + + if (cached == null) { + throw errCode(new Error('dek missing'), codes.ERR_INVALID_PARAMETERS) + } + + const dek = cached.dek + const pem = await keypair.export(dek) + keyInfo = { + name: name, + id: kid + } + const batch = this.components.getDatastore().batch() + batch.put(dsname, uint8ArrayFromString(pem)) + batch.put(DsInfoName(name), uint8ArrayFromString(JSON.stringify(keyInfo))) + + await batch.commit() + } catch (err: any) { + await randomDelay() + throw err + } + + return keyInfo + } + + /** + * List all the keys. + * + * @returns {Promise} + */ + async listKeys () { + const query = { + prefix: infoPrefix + } + + const info = [] + for await (const value of this.components.getDatastore().query(query)) { + info.push(JSON.parse(uint8ArrayToString(value.value))) + } + + return info + } + + /** + * Find a key by it's id + */ + async findKeyById (id: string): Promise { + try { + const keys = await this.listKeys() + return keys.find((k) => k.id === id) + } catch (err: any) { + await randomDelay() + throw err + } + } + + /** + * Find a key by it's name. + * + * @param {string} name - The local key name. + * @returns {Promise} + */ + async findKeyByName (name: string): Promise { + if (!validateKeyName(name)) { + await randomDelay() + throw errCode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME) + } + + const dsname = DsInfoName(name) + try { + const res = await this.components.getDatastore().get(dsname) + return JSON.parse(uint8ArrayToString(res)) + } catch (err: any) { + await randomDelay() + log.error(err) + throw errCode(new Error(`Key '${name}' does not exist.`), codes.ERR_KEY_NOT_FOUND) + } + } + + /** + * Remove an existing key. + * + * @param {string} name - The local key name; must already exist. 
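The new `KeyChain` is constructed from a `Components` instance rather than a bare datastore. The following sketch is for illustration only and is not part of this patch; the `libp2p/keychain` import specifier and the passphrase strings are assumptions.

```ts
import { MemoryDatastore } from 'datastore-core/memory'
import { createEd25519PeerId } from '@libp2p/peer-id-factory'
import { Components } from '@libp2p/interfaces/components'
import { KeyChain } from 'libp2p/keychain' // assumed import specifier for src/keychain/index.ts

// Components carries the datastore the keychain persists keys into
const components = new Components({
  peerId: await createEd25519PeerId(),
  datastore: new MemoryDatastore()
})

// generateOptions() returns the defaults with a freshly generated random salt;
// the pass has to be at least 20 characters or the constructor throws
const keychain = new KeyChain(components, {
  ...KeyChain.generateOptions(),
  pass: 'a-passphrase-that-is-at-least-20-chars'
})

// create an RSA key, look it up by name, then rotate the keychain passphrase,
// which re-encrypts every stored key with the new DEK
const created = await keychain.createKey('my-key', 'RSA', 2048)
const found = await keychain.findKeyByName('my-key')
console.log(created.id === found.id) // true

await keychain.rotateKeychainPass(
  'a-passphrase-that-is-at-least-20-chars',
  'another-passphrase-over-20-characters'
)
```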
+ * @returns {Promise} + */ + async removeKey (name: string) { + if (!validateKeyName(name) || name === 'self') { + await randomDelay() + throw errCode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME) + } + const dsname = DsName(name) + const keyInfo = await this.findKeyByName(name) + const batch = this.components.getDatastore().batch() + batch.delete(dsname) + batch.delete(DsInfoName(name)) + await batch.commit() + return keyInfo + } + + /** + * Rename a key + * + * @param {string} oldName - The old local key name; must already exist. + * @param {string} newName - The new local key name; must not already exist. + * @returns {Promise} + */ + async renameKey (oldName: string, newName: string): Promise { + if (!validateKeyName(oldName) || oldName === 'self') { + await randomDelay() + throw errCode(new Error(`Invalid old key name '${oldName}'`), codes.ERR_OLD_KEY_NAME_INVALID) + } + if (!validateKeyName(newName) || newName === 'self') { + await randomDelay() + throw errCode(new Error(`Invalid new key name '${newName}'`), codes.ERR_NEW_KEY_NAME_INVALID) + } + const oldDsname = DsName(oldName) + const newDsname = DsName(newName) + const oldInfoName = DsInfoName(oldName) + const newInfoName = DsInfoName(newName) + + const exists = await this.components.getDatastore().has(newDsname) + if (exists) { + await randomDelay() + throw errCode(new Error(`Key '${newName}' already exists`), codes.ERR_KEY_ALREADY_EXISTS) + } + + try { + const pem = await this.components.getDatastore().get(oldDsname) + const res = await this.components.getDatastore().get(oldInfoName) + + const keyInfo = JSON.parse(uint8ArrayToString(res)) + keyInfo.name = newName + const batch = this.components.getDatastore().batch() + batch.put(newDsname, pem) + batch.put(newInfoName, uint8ArrayFromString(JSON.stringify(keyInfo))) + batch.delete(oldDsname) + batch.delete(oldInfoName) + await batch.commit() + return keyInfo + } catch (err: any) { + await randomDelay() + throw err + } + } + + /** + * Export an existing key as a PEM encrypted PKCS #8 string + */ + async exportKey (name: string, password: string) { + if (!validateKeyName(name)) { + await randomDelay() + throw errCode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME) + } + if (password == null) { + await randomDelay() + throw errCode(new Error('Password is required'), codes.ERR_PASSWORD_REQUIRED) + } + + const dsname = DsName(name) + try { + const res = await this.components.getDatastore().get(dsname) + const pem = uint8ArrayToString(res) + const cached = privates.get(this) + + if (cached == null) { + throw errCode(new Error('dek missing'), codes.ERR_INVALID_PARAMETERS) + } + + const dek = cached.dek + const privateKey = await importKey(pem, dek) + return await privateKey.export(password) + } catch (err: any) { + await randomDelay() + throw err + } + } + + /** + * Import a new key from a PEM encoded PKCS #8 string + * + * @param {string} name - The local key name; must not already exist. + * @param {string} pem - The PEM encoded PKCS #8 string + * @param {string} password - The password. 
+ * @returns {Promise} + */ + async importKey (name: string, pem: string, password: string): Promise { + if (!validateKeyName(name) || name === 'self') { + await randomDelay() + throw errCode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME) + } + if (pem == null) { + await randomDelay() + throw errCode(new Error('PEM encoded key is required'), codes.ERR_PEM_REQUIRED) + } + const dsname = DsName(name) + const exists = await this.components.getDatastore().has(dsname) + if (exists) { + await randomDelay() + throw errCode(new Error(`Key '${name}' already exists`), codes.ERR_KEY_ALREADY_EXISTS) + } + + let privateKey + try { + privateKey = await importKey(pem, password) + } catch (err: any) { + await randomDelay() + throw errCode(new Error('Cannot read the key, most likely the password is wrong'), codes.ERR_CANNOT_READ_KEY) + } + + let kid + try { + kid = await privateKey.id() + const cached = privates.get(this) + + if (cached == null) { + throw errCode(new Error('dek missing'), codes.ERR_INVALID_PARAMETERS) + } + + const dek = cached.dek + pem = await privateKey.export(dek) + } catch (err: any) { + await randomDelay() + throw err + } + + const keyInfo = { + name: name, + id: kid + } + const batch = this.components.getDatastore().batch() + batch.put(dsname, uint8ArrayFromString(pem)) + batch.put(DsInfoName(name), uint8ArrayFromString(JSON.stringify(keyInfo))) + await batch.commit() + + return keyInfo + } + + /** + * Import a peer key + */ + async importPeer (name: string, peer: PeerId): Promise { + try { + if (!validateKeyName(name)) { + throw errCode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME) + } + if (peer == null || peer.privateKey == null) { + throw errCode(new Error('Peer.privKey is required'), codes.ERR_MISSING_PRIVATE_KEY) + } + + const privateKey = await unmarshalPrivateKey(peer.privateKey) + + const dsname = DsName(name) + const exists = await this.components.getDatastore().has(dsname) + if (exists) { + await randomDelay() + throw errCode(new Error(`Key '${name}' already exists`), codes.ERR_KEY_ALREADY_EXISTS) + } + + const cached = privates.get(this) + + if (cached == null) { + throw errCode(new Error('dek missing'), codes.ERR_INVALID_PARAMETERS) + } + + const dek = cached.dek + const pem = await privateKey.export(dek) + const keyInfo: KeyInfo = { + name: name, + id: peer.toString() + } + const batch = this.components.getDatastore().batch() + batch.put(dsname, uint8ArrayFromString(pem)) + batch.put(DsInfoName(name), uint8ArrayFromString(JSON.stringify(keyInfo))) + await batch.commit() + return keyInfo + } catch (err: any) { + await randomDelay() + throw err + } + } + + /** + * Gets the private key as PEM encoded PKCS #8 string + */ + async getPrivateKey (name: string): Promise { + if (!validateKeyName(name)) { + await randomDelay() + throw errCode(new Error(`Invalid key name '${name}'`), codes.ERR_INVALID_KEY_NAME) + } + + try { + const dsname = DsName(name) + const res = await this.components.getDatastore().get(dsname) + return uint8ArrayToString(res) + } catch (err: any) { + await randomDelay() + log.error(err) + throw errCode(new Error(`Key '${name}' does not exist.`), codes.ERR_KEY_NOT_FOUND) + } + } + + /** + * Rotate keychain password and re-encrypt all associated keys + */ + async rotateKeychainPass (oldPass: string, newPass: string) { + if (typeof oldPass !== 'string') { + await randomDelay() + throw errCode(new Error(`Invalid old pass type '${typeof oldPass}'`), codes.ERR_INVALID_OLD_PASS_TYPE) + } + if (typeof newPass !== 
'string') { + await randomDelay() + throw errCode(new Error(`Invalid new pass type '${typeof newPass}'`), codes.ERR_INVALID_NEW_PASS_TYPE) + } + if (newPass.length < 20) { + await randomDelay() + throw errCode(new Error(`Invalid pass length ${newPass.length}`), codes.ERR_INVALID_PASS_LENGTH) + } + log('recreating keychain') + const cached = privates.get(this) + + if (cached == null) { + throw errCode(new Error('dek missing'), codes.ERR_INVALID_PARAMETERS) + } + + const oldDek = cached.dek + this.init.pass = newPass + const newDek = newPass != null && this.init.dek?.salt != null + ? pbkdf2( + newPass, + this.init.dek.salt, + this.init.dek?.iterationCount, + this.init.dek?.keyLength, + this.init.dek?.hash) + : '' + privates.set(this, { dek: newDek }) + const keys = await this.listKeys() + for (const key of keys) { + const res = await this.components.getDatastore().get(DsName(key.name)) + const pem = uint8ArrayToString(res) + const privateKey = await importKey(pem, oldDek) + const password = newDek.toString() + const keyAsPEM = await privateKey.export(password) + + // Update stored key + const batch = this.components.getDatastore().batch() + const keyInfo = { + name: key.name, + id: key.id + } + batch.put(DsName(key.name), uint8ArrayFromString(keyAsPEM)) + batch.put(DsInfoName(key.name), uint8ArrayFromString(JSON.stringify(keyInfo))) + await batch.commit() + } + log('keychain reconstructed') + } +} diff --git a/src/keychain/util.js b/src/keychain/util.ts similarity index 78% rename from src/keychain/util.js rename to src/keychain/util.ts index a84c3f10..7e015426 100644 --- a/src/keychain/util.js +++ b/src/keychain/util.ts @@ -1,8 +1,7 @@ -// @ts-nocheck -'use strict' +import 'node-forge/lib/x509.js' +// @ts-expect-error types are missing +import forge from 'node-forge/lib/forge.js' -require('node-forge/lib/x509') -const forge = require('node-forge/lib/forge') const pki = forge.pki /** @@ -11,19 +10,15 @@ const pki = forge.pki * The output Uint8Array contains the PKCS #7 message in DER. 
* * TODO: move to libp2p-crypto package - * - * @param {KeyInfo} key - The id and name of the key - * @param {RsaPrivateKey} privateKey - The naked key - * @returns {Uint8Array} */ -const certificateForKey = (key, privateKey) => { - const publicKey = pki.setRsaPublicKey(privateKey.n, privateKey.e) +export const certificateForKey = (key: any, privateKey: forge.pki.rsa.PrivateKey) => { + const publicKey = pki.rsa.setPublicKey(privateKey.n, privateKey.e) const cert = pki.createCertificate() cert.publicKey = publicKey cert.serialNumber = '01' cert.validity.notBefore = new Date() cert.validity.notAfter = new Date() - cert.validity.notAfter.setFullYear(cert.validity.notBefore.getFullYear() + 10) + cert.validity.notAfter.setFullYear(cert.validity.notBefore.getFullYear() + 10) // eslint-disable-line @typescript-eslint/restrict-plus-operands const attrs = [{ name: 'organizationName', value: 'ipfs' @@ -79,14 +74,9 @@ const certificateForKey = (key, privateKey) => { * @param {Array} array * @param {function(*)} asyncCompare - An async function that returns a boolean */ -async function findAsync (array, asyncCompare) { +export async function findAsync (array: T[], asyncCompare: (val: T) => Promise) { const promises = array.map(asyncCompare) const results = await Promise.all(promises) const index = results.findIndex(result => result) return array[index] } - -module.exports = { - certificateForKey, - findAsync -} diff --git a/src/libp2p.ts b/src/libp2p.ts new file mode 100644 index 00000000..142031e1 --- /dev/null +++ b/src/libp2p.ts @@ -0,0 +1,501 @@ +import { logger } from '@libp2p/logger' +import { AbortOptions, EventEmitter, Startable, CustomEvent, isStartable } from '@libp2p/interfaces' +import type { Multiaddr } from '@multiformats/multiaddr' +import { MemoryDatastore } from 'datastore-core/memory' +import { DefaultPeerRouting } from './peer-routing.js' +import { CompoundContentRouting } from './content-routing/index.js' +import { getPeer } from './get-peer.js' +import { codes } from './errors.js' +import { DefaultAddressManager } from './address-manager/index.js' +import { DefaultConnectionManager } from './connection-manager/index.js' +import { AutoDialler } from './connection-manager/auto-dialler.js' +import { Circuit } from './circuit/transport.js' +import { Relay } from './circuit/index.js' +import { DefaultDialer } from './dialer/index.js' +import { KeyChain } from './keychain/index.js' +import { DefaultMetrics } from './metrics/index.js' +import { DefaultTransportManager } from './transport-manager.js' +import { DefaultUpgrader } from './upgrader.js' +import { DefaultRegistrar } from './registrar.js' +import { IdentifyService } from './identify/index.js' +import { FetchService } from './fetch/index.js' +import { PingService } from './ping/index.js' +import { NatManager } from './nat-manager.js' +import { PeerRecordUpdater } from './peer-record-updater.js' +import { DHTPeerRouting } from './dht/dht-peer-routing.js' +import { PersistentPeerStore } from '@libp2p/peer-store' +import { DHTContentRouting } from './dht/dht-content-routing.js' +import { AutoDialer } from './dialer/auto-dialer.js' +import { Initializable, Components, isInitializable } from '@libp2p/interfaces/components' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { Connection } from '@libp2p/interfaces/connection' +import type { PeerRouting } from '@libp2p/interfaces/peer-routing' +import type { ContentRouting } from '@libp2p/interfaces/content-routing' +import type { PubSub } from 
'@libp2p/interfaces/pubsub' +import type { ConnectionManager, StreamHandler } from '@libp2p/interfaces/registrar' +import type { PeerInfo } from '@libp2p/interfaces/peer-info' +import type { Libp2p, Libp2pEvents, Libp2pInit, Libp2pOptions } from './index.js' +import { validateConfig } from './config.js' +import { createEd25519PeerId } from '@libp2p/peer-id-factory' +import type { PeerStore } from '@libp2p/interfaces/peer-store' +import type { DualDHT } from '@libp2p/interfaces/dht' +import { concat as uint8ArrayConcat } from 'uint8arrays/concat' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import errCode from 'err-code' +import { unmarshalPublicKey } from '@libp2p/crypto/keys' + +const log = logger('libp2p') + +export class Libp2pNode extends EventEmitter implements Libp2p { + public peerId: PeerId + public dht?: DualDHT + public pubsub?: PubSub + public identifyService?: IdentifyService + public fetchService: FetchService + public pingService: PingService + public components: Components + public peerStore: PeerStore + public contentRouting: ContentRouting + public peerRouting: PeerRouting + public keychain: KeyChain + public connectionManager: ConnectionManager + + private started: boolean + private readonly services: Startable[] + private readonly initializables: Initializable[] + + constructor (init: Libp2pInit) { + super() + + this.services = [] + this.initializables = [] + this.started = false + this.peerId = init.peerId + this.components = new Components({ + peerId: init.peerId, + datastore: init.datastore ?? new MemoryDatastore() + }) + + // Create Metrics + if (init.metrics.enabled) { + this.components.setMetrics(this.configureComponent(new DefaultMetrics(init.metrics))) + } + + this.components.setConnectionGater(this.configureComponent({ + denyDialPeer: async () => await Promise.resolve(false), + denyDialMultiaddr: async () => await Promise.resolve(false), + denyInboundConnection: async () => await Promise.resolve(false), + denyOutboundConnection: async () => await Promise.resolve(false), + denyInboundEncryptedConnection: async () => await Promise.resolve(false), + denyOutboundEncryptedConnection: async () => await Promise.resolve(false), + denyInboundUpgradedConnection: async () => await Promise.resolve(false), + denyOutboundUpgradedConnection: async () => await Promise.resolve(false), + filterMultiaddrForPeer: async () => await Promise.resolve(true), + ...init.connectionGater + })) + + this.peerStore = this.components.setPeerStore(this.configureComponent(new PersistentPeerStore(this.components, init.peerStore))) + + this.peerStore.addEventListener('peer', evt => { + const { detail: peerData } = evt + + this.dispatchEvent(new CustomEvent('peer:discovery', { detail: peerData })) + }) + + // Set up connection protector if configured + if (init.connectionProtector != null) { + this.components.setConnectionProtector(this.configureComponent(init.connectionProtector)) + } + + // Set up the Upgrader + this.components.setUpgrader(this.configureComponent(new DefaultUpgrader(this.components, { + connectionEncryption: (init.connectionEncryption ?? []).map(component => this.configureComponent(component)), + muxers: (init.streamMuxers ?? 
[]).map(component => this.configureComponent(component)) + }))) + + // Create the Connection Manager + this.connectionManager = this.components.setConnectionManager(this.configureComponent(new DefaultConnectionManager(this.components, init.connectionManager))) + + // Create the Registrar + this.components.setRegistrar(this.configureComponent(new DefaultRegistrar(this.components))) + + // Setup the transport manager + this.components.setTransportManager(this.configureComponent(new DefaultTransportManager(this.components, init.transportManager))) + + // Addresses {listen, announce, noAnnounce} + this.components.setAddressManager(this.configureComponent(new DefaultAddressManager(this.components, init.addresses))) + + // update our peer record when addresses change + this.configureComponent(new PeerRecordUpdater(this.components)) + + this.components.setDialer(this.configureComponent(new DefaultDialer(this.components, init.dialer))) + + this.configureComponent(new AutoDialler(this.components, { + enabled: init.connectionManager.autoDial, + minConnections: init.connectionManager.minConnections, + autoDialInterval: init.connectionManager.autoDialInterval + })) + + // Create keychain + const keychainOpts = KeyChain.generateOptions() + this.keychain = this.configureComponent(new KeyChain(this.components, { + ...keychainOpts, + ...init.keychain + })) + + // Create the Nat Manager + this.services.push(new NatManager(this.components, init.nat)) + + init.transports.forEach((transport) => { + this.components.getTransportManager().add(this.configureComponent(transport)) + }) + + // Attach stream multiplexers + if (init.streamMuxers != null && init.streamMuxers.length > 0) { + // Add the identify service since we can multiplex + this.identifyService = new IdentifyService(this.components, { + protocolPrefix: init.protocolPrefix, + host: { + agentVersion: init.host.agentVersion + } + }) + this.configureComponent(this.identifyService) + } + + // dht provided components (peerRouting, contentRouting, dht) + if (init.dht != null) { + this.dht = this.components.setDHT(this.configureComponent(init.dht)) + } + + // Create pubsub if provided + if (init.pubsub != null) { + this.pubsub = this.components.setPubSub(this.configureComponent(init.pubsub)) + } + + // Attach remaining APIs + // peer and content routing will automatically get modules from _modules and _dht + + const peerRouters: PeerRouting[] = (init.peerRouters ?? []).map(component => this.configureComponent(component)) + + if (this.dht != null) { + // add dht to routers + peerRouters.push(this.configureComponent(new DHTPeerRouting(this.dht))) + } + + this.peerRouting = this.components.setPeerRouting(this.configureComponent(new DefaultPeerRouting(this.components, { + ...init.peerRouting, + routers: peerRouters + }))) + + const contentRouters: ContentRouting[] = (init.contentRouters ?? 
[]).map(component => this.configureComponent(component)) + + if (this.dht != null) { + // add dht to routers + contentRouters.push(this.configureComponent(new DHTContentRouting(this.dht))) + } + + this.contentRouting = this.components.setContentRouting(this.configureComponent(new CompoundContentRouting(this.components, { + routers: contentRouters + }))) + + if (init.relay.enabled) { + this.components.getTransportManager().add(this.configureComponent(new Circuit())) + + this.configureComponent(new Relay(this.components, { + addressSorter: init.dialer.addressSorter, + ...init.relay + })) + } + + this.fetchService = this.configureComponent(new FetchService(this.components, { + protocolPrefix: init.protocolPrefix + })) + + this.pingService = this.configureComponent(new PingService(this.components, { + protocolPrefix: init.protocolPrefix + })) + + const autoDialer = this.configureComponent(new AutoDialer(this.components, { + enabled: init.connectionManager.autoDial !== false, + minConnections: init.connectionManager.minConnections ?? Infinity + })) + + this.addEventListener('peer:discovery', evt => { + if (!this.isStarted()) { + return + } + + autoDialer.handle(evt) + }) + + // Discovery modules + for (const service of init.peerDiscovery ?? []) { + this.configureComponent(service) + + service.addEventListener('peer', (evt) => { + this.onDiscoveryPeer(evt) + }) + } + } + + private configureComponent (component: T): T { + if (isStartable(component)) { + this.services.push(component) + } + + if (isInitializable(component)) { + this.initializables.push(component) + } + + return component + } + + /** + * Starts the libp2p node and all its subsystems + */ + async start () { + if (this.started) { + return + } + + this.started = true + + log('libp2p is starting') + + try { + // Set available components on all modules interested in components + this.initializables.forEach(obj => { + obj.init(this.components) + }) + + await Promise.all( + this.services.map(async service => { + if (service.beforeStart != null) { + await service.beforeStart() + } + }) + ) + + // start any startables + await Promise.all( + this.services.map(service => service.start()) + ) + + await Promise.all( + this.services.map(async service => { + if (service.afterStart != null) { + await service.afterStart() + } + }) + ) + + log('libp2p has started') + + // Once we start, emit any peers we may have already discovered + // TODO: this should be removed, as we already discovered these peers in the past + await this.components.getPeerStore().forEach(peer => { + this.dispatchEvent(new CustomEvent('peer:discovery', { + detail: { + id: peer.id, + multiaddrs: peer.addresses.map(addr => addr.multiaddr), + protocols: peer.protocols + } + })) + }) + } catch (err: any) { + log.error('An error occurred starting libp2p', err) + await this.stop() + throw err + } + } + + /** + * Stop the libp2p node by closing its listeners and open connections + */ + async stop () { + if (!this.started) { + return + } + + log('libp2p is stopping') + + this.started = false + + await Promise.all( + this.services.map(async service => { + if (service.beforeStop != null) { + await service.beforeStop() + } + }) + ) + + await Promise.all( + this.services.map(servce => servce.stop()) + ) + + await Promise.all( + this.services.map(async service => { + if (service.afterStop != null) { + await service.afterStop() + } + }) + ) + + log('libp2p has stopped') + } + + /** + * Load keychain keys from the datastore. + * Imports the private key as 'self', if needed. 
+ */ + async loadKeychain () { + if (this.keychain == null) { + return + } + + try { + await this.keychain.findKeyByName('self') + } catch (err: any) { + await this.keychain.importPeer('self', this.peerId) + } + } + + isStarted () { + return this.started + } + + getConnections (peerId?: PeerId): Connection[] { + if (peerId == null) { + return this.components.getConnectionManager().getConnectionList() + } + + return this.components.getConnectionManager().getConnections(peerId) + } + + getPeers (): PeerId[] { + return this.components.getConnectionManager().getConnectionList() + .map(conn => conn.remotePeer) + } + + async dial (peer: PeerId | Multiaddr, options: AbortOptions = {}): Promise { + return await this.components.getDialer().dial(peer, options) + } + + async dialProtocol (peer: PeerId | Multiaddr, protocols: string | string[], options: AbortOptions = {}) { + return await this.components.getDialer().dialProtocol(peer, protocols, options) + } + + getMultiaddrs (): Multiaddr[] { + return this.components.getAddressManager().getAddresses() + } + + async hangUp (peer: PeerId | Multiaddr | string): Promise { + const { id } = getPeer(peer) + + const connections = this.components.getConnectionManager().getConnections(id) + + await Promise.all( + connections.map(async connection => { + return await connection.close() + }) + ) + } + + /** + * Get the public key for the given peer id + */ + async getPublicKey (peer: PeerId, options: AbortOptions = {}) { + log('getPublicKey %p', peer) + + const peerInfo = await this.peerStore.get(peer) + + if (peerInfo.pubKey != null) { + return peerInfo.pubKey + } + + if (this.dht == null) { + throw errCode(new Error('Public key was not in the peer store and the DHT is not enabled'), codes.ERR_NO_ROUTERS_AVAILABLE) + } + + const peerKey = uint8ArrayConcat([ + uint8ArrayFromString('/pk/'), + peer.multihash.digest + ]) + + // search the dht + for await (const event of this.dht.get(peerKey, options)) { + if (event.name === 'VALUE') { + const key = unmarshalPublicKey(event.value) + + await this.peerStore.keyBook.set(peer, event.value) + + return key + } + } + + throw errCode(new Error(`Node not responding with its public key: ${peer.toString()}`), codes.ERR_INVALID_RECORD) + } + + async fetch (peer: PeerId | Multiaddr | string, key: string): Promise { + const { id, multiaddrs } = getPeer(peer) + + if (multiaddrs != null) { + await this.components.getPeerStore().addressBook.add(id, multiaddrs) + } + + return await this.fetchService.fetch(id, key) + } + + async ping (peer: PeerId | Multiaddr | string): Promise { + const { id, multiaddrs } = getPeer(peer) + + if (multiaddrs.length > 0) { + await this.components.getPeerStore().addressBook.add(id, multiaddrs) + } + + return await this.pingService.ping(id) + } + + async handle (protocols: string | string[], handler: StreamHandler): Promise { + return await this.components.getRegistrar().handle(protocols, handler) + } + + async unhandle (protocols: string[] | string): Promise { + return await this.components.getRegistrar().unhandle(protocols) + } + + /** + * Called whenever peer discovery services emit `peer` events. + * Known peers may be emitted. 
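`Libp2pNode` exposes the public API used throughout the examples. Below is a minimal sketch (not part of the patch) of calling some of those methods against an already started node; the `libp2p/libp2p` import specifier is an assumption, and `node` / `remotePeer` are expected to come from elsewhere (e.g. `createLibp2pNode()` below plus peer discovery).

```ts
import type { PeerId } from '@libp2p/interfaces/peer-id'
import type { Libp2pNode } from 'libp2p/libp2p' // assumed import specifier for src/libp2p.ts

// Assumes `node` was created with createLibp2pNode() and has been started
async function inspectPeer (node: Libp2pNode, remotePeer: PeerId) {
  // measure a ping round trip to the remote peer
  const latency = await node.ping(remotePeer)

  // the node's current addresses as strings
  const addresses = node.getMultiaddrs().map(ma => ma.toString())

  // read from the peer store, falling back to a DHT query when the key is unknown
  const publicKey = await node.getPublicKey(remotePeer)

  return { latency, addresses, publicKey }
}
```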
+ */ + onDiscoveryPeer (evt: CustomEvent) { + const { detail: peer } = evt + + if (peer.id.toString() === this.peerId.toString()) { + log.error(new Error(codes.ERR_DISCOVERED_SELF)) + return + } + + if (peer.multiaddrs.length > 0) { + void this.components.getPeerStore().addressBook.add(peer.id, peer.multiaddrs).catch(err => log.error(err)) + } + + if (peer.protocols.length > 0) { + void this.components.getPeerStore().protoBook.set(peer.id, peer.protocols).catch(err => log.error(err)) + } + + this.dispatchEvent(new CustomEvent('peer:discovery', { detail: peer })) + } +} + +/** + * Returns a new Libp2pNode instance - this exposes more of the internals than the + * libp2p interface and is useful for testing and debugging. + */ +export async function createLibp2pNode (options: Libp2pOptions): Promise { + if (options.peerId == null) { + options.peerId = await createEd25519PeerId() + } + + return new Libp2pNode(validateConfig(options)) +} diff --git a/src/metrics/index.js b/src/metrics/index.js deleted file mode 100644 index 2b65c289..00000000 --- a/src/metrics/index.js +++ /dev/null @@ -1,290 +0,0 @@ -// @ts-nocheck -'use strict' - -const mergeOptions = require('merge-options') -const { pipe } = require('it-pipe') -const { tap } = require('streaming-iterables') -const oldPeerLRU = require('./old-peers') -const { METRICS: defaultOptions } = require('../constants') -const Stats = require('./stats') - -const initialCounters = [ - 'dataReceived', - 'dataSent' -] - -const directionToEvent = { - in: 'dataReceived', - out: 'dataSent' -} - -/** - * @typedef {import('peer-id')} PeerId - * @typedef {import('libp2p-interfaces/src/transport/types').MultiaddrConnection} MultiaddrConnection - */ - -/** - * @typedef MetricsOptions - * @property {number} [computeThrottleMaxQueueSize = defaultOptions.computeThrottleMaxQueueSize] - * @property {number} [computeThrottleTimeout = defaultOptions.computeThrottleTimeout] - * @property {number[]} [movingAverageIntervals = defaultOptions.movingAverageIntervals] - * @property {number} [maxOldPeersRetention = defaultOptions.maxOldPeersRetention] - */ - -class Metrics { - /** - * @class - * @param {MetricsOptions} options - */ - constructor (options) { - this._options = mergeOptions(defaultOptions, options) - this._globalStats = new Stats(initialCounters, this._options) - this._peerStats = new Map() - this._protocolStats = new Map() - this._oldPeers = oldPeerLRU(this._options.maxOldPeersRetention) - this._running = false - this._onMessage = this._onMessage.bind(this) - this._systems = new Map() - } - - /** - * Must be called for stats to saved. Any data pushed for tracking - * will be ignored. - */ - start () { - this._running = true - } - - /** - * Stops all averages timers and prevents new data from being tracked. - * Once `stop` is called, `start` must be called to resume stats tracking. 
- */ - stop () { - this._running = false - this._globalStats.stop() - for (const stats of this._peerStats.values()) { - stats.stop() - } - for (const stats of this._protocolStats.values()) { - stats.stop() - } - } - - /** - * Gets the global `Stats` object - * - * @returns {Stats} - */ - get global () { - return this._globalStats - } - - /** - * Returns a list of `PeerId` strings currently being tracked - * - * @returns {string[]} - */ - get peers () { - return Array.from(this._peerStats.keys()) - } - - /** - * @returns {Map>>} - */ - getComponentMetrics () { - return this._systems - } - - updateComponentMetric ({ system = 'libp2p', component, metric, value }) { - if (!this._systems.has(system)) { - this._systems.set(system, new Map()) - } - - const systemMetrics = this._systems.get(system) - - if (!systemMetrics.has(component)) { - systemMetrics.set(component, new Map()) - } - - const componentMetrics = systemMetrics.get(component) - - componentMetrics.set(metric, value) - } - - /** - * Returns the `Stats` object for the given `PeerId` whether it - * is a live peer, or in the disconnected peer LRU cache. - * - * @param {PeerId} peerId - * @returns {Stats} - */ - forPeer (peerId) { - const idString = peerId.toB58String() - return this._peerStats.get(idString) || this._oldPeers.get(idString) - } - - /** - * Returns a list of all protocol strings currently being tracked. - * - * @returns {string[]} - */ - get protocols () { - return Array.from(this._protocolStats.keys()) - } - - /** - * Returns the `Stats` object for the given `protocol`. - * - * @param {string} protocol - * @returns {Stats} - */ - forProtocol (protocol) { - return this._protocolStats.get(protocol) - } - - /** - * Should be called when all connections to a given peer - * have closed. The `Stats` collection for the peer will - * be stopped and moved to an LRU for temporary retention. - * - * @param {PeerId} peerId - */ - onPeerDisconnected (peerId) { - const idString = peerId.toB58String() - const peerStats = this._peerStats.get(idString) - if (peerStats) { - peerStats.stop() - this._peerStats.delete(idString) - this._oldPeers.set(idString, peerStats) - } - } - - /** - * Takes the metadata for a message and tracks it in the - * appropriate categories. If the protocol is present, protocol - * stats will also be tracked. - * - * @private - * @param {object} params - * @param {PeerId} params.remotePeer - Remote peer - * @param {string} [params.protocol] - Protocol string the stream is running - * @param {string} params.direction - One of ['in','out'] - * @param {number} params.dataLength - Size of the message - * @returns {void} - */ - _onMessage ({ remotePeer, protocol, direction, dataLength }) { - if (!this._running) return - - const key = directionToEvent[direction] - - let peerStats = this.forPeer(remotePeer) - if (!peerStats) { - peerStats = new Stats(initialCounters, this._options) - this._peerStats.set(remotePeer.toB58String(), peerStats) - } - - // Peer and global stats - peerStats.push(key, dataLength) - this._globalStats.push(key, dataLength) - - // Protocol specific stats - if (protocol) { - let protocolStats = this.forProtocol(protocol) - if (!protocolStats) { - protocolStats = new Stats(initialCounters, this._options) - this._protocolStats.set(protocol, protocolStats) - } - protocolStats.push(key, dataLength) - } - } - - /** - * Replaces the `PeerId` string with the given `peerId`. - * If stats are already being tracked for the given `peerId`, the - * placeholder stats will be merged with the existing stats. 
- * - * @param {PeerId} placeholder - A peerId string - * @param {PeerId} peerId - * @returns {void} - */ - updatePlaceholder (placeholder, peerId) { - if (!this._running) return - const placeholderStats = this.forPeer(placeholder) - const peerIdString = peerId.toB58String() - const existingStats = this.forPeer(peerId) - let mergedStats = placeholderStats - - // If we already have stats, merge the two - if (existingStats) { - // If existing, merge - mergedStats = Metrics.mergeStats(existingStats, mergedStats) - // Attempt to delete from the old peers list just in case it was tracked there - this._oldPeers.delete(peerIdString) - } - - this._peerStats.delete(placeholder.toB58String()) - this._peerStats.set(peerIdString, mergedStats) - mergedStats.start() - } - - /** - * Tracks data running through a given Duplex Iterable `stream`. If - * the `peerId` is not provided, a placeholder string will be created and - * returned. This allows lazy tracking of a peer when the peer is not yet known. - * When the `PeerId` is known, `Metrics.updatePlaceholder` should be called - * with the placeholder string returned from here, and the known `PeerId`. - * - * @param {Object} options - * @param {MultiaddrConnection} options.stream - A duplex iterable stream - * @param {PeerId} [options.remotePeer] - The id of the remote peer that's connected - * @param {string} [options.protocol] - The protocol the stream is running - * @returns {MultiaddrConnection} The peerId string or placeholder string - */ - trackStream ({ stream, remotePeer, protocol }) { - const metrics = this - const _source = stream.source - stream.source = tap(chunk => metrics._onMessage({ - remotePeer, - protocol, - direction: 'in', - dataLength: chunk.length - }))(_source) - - const _sink = stream.sink - stream.sink = source => { - return pipe( - source, - tap(chunk => metrics._onMessage({ - remotePeer, - protocol, - direction: 'out', - dataLength: chunk.length - })), - _sink - ) - } - - return stream - } - - /** - * Merges `other` into `target`. `target` will be modified - * and returned. - * - * @param {Stats} target - * @param {Stats} other - * @returns {Stats} - */ - static mergeStats (target, other) { - target.stop() - other.stop() - - // Merge queues - target._queue = [...target._queue, ...other._queue] - - // TODO: how to merge moving averages? 
- return target - } -} - -module.exports = Metrics diff --git a/src/metrics/index.ts b/src/metrics/index.ts new file mode 100644 index 00000000..bf632fa2 --- /dev/null +++ b/src/metrics/index.ts @@ -0,0 +1,310 @@ +import { pipe } from 'it-pipe' +import each from 'it-foreach' +import LRU from 'hashlru' +import { METRICS as defaultOptions } from '../constants.js' +import { DefaultStats, StatsInit } from './stats.js' +import type { ComponentMetricsUpdate, Metrics, Stats, TrackStreamOptions } from '@libp2p/interfaces/metrics' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { Startable } from '@libp2p/interfaces' +import type { Duplex } from 'it-stream-types' + +const initialCounters: ['dataReceived', 'dataSent'] = [ + 'dataReceived', + 'dataSent' +] + +const directionToEvent = { + in: 'dataReceived', + out: 'dataSent' +} + +export interface OnMessageOptions { + remotePeer: PeerId + protocol?: string + direction: 'in' | 'out' + dataLength: number +} + +export interface MetricsInit { + enabled: boolean + computeThrottleMaxQueueSize: number + computeThrottleTimeout: number + movingAverageIntervals: number[] + maxOldPeersRetention: number +} + +export class DefaultMetrics implements Metrics, Startable { + public globalStats: DefaultStats + + private readonly enabled: boolean + private readonly peerStats: Map + private readonly protocolStats: Map + private readonly oldPeers: ReturnType + private running: boolean + private readonly systems: Map>> + private readonly statsInit: StatsInit + + constructor (init: MetricsInit) { + this.enabled = init.enabled + this.statsInit = { + ...init, + initialCounters + } + this.globalStats = new DefaultStats(this.statsInit) + this.peerStats = new Map() + this.protocolStats = new Map() + this.oldPeers = LRU(init.maxOldPeersRetention ?? defaultOptions.maxOldPeersRetention) + this.running = false + this._onMessage = this._onMessage.bind(this) + this.systems = new Map() + } + + isStarted () { + return this.running + } + + /** + * Must be called for stats to saved. Any data pushed for tracking + * will be ignored. + */ + async start () { + if (!this.enabled) { + return + } + + this.running = true + } + + /** + * Stops all averages timers and prevents new data from being tracked. + * Once `stop` is called, `start` must be called to resume stats tracking. 
+ */ + async stop () { + if (!this.running) { + return + } + + this.running = false + this.globalStats.stop() + + for (const stats of this.peerStats.values()) { + stats.stop() + } + + for (const stats of this.protocolStats.values()) { + stats.stop() + } + } + + /** + * Gets the global `Stats` object + */ + getGlobal () { + return this.globalStats + } + + /** + * Returns a list of `PeerId` strings currently being tracked + */ + getPeers () { + return Array.from(this.peerStats.keys()) + } + + getComponentMetrics () { + return this.systems + } + + updateComponentMetric (update: ComponentMetricsUpdate) { + const { system = 'libp2p', component, metric, value } = update + + if (!this.systems.has(system)) { + this.systems.set(system, new Map()) + } + + const systemMetrics = this.systems.get(system) + + if (systemMetrics == null) { + throw new Error('Unknown metric system') + } + + if (!systemMetrics.has(component)) { + systemMetrics.set(component, new Map()) + } + + const componentMetrics = systemMetrics.get(component) + + if (componentMetrics == null) { + throw new Error('Unknown metric component') + } + + componentMetrics.set(metric, value) + } + + /** + * Returns the `Stats` object for the given `PeerId` whether it + * is a live peer, or in the disconnected peer LRU cache. + */ + forPeer (peerId: PeerId): Stats | undefined { + const idString = peerId.toString() + return this.peerStats.get(idString) ?? this.oldPeers.get(idString) + } + + /** + * Returns a list of all protocol strings currently being tracked + */ + getProtocols (): string[] { + return Array.from(this.protocolStats.keys()) + } + + /** + * Returns the `Stats` object for the given `protocol` + */ + forProtocol (protocol: string): Stats | undefined { + return this.protocolStats.get(protocol) + } + + /** + * Should be called when all connections to a given peer + * have closed. The `Stats` collection for the peer will + * be stopped and moved to an LRU for temporary retention. + */ + onPeerDisconnected (peerId: PeerId) { + const idString = peerId.toString() + const peerStats = this.peerStats.get(idString) + + if (peerStats != null) { + peerStats.stop() + + this.peerStats.delete(idString) + this.oldPeers.set(idString, peerStats) + } + } + + /** + * Takes the metadata for a message and tracks it in the + * appropriate categories. If the protocol is present, protocol + * stats will also be tracked. + */ + _onMessage (opts: OnMessageOptions) { + if (!this.running) { + return + } + + const { remotePeer, protocol, direction, dataLength } = opts + + const key = directionToEvent[direction] + + let peerStats = this.forPeer(remotePeer) + if (peerStats == null) { + const stats = new DefaultStats(this.statsInit) + this.peerStats.set(remotePeer.toString(), stats) + peerStats = stats + } + + // Peer and global stats + peerStats.push(key, dataLength) + this.globalStats.push(key, dataLength) + + // Protocol specific stats + if (protocol != null) { + let protocolStats = this.forProtocol(protocol) + + if (protocolStats == null) { + const stats = new DefaultStats(this.statsInit) + this.protocolStats.set(protocol, stats) + protocolStats = stats + } + + protocolStats.push(key, dataLength) + } + } + + /** + * Replaces the `PeerId` string with the given `peerId`. + * If stats are already being tracked for the given `peerId`, the + * placeholder stats will be merged with the existing stats. 
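`DefaultMetrics` can also be exercised on its own. A sketch follows (not part of the patch); the `libp2p/metrics` import specifier and the numeric settings are illustrative assumptions rather than recommended values (the library's defaults live in `constants.js`).

```ts
import { DefaultMetrics } from 'libp2p/metrics' // assumed import specifier for src/metrics/index.ts

// MetricsInit has no optional fields, so every setting is spelled out here
const metrics = new DefaultMetrics({
  enabled: true,
  computeThrottleMaxQueueSize: 1000,
  computeThrottleTimeout: 2000,
  movingAverageIntervals: [60_000, 300_000, 900_000],
  maxOldPeersRetention: 50
})

await metrics.start()

// component metrics are plain gauges grouped by system ('libp2p' by default) and component
metrics.updateComponentMetric({ component: 'connection-manager', metric: 'open-connections', value: 3 })

for (const [system, components] of metrics.getComponentMetrics()) {
  for (const [component, values] of components) {
    console.log(system, component, Object.fromEntries(values))
  }
}
```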
+ * + * @param {PeerId} placeholder - A peerId string + * @param {PeerId} peerId + * @returns {void} + */ + updatePlaceholder (placeholder: PeerId, peerId: PeerId) { + if (!this.running) { + return + } + + const placeholderString = placeholder.toString() + const placeholderStats = this.peerStats.get(placeholderString) ?? this.oldPeers.get(placeholderString) + const peerIdString = peerId.toString() + const existingStats = this.peerStats.get(peerIdString) ?? this.oldPeers.get(peerIdString) + let mergedStats = placeholderStats + + // If we already have stats, merge the two + if (existingStats != null) { + // If existing, merge + mergedStats = mergeStats(existingStats, mergedStats) + // Attempt to delete from the old peers list just in case it was tracked there + this.oldPeers.remove(peerIdString) + } + + this.peerStats.delete(placeholder.toString()) + this.peerStats.set(peerIdString, mergedStats) + mergedStats.start() + } + + /** + * Tracks data running through a given Duplex Iterable `stream`. If + * the `peerId` is not provided, a placeholder string will be created and + * returned. This allows lazy tracking of a peer when the peer is not yet known. + * When the `PeerId` is known, `Metrics.updatePlaceholder` should be called + * with the placeholder string returned from here, and the known `PeerId`. + */ + trackStream > (opts: TrackStreamOptions): T { + const { stream, remotePeer, protocol } = opts + + if (!this.running) { + return stream + } + + const source = stream.source + stream.source = each(source, chunk => this._onMessage({ + remotePeer, + protocol, + direction: 'in', + dataLength: chunk.length + })) + + const sink = stream.sink + stream.sink = async source => { + return await pipe( + source, + (source) => each(source, chunk => { + this._onMessage({ + remotePeer, + protocol, + direction: 'out', + dataLength: chunk.length + }) + }), + sink + ) + } + + return stream + } +} + +/** + * Merges `other` into `target`. `target` will be modified + * and returned + */ +function mergeStats (target: DefaultStats, other: DefaultStats) { + target.stop() + other.stop() + + // Merge queues + target.queue = [...target.queue, ...other.queue] + + // TODO: how to merge moving averages? 
+ return target +} diff --git a/src/metrics/moving-average.ts b/src/metrics/moving-average.ts new file mode 100644 index 00000000..615b62cf --- /dev/null +++ b/src/metrics/moving-average.ts @@ -0,0 +1,53 @@ +import type { MovingAverage } from '@libp2p/interfaces/metrics' + +export class DefaultMovingAverage { + public movingAverage: number + public variance: number + public deviation: number + public forecast: number + private readonly timespan: number + private previousTime?: number + + constructor (timespan: number) { + if (typeof timespan !== 'number') { + throw new Error('must provide a timespan to the moving average constructor') + } + + if (timespan <= 0) { + throw new Error('must provide a timespan > 0 to the moving average constructor') + } + + this.timespan = timespan + this.movingAverage = 0 + this.variance = 0 + this.deviation = 0 + this.forecast = 0 + } + + alpha (t: number, pt: number) { + return 1 - (Math.exp(-(t - pt) / this.timespan)) + } + + push (time: number, value: number) { + if (this.previousTime != null) { + // calculate moving average + const a = this.alpha(time, this.previousTime) + const diff = value - this.movingAverage + const incr = a * diff + this.movingAverage = a * value + (1 - a) * this.movingAverage + // calculate variance & deviation + this.variance = (1 - a) * (this.variance + diff * incr) + this.deviation = Math.sqrt(this.variance) + // calculate forecast + this.forecast = this.movingAverage + a * diff + } else { + this.movingAverage = value + } + + this.previousTime = time + } +} + +export function createMovingAverage (timespan: number): MovingAverage { + return new DefaultMovingAverage(timespan) +} diff --git a/src/metrics/old-peers.js b/src/metrics/old-peers.js deleted file mode 100644 index 753bdf5f..00000000 --- a/src/metrics/old-peers.js +++ /dev/null @@ -1,16 +0,0 @@ -'use strict' - -const LRU = require('hashlru') - -/** - * Creates and returns a Least Recently Used Cache - * - * @param {number} maxSize - * @returns {any} - */ -module.exports = (maxSize) => { - // @ts-ignore LRU expression is not callable - const patched = LRU(maxSize) - patched.delete = patched.remove - return patched -} diff --git a/src/metrics/stats.js b/src/metrics/stats.js deleted file mode 100644 index 4246588f..00000000 --- a/src/metrics/stats.js +++ /dev/null @@ -1,270 +0,0 @@ -// @ts-nocheck -'use strict' - -const { EventEmitter } = require('events') -const { BigNumber: Big } = require('bignumber.js') -const MovingAverage = require('@vascosantos/moving-average') -const retimer = require('retimer') - -/** - * @typedef {import('@vascosantos/moving-average').IMovingAverage} IMovingAverage - * @typedef {import('bignumber.js').BigNumber} Big - */ - -class Stats extends EventEmitter { - /** - * A queue based manager for stat processing - * - * @class - * @param {string[]} initialCounters - * @param {any} options - */ - constructor (initialCounters, options) { - super() - - this._options = options - this._queue = [] - - /** @type {{ dataReceived: Big, dataSent: Big }} */ - this._stats = { - dataReceived: Big(0), - dataSent: Big(0) - } - - this._frequencyLastTime = Date.now() - this._frequencyAccumulators = {} - - /** @type {{ dataReceived: IMovingAverage[], dataSent: IMovingAverage[] }} */ - this._movingAverages = {} - - this._update = this._update.bind(this) - - const intervals = this._options.movingAverageIntervals - - for (let i = 0; i < initialCounters.length; i++) { - const key = initialCounters[i] - this._stats[key] = Big(0) - this._movingAverages[key] = {} - for (let k = 
0; k < intervals.length; k++) { - const interval = intervals[k] - const ma = this._movingAverages[key][interval] = MovingAverage(interval) - ma.push(this._frequencyLastTime, 0) - } - } - } - - /** - * Initializes the internal timer if there are items in the queue. This - * should only need to be called if `Stats.stop` was previously called, as - * `Stats.push` will also start the processing. - * - * @returns {void} - */ - start () { - if (this._queue.length) { - this._resetComputeTimeout() - } - } - - /** - * Stops processing and computing of stats by clearing the internal - * timer. - * - * @returns {void} - */ - stop () { - if (this._timeout) { - this._timeout.clear() - this._timeout = null - } - } - - /** - * Returns a clone of the current stats. - */ - get snapshot () { - return Object.assign({}, this._stats) - } - - /** - * Returns a clone of the internal movingAverages - */ - get movingAverages () { - return Object.assign({}, this._movingAverages) - } - - /** - * Returns a plain JSON object of the stats - * - * @returns {*} - */ - toJSON () { - const snapshot = this.snapshot - const movingAverages = this.movingAverages - const data = { - dataReceived: snapshot.dataReceived.toString(), - dataSent: snapshot.dataSent.toString(), - movingAverages: {} - } - - const counters = Object.keys(movingAverages) - for (const key of counters) { - data.movingAverages[key] = {} - for (const interval of Object.keys(movingAverages[key])) { - data.movingAverages[key][interval] = movingAverages[key][interval].movingAverage() - } - } - - return data - } - - /** - * Pushes the given operation data to the queue, along with the - * current Timestamp, then resets the update timer. - * - * @param {string} counter - * @param {number} inc - * @returns {void} - */ - push (counter, inc) { - this._queue.push([counter, inc, Date.now()]) - this._resetComputeTimeout() - } - - /** - * Resets the timeout for triggering updates. - * - * @private - * @returns {void} - */ - _resetComputeTimeout () { - this._timeout = retimer(this._update, this._nextTimeout()) - } - - /** - * Calculates and returns the timeout for the next update based on - * the urgency of the update. - * - * @private - * @returns {number} - */ - _nextTimeout () { - // calculate the need for an update, depending on the queue length - const urgency = this._queue.length / this._options.computeThrottleMaxQueueSize - const timeout = Math.max(this._options.computeThrottleTimeout * (1 - urgency), 0) - return timeout - } - - /** - * If there are items in the queue, they will will be processed and - * the frequency for all items will be updated based on the Timestamp - * of the last item in the queue. The `update` event will also be emitted - * with the latest stats. - * - * If there are no items in the queue, no action is taken. - * - * @private - * @returns {void} - */ - _update () { - this._timeout = null - if (this._queue.length) { - let last - for (last of this._queue) { - this._applyOp(last) - } - this._queue = [] - - this._updateFrequency(last[2]) // contains timestamp of last op - - this.emit('update', this._stats) - } - } - - /** - * For each key in the stats, the frequency and moving averages - * will be updated via Stats._updateFrequencyFor based on the time - * difference between calls to this method. 
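
A quick worked example of the `_nextTimeout()` throttle above, which is carried over unchanged into the TypeScript port: the fuller the queue, the sooner the next recompute fires. The settings are illustrative, not necessarily the defaults:

```ts
// Assumed settings: computeThrottleTimeout = 2000 ms, computeThrottleMaxQueueSize = 1000
function nextTimeout (queueLength: number): number {
  const urgency = queueLength / 1000
  return Math.max(2000 * (1 - urgency), 0)
}

nextTimeout(0)    // 2000 - queue is empty, wait the full throttle period
nextTimeout(500)  // 1000 - queue is half full, recompute twice as soon
nextTimeout(1200) // 0    - queue is over capacity, recompute immediately
```
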
- * - * @private - * @param {Timestamp} latestTime - * @returns {void} - */ - _updateFrequency (latestTime) { - const timeDiff = latestTime - this._frequencyLastTime - - Object.keys(this._stats).forEach((key) => { - this._updateFrequencyFor(key, timeDiff, latestTime) - }) - - this._frequencyLastTime = latestTime - } - - /** - * Updates the `movingAverages` for the given `key` and also - * resets the `frequencyAccumulator` for the `key`. - * - * @private - * @param {string} key - * @param {number} timeDiffMS - Time in milliseconds - * @param {Timestamp} latestTime - Time in ticks - * @returns {void} - */ - _updateFrequencyFor (key, timeDiffMS, latestTime) { - const count = this._frequencyAccumulators[key] || 0 - this._frequencyAccumulators[key] = 0 - // if `timeDiff` is zero, `hz` becomes Infinity, so we fallback to 1ms - const safeTimeDiff = timeDiffMS || 1 - const hz = (count / safeTimeDiff) * 1000 - - let movingAverages = this._movingAverages[key] - if (!movingAverages) { - movingAverages = this._movingAverages[key] = {} - } - - const intervals = this._options.movingAverageIntervals - - for (let i = 0; i < intervals.length; i++) { - const movingAverageInterval = intervals[i] - let movingAverage = movingAverages[movingAverageInterval] - if (!movingAverage) { - movingAverage = movingAverages[movingAverageInterval] = MovingAverage(movingAverageInterval) - } - movingAverage.push(latestTime, hz) - } - } - - /** - * For the given operation, `op`, the stats and `frequencyAccumulator` - * will be updated or initialized if they don't already exist. - * - * @private - * @param {{string, number}[]} op - * @throws {InvalidNumber} - * @returns {void} - */ - _applyOp (op) { - const key = op[0] - const inc = op[1] - - if (typeof inc !== 'number') { - throw new Error(`invalid increment number: ${inc}`) - } - - let n - - if (!Object.prototype.hasOwnProperty.call(this._stats, key)) { - n = this._stats[key] = Big(0) - } else { - n = this._stats[key] - } - this._stats[key] = n.plus(inc) - - if (!this._frequencyAccumulators[key]) { - this._frequencyAccumulators[key] = 0 - } - this._frequencyAccumulators[key] += inc - } -} - -module.exports = Stats diff --git a/src/metrics/stats.ts b/src/metrics/stats.ts new file mode 100644 index 00000000..56b3d4ca --- /dev/null +++ b/src/metrics/stats.ts @@ -0,0 +1,243 @@ +import { CustomEvent, EventEmitter } from '@libp2p/interfaces' +import { createMovingAverage } from './moving-average.js' +// @ts-expect-error no types +import retimer from 'retimer' +import type { MovingAverages, Stats } from '@libp2p/interfaces/metrics' + +export interface StatsEvents { + 'update': CustomEvent +} + +export interface StatsInit { + enabled: boolean + initialCounters: ['dataReceived', 'dataSent'] + movingAverageIntervals: number[] + computeThrottleMaxQueueSize: number + computeThrottleTimeout: number +} + +export interface TransferStats { + dataReceived: BigInt + dataSent: BigInt +} + +export class DefaultStats extends EventEmitter implements Stats { + private readonly enabled: boolean + public queue: Array<[string, number, number]> + private stats: TransferStats + private frequencyLastTime: number + private frequencyAccumulators: Record + private movingAverages: MovingAverages + private timeout?: any + private readonly computeThrottleMaxQueueSize: number + private readonly computeThrottleTimeout: number + private readonly movingAverageIntervals: number[] + + /** + * A queue based manager for stat processing + */ + constructor (init: StatsInit) { + super() + + this.enabled = init.enabled + 
this.queue = [] + this.stats = { + dataReceived: 0n, + dataSent: 0n + } + this.frequencyLastTime = Date.now() + this.frequencyAccumulators = {} + this.movingAverages = { + dataReceived: [], + dataSent: [] + } + this.computeThrottleMaxQueueSize = init.computeThrottleMaxQueueSize + this.computeThrottleTimeout = init.computeThrottleTimeout + + this._update = this._update.bind(this) + + this.movingAverageIntervals = init.movingAverageIntervals + + for (let i = 0; i < init.initialCounters.length; i++) { + const key = init.initialCounters[i] + this.stats[key] = 0n + this.movingAverages[key] = [] + + for (let k = 0; k < this.movingAverageIntervals.length; k++) { + const interval = this.movingAverageIntervals[k] + const ma = this.movingAverages[key][interval] = createMovingAverage(interval) + ma.push(this.frequencyLastTime, 0) + } + } + } + + /** + * Initializes the internal timer if there are items in the queue. This + * should only need to be called if `Stats.stop` was previously called, as + * `Stats.push` will also start the processing + */ + start () { + if (!this.enabled) { + return + } + + if (this.queue.length > 0) { + this._resetComputeTimeout() + } + } + + /** + * Stops processing and computing of stats by clearing the internal + * timer + */ + stop () { + if (this.timeout != null) { + this.timeout.clear() + this.timeout = null + } + } + + /** + * Returns a clone of the current stats. + */ + getSnapshot () { + return Object.assign({}, this.stats) + } + + /** + * Returns a clone of the internal movingAverages + */ + getMovingAverages (): MovingAverages { + return Object.assign({}, this.movingAverages) + } + + /** + * Pushes the given operation data to the queue, along with the + * current Timestamp, then resets the update timer. + */ + push (counter: string, inc: number) { + this.queue.push([counter, inc, Date.now()]) + this._resetComputeTimeout() + } + + /** + * Resets the timeout for triggering updates. + */ + _resetComputeTimeout () { + this.timeout = retimer(this._update, this._nextTimeout()) + } + + /** + * Calculates and returns the timeout for the next update based on + * the urgency of the update. + */ + _nextTimeout () { + // calculate the need for an update, depending on the queue length + const urgency = this.queue.length / this.computeThrottleMaxQueueSize + const timeout = Math.max(this.computeThrottleTimeout * (1 - urgency), 0) + return timeout + } + + /** + * If there are items in the queue, they will will be processed and + * the frequency for all items will be updated based on the Timestamp + * of the last item in the queue. The `update` event will also be emitted + * with the latest stats. + * + * If there are no items in the queue, no action is taken. + */ + _update () { + this.timeout = null + if (this.queue.length > 0) { + let last: [string, number, number] = ['', 0, 0] + + for (last of this.queue) { + this._applyOp(last) + } + + this.queue = [] + + if (last.length > 2 && last[0] !== '') { + this._updateFrequency(last[2]) // contains timestamp of last op + } + + this.dispatchEvent(new CustomEvent('update', { + detail: this.stats + })) + } + } + + /** + * For each key in the stats, the frequency and moving averages + * will be updated via Stats._updateFrequencyFor based on the time + * difference between calls to this method. 
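
One behavioural difference worth noting: `DefaultStats` now extends the `EventEmitter` from `@libp2p/interfaces`, so the `update` notification is dispatched as a `CustomEvent` carrying the counters in `detail`, instead of the Node.js `EventEmitter` argument used by the old `stats.js`. A sketch, assuming `stats` is a constructed `DefaultStats` instance:

```ts
stats.addEventListener('update', (evt) => {
  // detail is the TransferStats snapshot; the counters are bigints
  const { dataReceived, dataSent } = evt.detail
  console.log(`received ${dataReceived} bytes, sent ${dataSent} bytes`)
})

// queues [counter, increment, Date.now()] and schedules a throttled update
stats.push('dataReceived', 1024)
```
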
+ */ + _updateFrequency (latestTime: number) { + const timeDiff = latestTime - this.frequencyLastTime + + this._updateFrequencyFor('dataReceived', timeDiff, latestTime) + this._updateFrequencyFor('dataSent', timeDiff, latestTime) + + this.frequencyLastTime = latestTime + } + + /** + * Updates the `movingAverages` for the given `key` and also + * resets the `frequencyAccumulator` for the `key`. + */ + _updateFrequencyFor (key: 'dataReceived' | 'dataSent', timeDiffMS: number, latestTime: number) { + const count = this.frequencyAccumulators[key] ?? 0 + this.frequencyAccumulators[key] = 0 + // if `timeDiff` is zero, `hz` becomes Infinity, so we fallback to 1ms + const safeTimeDiff = timeDiffMS ?? 1 + const hz = (count / safeTimeDiff) * 1000 + + let movingAverages = this.movingAverages[key] + if (movingAverages == null) { + movingAverages = this.movingAverages[key] = [] + } + + const intervals = this.movingAverageIntervals + + for (let i = 0; i < intervals.length; i++) { + const movingAverageInterval = intervals[i] + let movingAverage = movingAverages[movingAverageInterval] + if (movingAverage == null) { + movingAverage = movingAverages[movingAverageInterval] = createMovingAverage(movingAverageInterval) + } + movingAverage.push(latestTime, hz) + } + } + + /** + * For the given operation, `op`, the stats and `frequencyAccumulator` + * will be updated or initialized if they don't already exist. + */ + _applyOp (op: [string, number, number]) { + const key = op[0] + const inc = op[1] + + if (typeof inc !== 'number') { + throw new Error('invalid increment number') + } + + let n: bigint + + if (!Object.prototype.hasOwnProperty.call(this.stats, key)) { + // @ts-expect-error cannot index type with key + n = this.stats[key] = 0n + } else { + // @ts-expect-error cannot index type with key + n = this.stats[key] + } + + // @ts-expect-error cannot index type with key + this.stats[key] = n + BigInt(inc) + + if (this.frequencyAccumulators[key] == null) { + this.frequencyAccumulators[key] = 0 + } + + this.frequencyAccumulators[key] += inc + } +} diff --git a/src/metrics/tracked-map.js b/src/metrics/tracked-map.js deleted file mode 100644 index beb0fa72..00000000 --- a/src/metrics/tracked-map.js +++ /dev/null @@ -1,94 +0,0 @@ -'use strict' - -/** - * @template K - * @template V - */ -class TrackedMap extends Map { - /** - * @param {object} options - * @param {string} options.system - * @param {string} options.component - * @param {string} options.metric - * @param {import('.')} options.metrics - */ - constructor (options) { - super() - - const { system, component, metric, metrics } = options - this._system = system - this._component = component - this._metric = metric - this._metrics = metrics - - this._metrics.updateComponentMetric({ - system: this._system, - component: this._component, - metric: this._metric, - value: this.size - }) - } - - /** - * @param {K} key - * @param {V} value - */ - set (key, value) { - super.set(key, value) - this._metrics.updateComponentMetric({ - system: this._system, - component: this._component, - metric: this._metric, - value: this.size - }) - return this - } - - /** - * @param {K} key - */ - delete (key) { - const deleted = super.delete(key) - this._metrics.updateComponentMetric({ - system: this._system, - component: this._component, - metric: this._metric, - value: this.size - }) - return deleted - } - - clear () { - super.clear() - - this._metrics.updateComponentMetric({ - system: this._system, - component: this._component, - metric: this._metric, - value: this.size - }) - } 
-} - -/** - * @template K - * @template V - * @param {object} options - * @param {string} [options.system] - * @param {string} options.component - * @param {string} options.metric - * @param {import('.')} [options.metrics] - * @returns {Map} - */ -module.exports = ({ system = 'libp2p', component, metric, metrics }) => { - /** @type {Map} */ - let map - - if (metrics) { - map = new TrackedMap({ system, component, metric, metrics }) - } else { - map = new Map() - } - - return map -} diff --git a/src/nat-manager.js b/src/nat-manager.js deleted file mode 100644 index cd8bf5aa..00000000 --- a/src/nat-manager.js +++ /dev/null @@ -1,197 +0,0 @@ -'use strict' - -// @ts-ignore nat-api does not export types -const NatAPI = require('nat-api') -const debug = require('debug') -const { promisify } = require('es6-promisify') -const { Multiaddr } = require('multiaddr') -const log = Object.assign(debug('libp2p:nat'), { - error: debug('libp2p:nat:err') -}) -const { isBrowser } = require('wherearewe') -const retry = require('p-retry') -const isPrivateIp = require('private-ip') -const pkg = require('../package.json') -const errcode = require('err-code') -const { - codes: { ERR_INVALID_PARAMETERS } -} = require('./errors') -const isLoopback = require('libp2p-utils/src/multiaddr/is-loopback') - -const DEFAULT_TTL = 7200 - -/** - * @typedef {import('peer-id')} PeerId - * @typedef {import('./transport-manager')} TransportManager - * @typedef {import('./address-manager')} AddressManager - */ - -/** - * @typedef {Object} NatManagerProperties - * @property {PeerId} peerId - The peer ID of the current node - * @property {TransportManager} transportManager - A transport manager - * @property {AddressManager} addressManager - An address manager - * - * @typedef {Object} NatManagerOptions - * @property {boolean} enabled - Whether to enable the NAT manager - * @property {string} [externalIp] - Pass a value to use instead of auto-detection - * @property {string} [description] - A string value to use for the port mapping description on the gateway - * @property {number} [ttl = DEFAULT_TTL] - How long UPnP port mappings should last for in seconds (minimum 1200) - * @property {boolean} [keepAlive] - Whether to automatically refresh UPnP port mappings when their TTL is reached - * @property {string} [gateway] - Pass a value to use instead of auto-detection - * @property {object} [pmp] - PMP options - * @property {boolean} [pmp.enabled] - Whether to enable PMP as well as UPnP - */ - -function highPort (min = 1024, max = 65535) { - return Math.floor(Math.random() * (max - min + 1) + min) -} - -class NatManager { - /** - * @class - * @param {NatManagerProperties & NatManagerOptions} options - */ - constructor ({ peerId, addressManager, transportManager, ...options }) { - this._peerId = peerId - this._addressManager = addressManager - this._transportManager = transportManager - - this._enabled = options.enabled - this._externalIp = options.externalIp - this._options = { - description: options.description || `${pkg.name}@${pkg.version} ${this._peerId}`, - ttl: options.ttl || DEFAULT_TTL, - autoUpdate: options.keepAlive || true, - gateway: options.gateway, - enablePMP: Boolean(options.pmp && options.pmp.enabled) - } - - if (this._options.ttl < DEFAULT_TTL) { - throw errcode(new Error(`NatManager ttl should be at least ${DEFAULT_TTL} seconds`), ERR_INVALID_PARAMETERS) - } - } - - /** - * Starts the NAT manager - */ - start () { - if (isBrowser || !this._enabled) { - return - } - - // done async to not slow down startup - 
this._start().catch((err) => { - // hole punching errors are non-fatal - log.error(err) - }) - } - - async _start () { - const addrs = this._transportManager.getAddrs() - - for (const addr of addrs) { - // try to open uPnP ports for each thin waist address - const { family, host, port, transport } = addr.toOptions() - - if (!addr.isThinWaistAddress() || transport !== 'tcp') { - // only bare tcp addresses - // eslint-disable-next-line no-continue - continue - } - - if (isLoopback(addr)) { - // eslint-disable-next-line no-continue - continue - } - - if (family !== 4) { - // ignore ipv6 - // eslint-disable-next-line no-continue - continue - } - - const client = this._getClient() - const publicIp = this._externalIp || await client.externalIp() - - // @ts-expect-error types are wrong - if (isPrivateIp(publicIp)) { - throw new Error(`${publicIp} is private - please set config.nat.externalIp to an externally routable IP or ensure you are not behind a double NAT`) - } - - const publicPort = highPort() - - log(`opening uPnP connection from ${publicIp}:${publicPort} to ${host}:${port}`) - - await client.map({ - publicPort, - privatePort: port, - protocol: transport.toUpperCase() - }) - - this._addressManager.addObservedAddr(Multiaddr.fromNodeAddress({ - family: 4, - address: publicIp, - port: publicPort - }, transport)) - } - } - - _getClient () { - if (this._client) { - return this._client - } - - const client = new NatAPI(this._options) - - /** @type {(...any: any) => any} */ - const map = promisify(client.map.bind(client)) - /** @type {(...any: any) => any} */ - const destroy = promisify(client.destroy.bind(client)) - /** @type {(...any: any) => any} */ - const externalIp = promisify(client.externalIp.bind(client)) - - // these are all network operations so add a retry - this._client = { - /** - * @param {...any} args - * @returns {Promise} - */ - map: (...args) => retry(() => map(...args), { onFailedAttempt: log.error, unref: true }), - - /** - * @param {...any} args - * @returns {Promise} - */ - destroy: (...args) => retry(() => destroy(...args), { onFailedAttempt: log.error, unref: true }), - - /** - * @param {...any} args - * @returns {Promise} - */ - externalIp: (...args) => retry(() => externalIp(...args), { onFailedAttempt: log.error, unref: true }) - } - - return this._client - } - - /** - * Stops the NAT manager - * - * @async - */ - async stop () { - if (isBrowser || !this._client) { - return - } - - try { - await this._client.destroy() - this._client = null - } catch (/** @type {any} */ err) { - log.error(err) - } - } -} - -module.exports = NatManager diff --git a/src/nat-manager.ts b/src/nat-manager.ts new file mode 100644 index 00000000..978efa64 --- /dev/null +++ b/src/nat-manager.ts @@ -0,0 +1,194 @@ +import { upnpNat, NatAPI } from '@achingbrain/nat-port-mapper' +import { logger } from '@libp2p/logger' +import { Multiaddr } from '@multiformats/multiaddr' +import { isBrowser } from 'wherearewe' +import isPrivateIp from 'private-ip' +import * as pkg from './version.js' +import errCode from 'err-code' +import { codes } from './errors.js' +import { isLoopback } from '@libp2p/utils/multiaddr/is-loopback' +import type { Startable } from '@libp2p/interfaces' +import type { Components } from '@libp2p/interfaces/components' + +const log = logger('libp2p:nat') +const DEFAULT_TTL = 7200 + +function highPort (min = 1024, max = 65535) { + return Math.floor(Math.random() * (max - min + 1) + min) +} + +export interface PMPOptions { + /** + * Whether to enable PMP as well as UPnP + */ + enabled?: 
boolean +} + +export interface NatManagerInit { + /** + * Whether to enable the NAT manager + */ + enabled: boolean + + /** + * Pass a value to use instead of auto-detection + */ + externalAddress?: string + + /** + * Pass a value to use instead of auto-detection + */ + localAddress?: string + + /** + * A string value to use for the port mapping description on the gateway + */ + description?: string + + /** + * How long UPnP port mappings should last for in seconds (minimum 1200) + */ + ttl?: number + + /** + * Whether to automatically refresh UPnP port mappings when their TTL is reached + */ + keepAlive: boolean + + /** + * Pass a value to use instead of auto-detection + */ + gateway?: string +} + +export class NatManager implements Startable { + private readonly components: Components + private readonly enabled: boolean + private readonly externalAddress?: string + private readonly localAddress?: string + private readonly description: string + private readonly ttl: number + private readonly keepAlive: boolean + private readonly gateway?: string + private started: boolean + private client?: NatAPI + + constructor (components: Components, init: NatManagerInit) { + this.components = components + + this.started = false + this.enabled = init.enabled + this.externalAddress = init.externalAddress + this.localAddress = init.localAddress + this.description = init.description ?? `${pkg.name}@${pkg.version} ${this.components.getPeerId().toString()}` + this.ttl = init.ttl ?? DEFAULT_TTL + this.keepAlive = init.keepAlive ?? true + this.gateway = init.gateway + + if (this.ttl < DEFAULT_TTL) { + throw errCode(new Error(`NatManager ttl should be at least ${DEFAULT_TTL} seconds`), codes.ERR_INVALID_PARAMETERS) + } + } + + isStarted () { + return this.started + } + + /** + * Starts the NAT manager + */ + start () { + if (isBrowser || !this.enabled || this.started) { + return + } + + this.started = true + + // done async to not slow down startup + this._start().catch((err) => { + // hole punching errors are non-fatal + log.error(err) + }) + } + + async _start () { + const addrs = this.components.getTransportManager().getAddrs() + + for (const addr of addrs) { + // try to open uPnP ports for each thin waist address + const { family, host, port, transport } = addr.toOptions() + + if (!addr.isThinWaistAddress() || transport !== 'tcp') { + // only bare tcp addresses + // eslint-disable-next-line no-continue + continue + } + + if (isLoopback(addr)) { + // eslint-disable-next-line no-continue + continue + } + + if (family !== 4) { + // ignore ipv6 + // eslint-disable-next-line no-continue + continue + } + + const client = await this._getClient() + const publicIp = this.externalAddress ?? await client.externalIp() + + if (isPrivateIp(publicIp)) { + throw new Error(`${publicIp} is private - please set config.nat.externalIp to an externally routable IP or ensure you are not behind a double NAT`) + } + + const publicPort = highPort() + + log(`opening uPnP connection from ${publicIp}:${publicPort} to ${host}:${port}`) + + await client.map({ + publicPort, + localPort: port, + localAddress: this.localAddress, + protocol: transport.toUpperCase() === 'TCP' ? 
'TCP' : 'UDP' + }) + + this.components.getAddressManager().addObservedAddr(Multiaddr.fromNodeAddress({ + family: 4, + address: publicIp, + port: publicPort + }, transport)) + } + } + + async _getClient () { + if (this.client != null) { + return this.client + } + + this.client = await upnpNat({ + description: this.description, + ttl: this.ttl, + keepAlive: this.keepAlive, + gateway: this.gateway + }) + + return this.client + } + + /** + * Stops the NAT manager + */ + async stop () { + if (isBrowser || this.client == null) { + return + } + + try { + await this.client.close() + this.client = undefined + } catch (err: any) { + log.error(err) + } + } +} diff --git a/src/peer-record-updater.ts b/src/peer-record-updater.ts new file mode 100644 index 00000000..340508a6 --- /dev/null +++ b/src/peer-record-updater.ts @@ -0,0 +1,55 @@ +import { RecordEnvelope, PeerRecord } from '@libp2p/peer-record' +import type { Components } from '@libp2p/interfaces/components' +import type { Startable } from '@libp2p/interfaces' +import { logger } from '@libp2p/logger' +import { protocols } from '@multiformats/multiaddr' + +const log = logger('libp2p:peer-record-updater') + +export class PeerRecordUpdater implements Startable { + private readonly components: Components + private started: boolean + + constructor (components: Components) { + this.components = components + this.started = false + this.update = this.update.bind(this) + } + + isStarted () { + return this.started + } + + async start () { + this.started = true + this.components.getTransportManager().addEventListener('listener:listening', this.update) + this.components.getTransportManager().addEventListener('listener:close', this.update) + this.components.getAddressManager().addEventListener('change:addresses', this.update) + } + + async stop () { + this.started = false + this.components.getTransportManager().removeEventListener('listener:listening', this.update) + this.components.getTransportManager().removeEventListener('listener:close', this.update) + this.components.getAddressManager().removeEventListener('change:addresses', this.update) + } + + /** + * Create (or update if existing) self peer record and store it in the AddressBook. 
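
Stepping back to the `NatManagerInit` options defined above, a sketch of the shape they take, separate from the diff; every value is an example input rather than a default:

```ts
const natInit: NatManagerInit = {
  enabled: true,
  keepAlive: true,                   // refresh the UPnP mapping when its TTL lapses
  ttl: 7200,                         // seconds; values below DEFAULT_TTL (7200) are rejected
  description: 'my-libp2p-node'      // label shown for the port mapping on the gateway
  // externalAddress: '203.0.113.7', // set to skip external IP auto-detection
  // gateway: '192.168.1.1'          // set to skip gateway auto-detection
}
```
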
+ */ + update () { + Promise.resolve() + .then(async () => { + const peerRecord = new PeerRecord({ + peerId: this.components.getPeerId(), + multiaddrs: this.components.getAddressManager().getAddresses().map(ma => ma.decapsulateCode(protocols('p2p').code)) + }) + + const envelope = await RecordEnvelope.seal(peerRecord, this.components.getPeerId()) + await this.components.getPeerStore().addressBook.consumePeerRecord(envelope) + }) + .catch(err => { + log.error('Could not update self peer record: %o', err) + }) + } +} diff --git a/src/peer-routing.js b/src/peer-routing.js deleted file mode 100644 index b525920f..00000000 --- a/src/peer-routing.js +++ /dev/null @@ -1,176 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:peer-routing'), { - error: debug('libp2p:peer-routing:err') -}) -const errCode = require('err-code') -const errors = require('./errors') -const { - storeAddresses, - uniquePeers, - requirePeers -} = require('./content-routing/utils') -const { TimeoutController } = require('timeout-abort-controller') - -const merge = require('it-merge') -const { pipe } = require('it-pipe') -const first = require('it-first') -const drain = require('it-drain') -const filter = require('it-filter') -const { - setDelayedInterval, - clearDelayedInterval -// @ts-ignore module with no types -} = require('set-delayed-interval') -const { DHTPeerRouting } = require('./dht/dht-peer-routing') -// @ts-expect-error setMaxListeners is missing from the types -const { setMaxListeners } = require('events') - -/** - * @typedef {import('peer-id')} PeerId - * @typedef {import('multiaddr').Multiaddr} Multiaddr - * @typedef {import('libp2p-interfaces/src/peer-routing/types').PeerRouting} PeerRoutingModule - */ - -/** - * @typedef {Object} RefreshManagerOptions - * @property {boolean} [enabled = true] - Whether to enable the Refresh manager - * @property {number} [bootDelay = 6e5] - Boot delay to start the Refresh Manager (in ms) - * @property {number} [interval = 10e3] - Interval between each Refresh Manager run (in ms) - * @property {number} [timeout = 10e3] - How long to let each refresh run (in ms) - * - * @typedef {Object} PeerRoutingOptions - * @property {RefreshManagerOptions} [refreshManager] - */ - -class PeerRouting { - /** - * @class - * @param {import('./')} libp2p - */ - constructor (libp2p) { - this._peerId = libp2p.peerId - this._peerStore = libp2p.peerStore - /** @type {PeerRoutingModule[]} */ - this._routers = libp2p._modules.peerRouting || [] - - // If we have the dht, add it to the available peer routers - if (libp2p._dht && libp2p._config.dht.enabled) { - this._routers.push(new DHTPeerRouting(libp2p._dht)) - } - - this._refreshManagerOptions = libp2p._options.peerRouting.refreshManager - - this._findClosestPeersTask = this._findClosestPeersTask.bind(this) - } - - /** - * Start peer routing service. - */ - start () { - if (!this._routers.length || this._timeoutId || !this._refreshManagerOptions.enabled) { - return - } - - this._timeoutId = setDelayedInterval( - this._findClosestPeersTask, this._refreshManagerOptions.interval, this._refreshManagerOptions.bootDelay - ) - } - - /** - * Recurrent task to find closest peers and add their addresses to the Address Book. 
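
That recurring task amounts to draining `getClosestPeers()` for the node's own id; the `storeAddresses()` stage of the pipeline writes every discovered peer's multiaddrs into the address book as a side effect. A sketch, assuming `peerRouting` is a constructed peer routing instance and `peerId` is the node's own id:

```ts
import drain from 'it-drain'

// draining is enough: the addresses are stored while the query runs
await drain(peerRouting.getClosestPeers(peerId.toBytes()))
```
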
- */ - async _findClosestPeersTask () { - try { - // nb getClosestPeers adds the addresses to the address book - await drain(this.getClosestPeers(this._peerId.id, { timeout: this._refreshManagerOptions.timeout || 10e3 })) - } catch (/** @type {any} */ err) { - log.error(err) - } - } - - /** - * Stop peer routing service. - */ - stop () { - clearDelayedInterval(this._timeoutId) - } - - /** - * Iterates over all peer routers in parallel to find the given peer. - * - * @param {PeerId} id - The id of the peer to find - * @param {object} [options] - * @param {number} [options.timeout] - How long the query should run - * @returns {Promise<{ id: PeerId, multiaddrs: Multiaddr[] }>} - */ - async findPeer (id, options) { // eslint-disable-line require-await - if (!this._routers.length) { - throw errCode(new Error('No peer routers available'), errors.codes.ERR_NO_ROUTERS_AVAILABLE) - } - - if (id.toB58String() === this._peerId.toB58String()) { - throw errCode(new Error('Should not try to find self'), errors.codes.ERR_FIND_SELF) - } - - const output = await pipe( - merge( - ...this._routers.map(router => (async function * () { - try { - yield await router.findPeer(id, options) - } catch (err) { - log.error(err) - } - })()) - ), - (source) => filter(source, Boolean), - (source) => storeAddresses(source, this._peerStore), - (source) => first(source) - ) - - if (output) { - return output - } - - throw errCode(new Error(errors.messages.NOT_FOUND), errors.codes.ERR_NOT_FOUND) - } - - /** - * Attempt to find the closest peers on the network to the given key. - * - * @param {Uint8Array} key - A CID like key - * @param {Object} [options] - * @param {number} [options.timeout=30e3] - How long the query can take - * @param {AbortSignal} [options.signal] - An AbortSignal to abort the request - * @returns {AsyncIterable<{ id: PeerId, multiaddrs: Multiaddr[] }>} - */ - async * getClosestPeers (key, options = { timeout: 30e3 }) { - if (!this._routers.length) { - throw errCode(new Error('No peer routers available'), errors.codes.ERR_NO_ROUTERS_AVAILABLE) - } - - if (options.timeout) { - const controller = new TimeoutController(options.timeout) - // this controller will potentially be used while dialing lots of - // peers so prevent MaxListenersExceededWarning appearing in the console - try { - // fails on node < 15.4 - setMaxListeners && setMaxListeners(Infinity, controller.signal) - } catch {} - - options.signal = controller.signal - } - - yield * pipe( - merge( - ...this._routers.map(router => router.getClosestPeers(key, options)) - ), - (source) => storeAddresses(source, this._peerStore), - (source) => uniquePeers(source), - (source) => requirePeers(source) - ) - } -} - -module.exports = PeerRouting diff --git a/src/peer-routing.ts b/src/peer-routing.ts new file mode 100644 index 00000000..4e95ff5c --- /dev/null +++ b/src/peer-routing.ts @@ -0,0 +1,185 @@ +import { logger } from '@libp2p/logger' +import errCode from 'err-code' +import { codes, messages } from './errors.js' +import { + storeAddresses, + uniquePeers, + requirePeers +} from './content-routing/utils.js' +import { TimeoutController } from 'timeout-abort-controller' +import merge from 'it-merge' +import { pipe } from 'it-pipe' +import first from 'it-first' +import drain from 'it-drain' +import filter from 'it-filter' +import { + setDelayedInterval, + clearDelayedInterval +// @ts-expect-error module with no types +} from 'set-delayed-interval' +// @ts-expect-error setMaxListeners is missing from the node 16 types +import { setMaxListeners } from 'events' 
+import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { PeerRouting } from '@libp2p/interfaces/peer-routing' +import type { AbortOptions, Startable } from '@libp2p/interfaces' +import type { PeerInfo } from '@libp2p/interfaces/peer-info' +import type { Components } from '@libp2p/interfaces/components' + +const log = logger('libp2p:peer-routing') + +export interface RefreshManagerInit { + /** + * Whether to enable the Refresh manager + */ + enabled?: boolean + + /** + * Boot delay to start the Refresh Manager (in ms) + */ + bootDelay?: number + + /** + * Interval between each Refresh Manager run (in ms) + */ + interval?: number + + /** + * How long to let each refresh run (in ms) + */ + timeout?: number +} + +export interface PeerRoutingInit { + routers: PeerRouting[] + refreshManager?: RefreshManagerInit +} + +export class DefaultPeerRouting implements PeerRouting, Startable { + private readonly components: Components + private readonly routers: PeerRouting[] + private readonly refreshManagerInit: RefreshManagerInit + private timeoutId?: ReturnType + private started: boolean + private abortController?: TimeoutController + + constructor (components: Components, init: PeerRoutingInit) { + this.components = components + this.routers = init.routers + this.refreshManagerInit = init.refreshManager ?? {} + this.started = false + + this._findClosestPeersTask = this._findClosestPeersTask.bind(this) + } + + isStarted () { + return this.started + } + + /** + * Start peer routing service. + */ + async start () { + if (this.started || this.routers.length === 0 || this.timeoutId != null || this.refreshManagerInit.enabled === false) { + return + } + + this.timeoutId = setDelayedInterval( + this._findClosestPeersTask, this.refreshManagerInit.interval, this.refreshManagerInit.bootDelay + ) + + this.started = true + } + + /** + * Recurrent task to find closest peers and add their addresses to the Address Book. + */ + async _findClosestPeersTask () { + if (this.abortController != null) { + // we are already running the query + return + } + + try { + this.abortController = new TimeoutController(this.refreshManagerInit.timeout ?? 10e3) + + // this controller may be used while dialing lots of peers so prevent MaxListenersExceededWarning + // appearing in the console + try { + // fails on node < 15.4 + setMaxListeners?.(Infinity, this.abortController.signal) + } catch {} + + // nb getClosestPeers adds the addresses to the address book + await drain(this.getClosestPeers(this.components.getPeerId().toBytes(), { signal: this.abortController.signal })) + } catch (err: any) { + log.error(err) + } finally { + this.abortController?.clear() + this.abortController = undefined + } + } + + /** + * Stop peer routing service. 
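
The refresh manager keeps the semantics of the old `peerRouting.refreshManager` configuration. A sketch of the init shape accepted by `DefaultPeerRouting`, reusing the defaults documented for the previous implementation (6e5 ms boot delay, 10e3 ms interval and timeout) as illustrative values; `components` and `dhtPeerRouting` are assumed to exist:

```ts
const peerRouting = new DefaultPeerRouting(components, {
  routers: [dhtPeerRouting], // at least one PeerRouting implementation, e.g. DHT-backed
  refreshManager: {
    enabled: true,           // periodically query for peers close to our own id
    bootDelay: 6e5,          // wait ten minutes after start() before the first run
    interval: 10e3,          // then refresh every ten seconds
    timeout: 10e3            // abort each refresh after ten seconds
  }
})

await peerRouting.start()
```
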
+ */ + async stop () { + clearDelayedInterval(this.timeoutId) + + // abort query if it is in-flight + this.abortController?.abort() + + this.started = false + } + + /** + * Iterates over all peer routers in parallel to find the given peer + */ + async findPeer (id: PeerId, options?: AbortOptions): Promise { + if (this.routers.length === 0) { + throw errCode(new Error('No peer routers available'), codes.ERR_NO_ROUTERS_AVAILABLE) + } + + if (id.toString() === this.components.getPeerId().toString()) { + throw errCode(new Error('Should not try to find self'), codes.ERR_FIND_SELF) + } + + const output = await pipe( + merge( + ...this.routers.map(router => (async function * () { + try { + yield await router.findPeer(id, options) + } catch (err) { + log.error(err) + } + })()) + ), + (source) => filter(source, Boolean), + (source) => storeAddresses(source, this.components.getPeerStore()), + async (source) => await first(source) + ) + + if (output != null) { + return output + } + + throw errCode(new Error(messages.NOT_FOUND), codes.ERR_NOT_FOUND) + } + + /** + * Attempt to find the closest peers on the network to the given key + */ + async * getClosestPeers (key: Uint8Array, options?: AbortOptions): AsyncIterable { + if (this.routers.length === 0) { + throw errCode(new Error('No peer routers available'), codes.ERR_NO_ROUTERS_AVAILABLE) + } + + yield * pipe( + merge( + ...this.routers.map(router => router.getClosestPeers(key, options)) + ), + (source) => storeAddresses(source, this.components.getPeerStore()), + (source) => uniquePeers(source), + (source) => requirePeers(source) + ) + } +} diff --git a/src/peer-store/README.md b/src/peer-store/README.md deleted file mode 100644 index b95cdaf0..00000000 --- a/src/peer-store/README.md +++ /dev/null @@ -1,145 +0,0 @@ -# PeerStore - -Libp2p's PeerStore is responsible for keeping an updated register with the relevant information of the known peers. It should be the single source of truth for all peer data, where a subsystem can learn about peers' data and where someone can listen for updates. The PeerStore comprises four main components: `addressBook`, `keyBook`, `protocolBook` and `metadataBook`. - -The PeerStore manages the high level operations on its inner books. Moreover, the PeerStore should be responsible for notifying interested parties of relevant events, through its Event Emitter. - -## Submitting records to the PeerStore - -Several libp2p subsystems will perform operations that might gather relevant information about peers. - -### Identify -- The Identify protocol automatically runs on every connection when multiplexing is enabled. The protocol will put the multiaddrs and protocols provided by the peer to the PeerStore. -- In the background, the Identify Service is also waiting for protocol change notifications of peers via the IdentifyPush protocol. Peers may leverage the `identify-push` message to communicate protocol changes to all connected peers, so that their PeerStore can be updated with the updated protocols. -- While it is currently not supported in js-libp2p, future iterations may also support the [IdentifyDelta protocol](https://github.com/libp2p/specs/pull/176). -- Taking into account that the Identify protocol records are directly from the peer, they should be considered the source of truth and weighted accordingly. - -### Peer Discovery -- Libp2p discovery protocols aim to discover new peers in the network. In a typical discovery protocol, addresses of the peer are discovered along with its peer id. 
Once this happens, a libp2p discovery protocol should emit a `peer` event with the information of the discovered peer and this information will be added to the PeerStore by libp2p. - -### Dialer -- Libp2p API supports dialing a peer given a `multiaddr`, and no prior knowledge of the peer. If the node is able to establish a connection with the peer, it and its multiaddr is added to the PeerStore. -- When a connection is being upgraded, more precisely after its encryption, or even in a discovery protocol, a libp2p node can get to know other parties public keys. In this scenario, libp2p will add the peer's public key to its `KeyBook`. - -### DHT -- On some DHT operations, such as finding providers for a given CID, nodes may exchange peer data as part of the query. This passive peer discovery should result in the DHT emitting the `peer` event in the same way [Peer Discovery](#peerdiscovery) does. - -## Retrieving records from the PeerStore - -When data in the PeerStore is updated the PeerStore will emit events based on the changes, to allow applications and other subsystems to take action on those changes. Any subsystem interested in these notifications should subscribe the [`PeerStore events`][peer-store-events]. - -### Peer -- Each time a new peer is discovered, the PeerStore should emit a [`peer` event][peer-store-events], so that interested parties can leverage this peer and establish a connection with it. - -### Protocols -- When the known protocols of a peer change, the PeerStore emits a [`change:protocols` event][peer-store-events]. - -### Multiaddrs -- When the known listening `multiaddrs` of a peer change, the PeerStore emits a [`change:multiaddrs` event][peer-store-events]. - -## PeerStore implementation - -The PeerStore wraps four main components: `addressBook`, `keyBook`, `protocolBook` and `metadataBook`. Moreover, it provides a high level API for those components, as well as data events. - -### Components - -#### Address Book - -The `addressBook` keeps the known multiaddrs of a peer. The multiaddrs of each peer may change over time and the Address Book must account for this. - -`Map` - -A `peerId.toB58String()` identifier mapping to a `Address` object, which should have the following structure: - -```js -{ - multiaddr: -} -``` - -#### Key Book - -The `keyBook` tracks the public keys of the peers by keeping their [`PeerId`][peer-id]. - -`Map>` - -A `peerId.toB58String()` identifier mapping to a `Set` of protocol identifier strings. - -#### Metadata Book - -The `metadataBook` keeps track of the known metadata of a peer. Its metadata is stored in a key value fashion, where a key identifier (`string`) represents a metadata value (`Uint8Array`). - -`Map>` - -A `peerId.toB58String()` identifier mapping to the peer metadata Map. - -### API - -For the complete API documentation, you should check the [API.md](../../doc/API.md). - -Access to its underlying books: - -- `peerStore.addressBook.*` -- `peerStore.keyBook.*` -- `peerStore.metadataBook.*` -- `peerStore.protoBook.*` - -### Events - -- `peer` - emitted when a new peer is added. -- `change:multiaadrs` - emitted when a known peer has a different set of multiaddrs. -- `change:protocols` - emitted when a known peer supports a different set of protocols. -- `change:pubkey` - emitted when a peer's public key is known. -- `change:metadata` - emitted when known metadata of a peer changes. - -## Data Persistence - -The data stored in the PeerStore can be persisted if configured appropriately. 
Keeping a record of the peers already discovered by the peer, as well as their known data aims to improve the efficiency of peers joining the network after being offline. - -The libp2p node will need to receive a [datastore](https://github.com/ipfs/interface-datastore), in order to persist this data across restarts. A [datastore](https://github.com/ipfs/interface-datastore) stores its data in a key-value fashion. As a result, we need coherent keys so that we do not overwrite data. - -The PeerStore should not continuously update the datastore whenever data is changed. Instead, it should only store new data after reaching a certain threshold of "dirty" peers, as well as when the node is stopped, in order to batch writes to the datastore. - -The peer id will be appended to the datastore key for each data namespace. The namespaces were defined as follows: - -**AddressBook** - -All the known peer addresses are stored with a key pattern as follows: - -`/peers/addrs/` - -**ProtoBook** - -All the known peer protocols are stored with a key pattern as follows: - -`/peers/protos/` - -**KeyBook** - -All public keys are stored under the following pattern: - -` /peers/keys/` - -**MetadataBook** - -Metadata is stored under the following key pattern: - -`/peers/metadata//` - -## Future Considerations - -- If multiaddr TTLs are added, the PeerStore may schedule jobs to delete all addresses that exceed the TTL to prevent AddressBook bloating -- Further API methods will probably need to be added in the context of multiaddr validity and confidence. -- When improving libp2p configuration for specific runtimes, we should take into account the PeerStore recommended datastore. -- When improving libp2p configuration, we should think about a possible way of allowing the configuration of Bootstrap to be influenced by the persisted peers, as a way to decrease the load on Bootstrap nodes. - -[peer-id]: https://github.com/libp2p/js-peer-id -[peer-store-events]: ../../doc/API.md#libp2ppeerstore diff --git a/src/peer-store/address-book.js b/src/peer-store/address-book.js deleted file mode 100644 index 391f9608..00000000 --- a/src/peer-store/address-book.js +++ /dev/null @@ -1,382 +0,0 @@ -'use strict' - -const debug = require('debug') -const errcode = require('err-code') -const { Multiaddr } = require('multiaddr') -const PeerId = require('peer-id') -const { codes } = require('../errors') -const PeerRecord = require('../record/peer-record') -const Envelope = require('../record/envelope') -const { pipe } = require('it-pipe') -const all = require('it-all') -const filter = require('it-filter') -const map = require('it-map') -const each = require('it-foreach') - -/** - * @typedef {import('./types').PeerStore} PeerStore - * @typedef {import('./types').Address} Address - * @typedef {import('./types').AddressBook} AddressBook - */ - -const log = Object.assign(debug('libp2p:peer-store:address-book'), { - error: debug('libp2p:peer-store:address-book:err') -}) - -const EVENT_NAME = 'change:multiaddrs' - -/** - * @implements {AddressBook} - */ -class PeerStoreAddressBook { - /** - * @param {PeerStore["emit"]} emit - * @param {import('./types').Store} store - * @param {(peerId: PeerId, multiaddr: Multiaddr) => Promise} addressFilter - */ - constructor (emit, store, addressFilter) { - this._emit = emit - this._store = store - this._addressFilter = addressFilter - } - - /** - * ConsumePeerRecord adds addresses from a signed peer record contained in a record envelope. 
- * This will return a boolean that indicates if the record was successfully processed and added - * into the AddressBook. - * - * @param {Envelope} envelope - */ - async consumePeerRecord (envelope) { - log('consumePeerRecord await write lock') - const release = await this._store.lock.writeLock() - log('consumePeerRecord got write lock') - - let peerId - let updatedPeer - - try { - let peerRecord - try { - peerRecord = PeerRecord.createFromProtobuf(envelope.payload) - } catch (/** @type {any} */ err) { - log.error('invalid peer record received') - return false - } - - peerId = peerRecord.peerId - const multiaddrs = peerRecord.multiaddrs - - // Verify peerId - if (!peerId.equals(envelope.peerId)) { - log('signing key does not match PeerId in the PeerRecord') - return false - } - - // ensure the record has multiaddrs - if (!multiaddrs || !multiaddrs.length) { - return false - } - - if (await this._store.has(peerId)) { - const peer = await this._store.load(peerId) - - if (peer.peerRecordEnvelope) { - const storedEnvelope = await Envelope.createFromProtobuf(peer.peerRecordEnvelope) - const storedRecord = PeerRecord.createFromProtobuf(storedEnvelope.payload) - - // ensure seq is greater than, or equal to, the last received - if (storedRecord.seqNumber >= peerRecord.seqNumber) { - return false - } - } - } - - // Replace unsigned addresses by the new ones from the record - // TODO: Once we have ttls for the addresses, we should merge these in - updatedPeer = await this._store.patchOrCreate(peerId, { - addresses: await filterMultiaddrs(peerId, multiaddrs, this._addressFilter, true), - peerRecordEnvelope: envelope.marshal() - }) - - log(`stored provided peer record for ${peerRecord.peerId.toB58String()}`) - } finally { - log('consumePeerRecord release write lock') - release() - } - - this._emit(EVENT_NAME, { peerId, multiaddrs: updatedPeer.addresses.map(({ multiaddr }) => multiaddr) }) - - return true - } - - /** - * @param {PeerId} peerId - */ - async getRawEnvelope (peerId) { - log('getRawEnvelope await read lock') - const release = await this._store.lock.readLock() - log('getRawEnvelope got read lock') - - try { - const peer = await this._store.load(peerId) - - return peer.peerRecordEnvelope - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } finally { - log('getRawEnvelope release read lock') - release() - } - } - - /** - * Get an Envelope containing a PeerRecord for the given peer. - * Returns undefined if no record exists. 
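
For reference, the contract of `consumePeerRecord()` in this (now removed) JavaScript implementation: it only stores envelopes whose inner `PeerRecord` matches the signing peer and whose sequence number is strictly newer than anything already held, replacing any unsigned addresses for that peer. A usage sketch, assuming `envelope` is a `RecordEnvelope` received from a remote peer (for example via identify) and `peerStore` is the node's peer store:

```ts
const accepted = await peerStore.addressBook.consumePeerRecord(envelope)

if (accepted) {
  // certified addresses replaced any unsigned ones held for this peer
  console.log('updated addresses for', envelope.peerId.toB58String())
} else {
  // malformed record, peer id mismatch, no multiaddrs, or a stale seqNumber
  console.log('peer record ignored')
}
```
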
- * - * @param {PeerId} peerId - */ - async getPeerRecord (peerId) { - const raw = await this.getRawEnvelope(peerId) - - if (!raw) { - return undefined - } - - return Envelope.createFromProtobuf(raw) - } - - /** - * @param {PeerId} peerId - */ - async get (peerId) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - log('get wait for read lock') - const release = await this._store.lock.readLock() - log('get got read lock') - - try { - const peer = await this._store.load(peerId) - - return peer.addresses - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } finally { - log('get release read lock') - release() - } - - return [] - } - - /** - * @param {PeerId} peerId - * @param {Multiaddr[]} multiaddrs - */ - async set (peerId, multiaddrs) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - if (!Array.isArray(multiaddrs)) { - log.error('multiaddrs must be an array of Multiaddrs') - throw errcode(new Error('multiaddrs must be an array of Multiaddrs'), codes.ERR_INVALID_PARAMETERS) - } - - log('set await write lock') - const release = await this._store.lock.writeLock() - log('set got write lock') - - let hasPeer = false - let updatedPeer - - try { - const addresses = await filterMultiaddrs(peerId, multiaddrs, this._addressFilter) - - // No valid addresses found - if (!addresses.length) { - return - } - - try { - const peer = await this._store.load(peerId) - hasPeer = true - - if (new Set([ - ...addresses.map(({ multiaddr }) => multiaddr.toString()), - ...peer.addresses.map(({ multiaddr }) => multiaddr.toString()) - ]).size === peer.addresses.length && addresses.length === peer.addresses.length) { - // not changing anything, no need to update - return - } - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } - - updatedPeer = await this._store.patchOrCreate(peerId, { addresses }) - - log(`set multiaddrs for ${peerId.toB58String()}`) - } finally { - log('set release write lock') - release() - } - - this._emit(EVENT_NAME, { peerId, multiaddrs: updatedPeer.addresses.map(addr => addr.multiaddr) }) - - // Notify the existence of a new peer - if (!hasPeer) { - this._emit('peer', peerId) - } - } - - /** - * @param {PeerId} peerId - * @param {Multiaddr[]} multiaddrs - */ - async add (peerId, multiaddrs) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - if (!Array.isArray(multiaddrs)) { - log.error('multiaddrs must be an array of Multiaddrs') - throw errcode(new Error('multiaddrs must be an array of Multiaddrs'), codes.ERR_INVALID_PARAMETERS) - } - - log('add await write lock') - const release = await this._store.lock.writeLock() - log('add got write lock') - - let hasPeer - let updatedPeer - - try { - const addresses = await filterMultiaddrs(peerId, multiaddrs, this._addressFilter) - - // No valid addresses found - if (!addresses.length) { - return - } - - try { - const peer = await this._store.load(peerId) - hasPeer = true - - if (new Set([ - ...addresses.map(({ multiaddr }) => multiaddr.toString()), - ...peer.addresses.map(({ multiaddr }) => 
multiaddr.toString()) - ]).size === peer.addresses.length) { - return - } - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } - - updatedPeer = await this._store.mergeOrCreate(peerId, { addresses }) - - log(`added multiaddrs for ${peerId.toB58String()}`) - } finally { - log('set release write lock') - release() - } - - this._emit(EVENT_NAME, { peerId, multiaddrs: updatedPeer.addresses.map(addr => addr.multiaddr) }) - - // Notify the existence of a new peer - if (!hasPeer) { - this._emit('peer', peerId) - } - } - - /** - * @param {PeerId} peerId - */ - async delete (peerId) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - log('delete await write lock') - const release = await this._store.lock.writeLock() - log('delete got write lock') - - let has - - try { - has = await this._store.has(peerId) - - await this._store.patchOrCreate(peerId, { - addresses: [] - }) - } finally { - log('delete release write lock') - release() - } - - if (has) { - this._emit(EVENT_NAME, { peerId, multiaddrs: [] }) - } - } - - /** - * @param {PeerId} peerId - * @param {(addresses: Address[]) => Address[]} [addressSorter] - */ - async getMultiaddrsForPeer (peerId, addressSorter = (ms) => ms) { - const addresses = await this.get(peerId) - - return addressSorter( - addresses - ).map((address) => { - const multiaddr = address.multiaddr - - const idString = multiaddr.getPeerId() - if (idString && idString === peerId.toB58String()) return multiaddr - - return multiaddr.encapsulate(`/p2p/${peerId.toB58String()}`) - }) - } -} - -/** - * @param {PeerId} peerId - * @param {Multiaddr[]} multiaddrs - * @param {(peerId: PeerId, multiaddr: Multiaddr) => Promise} addressFilter - * @param {boolean} isCertified - */ -function filterMultiaddrs (peerId, multiaddrs, addressFilter, isCertified = false) { - return pipe( - multiaddrs, - (source) => each(source, (multiaddr) => { - if (!Multiaddr.isMultiaddr(multiaddr)) { - log.error('multiaddr must be an instance of Multiaddr') - throw errcode(new Error('multiaddr must be an instance of Multiaddr'), codes.ERR_INVALID_PARAMETERS) - } - }), - (source) => filter(source, (multiaddr) => addressFilter(peerId, multiaddr)), - (source) => map(source, (multiaddr) => { - return { - multiaddr: new Multiaddr(multiaddr.toString()), - isCertified - } - }), - (source) => all(source) - ) -} - -module.exports = PeerStoreAddressBook diff --git a/src/peer-store/index.js b/src/peer-store/index.js deleted file mode 100644 index 2dceac37..00000000 --- a/src/peer-store/index.js +++ /dev/null @@ -1,121 +0,0 @@ -'use strict' - -const debug = require('debug') -const { EventEmitter } = require('events') -const AddressBook = require('./address-book') -const KeyBook = require('./key-book') -const MetadataBook = require('./metadata-book') -const ProtoBook = require('./proto-book') -const Store = require('./store') - -/** - * @typedef {import('./types').PeerStore} PeerStore - * @typedef {import('./types').Peer} Peer - * @typedef {import('peer-id')} PeerId - * @typedef {import('multiaddr').Multiaddr} Multiaddr - */ - -const log = Object.assign(debug('libp2p:peer-store'), { - error: debug('libp2p:peer-store:err') -}) - -/** - * An implementation of PeerStore that stores data in a Datastore - * - * @implements {PeerStore} - */ -class DefaultPeerStore extends EventEmitter { - /** - * @param {object} properties - * @param 
{PeerId} properties.peerId - * @param {import('interface-datastore').Datastore} properties.datastore - * @param {(peerId: PeerId, multiaddr: Multiaddr) => Promise} properties.addressFilter - */ - constructor ({ peerId, datastore, addressFilter }) { - super() - - this._peerId = peerId - this._store = new Store(datastore) - - this.addressBook = new AddressBook(this.emit.bind(this), this._store, addressFilter) - this.keyBook = new KeyBook(this.emit.bind(this), this._store) - this.metadataBook = new MetadataBook(this.emit.bind(this), this._store) - this.protoBook = new ProtoBook(this.emit.bind(this), this._store) - } - - async * getPeers () { - log('getPeers await read lock') - const release = await this._store.lock.readLock() - log('getPeers got read lock') - - try { - for await (const peer of this._store.all()) { - if (peer.id.toB58String() === this._peerId.toB58String()) { - // Remove self peer if present - continue - } - - yield peer - } - } finally { - log('getPeers release read lock') - release() - } - } - - /** - * Delete the information of the given peer in every book - * - * @param {PeerId} peerId - */ - async delete (peerId) { - log('delete await write lock') - const release = await this._store.lock.writeLock() - log('delete got write lock') - - try { - await this._store.delete(peerId) - } finally { - log('delete release write lock') - release() - } - } - - /** - * Get the stored information of a given peer - * - * @param {PeerId} peerId - */ - async get (peerId) { - log('get await read lock') - const release = await this._store.lock.readLock() - log('get got read lock') - - try { - return this._store.load(peerId) - } finally { - log('get release read lock') - release() - } - } - - /** - * Returns true if we have a record of the peer - * - * @param {PeerId} peerId - */ - async has (peerId) { - log('has await read lock') - const release = await this._store.lock.readLock() - log('has got read lock') - - try { - return this._store.has(peerId) - } finally { - log('has release read lock') - release() - } - } -} - -module.exports = DefaultPeerStore diff --git a/src/peer-store/key-book.js b/src/peer-store/key-book.js deleted file mode 100644 index dfd4c151..00000000 --- a/src/peer-store/key-book.js +++ /dev/null @@ -1,141 +0,0 @@ -'use strict' - -const debug = require('debug') -const errcode = require('err-code') -const { codes } = require('../errors') -const PeerId = require('peer-id') -const { equals: uint8arrayEquals } = require('uint8arrays/equals') - -/** - * @typedef {import('./types').PeerStore} PeerStore - * @typedef {import('./types').KeyBook} KeyBook - * @typedef {import('libp2p-interfaces/src/keys/types').PublicKey} PublicKey - */ - -const log = Object.assign(debug('libp2p:peer-store:key-book'), { - error: debug('libp2p:peer-store:key-book:err') -}) - -const EVENT_NAME = 'change:pubkey' - -/** - * @implements {KeyBook} - */ -class PeerStoreKeyBook { - /** - * The KeyBook is responsible for keeping the known public keys of a peer. 
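As a usage sketch for the `DefaultPeerStore` shown above (not part of the patch itself), the store exposes the per-topic books alongside `getPeers`, `get`, `has` and `delete`. The `peerStore` and `somePeerId` names below are assumptions.

```js
// Sketch only: `peerStore` is an assumed DefaultPeerStore instance,
// `somePeerId` an assumed PeerId.
// getPeers() is an async generator that skips the node's own peer id.
for await (const peer of peerStore.getPeers()) {
  console.log(peer.id.toB58String(), peer.protocols)
}

// Each aspect of a peer is also reachable through its dedicated book.
const addresses = await peerStore.addressBook.get(somePeerId) // [] when unknown

if (await peerStore.has(somePeerId)) {
  await peerStore.delete(somePeerId) // removes the record from the datastore
}
```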
- * - * @param {PeerStore["emit"]} emit - * @param {import('./types').Store} store - */ - constructor (emit, store) { - this._emit = emit - this._store = store - } - - /** - * Set the Peer public key - * - * @param {PeerId} peerId - * @param {PublicKey} publicKey - */ - async set (peerId, publicKey) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - if (!publicKey) { - log.error('publicKey must be an instance of PublicKey to store data') - throw errcode(new Error('publicKey must be an instance of PublicKey'), codes.ERR_INVALID_PARAMETERS) - } - - log('set await write lock') - const release = await this._store.lock.writeLock() - log('set got write lock') - - let updatedKey = false - - try { - try { - const existing = await this._store.load(peerId) - - if (existing.pubKey && uint8arrayEquals(existing.pubKey.bytes, publicKey.bytes)) { - return - } - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } - - await this._store.patchOrCreate(peerId, { - pubKey: publicKey - }) - updatedKey = true - } finally { - log('set release write lock') - release() - } - - if (updatedKey) { - this._emit(EVENT_NAME, { peerId, pubKey: publicKey }) - } - } - - /** - * Get Public key of the given PeerId, if stored - * - * @param {PeerId} peerId - */ - async get (peerId) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - log('get await write lock') - const release = await this._store.lock.readLock() - log('get got write lock') - - try { - const peer = await this._store.load(peerId) - - return peer.pubKey - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } finally { - log('get release write lock') - release() - } - } - - /** - * @param {PeerId} peerId - */ - async delete (peerId) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - log('delete await write lock') - const release = await this._store.lock.writeLock() - log('delete got write lock') - - try { - await this._store.patchOrCreate(peerId, { - pubKey: undefined - }) - } finally { - log('delete release write lock') - release() - } - - this._emit(EVENT_NAME, { peerId, pubKey: undefined }) - } -} - -module.exports = PeerStoreKeyBook diff --git a/src/peer-store/metadata-book.js b/src/peer-store/metadata-book.js deleted file mode 100644 index 7176a78a..00000000 --- a/src/peer-store/metadata-book.js +++ /dev/null @@ -1,250 +0,0 @@ -'use strict' - -const debug = require('debug') -const errcode = require('err-code') -const { codes } = require('../errors') -const PeerId = require('peer-id') -const { equals: uint8ArrayEquals } = require('uint8arrays/equals') - -const log = Object.assign(debug('libp2p:peer-store:metadata-book'), { - error: debug('libp2p:peer-store:metadata-book:err') -}) - -/** - * @typedef {import('./types').PeerStore} PeerStore - * @typedef {import('./types').MetadataBook} MetadataBook - */ - -const EVENT_NAME = 'change:metadata' - -/** - * @implements {MetadataBook} - */ -class PeerStoreMetadataBook { - /** - * The MetadataBook is responsible for keeping the known supported - * protocols of a peer - 
* - * @param {PeerStore["emit"]} emit - * @param {import('./types').Store} store - */ - constructor (emit, store) { - this._emit = emit - this._store = store - } - - /** - * Get the known data of a provided peer - * - * @param {PeerId} peerId - */ - async get (peerId) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - log('get await read lock') - const release = await this._store.lock.readLock() - log('get got read lock') - - try { - const peer = await this._store.load(peerId) - - return peer.metadata - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } finally { - log('get release read lock') - release() - } - - return new Map() - } - - /** - * Get specific metadata value, if it exists - * - * @param {PeerId} peerId - * @param {string} key - */ - async getValue (peerId, key) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - log('getValue await read lock') - const release = await this._store.lock.readLock() - log('getValue got read lock') - - try { - const peer = await this._store.load(peerId) - - return peer.metadata.get(key) - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } finally { - log('getValue release write lock') - release() - } - } - - /** - * @param {PeerId} peerId - * @param {Map} metadata - */ - async set (peerId, metadata) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - if (!metadata || !(metadata instanceof Map)) { - log.error('valid metadata must be provided to store data') - throw errcode(new Error('valid metadata must be provided'), codes.ERR_INVALID_PARAMETERS) - } - - log('set await write lock') - const release = await this._store.lock.writeLock() - log('set got write lock') - - try { - await this._store.mergeOrCreate(peerId, { - metadata - }) - } finally { - log('set release write lock') - release() - } - - this._emit(EVENT_NAME, { peerId, metadata }) - } - - /** - * Set metadata key and value of a provided peer - * - * @param {PeerId} peerId - * @param {string} key - metadata key - * @param {Uint8Array} value - metadata value - */ - async setValue (peerId, key, value) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - if (typeof key !== 'string' || !(value instanceof Uint8Array)) { - log.error('valid key and value must be provided to store data') - throw errcode(new Error('valid key and value must be provided'), codes.ERR_INVALID_PARAMETERS) - } - - log('setValue await write lock') - const release = await this._store.lock.writeLock() - log('setValue got write lock') - - let updatedPeer - - try { - try { - const existingPeer = await this._store.load(peerId) - const existingValue = existingPeer.metadata.get(key) - - if (existingValue != null && uint8ArrayEquals(value, existingValue)) { - return - } - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } - - updatedPeer = await this._store.mergeOrCreate(peerId, 
{ - metadata: new Map([[key, value]]) - }) - } finally { - log('setValue release write lock') - release() - } - - this._emit(EVENT_NAME, { peerId, metadata: updatedPeer.metadata }) - } - - /** - * @param {PeerId} peerId - */ - async delete (peerId) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - log('delete await write lock') - const release = await this._store.lock.writeLock() - log('delete got write lock') - - let has - - try { - has = await this._store.has(peerId) - - if (has) { - await this._store.patch(peerId, { - metadata: new Map() - }) - } - } finally { - log('delete release write lock') - release() - } - - if (has) { - this._emit(EVENT_NAME, { peerId, metadata: new Map() }) - } - } - - /** - * @param {PeerId} peerId - * @param {string} key - */ - async deleteValue (peerId, key) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - log('deleteValue await write lock') - const release = await this._store.lock.writeLock() - log('deleteValue got write lock') - - let metadata - - try { - const peer = await this._store.load(peerId) - metadata = peer.metadata - - metadata.delete(key) - - await this._store.patch(peerId, { - metadata - }) - } catch (/** @type {any} **/ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } finally { - log('deleteValue release write lock') - release() - } - - if (metadata) { - this._emit(EVENT_NAME, { peerId, metadata }) - } - } -} - -module.exports = PeerStoreMetadataBook diff --git a/src/peer-store/pb/peer.d.ts b/src/peer-store/pb/peer.d.ts deleted file mode 100644 index e6ccdfe6..00000000 --- a/src/peer-store/pb/peer.d.ts +++ /dev/null @@ -1,222 +0,0 @@ -import * as $protobuf from "protobufjs"; -/** Properties of a Peer. */ -export interface IPeer { - - /** Peer addresses */ - addresses?: (IAddress[]|null); - - /** Peer protocols */ - protocols?: (string[]|null); - - /** Peer metadata */ - metadata?: (IMetadata[]|null); - - /** Peer pubKey */ - pubKey?: (Uint8Array|null); - - /** Peer peerRecordEnvelope */ - peerRecordEnvelope?: (Uint8Array|null); -} - -/** Represents a Peer. */ -export class Peer implements IPeer { - - /** - * Constructs a new Peer. - * @param [p] Properties to set - */ - constructor(p?: IPeer); - - /** Peer addresses. */ - public addresses: IAddress[]; - - /** Peer protocols. */ - public protocols: string[]; - - /** Peer metadata. */ - public metadata: IMetadata[]; - - /** Peer pubKey. */ - public pubKey?: (Uint8Array|null); - - /** Peer peerRecordEnvelope. */ - public peerRecordEnvelope?: (Uint8Array|null); - - /** Peer _pubKey. */ - public _pubKey?: "pubKey"; - - /** Peer _peerRecordEnvelope. */ - public _peerRecordEnvelope?: "peerRecordEnvelope"; - - /** - * Encodes the specified Peer message. Does not implicitly {@link Peer.verify|verify} messages. - * @param m Peer message or plain object to encode - * @param [w] Writer to encode to - * @returns Writer - */ - public static encode(m: IPeer, w?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes a Peer message from the specified reader or buffer. 
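A short, hedged sketch of the metadata book API removed above: keys are strings, values are raw bytes, and `setValue` is a no-op (no event emitted) when the stored bytes already equal the new ones. `metadataBook` and `peerId` are assumed instances.

```js
// Sketch only: `metadataBook` and `peerId` are assumed instances.
const value = new TextEncoder().encode('en')

await metadataBook.setValue(peerId, 'language', value) // emits 'change:metadata'
await metadataBook.setValue(peerId, 'language', value) // identical bytes, no event

const stored = await metadataBook.getValue(peerId, 'language') // Uint8Array or undefined
const all = await metadataBook.get(peerId)                     // a Map (empty when the peer is unknown)

await metadataBook.deleteValue(peerId, 'language')
```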
- * @param r Reader or buffer to decode from - * @param [l] Message length if known beforehand - * @returns Peer - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(r: ($protobuf.Reader|Uint8Array), l?: number): Peer; - - /** - * Creates a Peer message from a plain object. Also converts values to their respective internal types. - * @param d Plain object - * @returns Peer - */ - public static fromObject(d: { [k: string]: any }): Peer; - - /** - * Creates a plain object from a Peer message. Also converts values to other types if specified. - * @param m Peer - * @param [o] Conversion options - * @returns Plain object - */ - public static toObject(m: Peer, o?: $protobuf.IConversionOptions): { [k: string]: any }; - - /** - * Converts this Peer to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; -} - -/** Properties of an Address. */ -export interface IAddress { - - /** Address multiaddr */ - multiaddr?: (Uint8Array|null); - - /** Address isCertified */ - isCertified?: (boolean|null); -} - -/** Represents an Address. */ -export class Address implements IAddress { - - /** - * Constructs a new Address. - * @param [p] Properties to set - */ - constructor(p?: IAddress); - - /** Address multiaddr. */ - public multiaddr: Uint8Array; - - /** Address isCertified. */ - public isCertified?: (boolean|null); - - /** Address _isCertified. */ - public _isCertified?: "isCertified"; - - /** - * Encodes the specified Address message. Does not implicitly {@link Address.verify|verify} messages. - * @param m Address message or plain object to encode - * @param [w] Writer to encode to - * @returns Writer - */ - public static encode(m: IAddress, w?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes an Address message from the specified reader or buffer. - * @param r Reader or buffer to decode from - * @param [l] Message length if known beforehand - * @returns Address - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(r: ($protobuf.Reader|Uint8Array), l?: number): Address; - - /** - * Creates an Address message from a plain object. Also converts values to their respective internal types. - * @param d Plain object - * @returns Address - */ - public static fromObject(d: { [k: string]: any }): Address; - - /** - * Creates a plain object from an Address message. Also converts values to other types if specified. - * @param m Address - * @param [o] Conversion options - * @returns Plain object - */ - public static toObject(m: Address, o?: $protobuf.IConversionOptions): { [k: string]: any }; - - /** - * Converts this Address to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; -} - -/** Properties of a Metadata. */ -export interface IMetadata { - - /** Metadata key */ - key?: (string|null); - - /** Metadata value */ - value?: (Uint8Array|null); -} - -/** Represents a Metadata. */ -export class Metadata implements IMetadata { - - /** - * Constructs a new Metadata. - * @param [p] Properties to set - */ - constructor(p?: IMetadata); - - /** Metadata key. */ - public key: string; - - /** Metadata value. */ - public value: Uint8Array; - - /** - * Encodes the specified Metadata message. Does not implicitly {@link Metadata.verify|verify} messages. 
- * @param m Metadata message or plain object to encode - * @param [w] Writer to encode to - * @returns Writer - */ - public static encode(m: IMetadata, w?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes a Metadata message from the specified reader or buffer. - * @param r Reader or buffer to decode from - * @param [l] Message length if known beforehand - * @returns Metadata - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(r: ($protobuf.Reader|Uint8Array), l?: number): Metadata; - - /** - * Creates a Metadata message from a plain object. Also converts values to their respective internal types. - * @param d Plain object - * @returns Metadata - */ - public static fromObject(d: { [k: string]: any }): Metadata; - - /** - * Creates a plain object from a Metadata message. Also converts values to other types if specified. - * @param m Metadata - * @param [o] Conversion options - * @returns Plain object - */ - public static toObject(m: Metadata, o?: $protobuf.IConversionOptions): { [k: string]: any }; - - /** - * Converts this Metadata to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; -} diff --git a/src/peer-store/pb/peer.js b/src/peer-store/pb/peer.js deleted file mode 100644 index 866911d8..00000000 --- a/src/peer-store/pb/peer.js +++ /dev/null @@ -1,643 +0,0 @@ -/*eslint-disable*/ -"use strict"; - -var $protobuf = require("protobufjs/minimal"); - -// Common aliases -var $Reader = $protobuf.Reader, $Writer = $protobuf.Writer, $util = $protobuf.util; - -// Exported root namespace -var $root = $protobuf.roots["libp2p-peer"] || ($protobuf.roots["libp2p-peer"] = {}); - -$root.Peer = (function() { - - /** - * Properties of a Peer. - * @exports IPeer - * @interface IPeer - * @property {Array.|null} [addresses] Peer addresses - * @property {Array.|null} [protocols] Peer protocols - * @property {Array.|null} [metadata] Peer metadata - * @property {Uint8Array|null} [pubKey] Peer pubKey - * @property {Uint8Array|null} [peerRecordEnvelope] Peer peerRecordEnvelope - */ - - /** - * Constructs a new Peer. - * @exports Peer - * @classdesc Represents a Peer. - * @implements IPeer - * @constructor - * @param {IPeer=} [p] Properties to set - */ - function Peer(p) { - this.addresses = []; - this.protocols = []; - this.metadata = []; - if (p) - for (var ks = Object.keys(p), i = 0; i < ks.length; ++i) - if (p[ks[i]] != null) - this[ks[i]] = p[ks[i]]; - } - - /** - * Peer addresses. - * @member {Array.} addresses - * @memberof Peer - * @instance - */ - Peer.prototype.addresses = $util.emptyArray; - - /** - * Peer protocols. - * @member {Array.} protocols - * @memberof Peer - * @instance - */ - Peer.prototype.protocols = $util.emptyArray; - - /** - * Peer metadata. - * @member {Array.} metadata - * @memberof Peer - * @instance - */ - Peer.prototype.metadata = $util.emptyArray; - - /** - * Peer pubKey. - * @member {Uint8Array|null|undefined} pubKey - * @memberof Peer - * @instance - */ - Peer.prototype.pubKey = null; - - /** - * Peer peerRecordEnvelope. - * @member {Uint8Array|null|undefined} peerRecordEnvelope - * @memberof Peer - * @instance - */ - Peer.prototype.peerRecordEnvelope = null; - - // OneOf field names bound to virtual getters and setters - var $oneOfFields; - - /** - * Peer _pubKey. 
- * @member {"pubKey"|undefined} _pubKey - * @memberof Peer - * @instance - */ - Object.defineProperty(Peer.prototype, "_pubKey", { - get: $util.oneOfGetter($oneOfFields = ["pubKey"]), - set: $util.oneOfSetter($oneOfFields) - }); - - /** - * Peer _peerRecordEnvelope. - * @member {"peerRecordEnvelope"|undefined} _peerRecordEnvelope - * @memberof Peer - * @instance - */ - Object.defineProperty(Peer.prototype, "_peerRecordEnvelope", { - get: $util.oneOfGetter($oneOfFields = ["peerRecordEnvelope"]), - set: $util.oneOfSetter($oneOfFields) - }); - - /** - * Encodes the specified Peer message. Does not implicitly {@link Peer.verify|verify} messages. - * @function encode - * @memberof Peer - * @static - * @param {IPeer} m Peer message or plain object to encode - * @param {$protobuf.Writer} [w] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Peer.encode = function encode(m, w) { - if (!w) - w = $Writer.create(); - if (m.addresses != null && m.addresses.length) { - for (var i = 0; i < m.addresses.length; ++i) - $root.Address.encode(m.addresses[i], w.uint32(10).fork()).ldelim(); - } - if (m.protocols != null && m.protocols.length) { - for (var i = 0; i < m.protocols.length; ++i) - w.uint32(18).string(m.protocols[i]); - } - if (m.metadata != null && m.metadata.length) { - for (var i = 0; i < m.metadata.length; ++i) - $root.Metadata.encode(m.metadata[i], w.uint32(26).fork()).ldelim(); - } - if (m.pubKey != null && Object.hasOwnProperty.call(m, "pubKey")) - w.uint32(34).bytes(m.pubKey); - if (m.peerRecordEnvelope != null && Object.hasOwnProperty.call(m, "peerRecordEnvelope")) - w.uint32(42).bytes(m.peerRecordEnvelope); - return w; - }; - - /** - * Decodes a Peer message from the specified reader or buffer. - * @function decode - * @memberof Peer - * @static - * @param {$protobuf.Reader|Uint8Array} r Reader or buffer to decode from - * @param {number} [l] Message length if known beforehand - * @returns {Peer} Peer - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Peer.decode = function decode(r, l) { - if (!(r instanceof $Reader)) - r = $Reader.create(r); - var c = l === undefined ? r.len : r.pos + l, m = new $root.Peer(); - while (r.pos < c) { - var t = r.uint32(); - switch (t >>> 3) { - case 1: - if (!(m.addresses && m.addresses.length)) - m.addresses = []; - m.addresses.push($root.Address.decode(r, r.uint32())); - break; - case 2: - if (!(m.protocols && m.protocols.length)) - m.protocols = []; - m.protocols.push(r.string()); - break; - case 3: - if (!(m.metadata && m.metadata.length)) - m.metadata = []; - m.metadata.push($root.Metadata.decode(r, r.uint32())); - break; - case 4: - m.pubKey = r.bytes(); - break; - case 5: - m.peerRecordEnvelope = r.bytes(); - break; - default: - r.skipType(t & 7); - break; - } - } - return m; - }; - - /** - * Creates a Peer message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof Peer - * @static - * @param {Object.} d Plain object - * @returns {Peer} Peer - */ - Peer.fromObject = function fromObject(d) { - if (d instanceof $root.Peer) - return d; - var m = new $root.Peer(); - if (d.addresses) { - if (!Array.isArray(d.addresses)) - throw TypeError(".Peer.addresses: array expected"); - m.addresses = []; - for (var i = 0; i < d.addresses.length; ++i) { - if (typeof d.addresses[i] !== "object") - throw TypeError(".Peer.addresses: object expected"); - m.addresses[i] = $root.Address.fromObject(d.addresses[i]); - } - } - if (d.protocols) { - if (!Array.isArray(d.protocols)) - throw TypeError(".Peer.protocols: array expected"); - m.protocols = []; - for (var i = 0; i < d.protocols.length; ++i) { - m.protocols[i] = String(d.protocols[i]); - } - } - if (d.metadata) { - if (!Array.isArray(d.metadata)) - throw TypeError(".Peer.metadata: array expected"); - m.metadata = []; - for (var i = 0; i < d.metadata.length; ++i) { - if (typeof d.metadata[i] !== "object") - throw TypeError(".Peer.metadata: object expected"); - m.metadata[i] = $root.Metadata.fromObject(d.metadata[i]); - } - } - if (d.pubKey != null) { - if (typeof d.pubKey === "string") - $util.base64.decode(d.pubKey, m.pubKey = $util.newBuffer($util.base64.length(d.pubKey)), 0); - else if (d.pubKey.length) - m.pubKey = d.pubKey; - } - if (d.peerRecordEnvelope != null) { - if (typeof d.peerRecordEnvelope === "string") - $util.base64.decode(d.peerRecordEnvelope, m.peerRecordEnvelope = $util.newBuffer($util.base64.length(d.peerRecordEnvelope)), 0); - else if (d.peerRecordEnvelope.length) - m.peerRecordEnvelope = d.peerRecordEnvelope; - } - return m; - }; - - /** - * Creates a plain object from a Peer message. Also converts values to other types if specified. - * @function toObject - * @memberof Peer - * @static - * @param {Peer} m Peer - * @param {$protobuf.IConversionOptions} [o] Conversion options - * @returns {Object.} Plain object - */ - Peer.toObject = function toObject(m, o) { - if (!o) - o = {}; - var d = {}; - if (o.arrays || o.defaults) { - d.addresses = []; - d.protocols = []; - d.metadata = []; - } - if (m.addresses && m.addresses.length) { - d.addresses = []; - for (var j = 0; j < m.addresses.length; ++j) { - d.addresses[j] = $root.Address.toObject(m.addresses[j], o); - } - } - if (m.protocols && m.protocols.length) { - d.protocols = []; - for (var j = 0; j < m.protocols.length; ++j) { - d.protocols[j] = m.protocols[j]; - } - } - if (m.metadata && m.metadata.length) { - d.metadata = []; - for (var j = 0; j < m.metadata.length; ++j) { - d.metadata[j] = $root.Metadata.toObject(m.metadata[j], o); - } - } - if (m.pubKey != null && m.hasOwnProperty("pubKey")) { - d.pubKey = o.bytes === String ? $util.base64.encode(m.pubKey, 0, m.pubKey.length) : o.bytes === Array ? Array.prototype.slice.call(m.pubKey) : m.pubKey; - if (o.oneofs) - d._pubKey = "pubKey"; - } - if (m.peerRecordEnvelope != null && m.hasOwnProperty("peerRecordEnvelope")) { - d.peerRecordEnvelope = o.bytes === String ? $util.base64.encode(m.peerRecordEnvelope, 0, m.peerRecordEnvelope.length) : o.bytes === Array ? Array.prototype.slice.call(m.peerRecordEnvelope) : m.peerRecordEnvelope; - if (o.oneofs) - d._peerRecordEnvelope = "peerRecordEnvelope"; - } - return d; - }; - - /** - * Converts this Peer to JSON. 
- * @function toJSON - * @memberof Peer - * @instance - * @returns {Object.} JSON object - */ - Peer.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - return Peer; -})(); - -$root.Address = (function() { - - /** - * Properties of an Address. - * @exports IAddress - * @interface IAddress - * @property {Uint8Array|null} [multiaddr] Address multiaddr - * @property {boolean|null} [isCertified] Address isCertified - */ - - /** - * Constructs a new Address. - * @exports Address - * @classdesc Represents an Address. - * @implements IAddress - * @constructor - * @param {IAddress=} [p] Properties to set - */ - function Address(p) { - if (p) - for (var ks = Object.keys(p), i = 0; i < ks.length; ++i) - if (p[ks[i]] != null) - this[ks[i]] = p[ks[i]]; - } - - /** - * Address multiaddr. - * @member {Uint8Array} multiaddr - * @memberof Address - * @instance - */ - Address.prototype.multiaddr = $util.newBuffer([]); - - /** - * Address isCertified. - * @member {boolean|null|undefined} isCertified - * @memberof Address - * @instance - */ - Address.prototype.isCertified = null; - - // OneOf field names bound to virtual getters and setters - var $oneOfFields; - - /** - * Address _isCertified. - * @member {"isCertified"|undefined} _isCertified - * @memberof Address - * @instance - */ - Object.defineProperty(Address.prototype, "_isCertified", { - get: $util.oneOfGetter($oneOfFields = ["isCertified"]), - set: $util.oneOfSetter($oneOfFields) - }); - - /** - * Encodes the specified Address message. Does not implicitly {@link Address.verify|verify} messages. - * @function encode - * @memberof Address - * @static - * @param {IAddress} m Address message or plain object to encode - * @param {$protobuf.Writer} [w] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Address.encode = function encode(m, w) { - if (!w) - w = $Writer.create(); - if (m.multiaddr != null && Object.hasOwnProperty.call(m, "multiaddr")) - w.uint32(10).bytes(m.multiaddr); - if (m.isCertified != null && Object.hasOwnProperty.call(m, "isCertified")) - w.uint32(16).bool(m.isCertified); - return w; - }; - - /** - * Decodes an Address message from the specified reader or buffer. - * @function decode - * @memberof Address - * @static - * @param {$protobuf.Reader|Uint8Array} r Reader or buffer to decode from - * @param {number} [l] Message length if known beforehand - * @returns {Address} Address - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Address.decode = function decode(r, l) { - if (!(r instanceof $Reader)) - r = $Reader.create(r); - var c = l === undefined ? r.len : r.pos + l, m = new $root.Address(); - while (r.pos < c) { - var t = r.uint32(); - switch (t >>> 3) { - case 1: - m.multiaddr = r.bytes(); - break; - case 2: - m.isCertified = r.bool(); - break; - default: - r.skipType(t & 7); - break; - } - } - return m; - }; - - /** - * Creates an Address message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof Address - * @static - * @param {Object.} d Plain object - * @returns {Address} Address - */ - Address.fromObject = function fromObject(d) { - if (d instanceof $root.Address) - return d; - var m = new $root.Address(); - if (d.multiaddr != null) { - if (typeof d.multiaddr === "string") - $util.base64.decode(d.multiaddr, m.multiaddr = $util.newBuffer($util.base64.length(d.multiaddr)), 0); - else if (d.multiaddr.length) - m.multiaddr = d.multiaddr; - } - if (d.isCertified != null) { - m.isCertified = Boolean(d.isCertified); - } - return m; - }; - - /** - * Creates a plain object from an Address message. Also converts values to other types if specified. - * @function toObject - * @memberof Address - * @static - * @param {Address} m Address - * @param {$protobuf.IConversionOptions} [o] Conversion options - * @returns {Object.} Plain object - */ - Address.toObject = function toObject(m, o) { - if (!o) - o = {}; - var d = {}; - if (o.defaults) { - if (o.bytes === String) - d.multiaddr = ""; - else { - d.multiaddr = []; - if (o.bytes !== Array) - d.multiaddr = $util.newBuffer(d.multiaddr); - } - } - if (m.multiaddr != null && m.hasOwnProperty("multiaddr")) { - d.multiaddr = o.bytes === String ? $util.base64.encode(m.multiaddr, 0, m.multiaddr.length) : o.bytes === Array ? Array.prototype.slice.call(m.multiaddr) : m.multiaddr; - } - if (m.isCertified != null && m.hasOwnProperty("isCertified")) { - d.isCertified = m.isCertified; - if (o.oneofs) - d._isCertified = "isCertified"; - } - return d; - }; - - /** - * Converts this Address to JSON. - * @function toJSON - * @memberof Address - * @instance - * @returns {Object.} JSON object - */ - Address.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - return Address; -})(); - -$root.Metadata = (function() { - - /** - * Properties of a Metadata. - * @exports IMetadata - * @interface IMetadata - * @property {string|null} [key] Metadata key - * @property {Uint8Array|null} [value] Metadata value - */ - - /** - * Constructs a new Metadata. - * @exports Metadata - * @classdesc Represents a Metadata. - * @implements IMetadata - * @constructor - * @param {IMetadata=} [p] Properties to set - */ - function Metadata(p) { - if (p) - for (var ks = Object.keys(p), i = 0; i < ks.length; ++i) - if (p[ks[i]] != null) - this[ks[i]] = p[ks[i]]; - } - - /** - * Metadata key. - * @member {string} key - * @memberof Metadata - * @instance - */ - Metadata.prototype.key = ""; - - /** - * Metadata value. - * @member {Uint8Array} value - * @memberof Metadata - * @instance - */ - Metadata.prototype.value = $util.newBuffer([]); - - /** - * Encodes the specified Metadata message. Does not implicitly {@link Metadata.verify|verify} messages. - * @function encode - * @memberof Metadata - * @static - * @param {IMetadata} m Metadata message or plain object to encode - * @param {$protobuf.Writer} [w] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Metadata.encode = function encode(m, w) { - if (!w) - w = $Writer.create(); - if (m.key != null && Object.hasOwnProperty.call(m, "key")) - w.uint32(10).string(m.key); - if (m.value != null && Object.hasOwnProperty.call(m, "value")) - w.uint32(18).bytes(m.value); - return w; - }; - - /** - * Decodes a Metadata message from the specified reader or buffer. 
- * @function decode - * @memberof Metadata - * @static - * @param {$protobuf.Reader|Uint8Array} r Reader or buffer to decode from - * @param {number} [l] Message length if known beforehand - * @returns {Metadata} Metadata - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Metadata.decode = function decode(r, l) { - if (!(r instanceof $Reader)) - r = $Reader.create(r); - var c = l === undefined ? r.len : r.pos + l, m = new $root.Metadata(); - while (r.pos < c) { - var t = r.uint32(); - switch (t >>> 3) { - case 1: - m.key = r.string(); - break; - case 2: - m.value = r.bytes(); - break; - default: - r.skipType(t & 7); - break; - } - } - return m; - }; - - /** - * Creates a Metadata message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof Metadata - * @static - * @param {Object.} d Plain object - * @returns {Metadata} Metadata - */ - Metadata.fromObject = function fromObject(d) { - if (d instanceof $root.Metadata) - return d; - var m = new $root.Metadata(); - if (d.key != null) { - m.key = String(d.key); - } - if (d.value != null) { - if (typeof d.value === "string") - $util.base64.decode(d.value, m.value = $util.newBuffer($util.base64.length(d.value)), 0); - else if (d.value.length) - m.value = d.value; - } - return m; - }; - - /** - * Creates a plain object from a Metadata message. Also converts values to other types if specified. - * @function toObject - * @memberof Metadata - * @static - * @param {Metadata} m Metadata - * @param {$protobuf.IConversionOptions} [o] Conversion options - * @returns {Object.} Plain object - */ - Metadata.toObject = function toObject(m, o) { - if (!o) - o = {}; - var d = {}; - if (o.defaults) { - d.key = ""; - if (o.bytes === String) - d.value = ""; - else { - d.value = []; - if (o.bytes !== Array) - d.value = $util.newBuffer(d.value); - } - } - if (m.key != null && m.hasOwnProperty("key")) { - d.key = m.key; - } - if (m.value != null && m.hasOwnProperty("value")) { - d.value = o.bytes === String ? $util.base64.encode(m.value, 0, m.value.length) : o.bytes === Array ? Array.prototype.slice.call(m.value) : m.value; - } - return d; - }; - - /** - * Converts this Metadata to JSON. 
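The generated protobuf codec above is used by the store as a plain encode/decode round trip. A minimal sketch follows; the field values and `someMultiaddr` are made up, and the relative require mirrors the one used by `store.js`.

```js
// Sketch only: same relative require that store.js uses from src/peer-store.
const { Peer } = require('./pb/peer')

// `someMultiaddr` is an assumed Multiaddr instance; the message stores its bytes.
const buf = Peer.encode({
  addresses: [{ multiaddr: someMultiaddr.bytes, isCertified: true }],
  protocols: ['/ipfs/ping/1.0.0'],
  metadata: [{ key: 'language', value: new TextEncoder().encode('en') }]
}).finish()

const peer = Peer.decode(buf)
console.log(peer.protocols) // ['/ipfs/ping/1.0.0']
```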
- * @function toJSON - * @memberof Metadata - * @instance - * @returns {Object.} JSON object - */ - Metadata.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - return Metadata; -})(); - -module.exports = $root; diff --git a/src/peer-store/pb/peer.proto b/src/peer-store/pb/peer.proto deleted file mode 100644 index 1c9cc166..00000000 --- a/src/peer-store/pb/peer.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -message Peer { - // Multiaddrs we know about - repeated Address addresses = 1; - - // The protocols the peer supports - repeated string protocols = 2; - - // Any peer metadata - repeated Metadata metadata = 3; - - // The public key of the peer - optional bytes pub_key = 4; - - // The most recently received signed PeerRecord - optional bytes peer_record_envelope = 5; -} - -// Address represents a single multiaddr -message Address { - bytes multiaddr = 1; - - // Flag to indicate if the address comes from a certified source - optional bool isCertified = 2; -} - -message Metadata { - string key = 1; - bytes value = 2; -} diff --git a/src/peer-store/proto-book.js b/src/peer-store/proto-book.js deleted file mode 100644 index 686957ec..00000000 --- a/src/peer-store/proto-book.js +++ /dev/null @@ -1,237 +0,0 @@ -'use strict' - -const debug = require('debug') -const errcode = require('err-code') -const { codes } = require('../errors') -const PeerId = require('peer-id') - -/** - * @typedef {import('./types').PeerStore} PeerStore - * @typedef {import('./types').ProtoBook} ProtoBook - */ - -const log = Object.assign(debug('libp2p:peer-store:proto-book'), { - error: debug('libp2p:peer-store:proto-book:err') -}) - -const EVENT_NAME = 'change:protocols' - -/** - * @implements {ProtoBook} - */ -class PersistentProtoBook { - /** - * @param {PeerStore["emit"]} emit - * @param {import('./types').Store} store - */ - constructor (emit, store) { - this._emit = emit - this._store = store - } - - /** - * @param {PeerId} peerId - */ - async get (peerId) { - log('get wait for read lock') - const release = await this._store.lock.readLock() - log('get got read lock') - - try { - const peer = await this._store.load(peerId) - - return peer.protocols - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } finally { - log('get release read lock') - release() - } - - return [] - } - - /** - * @param {PeerId} peerId - * @param {string[]} protocols - */ - async set (peerId, protocols) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - if (!Array.isArray(protocols)) { - log.error('protocols must be provided to store data') - throw errcode(new Error('protocols must be provided'), codes.ERR_INVALID_PARAMETERS) - } - - log('set await write lock') - const release = await this._store.lock.writeLock() - log('set got write lock') - - let updatedPeer - - try { - try { - const peer = await this._store.load(peerId) - - if (new Set([ - ...protocols - ]).size === peer.protocols.length) { - return - } - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } - - updatedPeer = await this._store.patchOrCreate(peerId, { - protocols - }) - - log(`stored provided protocols for ${peerId.toB58String()}`) - } finally { - log('set release write lock') - release() - } - - this._emit(EVENT_NAME, { peerId, protocols: 
updatedPeer.protocols }) - } - - /** - * @param {PeerId} peerId - * @param {string[]} protocols - */ - async add (peerId, protocols) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - if (!Array.isArray(protocols)) { - log.error('protocols must be provided to store data') - throw errcode(new Error('protocols must be provided'), codes.ERR_INVALID_PARAMETERS) - } - - log('add await write lock') - const release = await this._store.lock.writeLock() - log('add got write lock') - - let updatedPeer - - try { - try { - const peer = await this._store.load(peerId) - - if (new Set([ - ...peer.protocols, - ...protocols - ]).size === peer.protocols.length) { - return - } - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } - - updatedPeer = await this._store.mergeOrCreate(peerId, { - protocols - }) - - log(`added provided protocols for ${peerId.toB58String()}`) - } finally { - log('add release write lock') - release() - } - - this._emit(EVENT_NAME, { peerId, protocols: updatedPeer.protocols }) - } - - /** - * @param {PeerId} peerId - * @param {string[]} protocols - */ - async remove (peerId, protocols) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - if (!Array.isArray(protocols)) { - log.error('protocols must be provided to store data') - throw errcode(new Error('protocols must be provided'), codes.ERR_INVALID_PARAMETERS) - } - - log('remove await write lock') - const release = await this._store.lock.writeLock() - log('remove got write lock') - - let updatedPeer - - try { - try { - const peer = await this._store.load(peerId) - const protocolSet = new Set(peer.protocols) - - for (const protocol of protocols) { - protocolSet.delete(protocol) - } - - if (peer.protocols.length === protocolSet.size) { - return - } - - protocols = Array.from(protocolSet) - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } - - updatedPeer = await this._store.patchOrCreate(peerId, { - protocols - }) - } finally { - log('remove release write lock') - release() - } - - this._emit(EVENT_NAME, { peerId, protocols: updatedPeer.protocols }) - } - - /** - * @param {PeerId} peerId - */ - async delete (peerId) { - log('delete await write lock') - const release = await this._store.lock.writeLock() - log('delete got write lock') - let has - - try { - has = await this._store.has(peerId) - - await this._store.patchOrCreate(peerId, { - protocols: [] - }) - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - } finally { - log('delete release write lock') - release() - } - - if (has) { - this._emit(EVENT_NAME, { peerId, protocols: [] }) - } - } -} - -module.exports = PersistentProtoBook diff --git a/src/peer-store/store.js b/src/peer-store/store.js deleted file mode 100644 index 34dcce36..00000000 --- a/src/peer-store/store.js +++ /dev/null @@ -1,263 +0,0 @@ -'use strict' - -const debug = require('debug') -const PeerId = require('peer-id') -const errcode = require('err-code') -const { codes } = require('../errors') -const { Key } = require('interface-datastore/key') -const { base32 } = require('multiformats/bases/base32') -const { keys: { unmarshalPublicKey, marshalPublicKey } } = 
require('libp2p-crypto') -const { Multiaddr } = require('multiaddr') -const { Peer: PeerPB } = require('./pb/peer') -// @ts-expect-error no types -const mortice = require('mortice') -const { equals: uint8arrayEquals } = require('uint8arrays/equals') - -const log = Object.assign(debug('libp2p:peer-store:store'), { - error: debug('libp2p:peer-store:store:err') -}) - -/** - * @typedef {import('./types').PeerStore} PeerStore - * @typedef {import('./types').EventName} EventName - * @typedef {import('./types').Peer} Peer - */ - -const NAMESPACE_COMMON = '/peers/' - -class PersistentStore { - /** - * @param {import('interface-datastore').Datastore} datastore - */ - constructor (datastore) { - this._datastore = datastore - this.lock = mortice('peer-store', { - singleProcess: true - }) - } - - /** - * @param {PeerId} peerId - * @returns {Key} - */ - _peerIdToDatastoreKey (peerId) { - if (!PeerId.isPeerId(peerId)) { - log.error('peerId must be an instance of peer-id to store data') - throw errcode(new Error('peerId must be an instance of peer-id'), codes.ERR_INVALID_PARAMETERS) - } - - const b32key = peerId.toString() - return new Key(`${NAMESPACE_COMMON}${b32key}`) - } - - /** - * @param {PeerId} peerId - */ - async has (peerId) { - return this._datastore.has(this._peerIdToDatastoreKey(peerId)) - } - - /** - * @param {PeerId} peerId - */ - async delete (peerId) { - await this._datastore.delete(this._peerIdToDatastoreKey(peerId)) - } - - /** - * @param {PeerId} peerId - * @returns {Promise} peer - */ - async load (peerId) { - const buf = await this._datastore.get(this._peerIdToDatastoreKey(peerId)) - const peer = PeerPB.decode(buf) - const pubKey = peer.pubKey ? unmarshalPublicKey(peer.pubKey) : peerId.pubKey - const metadata = new Map() - - for (const meta of peer.metadata) { - metadata.set(meta.key, meta.value) - } - - return { - ...peer, - id: peerId, - pubKey, - addresses: peer.addresses.map(({ multiaddr, isCertified }) => ({ - multiaddr: new Multiaddr(multiaddr), - isCertified: isCertified || false - })), - metadata, - peerRecordEnvelope: peer.peerRecordEnvelope || undefined - } - } - - /** - * @param {Peer} peer - */ - async save (peer) { - if (peer.pubKey != null && peer.id.pubKey != null && !uint8arrayEquals(peer.pubKey.bytes, peer.id.pubKey.bytes)) { - log.error('peer publicKey bytes do not match peer id publicKey bytes') - throw errcode(new Error('publicKey bytes do not match peer id publicKey bytes'), codes.ERR_INVALID_PARAMETERS) - } - - // dedupe addresses - const addressSet = new Set() - - const buf = PeerPB.encode({ - addresses: peer.addresses - .filter(address => { - if (addressSet.has(address.multiaddr.toString())) { - return false - } - - addressSet.add(address.multiaddr.toString()) - return true - }) - .sort((a, b) => { - return a.multiaddr.toString().localeCompare(b.multiaddr.toString()) - }) - .map(({ multiaddr, isCertified }) => ({ - multiaddr: multiaddr.bytes, - isCertified - })), - protocols: peer.protocols.sort(), - pubKey: peer.pubKey ? 
marshalPublicKey(peer.pubKey) : undefined, - metadata: [...peer.metadata.keys()].sort().map(key => ({ key, value: peer.metadata.get(key) })), - peerRecordEnvelope: peer.peerRecordEnvelope - }).finish() - - await this._datastore.put(this._peerIdToDatastoreKey(peer.id), buf) - - return this.load(peer.id) - } - - /** - * @param {PeerId} peerId - * @param {Partial} data - */ - async patch (peerId, data) { - const peer = await this.load(peerId) - - return await this._patch(peerId, data, peer) - } - - /** - * @param {PeerId} peerId - * @param {Partial} data - */ - async patchOrCreate (peerId, data) { - /** @type {Peer} */ - let peer - - try { - peer = await this.load(peerId) - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - - peer = { id: peerId, addresses: [], protocols: [], metadata: new Map() } - } - - return await this._patch(peerId, data, peer) - } - - /** - * @param {PeerId} peerId - * @param {Partial} data - * @param {Peer} peer - */ - async _patch (peerId, data, peer) { - return await this.save({ - ...peer, - ...data, - id: peerId - }) - } - - /** - * @param {PeerId} peerId - * @param {Partial} data - */ - async merge (peerId, data) { - const peer = await this.load(peerId) - - return this._merge(peerId, data, peer) - } - - /** - * @param {PeerId} peerId - * @param {Partial} data - */ - async mergeOrCreate (peerId, data) { - /** @type {Peer} */ - let peer - - try { - peer = await this.load(peerId) - } catch (/** @type {any} */ err) { - if (err.code !== codes.ERR_NOT_FOUND) { - throw err - } - - peer = { id: peerId, addresses: [], protocols: [], metadata: new Map() } - } - - return await this._merge(peerId, data, peer) - } - - /** - * @param {PeerId} peerId - * @param {Partial} data - * @param {Peer} peer - */ - async _merge (peerId, data, peer) { - // if the peer has certified addresses, use those in - // favour of the supplied versions - /** @type {Map} */ - const addresses = new Map() - - ;(data.addresses || []).forEach(addr => { - addresses.set(addr.multiaddr.toString(), addr.isCertified) - }) - - peer.addresses.forEach(({ multiaddr, isCertified }) => { - const addrStr = multiaddr.toString() - addresses.set(addrStr, Boolean(addresses.get(addrStr) || isCertified)) - }) - - return await this.save({ - id: peerId, - addresses: Array.from(addresses.entries()).map(([addrStr, isCertified]) => { - return { - multiaddr: new Multiaddr(addrStr), - isCertified - } - }), - protocols: Array.from(new Set([ - ...(peer.protocols || []), - ...(data.protocols || []) - ])), - metadata: new Map([ - ...(peer.metadata ? peer.metadata.entries() : []), - ...(data.metadata ? data.metadata.entries() : []) - ]), - pubKey: data.pubKey || (peer != null ? peer.pubKey : undefined), - peerRecordEnvelope: data.peerRecordEnvelope || (peer != null ? 
peer.peerRecordEnvelope : undefined) - }) - } - - async * all () { - for await (const key of this._datastore.queryKeys({ - prefix: NAMESPACE_COMMON - })) { - // /peers/${peer-id-as-libp2p-key-cid-string-in-base-32} - const base32Str = key.toString().split('/')[2] - const buf = base32.decode(base32Str) - - yield this.load(PeerId.createFromBytes(buf)) - } - } -} - -module.exports = PersistentStore diff --git a/src/peer-store/types.ts b/src/peer-store/types.ts deleted file mode 100644 index 0ae16b46..00000000 --- a/src/peer-store/types.ts +++ /dev/null @@ -1,245 +0,0 @@ -import type PeerId from 'peer-id' -import type { Multiaddr } from 'multiaddr' -import type Envelope from '../record/envelope' -import type { PublicKey } from 'libp2p-interfaces/src/keys/types' - -export interface Address { - /** - * Peer multiaddr - */ - multiaddr: Multiaddr - - /** - * Obtained from a signed peer record - */ - isCertified: boolean -} - -export interface Peer { - /** - * Peer's peer-id instance - */ - id: PeerId - - /** - * Peer's addresses containing its multiaddrs and metadata - */ - addresses: Address[] - - /** - * Peer's supported protocols - */ - protocols: string[] - - /** - * Peer's metadata map - */ - metadata: Map - - /** - * May be set if the key that this Peer has is an RSA key - */ - pubKey?: PublicKey - - /** - * The last peer record envelope received - */ - peerRecordEnvelope?: Uint8Array -} - -export interface CertifiedRecord { - raw: Uint8Array - seqNumber: number -} - -export interface AddressBookEntry { - addresses: Address[] - record: CertifiedRecord -} - -export interface Book { - /** - * Get the known data of a peer - */ - get: (peerId: PeerId) => Promise - - /** - * Set the known data of a peer - */ - set: (peerId: PeerId, data: Type) => Promise - - /** - * Remove the known data of a peer - */ - delete: (peerId: PeerId) => Promise -} - -/** - * AddressBook containing a map of peerIdStr to Address. - */ -export interface AddressBook { - /** - * ConsumePeerRecord adds addresses from a signed peer record contained in a record envelope. - * This will return a boolean that indicates if the record was successfully processed and added - * into the AddressBook - */ - consumePeerRecord: (envelope: Envelope) => Promise - - /** - * Get the raw Envelope for a peer. Returns - * undefined if no Envelope is found - */ - getRawEnvelope: (peerId: PeerId) => Promise - - /** - * Get an Envelope containing a PeerRecord for the given peer. - * Returns undefined if no record exists. - */ - getPeerRecord: (peerId: PeerId) => Promise - - /** - * Add known addresses of a provided peer. - * If the peer is not known, it is set with the given addresses. - */ - add: (peerId: PeerId, multiaddrs: Multiaddr[]) => Promise - - /** - * Set the known addresses of a peer - */ - set: (peerId: PeerId, data: Multiaddr[]) => Promise - - /** - * Return the known addresses of a peer - */ - get: (peerId: PeerId) => Promise - - /** - * Get the known multiaddrs for a given peer. All returned multiaddrs - * will include the encapsulated `PeerId` of the peer. - */ - getMultiaddrsForPeer: (peerId: PeerId, addressSorter?: (ms: Address[]) => Address[]) => Promise -} - -/** - * KeyBook containing a map of peerIdStr to their PeerId with public keys. 
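To make the difference between the two update paths above concrete, a hedged sketch: `patchOrCreate` overwrites the supplied fields, while `mergeOrCreate` unions addresses, protocols and metadata with what is already stored (certified addresses win over uncertified ones). `store`, `peerId`, `ma1` and `ma2` are assumed instances, and the protocol name is hypothetical.

```js
// Sketch only: `store` is an assumed PersistentStore, `ma1`/`ma2` assumed Multiaddrs.
await store.patchOrCreate(peerId, {
  addresses: [{ multiaddr: ma1, isCertified: false }]
})

// merge keeps ma1 and adds ma2; any existing protocols/metadata are kept too
const peer = await store.mergeOrCreate(peerId, {
  addresses: [{ multiaddr: ma2, isCertified: false }],
  protocols: ['/my/protocol/1.0.0'] // hypothetical protocol id
})

console.log(peer.addresses.length) // 2
```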
- */ -export interface KeyBook { - /** - * Get the known data of a peer - */ - get: (peerId: PeerId) => Promise - - /** - * Set the known data of a peer - */ - set: (peerId: PeerId, data: PublicKey) => Promise - - /** - * Remove the known data of a peer - */ - delete: (peerId: PeerId) => Promise -} - -/** - * MetadataBook containing a map of peerIdStr to their metadata Map. - */ -export interface MetadataBook extends Book> { - /** - * Set a specific metadata value - */ - setValue: (peerId: PeerId, key: string, value: Uint8Array) => Promise - - /** - * Get specific metadata value, if it exists - */ - getValue: (peerId: PeerId, key: string) => Promise - - /** - * Deletes the provided peer metadata key from the book - */ - deleteValue: (peerId: PeerId, key: string) => Promise -} - -/** - * ProtoBook containing a map of peerIdStr to supported protocols. - */ -export interface ProtoBook extends Book { - /** - * Adds known protocols of a provided peer. - * If the peer was not known before, it will be added. - */ - add: (peerId: PeerId, protocols: string[]) => Promise - - /** - * Removes known protocols of a provided peer. - * If the protocols did not exist before, nothing will be done. - */ - remove: (peerId: PeerId, protocols: string[]) => Promise -} - -export interface PeerProtocolsChangeEvent { - peerId: PeerId - protocols: string[] -} - -export interface PeerMultiaddrsChangeEvent { - peerId: PeerId - multiaddrs: Multiaddr[] -} - -export interface PeerPublicKeyChangeEvent { - peerId: PeerId - pubKey?: PublicKey -} - -export interface PeerMetadataChangeEvent { - peerId: PeerId - metadata: Map -} - -export type EventName = 'peer' | 'change:protocols' | 'change:multiaddrs' | 'change:pubkey' | 'change:metadata' - -export interface PeerStoreEvents { - 'peer': (event: PeerId) => void - 'change:protocols': (event: PeerProtocolsChangeEvent) => void - 'change:multiaddrs': (event: PeerMultiaddrsChangeEvent) => void - 'change:pubkey': (event: PeerPublicKeyChangeEvent) => void - 'change:metadata': (event: PeerMetadataChangeEvent) => void -} - -export interface PeerStore { - addressBook: AddressBook - keyBook: KeyBook - metadataBook: MetadataBook - protoBook: ProtoBook - - getPeers: () => AsyncIterable - delete: (peerId: PeerId) => Promise - has: (peerId: PeerId) => Promise - get: (peerId: PeerId) => Promise - on: ( - event: U, listener: PeerStoreEvents[U] - ) => this - once: ( - event: U, listener: PeerStoreEvents[U] - ) => this - emit: ( - event: U, ...args: Parameters - ) => boolean -} - -export interface Store { - has: (peerId: PeerId) => Promise - save: (peer: Peer) => Promise - load: (peerId: PeerId) => Promise - merge: (peerId: PeerId, data: Partial) => Promise - mergeOrCreate: (peerId: PeerId, data: Partial) => Promise - patch: (peerId: PeerId, data: Partial) => Promise - patchOrCreate: (peerId: PeerId, data: Partial) => Promise - all: () => AsyncIterable - - lock: { - readLock: () => Promise<() => void> - writeLock: () => Promise<() => void> - } -} diff --git a/src/ping/README.md b/src/ping/README.md index 079e9562..6dccd4be 100644 --- a/src/ping/README.md +++ b/src/ping/README.md @@ -8,7 +8,7 @@ libp2p-ping JavaScript Implementation ## Usage ```javascript -var Ping = require('libp2p/src/ping') +import Ping from 'libp2p/src/ping' Ping.mount(libp2p) // Enable this peer to echo Ping requests diff --git a/src/ping/constants.js b/src/ping/constants.js deleted file mode 100644 index 8ddd596d..00000000 --- a/src/ping/constants.js +++ /dev/null @@ -1,8 +0,0 @@ -'use strict' - -module.exports = { - 
PROTOCOL: '/ipfs/ping/1.0.0', // deprecated - PING_LENGTH: 32, - PROTOCOL_VERSION: '1.0.0', - PROTOCOL_NAME: 'ping' -} diff --git a/src/ping/constants.ts b/src/ping/constants.ts new file mode 100644 index 00000000..9411e75c --- /dev/null +++ b/src/ping/constants.ts @@ -0,0 +1,5 @@ + +export const PROTOCOL = '/ipfs/ping/1.0.0' +export const PING_LENGTH = 32 +export const PROTOCOL_VERSION = '1.0.0' +export const PROTOCOL_NAME = 'ping' diff --git a/src/ping/index.js b/src/ping/index.js deleted file mode 100644 index 46f87e3a..00000000 --- a/src/ping/index.js +++ /dev/null @@ -1,84 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:ping'), { - error: debug('libp2p:ping:err') -}) -const errCode = require('err-code') -const { codes } = require('../errors') -const crypto = require('libp2p-crypto') -const { pipe } = require('it-pipe') -// @ts-ignore it-buffer has no types exported -const { toBuffer } = require('it-buffer') -const { collect, take } = require('streaming-iterables') -const { equals } = require('uint8arrays/equals') - -const { PROTOCOL_NAME, PING_LENGTH, PROTOCOL_VERSION } = require('./constants') - -/** - * @typedef {import('../')} Libp2p - * @typedef {import('multiaddr').Multiaddr} Multiaddr - * @typedef {import('peer-id')} PeerId - * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxedStream} MuxedStream - */ - -class PingService { - /** - * @param {import('../')} libp2p - */ - static getProtocolStr (libp2p) { - return `/${libp2p._config.protocolPrefix}/${PROTOCOL_NAME}/${PROTOCOL_VERSION}` - } - - /** - * @param {Libp2p} libp2p - */ - constructor (libp2p) { - this._libp2p = libp2p - } - - /** - * A handler to register with Libp2p to process ping messages - * - * @param {Object} options - * @param {MuxedStream} options.stream - */ - handleMessage ({ stream }) { - return pipe(stream, stream) - } - - /** - * Ping a given peer and wait for its response, getting the operation latency. - * - * @param {PeerId|Multiaddr} peer - * @returns {Promise} - */ - async ping (peer) { - const protocol = `/${this._libp2p._config.protocolPrefix}/${PROTOCOL_NAME}/${PROTOCOL_VERSION}` - // @ts-ignore multiaddr might not have toB58String - log('dialing %s to %s', protocol, peer.toB58String ? 
peer.toB58String() : peer) - - const connection = await this._libp2p.dial(peer) - const { stream } = await connection.newStream(protocol) - - const start = Date.now() - const data = crypto.randomBytes(PING_LENGTH) - - const [result] = await pipe( - [data], - stream, - (/** @type {MuxedStream} */ stream) => take(1, stream), - toBuffer, - collect - ) - const end = Date.now() - - if (!equals(data, result)) { - throw errCode(new Error('Received wrong ping ack'), codes.ERR_WRONG_PING_ACK) - } - - return end - start - } -} - -module.exports = PingService diff --git a/src/ping/index.ts b/src/ping/index.ts new file mode 100644 index 00000000..745d1dc4 --- /dev/null +++ b/src/ping/index.ts @@ -0,0 +1,83 @@ +import { logger } from '@libp2p/logger' +import errCode from 'err-code' +import { codes } from '../errors.js' +import { randomBytes } from '@libp2p/crypto' +import { pipe } from 'it-pipe' +import first from 'it-first' +import { equals as uint8ArrayEquals } from 'uint8arrays/equals' +import { PROTOCOL_NAME, PING_LENGTH, PROTOCOL_VERSION } from './constants.js' +import type { IncomingStreamData } from '@libp2p/interfaces/registrar' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { Startable } from '@libp2p/interfaces' +import type { Components } from '@libp2p/interfaces/components' + +const log = logger('libp2p:ping') + +export interface PingServiceInit { + protocolPrefix: string +} + +export class PingService implements Startable { + private readonly components: Components + private readonly protocol: string + private started: boolean + + constructor (components: Components, init: PingServiceInit) { + this.components = components + this.started = false + this.protocol = `/${init.protocolPrefix}/${PROTOCOL_NAME}/${PROTOCOL_VERSION}` + } + + async start () { + await this.components.getRegistrar().handle(this.protocol, this.handleMessage) + this.started = true + } + + async stop () { + await this.components.getRegistrar().unhandle(this.protocol) + this.started = false + } + + isStarted () { + return this.started + } + + /** + * A handler to register with Libp2p to process ping messages + */ + handleMessage (data: IncomingStreamData) { + const { stream } = data + + void pipe(stream, stream) + .catch(err => { + log.error(err) + }) + } + + /** + * Ping a given peer and wait for its response, getting the operation latency. 
+ * + * @param {PeerId|Multiaddr} peer + * @returns {Promise} + */ + async ping (peer: PeerId): Promise { + log('dialing %s to %p', this.protocol, peer) + + const { stream } = await this.components.getDialer().dialProtocol(peer, this.protocol) + const start = Date.now() + const data = randomBytes(PING_LENGTH) + + const result = await pipe( + [data], + stream, + async (source) => await first(source) + ) + const end = Date.now() + + if (result == null || !uint8ArrayEquals(data, result)) { + throw errCode(new Error('Received wrong ping ack'), codes.ERR_WRONG_PING_ACK) + } + + return end - start + } +} diff --git a/src/ping/util.js b/src/ping/util.js deleted file mode 100644 index e942420a..00000000 --- a/src/ping/util.js +++ /dev/null @@ -1,18 +0,0 @@ -'use strict' - -const crypto = require('libp2p-crypto') -const constants = require('./constants') - -/** - * @param {number} length - */ -function rnd (length) { - if (!length) { - length = constants.PING_LENGTH - } - return crypto.randomBytes(length) -} - -module.exports = { - rnd -} diff --git a/src/pnet/README.md b/src/pnet/README.md index d3cce111..effcb189 100644 --- a/src/pnet/README.md +++ b/src/pnet/README.md @@ -20,7 +20,7 @@ js-libp2p-pnet ## Usage ```js -const Protector = require('libp2p-pnet') +const Protector from 'libp2p-pnet') const protector = new Protector(swarmKeyBuffer) const privateConnection = protector.protect(myPublicConnection, (err) => { }) ``` @@ -63,7 +63,7 @@ node -e "require('libp2p/src/pnet').generate(process.stdout)" > swarm.key #### Programmatically ```js -const writeKey = require('libp2p/src/pnet').generate +const writeKey from 'libp2p/src/pnet').generate const swarmKey = new Uint8Array(95) writeKey(swarmKey) fs.writeFileSync('swarm.key', swarmKey) diff --git a/src/pnet/crypto.js b/src/pnet/crypto.js deleted file mode 100644 index eb9cb223..00000000 --- a/src/pnet/crypto.js +++ /dev/null @@ -1,84 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:pnet'), { - trace: debug('libp2p:pnet:trace'), - error: debug('libp2p:pnet:err') -}) - -const Errors = require('./errors') -// @ts-ignore xsalsa20 has no types exported -const xsalsa20 = require('xsalsa20') -const KEY_LENGTH = require('./key-generator').KEY_LENGTH -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') - -/** - * Creates a stream iterable to encrypt messages in a private network - * - * @param {Uint8Array} nonce - The nonce to use in encryption - * @param {Uint8Array} psk - The private shared key to use in encryption - * @returns {*} a through iterable - */ -module.exports.createBoxStream = (nonce, psk) => { - const xor = xsalsa20(nonce, psk) - - return (/** @type {AsyncIterable} */ source) => (async function * () { - for await (const chunk of source) { - yield Uint8Array.from(xor.update(chunk.slice())) - } - })() -} - -/** - * Creates a stream iterable to decrypt messages in a private network - * - * @param {Uint8Array} nonce - The nonce of the remote peer - * @param {Uint8Array} psk - The private shared key to use in decryption - * @returns {*} a through iterable - */ -module.exports.createUnboxStream = (nonce, psk) => { - return (/** @type {AsyncIterable} */ source) => (async function * () { - const xor = xsalsa20(nonce, psk) - log.trace('Decryption enabled') - - for await (const chunk of source) { - yield Uint8Array.from(xor.update(chunk.slice())) - } - })() -} - -/** - * Decode the version 1 psk from the 
given Uint8Array - * - * @param {Uint8Array} pskBuffer - * @throws {INVALID_PSK} - * @returns {{ tag?: string, codecName?: string, psk: Uint8Array }} The PSK metadata (tag, codecName, psk) - */ -module.exports.decodeV1PSK = (pskBuffer) => { - try { - // This should pull from multibase/multicodec to allow for - // more encoding flexibility. Ideally we'd consume the codecs - // from the buffer line by line to evaluate the next line - // programmatically instead of making assumptions about the - // encodings of each line. - const metadata = uint8ArrayToString(pskBuffer).split(/(?:\r\n|\r|\n)/g) - const pskTag = metadata.shift() - const codec = metadata.shift() - const pskString = metadata.shift() - const psk = pskString && uint8ArrayFromString(pskString, 'base16') - - if (!psk || psk.byteLength !== KEY_LENGTH) { - throw new Error(Errors.INVALID_PSK) - } - - return { - tag: pskTag, - codecName: codec, - psk: psk - } - } catch (/** @type {any} */ err) { - log.error(err) - throw new Error(Errors.INVALID_PSK) - } -} diff --git a/src/pnet/crypto.ts b/src/pnet/crypto.ts new file mode 100644 index 00000000..ffc6b17d --- /dev/null +++ b/src/pnet/crypto.ts @@ -0,0 +1,67 @@ +import { logger } from '@libp2p/logger' +import * as Errors from './errors.js' +import xsalsa20 from 'xsalsa20' +import { KEY_LENGTH } from './key-generator.js' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import type { Source } from 'it-stream-types' + +const log = logger('libp2p:pnet') + +/** + * Creates a stream iterable to encrypt messages in a private network + */ +export function createBoxStream (nonce: Uint8Array, psk: Uint8Array) { + const xor = xsalsa20(nonce, psk) + + return (source: Source) => (async function * () { + for await (const chunk of source) { + yield Uint8Array.from(xor.update(chunk.slice())) + } + })() +} + +/** + * Creates a stream iterable to decrypt messages in a private network + */ +export function createUnboxStream (nonce: Uint8Array, psk: Uint8Array) { + return (source: Source) => (async function * () { + const xor = xsalsa20(nonce, psk) + log.trace('Decryption enabled') + + for await (const chunk of source) { + yield Uint8Array.from(xor.update(chunk.slice())) + } + })() +} + +/** + * Decode the version 1 psk from the given Uint8Array + */ +export function decodeV1PSK (pskBuffer: Uint8Array) { + try { + // This should pull from multibase/multicodec to allow for + // more encoding flexibility. Ideally we'd consume the codecs + // from the buffer line by line to evaluate the next line + // programmatically instead of making assumptions about the + // encodings of each line. + const metadata = uint8ArrayToString(pskBuffer).split(/(?:\r\n|\r|\n)/g) + const pskTag = metadata.shift() + const codec = metadata.shift() + const pskString = metadata.shift() + const psk = uint8ArrayFromString(pskString ?? 
'', 'base16') + + if (psk.byteLength !== KEY_LENGTH) { + throw new Error(Errors.INVALID_PSK) + } + + return { + tag: pskTag, + codecName: codec, + psk: psk + } + } catch (err: any) { + log.error(err) + throw new Error(Errors.INVALID_PSK) + } +} diff --git a/src/pnet/errors.js b/src/pnet/errors.js deleted file mode 100644 index 57c44228..00000000 --- a/src/pnet/errors.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -module.exports.INVALID_PEER = 'Not a valid peer connection' -module.exports.INVALID_PSK = 'Your private shared key is invalid' -module.exports.NO_LOCAL_ID = 'No local private key provided' -module.exports.NO_HANDSHAKE_CONNECTION = 'No connection for the handshake provided' -module.exports.STREAM_ENDED = 'Stream ended prematurely' diff --git a/src/pnet/errors.ts b/src/pnet/errors.ts new file mode 100644 index 00000000..b09f68a2 --- /dev/null +++ b/src/pnet/errors.ts @@ -0,0 +1,5 @@ +export const INVALID_PEER = 'Not a valid peer connection' +export const INVALID_PSK = 'Your private shared key is invalid' +export const NO_LOCAL_ID = 'No local private key provided' +export const NO_HANDSHAKE_CONNECTION = 'No connection for the handshake provided' +export const STREAM_ENDED = 'Stream ended prematurely' diff --git a/src/pnet/index.js b/src/pnet/index.js deleted file mode 100644 index c54e1f9c..00000000 --- a/src/pnet/index.js +++ /dev/null @@ -1,86 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:pnet'), { - error: debug('libp2p:pnet:err') -}) -const { pipe } = require('it-pipe') -const errcode = require('err-code') -// @ts-ignore it-pair has no types exported -const duplexPair = require('it-pair/duplex') -const crypto = require('libp2p-crypto') -const Errors = require('./errors') -const { - codes: { ERR_INVALID_PARAMETERS } -} = require('../errors') -const { - createBoxStream, - createUnboxStream, - decodeV1PSK -} = require('./crypto') -// @ts-ignore it-handshake has no types exported -const handshake = require('it-handshake') -const { NONCE_LENGTH } = require('./key-generator') - -/** - * @typedef {import('libp2p-interfaces/src/transport/types').MultiaddrConnection} MultiaddrConnection - */ - -class Protector { - /** - * Takes a Private Shared Key (psk) and provides a `protect` method - * for wrapping existing connections in a private encryption stream. - * - * @param {Uint8Array} keyBuffer - The private shared key buffer - * @class - */ - constructor (keyBuffer) { - const decodedPSK = decodeV1PSK(keyBuffer) - this.psk = decodedPSK.psk - this.tag = decodedPSK.tag - } - - /** - * Takes a given Connection and creates a private encryption stream - * between its two peers from the PSK the Protector instance was - * created with. 
- * - * @param {MultiaddrConnection} connection - The connection to protect - * @returns {Promise} A protected duplex iterable - */ - async protect (connection) { - if (!connection) { - throw errcode(new Error(Errors.NO_HANDSHAKE_CONNECTION), ERR_INVALID_PARAMETERS) - } - - // Exchange nonces - log('protecting the connection') - const localNonce = crypto.randomBytes(NONCE_LENGTH) - - const shake = handshake(connection) - shake.write(localNonce) - - const result = await shake.reader.next(NONCE_LENGTH) - const remoteNonce = result.value.slice() - shake.rest() - - // Create the boxing/unboxing pipe - log('exchanged nonces') - const [internal, external] = duplexPair() - pipe( - external, - // Encrypt all outbound traffic - createBoxStream(localNonce, this.psk), - shake.stream, - // Decrypt all inbound traffic - createUnboxStream(remoteNonce, this.psk), - external - ).catch(log.error) - - return internal - } -} - -module.exports = Protector -module.exports.errors = Errors -module.exports.generate = require('./key-generator') diff --git a/src/pnet/index.ts b/src/pnet/index.ts new file mode 100644 index 00000000..868b4308 --- /dev/null +++ b/src/pnet/index.ts @@ -0,0 +1,95 @@ +import { logger } from '@libp2p/logger' +import { pipe } from 'it-pipe' +import errCode from 'err-code' +import { duplexPair } from 'it-pair/duplex' +import { randomBytes } from '@libp2p/crypto' +import * as Errors from './errors.js' +import { codes } from '../errors.js' +import { + createBoxStream, + createUnboxStream, + decodeV1PSK +} from './crypto.js' +import { handshake } from 'it-handshake' +import { NONCE_LENGTH } from './key-generator.js' +import type { MultiaddrConnection } from '@libp2p/interfaces/transport' +import type { ConnectionProtector } from '@libp2p/interfaces/connection' + +const log = logger('libp2p:pnet') + +export interface ProtectorInit { + enabled?: boolean + psk: Uint8Array +} + +export class PreSharedKeyConnectionProtector implements ConnectionProtector { + public tag: string + private readonly psk: Uint8Array + private readonly enabled: boolean + + /** + * Takes a Private Shared Key (psk) and provides a `protect` method + * for wrapping existing connections in a private encryption stream. + */ + constructor (init: ProtectorInit) { + this.enabled = init.enabled !== false + + if (this.enabled) { + const decodedPSK = decodeV1PSK(init.psk) + this.psk = decodedPSK.psk + this.tag = decodedPSK.tag ?? '' + } else { + this.psk = new Uint8Array() + this.tag = '' + } + } + + /** + * Takes a given Connection and creates a private encryption stream + * between its two peers from the PSK the Protector instance was + * created with. 
+ */ + async protect (connection: MultiaddrConnection): Promise { + if (!this.enabled) { + return connection + } + + if (connection == null) { + throw errCode(new Error(Errors.NO_HANDSHAKE_CONNECTION), codes.ERR_INVALID_PARAMETERS) + } + + // Exchange nonces + log('protecting the connection') + const localNonce = randomBytes(NONCE_LENGTH) + + const shake = handshake(connection) + shake.write(localNonce) + + const result = await shake.reader.next(NONCE_LENGTH) + + if (result.value == null) { + throw errCode(new Error(Errors.STREAM_ENDED), codes.ERR_INVALID_PARAMETERS) + } + + const remoteNonce = result.value.slice() + shake.rest() + + // Create the boxing/unboxing pipe + log('exchanged nonces') + const [internal, external] = duplexPair() + pipe( + external, + // Encrypt all outbound traffic + createBoxStream(localNonce, this.psk), + shake.stream, + // Decrypt all inbound traffic + createUnboxStream(remoteNonce, this.psk), + external + ).catch(log.error) + + return { + ...connection, + ...internal + } + } +} diff --git a/src/pnet/key-generator.js b/src/pnet/key-generator.js deleted file mode 100644 index ad94b140..00000000 --- a/src/pnet/key-generator.js +++ /dev/null @@ -1,33 +0,0 @@ -'use strict' - -const crypto = require('libp2p-crypto') -const KEY_LENGTH = 32 -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') - -/** - * Generates a PSK that can be used in a libp2p-pnet private network - * - * @param {Uint8Array} bytes - An object to write the psk into - * @returns {void} - */ -function generate (bytes) { - const psk = uint8ArrayToString(crypto.randomBytes(KEY_LENGTH), 'base16') - const key = uint8ArrayFromString('/key/swarm/psk/1.0.0/\n/base16/\n' + psk) - - bytes.set(key) -} - -module.exports = generate -module.exports.NONCE_LENGTH = 24 -module.exports.KEY_LENGTH = KEY_LENGTH - -try { - // @ts-ignore This condition will always return 'false' since the types 'Module | undefined' - if (require.main === module) { - // @ts-ignore - generate(process.stdout) - } -} catch (/** @type {any} */ error) { - -} diff --git a/src/pnet/key-generator.ts b/src/pnet/key-generator.ts new file mode 100644 index 00000000..af3b46c1 --- /dev/null +++ b/src/pnet/key-generator.ts @@ -0,0 +1,28 @@ +import { randomBytes } from '@libp2p/crypto' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' + +/** + * Generates a PSK that can be used in a libp2p-pnet private network + * + * @param {Uint8Array} bytes - An object to write the psk into + * @returns {void} + */ +export function generate (bytes: Uint8Array) { + const psk = uint8ArrayToString(randomBytes(KEY_LENGTH), 'base16') + const key = uint8ArrayFromString('/key/swarm/psk/1.0.0/\n/base16/\n' + psk) + + bytes.set(key) +} + +export const NONCE_LENGTH = 24 +export const KEY_LENGTH = 32 + +try { + if (require.main === module) { + // @ts-expect-error + generate(process.stdout) + } +} catch (error: any) { + +} diff --git a/src/pubsub-adapter.js b/src/pubsub-adapter.js deleted file mode 100644 index 8ccbf90f..00000000 --- a/src/pubsub-adapter.js +++ /dev/null @@ -1,61 +0,0 @@ -'use strict' - -// Pubsub adapter to keep API with handlers while not removed. 
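// A minimal sketch of the handler-based usage this adapter preserves, assuming
// a started `libp2p` node with a pubsub router configured and a `handler`
// callback for incoming messages (topic name is a placeholder):
//
//   libp2p.pubsub.subscribe('fruit', handler)    // bind handler and subscribe to the topic
//   libp2p.pubsub.unsubscribe('fruit', handler)  // unbind handler; unsubscribes once no handlers remain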
-/** - * @typedef {import('libp2p-interfaces/src/pubsub').InMessage} InMessage - * @typedef {import('libp2p-interfaces/src/pubsub')} PubsubRouter - */ - -/** - * @param {{new(...args: any[]): PubsubRouter}} PubsubRouter - * @param {import('.')} libp2p - * @param {{ enabled: boolean; } & import(".").PubsubLocalOptions & import("libp2p-interfaces/src/pubsub").PubsubOptions} options - */ -function pubsubAdapter (PubsubRouter, libp2p, options) { - /** @type {PubsubRouter & { _subscribeAdapter: PubsubRouter['subscribe'], _unsubscribeAdapter: PubsubRouter['unsubscribe'] }} */ - // @ts-ignore we set the extra _subscribeAdapter and _unsubscribeAdapter properties afterwards - const pubsub = new PubsubRouter(libp2p, options) - pubsub._subscribeAdapter = pubsub.subscribe - pubsub._unsubscribeAdapter = pubsub.unsubscribe - - /** - * Subscribes to a given topic. - * - * @override - * @param {string} topic - * @param {(msg: InMessage) => void} [handler] - * @returns {void} - */ - function subscribe (topic, handler) { - // Bind provided handler - handler && pubsub.on(topic, handler) - pubsub._subscribeAdapter(topic) - } - - /** - * Unsubscribe from the given topic. - * - * @override - * @param {string} topic - * @param {(msg: InMessage) => void} [handler] - * @returns {void} - */ - function unsubscribe (topic, handler) { - if (!handler) { - pubsub.removeAllListeners(topic) - } else { - pubsub.removeListener(topic, handler) - } - - if (pubsub.listenerCount(topic) === 0) { - pubsub._unsubscribeAdapter(topic) - } - } - - pubsub.subscribe = subscribe - pubsub.unsubscribe = unsubscribe - - return pubsub -} - -module.exports = pubsubAdapter diff --git a/src/record/README.md b/src/record/README.md deleted file mode 100644 index 761cd234..00000000 --- a/src/record/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# Libp2p Records - -Libp2p nodes need to store data in a public location (e.g. a DHT), or rely on potentially untrustworthy intermediaries to relay information over its lifetime. Accordingly, libp2p nodes need to be able to verify that the data came from a specific peer and that it hasn't been tampered with. - -## Envelope - -Libp2p provides an all-purpose data container called **envelope**. It was created to enable the distribution of verifiable records, which we can prove originated from the addressed peer itself. The envelope includes a signature of the data, so that its authenticity is verified. - -This envelope stores a marshaled record implementing the [interface-record](https://github.com/libp2p/js-libp2p-interfaces/tree/master/src/record). These Records are designed to be serialized to bytes and placed inside of the envelopes before being shared with other peers. - -You can read further about the envelope in [libp2p/specs#217](https://github.com/libp2p/specs/pull/217). 
- -### Usage - -- create an envelope with an instance of an [interface-record](https://github.com/libp2p/js-libp2p-interfaces/tree/master/src/record) implementation and prepare it for being exchanged: - -```js -// interface-record implementation example with the "libp2p-example" namespace -const Record = require('libp2p-interfaces/src/record') -const { fromString } = require('uint8arrays/from-string') - -class ExampleRecord extends Record { - constructor () { - super ('libp2p-example', fromString('0302', 'hex')) - } - - marshal () {} - - equals (other) {} -} - -ExampleRecord.createFromProtobuf = () => {} -``` - -```js -const Envelope = require('libp2p/src/record/envelop') -const ExampleRecord = require('./example-record') - -const rec = new ExampleRecord() -const e = await Envelope.seal(rec, peerId) -const wireData = e.marshal() -``` - -- consume a received envelope (`wireData`) and transform it back to a record: - -```js -const Envelope = require('libp2p/src/record/envelop') -const ExampleRecord = require('./example-record') - -const domain = 'libp2p-example' -let e - -try { - e = await Envelope.openAndCertify(wireData, domain) -} catch (err) {} - -const rec = ExampleRecord.createFromProtobuf(e.payload) -``` - -## Peer Record - -All libp2p nodes keep a `PeerStore`, that among other information stores a set of known addresses for each peer, which can come from a variety of sources. - -Libp2p peer records were created to enable the distribution of verifiable address records, which we can prove originated from the addressed peer itself. With such guarantees, libp2p is able to prioritize addresses based on their authenticity, with the most strict strategy being to only dial certified addresses (no strategies have been implemented at the time of writing). - -A peer record contains the peers' publicly reachable listen addresses, and may be extended in the future to contain additional metadata relevant to routing. It also contains a `seqNumber` field, a timestamp per the spec, so that we can verify the most recent record. - -You can read further about the Peer Record in [libp2p/specs#217](https://github.com/libp2p/specs/pull/217). - -### Usage - -- create a new Peer Record - -```js -const PeerRecord = require('libp2p/src/record/peer-record') - -const pr = new PeerRecord({ - peerId: node.peerId, - multiaddrs: node.multiaddrs -}) -``` - -- create a Peer Record from a protobuf - -```js -const PeerRecord = require('libp2p/src/record/peer-record') - -const pr = PeerRecord.createFromProtobuf(data) -``` - -### Libp2p Flows - -#### Self Record - -Once a libp2p node has started and is listening on a set of multiaddrs, its own peer record can be created. - -The identify service is responsible for creating the self record when the identify protocol kicks in for the first time. This record will be stored for future needs of the identify protocol when connecting with other peers. - -#### Self record Updates - -**_NOT_YET_IMPLEMENTED_** - -While creating peer records is fairly trivial, addresses are not static and might be modified at arbitrary times. This can happen via an Address Manager API, or even through AutoRelay/AutoNAT. - -When a libp2p node changes its listen addresses, the identify service will be informed. Once that happens, the identify service creates a new self record and stores it. With the new record, the identify push/delta protocol will be used to communicate this change to the connected peers. 
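Taken together, the sealing and consuming flow described above can be sketched end to end. This is a minimal illustration using the CommonJS API from the usage snippets in this document; the peer id and multiaddr values are placeholders:

```js
const PeerId = require('peer-id')
const { Multiaddr } = require('multiaddr')
const Envelope = require('libp2p/src/record/envelope')
const PeerRecord = require('libp2p/src/record/peer-record')

const peerId = await PeerId.create()

// Build a record of the peer's current listen addresses and seal it
// with the peer's private key
const record = new PeerRecord({
  peerId,
  multiaddrs: [new Multiaddr('/ip4/127.0.0.1/tcp/4001')]
})
const envelope = await Envelope.seal(record, peerId)
const wireData = envelope.marshal()

// A receiving peer verifies the signature against the peer record domain
// and recovers the addresses from the payload
const received = await Envelope.openAndCertify(wireData, PeerRecord.DOMAIN)
const peerRecord = PeerRecord.createFromProtobuf(received.payload)
console.log(peerRecord.multiaddrs.map((ma) => ma.toString()))
```

If the signature does not validate for the `libp2p-peer-record` domain, `openAndCertify` throws, so only envelopes that were really produced by the addressed peer end up stored in the AddressBook.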
- -#### Subsystem receiving a record - -Considering that a node can discover other peers' addresses from a variety of sources, Libp2p Peerstore can differentiate the addresses that were obtained through a signed peer record. - -Once a record is received and its signature properly validated, its envelope is stored in the AddressBook in its byte representation. The `seqNumber` remains unmarshalled so that we can quickly compare it against incoming records to determine the most recent record. - -The AddressBook Addresses will be updated with the content of the envelope with a certified property. This allows other subsystems to identify the known certified addresses of a peer. - -#### Subsystem providing a record - -Libp2p subsystems that exchange other peers information will provide the envelope that they received by those peers. As a result, other peers can verify if the envelope was really created by the addressed peer. - -When a subsystem wants to provide a record, it will get it from the AddressBook, if it exists. Other subsystems are also able to provide the self record, since it is also stored in the AddressBook. - -### Future Work - -- Persistence only considering certified addresses? -- Peers may not know their own addresses. It's often impossible to automatically infer one's own public address, and peers may need to rely on third party peers to inform them of their observed public addresses. -- A peer may inadvertently or maliciously sign an address that they do not control. In other words, a signature isn't a guarantee that a given address is valid. -- Some addresses may be ambiguous. For example, addresses on a private subnet are valid within that subnet but are useless on the public internet. -- Once all these pieces are in place, we will also need a way to prioritize addresses based on their authenticity, that is, the dialer can prioritize self-certified addresses over addresses from an unknown origin. - - Modular dialer? (taken from go PR notes) - - With the modular dialer, users should easily be able to configure precedence. With dialer v1, anything we do to prioritise dials is gonna be spaghetti and adhoc. With the modular dialer, you’d be able to specify the order of dials when instantiating the pipeline. - - Multiple parallel dials. We already have the issue where new addresses aren't added to existing dials. diff --git a/src/record/envelope/envelope.d.ts b/src/record/envelope/envelope.d.ts deleted file mode 100644 index 440590c1..00000000 --- a/src/record/envelope/envelope.d.ts +++ /dev/null @@ -1,77 +0,0 @@ -import * as $protobuf from "protobufjs"; -/** Properties of an Envelope. */ -export interface IEnvelope { - - /** Envelope publicKey */ - publicKey?: (Uint8Array|null); - - /** Envelope payloadType */ - payloadType?: (Uint8Array|null); - - /** Envelope payload */ - payload?: (Uint8Array|null); - - /** Envelope signature */ - signature?: (Uint8Array|null); -} - -/** Represents an Envelope. */ -export class Envelope implements IEnvelope { - - /** - * Constructs a new Envelope. - * @param [p] Properties to set - */ - constructor(p?: IEnvelope); - - /** Envelope publicKey. */ - public publicKey: Uint8Array; - - /** Envelope payloadType. */ - public payloadType: Uint8Array; - - /** Envelope payload. */ - public payload: Uint8Array; - - /** Envelope signature. */ - public signature: Uint8Array; - - /** - * Encodes the specified Envelope message. Does not implicitly {@link Envelope.verify|verify} messages. 
- * @param m Envelope message or plain object to encode - * @param [w] Writer to encode to - * @returns Writer - */ - public static encode(m: IEnvelope, w?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes an Envelope message from the specified reader or buffer. - * @param r Reader or buffer to decode from - * @param [l] Message length if known beforehand - * @returns Envelope - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(r: ($protobuf.Reader|Uint8Array), l?: number): Envelope; - - /** - * Creates an Envelope message from a plain object. Also converts values to their respective internal types. - * @param d Plain object - * @returns Envelope - */ - public static fromObject(d: { [k: string]: any }): Envelope; - - /** - * Creates a plain object from an Envelope message. Also converts values to other types if specified. - * @param m Envelope - * @param [o] Conversion options - * @returns Plain object - */ - public static toObject(m: Envelope, o?: $protobuf.IConversionOptions): { [k: string]: any }; - - /** - * Converts this Envelope to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; -} diff --git a/src/record/envelope/envelope.js b/src/record/envelope/envelope.js deleted file mode 100644 index 8741154e..00000000 --- a/src/record/envelope/envelope.js +++ /dev/null @@ -1,243 +0,0 @@ -/*eslint-disable*/ -"use strict"; - -var $protobuf = require("protobufjs/minimal"); - -// Common aliases -var $Reader = $protobuf.Reader, $Writer = $protobuf.Writer, $util = $protobuf.util; - -// Exported root namespace -var $root = $protobuf.roots["libp2p-envelope"] || ($protobuf.roots["libp2p-envelope"] = {}); - -$root.Envelope = (function() { - - /** - * Properties of an Envelope. - * @exports IEnvelope - * @interface IEnvelope - * @property {Uint8Array|null} [publicKey] Envelope publicKey - * @property {Uint8Array|null} [payloadType] Envelope payloadType - * @property {Uint8Array|null} [payload] Envelope payload - * @property {Uint8Array|null} [signature] Envelope signature - */ - - /** - * Constructs a new Envelope. - * @exports Envelope - * @classdesc Represents an Envelope. - * @implements IEnvelope - * @constructor - * @param {IEnvelope=} [p] Properties to set - */ - function Envelope(p) { - if (p) - for (var ks = Object.keys(p), i = 0; i < ks.length; ++i) - if (p[ks[i]] != null) - this[ks[i]] = p[ks[i]]; - } - - /** - * Envelope publicKey. - * @member {Uint8Array} publicKey - * @memberof Envelope - * @instance - */ - Envelope.prototype.publicKey = $util.newBuffer([]); - - /** - * Envelope payloadType. - * @member {Uint8Array} payloadType - * @memberof Envelope - * @instance - */ - Envelope.prototype.payloadType = $util.newBuffer([]); - - /** - * Envelope payload. - * @member {Uint8Array} payload - * @memberof Envelope - * @instance - */ - Envelope.prototype.payload = $util.newBuffer([]); - - /** - * Envelope signature. - * @member {Uint8Array} signature - * @memberof Envelope - * @instance - */ - Envelope.prototype.signature = $util.newBuffer([]); - - /** - * Encodes the specified Envelope message. Does not implicitly {@link Envelope.verify|verify} messages. 
- * @function encode - * @memberof Envelope - * @static - * @param {IEnvelope} m Envelope message or plain object to encode - * @param {$protobuf.Writer} [w] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Envelope.encode = function encode(m, w) { - if (!w) - w = $Writer.create(); - if (m.publicKey != null && Object.hasOwnProperty.call(m, "publicKey")) - w.uint32(10).bytes(m.publicKey); - if (m.payloadType != null && Object.hasOwnProperty.call(m, "payloadType")) - w.uint32(18).bytes(m.payloadType); - if (m.payload != null && Object.hasOwnProperty.call(m, "payload")) - w.uint32(26).bytes(m.payload); - if (m.signature != null && Object.hasOwnProperty.call(m, "signature")) - w.uint32(42).bytes(m.signature); - return w; - }; - - /** - * Decodes an Envelope message from the specified reader or buffer. - * @function decode - * @memberof Envelope - * @static - * @param {$protobuf.Reader|Uint8Array} r Reader or buffer to decode from - * @param {number} [l] Message length if known beforehand - * @returns {Envelope} Envelope - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Envelope.decode = function decode(r, l) { - if (!(r instanceof $Reader)) - r = $Reader.create(r); - var c = l === undefined ? r.len : r.pos + l, m = new $root.Envelope(); - while (r.pos < c) { - var t = r.uint32(); - switch (t >>> 3) { - case 1: - m.publicKey = r.bytes(); - break; - case 2: - m.payloadType = r.bytes(); - break; - case 3: - m.payload = r.bytes(); - break; - case 5: - m.signature = r.bytes(); - break; - default: - r.skipType(t & 7); - break; - } - } - return m; - }; - - /** - * Creates an Envelope message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof Envelope - * @static - * @param {Object.} d Plain object - * @returns {Envelope} Envelope - */ - Envelope.fromObject = function fromObject(d) { - if (d instanceof $root.Envelope) - return d; - var m = new $root.Envelope(); - if (d.publicKey != null) { - if (typeof d.publicKey === "string") - $util.base64.decode(d.publicKey, m.publicKey = $util.newBuffer($util.base64.length(d.publicKey)), 0); - else if (d.publicKey.length) - m.publicKey = d.publicKey; - } - if (d.payloadType != null) { - if (typeof d.payloadType === "string") - $util.base64.decode(d.payloadType, m.payloadType = $util.newBuffer($util.base64.length(d.payloadType)), 0); - else if (d.payloadType.length) - m.payloadType = d.payloadType; - } - if (d.payload != null) { - if (typeof d.payload === "string") - $util.base64.decode(d.payload, m.payload = $util.newBuffer($util.base64.length(d.payload)), 0); - else if (d.payload.length) - m.payload = d.payload; - } - if (d.signature != null) { - if (typeof d.signature === "string") - $util.base64.decode(d.signature, m.signature = $util.newBuffer($util.base64.length(d.signature)), 0); - else if (d.signature.length) - m.signature = d.signature; - } - return m; - }; - - /** - * Creates a plain object from an Envelope message. Also converts values to other types if specified. 
- * @function toObject - * @memberof Envelope - * @static - * @param {Envelope} m Envelope - * @param {$protobuf.IConversionOptions} [o] Conversion options - * @returns {Object.} Plain object - */ - Envelope.toObject = function toObject(m, o) { - if (!o) - o = {}; - var d = {}; - if (o.defaults) { - if (o.bytes === String) - d.publicKey = ""; - else { - d.publicKey = []; - if (o.bytes !== Array) - d.publicKey = $util.newBuffer(d.publicKey); - } - if (o.bytes === String) - d.payloadType = ""; - else { - d.payloadType = []; - if (o.bytes !== Array) - d.payloadType = $util.newBuffer(d.payloadType); - } - if (o.bytes === String) - d.payload = ""; - else { - d.payload = []; - if (o.bytes !== Array) - d.payload = $util.newBuffer(d.payload); - } - if (o.bytes === String) - d.signature = ""; - else { - d.signature = []; - if (o.bytes !== Array) - d.signature = $util.newBuffer(d.signature); - } - } - if (m.publicKey != null && m.hasOwnProperty("publicKey")) { - d.publicKey = o.bytes === String ? $util.base64.encode(m.publicKey, 0, m.publicKey.length) : o.bytes === Array ? Array.prototype.slice.call(m.publicKey) : m.publicKey; - } - if (m.payloadType != null && m.hasOwnProperty("payloadType")) { - d.payloadType = o.bytes === String ? $util.base64.encode(m.payloadType, 0, m.payloadType.length) : o.bytes === Array ? Array.prototype.slice.call(m.payloadType) : m.payloadType; - } - if (m.payload != null && m.hasOwnProperty("payload")) { - d.payload = o.bytes === String ? $util.base64.encode(m.payload, 0, m.payload.length) : o.bytes === Array ? Array.prototype.slice.call(m.payload) : m.payload; - } - if (m.signature != null && m.hasOwnProperty("signature")) { - d.signature = o.bytes === String ? $util.base64.encode(m.signature, 0, m.signature.length) : o.bytes === Array ? Array.prototype.slice.call(m.signature) : m.signature; - } - return d; - }; - - /** - * Converts this Envelope to JSON. - * @function toJSON - * @memberof Envelope - * @instance - * @returns {Object.} JSON object - */ - Envelope.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - return Envelope; -})(); - -module.exports = $root; diff --git a/src/record/envelope/envelope.proto b/src/record/envelope/envelope.proto deleted file mode 100644 index 5b80cf50..00000000 --- a/src/record/envelope/envelope.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -message Envelope { - // public_key is the public key of the keypair the enclosed payload was - // signed with. - bytes public_key = 1; - - // payload_type encodes the type of payload, so that it can be deserialized - // deterministically. - bytes payload_type = 2; - - // payload is the actual payload carried inside this envelope. - bytes payload = 3; - - // signature is the signature produced by the private key corresponding to - // the enclosed public key, over the payload, prefixing a domain string for - // additional security. 
- bytes signature = 5; -} \ No newline at end of file diff --git a/src/record/envelope/index.js b/src/record/envelope/index.js deleted file mode 100644 index 4fadb4f0..00000000 --- a/src/record/envelope/index.js +++ /dev/null @@ -1,183 +0,0 @@ -'use strict' - -const errCode = require('err-code') -const { concat: uint8arraysConcat } = require('uint8arrays/concat') -const { fromString: uint8arraysFromString } = require('uint8arrays/from-string') -// @ts-ignore libp2p-crypto does not support types -const cryptoKeys = require('libp2p-crypto/src/keys') -const PeerId = require('peer-id') -const varint = require('varint') -const { equals: uint8arraysEquals } = require('uint8arrays/equals') - -const { codes } = require('../../errors') -const { Envelope: Protobuf } = require('./envelope') - -/** - * @typedef {import('libp2p-interfaces/src/record/types').Record} Record - */ - -class Envelope { - /** - * The Envelope is responsible for keeping an arbitrary signed record - * by a libp2p peer. - * - * @class - * @param {object} params - * @param {PeerId} params.peerId - * @param {Uint8Array} params.payloadType - * @param {Uint8Array} params.payload - marshaled record - * @param {Uint8Array} params.signature - signature of the domain string :: type hint :: payload. - */ - constructor ({ peerId, payloadType, payload, signature }) { - this.peerId = peerId - this.payloadType = payloadType - this.payload = payload - this.signature = signature - - // Cache - this._marshal = undefined - } - - /** - * Marshal the envelope content. - * - * @returns {Uint8Array} - */ - marshal () { - if (this._marshal) { - return this._marshal - } - - const publicKey = cryptoKeys.marshalPublicKey(this.peerId.pubKey) - - this._marshal = Protobuf.encode({ - publicKey: publicKey, - payloadType: this.payloadType, - payload: this.payload, - signature: this.signature - }).finish() - - return this._marshal - } - - /** - * Verifies if the other Envelope is identical to this one. - * - * @param {Envelope} other - * @returns {boolean} - */ - equals (other) { - return uint8arraysEquals(this.peerId.pubKey.bytes, other.peerId.pubKey.bytes) && - uint8arraysEquals(this.payloadType, other.payloadType) && - uint8arraysEquals(this.payload, other.payload) && - uint8arraysEquals(this.signature, other.signature) - } - - /** - * Validate envelope data signature for the given domain. - * - * @param {string} domain - * @returns {Promise} - */ - validate (domain) { - const signData = formatSignaturePayload(domain, this.payloadType, this.payload) - - return this.peerId.pubKey.verify(signData, this.signature) - } -} - -/** - * Helper function that prepares a Uint8Array to sign or verify a signature. 
- * - * @param {string} domain - * @param {Uint8Array} payloadType - * @param {Uint8Array} payload - * @returns {Uint8Array} - */ -const formatSignaturePayload = (domain, payloadType, payload) => { - // When signing, a peer will prepare a Uint8Array by concatenating the following: - // - The length of the domain separation string string in bytes - // - The domain separation string, encoded as UTF-8 - // - The length of the payload_type field in bytes - // - The value of the payload_type field - // - The length of the payload field in bytes - // - The value of the payload field - - const domainUint8Array = uint8arraysFromString(domain) - const domainLength = varint.encode(domainUint8Array.byteLength) - const payloadTypeLength = varint.encode(payloadType.length) - const payloadLength = varint.encode(payload.length) - - return uint8arraysConcat([ - new Uint8Array(domainLength), - domainUint8Array, - new Uint8Array(payloadTypeLength), - payloadType, - new Uint8Array(payloadLength), - payload - ]) -} - -/** - * Unmarshal a serialized Envelope protobuf message. - * - * @param {Uint8Array} data - * @returns {Promise} - */ -Envelope.createFromProtobuf = async (data) => { - const envelopeData = Protobuf.decode(data) - const peerId = await PeerId.createFromPubKey(envelopeData.publicKey) - - return new Envelope({ - peerId, - payloadType: envelopeData.payloadType, - payload: envelopeData.payload, - signature: envelopeData.signature - }) -} - -/** - * Seal marshals the given Record, places the marshaled bytes inside an Envelope - * and signs it with the given peerId's private key. - * - * @async - * @param {Record} record - * @param {PeerId} peerId - * @returns {Promise} - */ -Envelope.seal = async (record, peerId) => { - const domain = record.domain - const payloadType = record.codec - const payload = record.marshal() - - const signData = formatSignaturePayload(domain, payloadType, payload) - const signature = await peerId.privKey.sign(signData) - - return new Envelope({ - peerId, - payloadType, - payload, - signature - }) -} - -/** - * Open and certify a given marshalled envelope. - * Data is unmarshalled and the signature validated for the given domain. - * - * @param {Uint8Array} data - * @param {string} domain - * @returns {Promise} - */ -Envelope.openAndCertify = async (data, domain) => { - const envelope = await Envelope.createFromProtobuf(data) - const valid = await envelope.validate(domain) - - if (!valid) { - throw errCode(new Error('envelope signature is not valid for the given domain'), codes.ERR_SIGNATURE_NOT_VALID) - } - - return envelope -} - -module.exports = Envelope diff --git a/src/record/peer-record/consts.js b/src/record/peer-record/consts.js deleted file mode 100644 index 9b35427e..00000000 --- a/src/record/peer-record/consts.js +++ /dev/null @@ -1,14 +0,0 @@ -'use strict' - -// The domain string used for peer records contained in a Envelope. -const domain = 'libp2p-peer-record' - -// The type hint used to identify peer records in a Envelope. 
-// Defined in https://github.com/multiformats/multicodec/blob/master/table.csv -// with name "libp2p-peer-record" -const payloadType = Uint8Array.from([3, 1]) - -module.exports = { - ENVELOPE_DOMAIN_PEER_RECORD: domain, - ENVELOPE_PAYLOAD_TYPE_PEER_RECORD: payloadType -} diff --git a/src/record/peer-record/index.js b/src/record/peer-record/index.js deleted file mode 100644 index bcf637b8..00000000 --- a/src/record/peer-record/index.js +++ /dev/null @@ -1,113 +0,0 @@ -'use strict' - -const { Multiaddr } = require('multiaddr') -const PeerId = require('peer-id') -const arrayEquals = require('libp2p-utils/src/array-equals') - -const { PeerRecord: Protobuf } = require('./peer-record') -const { - ENVELOPE_DOMAIN_PEER_RECORD, - ENVELOPE_PAYLOAD_TYPE_PEER_RECORD -} = require('./consts') - -/** - * @typedef {import('../../peer-store/types').Address} Address - * @typedef {import('libp2p-interfaces/src/record/types').Record} Record - */ - -/** - * @implements {Record} - */ -class PeerRecord { - /** - * The PeerRecord is used for distributing peer routing records across the network. - * It contains the peer's reachable listen addresses. - * - * @class - * @param {Object} params - * @param {PeerId} params.peerId - * @param {Multiaddr[]} params.multiaddrs - addresses of the associated peer. - * @param {number} [params.seqNumber] - monotonically-increasing sequence counter that's used to order PeerRecords in time. - */ - constructor ({ peerId, multiaddrs = [], seqNumber = Date.now() }) { - this.domain = ENVELOPE_DOMAIN_PEER_RECORD - this.codec = ENVELOPE_PAYLOAD_TYPE_PEER_RECORD - - this.peerId = peerId - this.multiaddrs = multiaddrs - this.seqNumber = seqNumber - - // Cache - this._marshal = undefined - } - - /** - * Marshal a record to be used in an envelope. - * - * @returns {Uint8Array} - */ - marshal () { - if (this._marshal) { - return this._marshal - } - - this._marshal = Protobuf.encode({ - peerId: this.peerId.toBytes(), - seq: this.seqNumber, - addresses: this.multiaddrs.map((m) => ({ - multiaddr: m.bytes - })) - }).finish() - - return this._marshal - } - - /** - * Returns true if `this` record equals the `other`. - * - * @param {unknown} other - * @returns {boolean} - */ - equals (other) { - if (!(other instanceof PeerRecord)) { - return false - } - - // Validate PeerId - if (!this.peerId.equals(other.peerId)) { - return false - } - - // Validate seqNumber - if (this.seqNumber !== other.seqNumber) { - return false - } - - // Validate multiaddrs - if (!arrayEquals(this.multiaddrs, other.multiaddrs)) { - return false - } - - return true - } -} - -/** - * Unmarshal Peer Record Protobuf. - * - * @param {Uint8Array} buf - marshaled peer record. - * @returns {PeerRecord} - */ -PeerRecord.createFromProtobuf = (buf) => { - const peerRecord = Protobuf.decode(buf) - - const peerId = PeerId.createFromBytes(peerRecord.peerId) - const multiaddrs = (peerRecord.addresses || []).map((a) => new Multiaddr(a.multiaddr)) - const seqNumber = Number(peerRecord.seq) - - return new PeerRecord({ peerId, multiaddrs, seqNumber }) -} - -PeerRecord.DOMAIN = ENVELOPE_DOMAIN_PEER_RECORD - -module.exports = PeerRecord diff --git a/src/record/peer-record/peer-record.d.ts b/src/record/peer-record/peer-record.d.ts deleted file mode 100644 index a851b533..00000000 --- a/src/record/peer-record/peer-record.d.ts +++ /dev/null @@ -1,133 +0,0 @@ -import * as $protobuf from "protobufjs"; -/** Properties of a PeerRecord. 
*/ -export interface IPeerRecord { - - /** PeerRecord peerId */ - peerId?: (Uint8Array|null); - - /** PeerRecord seq */ - seq?: (number|null); - - /** PeerRecord addresses */ - addresses?: (PeerRecord.IAddressInfo[]|null); -} - -/** Represents a PeerRecord. */ -export class PeerRecord implements IPeerRecord { - - /** - * Constructs a new PeerRecord. - * @param [p] Properties to set - */ - constructor(p?: IPeerRecord); - - /** PeerRecord peerId. */ - public peerId: Uint8Array; - - /** PeerRecord seq. */ - public seq: number; - - /** PeerRecord addresses. */ - public addresses: PeerRecord.IAddressInfo[]; - - /** - * Encodes the specified PeerRecord message. Does not implicitly {@link PeerRecord.verify|verify} messages. - * @param m PeerRecord message or plain object to encode - * @param [w] Writer to encode to - * @returns Writer - */ - public static encode(m: IPeerRecord, w?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes a PeerRecord message from the specified reader or buffer. - * @param r Reader or buffer to decode from - * @param [l] Message length if known beforehand - * @returns PeerRecord - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(r: ($protobuf.Reader|Uint8Array), l?: number): PeerRecord; - - /** - * Creates a PeerRecord message from a plain object. Also converts values to their respective internal types. - * @param d Plain object - * @returns PeerRecord - */ - public static fromObject(d: { [k: string]: any }): PeerRecord; - - /** - * Creates a plain object from a PeerRecord message. Also converts values to other types if specified. - * @param m PeerRecord - * @param [o] Conversion options - * @returns Plain object - */ - public static toObject(m: PeerRecord, o?: $protobuf.IConversionOptions): { [k: string]: any }; - - /** - * Converts this PeerRecord to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; -} - -export namespace PeerRecord { - - /** Properties of an AddressInfo. */ - interface IAddressInfo { - - /** AddressInfo multiaddr */ - multiaddr?: (Uint8Array|null); - } - - /** Represents an AddressInfo. */ - class AddressInfo implements IAddressInfo { - - /** - * Constructs a new AddressInfo. - * @param [p] Properties to set - */ - constructor(p?: PeerRecord.IAddressInfo); - - /** AddressInfo multiaddr. */ - public multiaddr: Uint8Array; - - /** - * Encodes the specified AddressInfo message. Does not implicitly {@link PeerRecord.AddressInfo.verify|verify} messages. - * @param m AddressInfo message or plain object to encode - * @param [w] Writer to encode to - * @returns Writer - */ - public static encode(m: PeerRecord.IAddressInfo, w?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes an AddressInfo message from the specified reader or buffer. - * @param r Reader or buffer to decode from - * @param [l] Message length if known beforehand - * @returns AddressInfo - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(r: ($protobuf.Reader|Uint8Array), l?: number): PeerRecord.AddressInfo; - - /** - * Creates an AddressInfo message from a plain object. Also converts values to their respective internal types. - * @param d Plain object - * @returns AddressInfo - */ - public static fromObject(d: { [k: string]: any }): PeerRecord.AddressInfo; - - /** - * Creates a plain object from an AddressInfo message. 
Also converts values to other types if specified. - * @param m AddressInfo - * @param [o] Conversion options - * @returns Plain object - */ - public static toObject(m: PeerRecord.AddressInfo, o?: $protobuf.IConversionOptions): { [k: string]: any }; - - /** - * Converts this AddressInfo to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; - } -} diff --git a/src/record/peer-record/peer-record.js b/src/record/peer-record/peer-record.js deleted file mode 100644 index 9f956670..00000000 --- a/src/record/peer-record/peer-record.js +++ /dev/null @@ -1,367 +0,0 @@ -/*eslint-disable*/ -"use strict"; - -var $protobuf = require("protobufjs/minimal"); - -// Common aliases -var $Reader = $protobuf.Reader, $Writer = $protobuf.Writer, $util = $protobuf.util; - -// Exported root namespace -var $root = $protobuf.roots["libp2p-peer-record"] || ($protobuf.roots["libp2p-peer-record"] = {}); - -$root.PeerRecord = (function() { - - /** - * Properties of a PeerRecord. - * @exports IPeerRecord - * @interface IPeerRecord - * @property {Uint8Array|null} [peerId] PeerRecord peerId - * @property {number|null} [seq] PeerRecord seq - * @property {Array.|null} [addresses] PeerRecord addresses - */ - - /** - * Constructs a new PeerRecord. - * @exports PeerRecord - * @classdesc Represents a PeerRecord. - * @implements IPeerRecord - * @constructor - * @param {IPeerRecord=} [p] Properties to set - */ - function PeerRecord(p) { - this.addresses = []; - if (p) - for (var ks = Object.keys(p), i = 0; i < ks.length; ++i) - if (p[ks[i]] != null) - this[ks[i]] = p[ks[i]]; - } - - /** - * PeerRecord peerId. - * @member {Uint8Array} peerId - * @memberof PeerRecord - * @instance - */ - PeerRecord.prototype.peerId = $util.newBuffer([]); - - /** - * PeerRecord seq. - * @member {number} seq - * @memberof PeerRecord - * @instance - */ - PeerRecord.prototype.seq = $util.Long ? $util.Long.fromBits(0,0,true) : 0; - - /** - * PeerRecord addresses. - * @member {Array.} addresses - * @memberof PeerRecord - * @instance - */ - PeerRecord.prototype.addresses = $util.emptyArray; - - /** - * Encodes the specified PeerRecord message. Does not implicitly {@link PeerRecord.verify|verify} messages. - * @function encode - * @memberof PeerRecord - * @static - * @param {IPeerRecord} m PeerRecord message or plain object to encode - * @param {$protobuf.Writer} [w] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - PeerRecord.encode = function encode(m, w) { - if (!w) - w = $Writer.create(); - if (m.peerId != null && Object.hasOwnProperty.call(m, "peerId")) - w.uint32(10).bytes(m.peerId); - if (m.seq != null && Object.hasOwnProperty.call(m, "seq")) - w.uint32(16).uint64(m.seq); - if (m.addresses != null && m.addresses.length) { - for (var i = 0; i < m.addresses.length; ++i) - $root.PeerRecord.AddressInfo.encode(m.addresses[i], w.uint32(26).fork()).ldelim(); - } - return w; - }; - - /** - * Decodes a PeerRecord message from the specified reader or buffer. - * @function decode - * @memberof PeerRecord - * @static - * @param {$protobuf.Reader|Uint8Array} r Reader or buffer to decode from - * @param {number} [l] Message length if known beforehand - * @returns {PeerRecord} PeerRecord - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - PeerRecord.decode = function decode(r, l) { - if (!(r instanceof $Reader)) - r = $Reader.create(r); - var c = l === undefined ? 
r.len : r.pos + l, m = new $root.PeerRecord(); - while (r.pos < c) { - var t = r.uint32(); - switch (t >>> 3) { - case 1: - m.peerId = r.bytes(); - break; - case 2: - m.seq = r.uint64(); - break; - case 3: - if (!(m.addresses && m.addresses.length)) - m.addresses = []; - m.addresses.push($root.PeerRecord.AddressInfo.decode(r, r.uint32())); - break; - default: - r.skipType(t & 7); - break; - } - } - return m; - }; - - /** - * Creates a PeerRecord message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof PeerRecord - * @static - * @param {Object.} d Plain object - * @returns {PeerRecord} PeerRecord - */ - PeerRecord.fromObject = function fromObject(d) { - if (d instanceof $root.PeerRecord) - return d; - var m = new $root.PeerRecord(); - if (d.peerId != null) { - if (typeof d.peerId === "string") - $util.base64.decode(d.peerId, m.peerId = $util.newBuffer($util.base64.length(d.peerId)), 0); - else if (d.peerId.length) - m.peerId = d.peerId; - } - if (d.seq != null) { - if ($util.Long) - (m.seq = $util.Long.fromValue(d.seq)).unsigned = true; - else if (typeof d.seq === "string") - m.seq = parseInt(d.seq, 10); - else if (typeof d.seq === "number") - m.seq = d.seq; - else if (typeof d.seq === "object") - m.seq = new $util.LongBits(d.seq.low >>> 0, d.seq.high >>> 0).toNumber(true); - } - if (d.addresses) { - if (!Array.isArray(d.addresses)) - throw TypeError(".PeerRecord.addresses: array expected"); - m.addresses = []; - for (var i = 0; i < d.addresses.length; ++i) { - if (typeof d.addresses[i] !== "object") - throw TypeError(".PeerRecord.addresses: object expected"); - m.addresses[i] = $root.PeerRecord.AddressInfo.fromObject(d.addresses[i]); - } - } - return m; - }; - - /** - * Creates a plain object from a PeerRecord message. Also converts values to other types if specified. - * @function toObject - * @memberof PeerRecord - * @static - * @param {PeerRecord} m PeerRecord - * @param {$protobuf.IConversionOptions} [o] Conversion options - * @returns {Object.} Plain object - */ - PeerRecord.toObject = function toObject(m, o) { - if (!o) - o = {}; - var d = {}; - if (o.arrays || o.defaults) { - d.addresses = []; - } - if (o.defaults) { - if (o.bytes === String) - d.peerId = ""; - else { - d.peerId = []; - if (o.bytes !== Array) - d.peerId = $util.newBuffer(d.peerId); - } - if ($util.Long) { - var n = new $util.Long(0, 0, true); - d.seq = o.longs === String ? n.toString() : o.longs === Number ? n.toNumber() : n; - } else - d.seq = o.longs === String ? "0" : 0; - } - if (m.peerId != null && m.hasOwnProperty("peerId")) { - d.peerId = o.bytes === String ? $util.base64.encode(m.peerId, 0, m.peerId.length) : o.bytes === Array ? Array.prototype.slice.call(m.peerId) : m.peerId; - } - if (m.seq != null && m.hasOwnProperty("seq")) { - if (typeof m.seq === "number") - d.seq = o.longs === String ? String(m.seq) : m.seq; - else - d.seq = o.longs === String ? $util.Long.prototype.toString.call(m.seq) : o.longs === Number ? new $util.LongBits(m.seq.low >>> 0, m.seq.high >>> 0).toNumber(true) : m.seq; - } - if (m.addresses && m.addresses.length) { - d.addresses = []; - for (var j = 0; j < m.addresses.length; ++j) { - d.addresses[j] = $root.PeerRecord.AddressInfo.toObject(m.addresses[j], o); - } - } - return d; - }; - - /** - * Converts this PeerRecord to JSON. 
- * @function toJSON - * @memberof PeerRecord - * @instance - * @returns {Object.} JSON object - */ - PeerRecord.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - PeerRecord.AddressInfo = (function() { - - /** - * Properties of an AddressInfo. - * @memberof PeerRecord - * @interface IAddressInfo - * @property {Uint8Array|null} [multiaddr] AddressInfo multiaddr - */ - - /** - * Constructs a new AddressInfo. - * @memberof PeerRecord - * @classdesc Represents an AddressInfo. - * @implements IAddressInfo - * @constructor - * @param {PeerRecord.IAddressInfo=} [p] Properties to set - */ - function AddressInfo(p) { - if (p) - for (var ks = Object.keys(p), i = 0; i < ks.length; ++i) - if (p[ks[i]] != null) - this[ks[i]] = p[ks[i]]; - } - - /** - * AddressInfo multiaddr. - * @member {Uint8Array} multiaddr - * @memberof PeerRecord.AddressInfo - * @instance - */ - AddressInfo.prototype.multiaddr = $util.newBuffer([]); - - /** - * Encodes the specified AddressInfo message. Does not implicitly {@link PeerRecord.AddressInfo.verify|verify} messages. - * @function encode - * @memberof PeerRecord.AddressInfo - * @static - * @param {PeerRecord.IAddressInfo} m AddressInfo message or plain object to encode - * @param {$protobuf.Writer} [w] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - AddressInfo.encode = function encode(m, w) { - if (!w) - w = $Writer.create(); - if (m.multiaddr != null && Object.hasOwnProperty.call(m, "multiaddr")) - w.uint32(10).bytes(m.multiaddr); - return w; - }; - - /** - * Decodes an AddressInfo message from the specified reader or buffer. - * @function decode - * @memberof PeerRecord.AddressInfo - * @static - * @param {$protobuf.Reader|Uint8Array} r Reader or buffer to decode from - * @param {number} [l] Message length if known beforehand - * @returns {PeerRecord.AddressInfo} AddressInfo - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - AddressInfo.decode = function decode(r, l) { - if (!(r instanceof $Reader)) - r = $Reader.create(r); - var c = l === undefined ? r.len : r.pos + l, m = new $root.PeerRecord.AddressInfo(); - while (r.pos < c) { - var t = r.uint32(); - switch (t >>> 3) { - case 1: - m.multiaddr = r.bytes(); - break; - default: - r.skipType(t & 7); - break; - } - } - return m; - }; - - /** - * Creates an AddressInfo message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof PeerRecord.AddressInfo - * @static - * @param {Object.} d Plain object - * @returns {PeerRecord.AddressInfo} AddressInfo - */ - AddressInfo.fromObject = function fromObject(d) { - if (d instanceof $root.PeerRecord.AddressInfo) - return d; - var m = new $root.PeerRecord.AddressInfo(); - if (d.multiaddr != null) { - if (typeof d.multiaddr === "string") - $util.base64.decode(d.multiaddr, m.multiaddr = $util.newBuffer($util.base64.length(d.multiaddr)), 0); - else if (d.multiaddr.length) - m.multiaddr = d.multiaddr; - } - return m; - }; - - /** - * Creates a plain object from an AddressInfo message. Also converts values to other types if specified. 
- * @function toObject - * @memberof PeerRecord.AddressInfo - * @static - * @param {PeerRecord.AddressInfo} m AddressInfo - * @param {$protobuf.IConversionOptions} [o] Conversion options - * @returns {Object.} Plain object - */ - AddressInfo.toObject = function toObject(m, o) { - if (!o) - o = {}; - var d = {}; - if (o.defaults) { - if (o.bytes === String) - d.multiaddr = ""; - else { - d.multiaddr = []; - if (o.bytes !== Array) - d.multiaddr = $util.newBuffer(d.multiaddr); - } - } - if (m.multiaddr != null && m.hasOwnProperty("multiaddr")) { - d.multiaddr = o.bytes === String ? $util.base64.encode(m.multiaddr, 0, m.multiaddr.length) : o.bytes === Array ? Array.prototype.slice.call(m.multiaddr) : m.multiaddr; - } - return d; - }; - - /** - * Converts this AddressInfo to JSON. - * @function toJSON - * @memberof PeerRecord.AddressInfo - * @instance - * @returns {Object.} JSON object - */ - AddressInfo.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - return AddressInfo; - })(); - - return PeerRecord; -})(); - -module.exports = $root; diff --git a/src/record/peer-record/peer-record.proto b/src/record/peer-record/peer-record.proto deleted file mode 100644 index 6b740dc8..00000000 --- a/src/record/peer-record/peer-record.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; - -message PeerRecord { - // AddressInfo is a wrapper around a binary multiaddr. It is defined as a - // separate message to allow us to add per-address metadata in the future. - message AddressInfo { - bytes multiaddr = 1; - } - - // peer_id contains a libp2p peer id in its binary representation. - bytes peer_id = 1; - - // seq contains a monotonically-increasing sequence counter to order PeerRecords in time. - uint64 seq = 2; - - // addresses is a list of public listen addresses for the peer. - repeated AddressInfo addresses = 3; -} \ No newline at end of file diff --git a/src/record/utils.js b/src/record/utils.js deleted file mode 100644 index 512d62a2..00000000 --- a/src/record/utils.js +++ /dev/null @@ -1,25 +0,0 @@ -'use strict' - -const Envelope = require('./envelope') -const PeerRecord = require('./peer-record') - -/** - * @typedef {import('../')} Libp2p - */ - -/** - * Create (or update if existing) self peer record and store it in the AddressBook. 
- * - * @param {Libp2p} libp2p - * @returns {Promise} - */ -async function updateSelfPeerRecord (libp2p) { - const peerRecord = new PeerRecord({ - peerId: libp2p.peerId, - multiaddrs: libp2p.multiaddrs - }) - const envelope = await Envelope.seal(peerRecord, libp2p.peerId) - await libp2p.peerStore.addressBook.consumePeerRecord(envelope) -} - -module.exports.updateSelfPeerRecord = updateSelfPeerRecord diff --git a/src/registrar.js b/src/registrar.js deleted file mode 100644 index 812c7601..00000000 --- a/src/registrar.js +++ /dev/null @@ -1,127 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:registrar'), { - error: debug('libp2p:registrar:err') -}) -const errcode = require('err-code') - -const { - codes: { ERR_INVALID_PARAMETERS } -} = require('./errors') -const Topology = require('libp2p-interfaces/src/topology') - -/** - * @typedef {import('peer-id')} PeerId - * @typedef {import('./peer-store/types').PeerStore} PeerStore - * @typedef {import('./connection-manager')} ConnectionManager - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - * @typedef {import('./').HandlerProps} HandlerProps - */ - -/** - * - */ - -/** - * Responsible for notifying registered protocols of events in the network. - */ -class Registrar { - /** - * @param {Object} props - * @param {PeerStore} props.peerStore - * @param {ConnectionManager} props.connectionManager - * @class - */ - constructor ({ peerStore, connectionManager }) { - // Used on topology to listen for protocol changes - this.peerStore = peerStore - - this.connectionManager = connectionManager - - /** - * Map of topologies - * - * @type {Map} - */ - this.topologies = new Map() - - /** @type {(protocols: string[]|string, handler: (props: HandlerProps) => void) => void} */ - // @ts-ignore handle is not optional - this._handle = undefined - - this._onDisconnect = this._onDisconnect.bind(this) - this.connectionManager.on('peer:disconnect', this._onDisconnect) - } - - /** - * @returns {(protocols: string[]|string, handler: (props: HandlerProps) => void) => void} - */ - get handle () { - return this._handle - } - - /** - * @param {(protocols: string[]|string, handler: (props: HandlerProps) => void) => void} handle - */ - set handle (handle) { - this._handle = handle - } - - /** - * Get a connection with a peer. - * - * @param {PeerId} peerId - * @returns {Connection | null} - */ - getConnection (peerId) { - return this.connectionManager.get(peerId) - } - - /** - * Register handlers for a set of multicodecs given - * - * @param {Topology} topology - protocol topology - * @returns {Promise} registrar identifier - */ - async register (topology) { - if (!Topology.isTopology(topology)) { - log.error('topology must be an instance of interfaces/topology') - throw errcode(new Error('topology must be an instance of interfaces/topology'), ERR_INVALID_PARAMETERS) - } - - // Create topology - const id = (Math.random() * 1e9).toString(36) + Date.now() - - this.topologies.set(id, topology) - - // Set registrar - await topology.setRegistrar(this) - - return id - } - - /** - * Unregister topology. 
- * - * @param {string} id - registrar identifier - * @returns {boolean} unregistered successfully - */ - unregister (id) { - return this.topologies.delete(id) - } - - /** - * Remove a disconnected peer from the record - * - * @param {Connection} connection - * @returns {void} - */ - _onDisconnect (connection) { - for (const [, topology] of this.topologies) { - topology.disconnect(connection.remotePeer) - } - } -} - -module.exports = Registrar diff --git a/src/registrar.ts b/src/registrar.ts new file mode 100644 index 00000000..20d960f5 --- /dev/null +++ b/src/registrar.ts @@ -0,0 +1,205 @@ +import { logger } from '@libp2p/logger' +import errCode from 'err-code' +import { codes } from './errors.js' +import { isTopology, Topology } from '@libp2p/interfaces/topology' +import type { Registrar, StreamHandler } from '@libp2p/interfaces/registrar' +import type { PeerProtocolsChangeData } from '@libp2p/interfaces/peer-store' +import type { Connection } from '@libp2p/interfaces/connection' +import type { Components } from '@libp2p/interfaces/components' + +const log = logger('libp2p:registrar') + +function supportsProtocol (peerProtocols: string[], topologyProtocols: string[]) { + for (const peerProtocol of peerProtocols) { + if (topologyProtocols.includes(peerProtocol)) { + return true + } + } + + return false +} + +/** + * Responsible for notifying registered protocols of events in the network. + */ +export class DefaultRegistrar implements Registrar { + private readonly topologies: Map + private readonly handlers: Map + private readonly components: Components + + constructor (components: Components) { + this.topologies = new Map() + this.handlers = new Map() + this.components = components + + this._onDisconnect = this._onDisconnect.bind(this) + this._onConnect = this._onConnect.bind(this) + this._onProtocolChange = this._onProtocolChange.bind(this) + + this.components.getConnectionManager().addEventListener('peer:disconnect', this._onDisconnect) + this.components.getConnectionManager().addEventListener('peer:connect', this._onConnect) + this.components.getPeerStore().addEventListener('change:protocols', this._onProtocolChange) + } + + getProtocols () { + const protocols = new Set() + + for (const topology of this.topologies.values()) { + topology.protocols.forEach(protocol => protocols.add(protocol)) + } + + for (const protocol of this.handlers.keys()) { + protocols.add(protocol) + } + + return Array.from(protocols).sort() + } + + getHandler (protocol: string) { + const handler = this.handlers.get(protocol) + + if (handler == null) { + throw new Error(`No handler registered for protocol ${protocol}`) + } + + return handler + } + + getTopologies (protocol: string) { + const output: Topology[] = [] + + for (const { topology, protocols } of this.topologies.values()) { + if (protocols.includes(protocol)) { + output.push(topology) + } + } + + return output + } + + /** + * Registers the `handler` for each protocol + */ + async handle (protocols: string | string[], handler: StreamHandler): Promise { + const protocolList = Array.isArray(protocols) ? 
protocols : [protocols] + + for (const protocol of protocolList) { + if (this.handlers.has(protocol)) { + throw errCode(new Error(`Handler already registered for protocol ${protocol}`), codes.ERR_PROTOCOL_HANDLER_ALREADY_REGISTERED) + } + + this.handlers.set(protocol, handler) + } + + // Add new protocols to self protocols in the Protobook + await this.components.getPeerStore().protoBook.add(this.components.getPeerId(), protocolList) + } + + /** + * Removes the handler for each protocol. The protocol + * will no longer be supported on streams. + */ + async unhandle (protocols: string | string[]) { + const protocolList = Array.isArray(protocols) ? protocols : [protocols] + + protocolList.forEach(protocol => { + this.handlers.delete(protocol) + }) + + // Remove protocols from self protocols in the Protobook + await this.components.getPeerStore().protoBook.remove(this.components.getPeerId(), protocolList) + } + + /** + * Register handlers for a set of multicodecs given + */ + async register (protocols: string | string[], topology: Topology): Promise { + if (!isTopology(topology)) { + log.error('topology must be an instance of interfaces/topology') + throw errCode(new Error('topology must be an instance of interfaces/topology'), codes.ERR_INVALID_PARAMETERS) + } + + // Create topology + const id = `${(Math.random() * 1e9).toString(36)}${Date.now()}` + + this.topologies.set(id, { + topology, + protocols: Array.isArray(protocols) ? protocols : [protocols] + }) + + // Set registrar + await topology.setRegistrar(this) + + return id + } + + /** + * Unregister topology + */ + unregister (id: string) { + this.topologies.delete(id) + } + + /** + * Remove a disconnected peer from the record + */ + _onDisconnect (evt: CustomEvent) { + const connection = evt.detail + + void this.components.getPeerStore().protoBook.get(connection.remotePeer) + .then(peerProtocols => { + for (const { topology, protocols } of this.topologies.values()) { + if (supportsProtocol(peerProtocols, protocols)) { + topology.onDisconnect(connection.remotePeer) + } + } + }) + .catch(err => { + log.error(err) + }) + } + + _onConnect (evt: CustomEvent) { + const connection = evt.detail + + void this.components.getPeerStore().protoBook.get(connection.remotePeer) + .then(peerProtocols => { + for (const { topology, protocols } of this.topologies.values()) { + if (supportsProtocol(peerProtocols, protocols)) { + topology.onConnect(connection.remotePeer, connection) + } + } + }) + .catch(err => { + log.error(err) + }) + } + + /** + * Check if a new peer support the multicodecs for this topology + */ + _onProtocolChange (evt: CustomEvent) { + const { peerId, protocols, oldProtocols } = evt.detail + + const removed = oldProtocols.filter(protocol => !protocols.includes(protocol)) + const added = protocols.filter(protocol => !oldProtocols.includes(protocol)) + + for (const { topology, protocols } of this.topologies.values()) { + if (supportsProtocol(removed, protocols)) { + topology.onDisconnect(peerId) + } + } + + for (const { topology, protocols } of this.topologies.values()) { + if (supportsProtocol(added, protocols)) { + const connection = this.components.getConnectionManager().getConnection(peerId) + + if (connection == null) { + continue + } + + topology.onConnect(peerId, connection) + } + } + } +} diff --git a/src/transport-manager.js b/src/transport-manager.js deleted file mode 100644 index 8a7303ef..00000000 --- a/src/transport-manager.js +++ /dev/null @@ -1,269 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = 
Object.assign(debug('libp2p:transports'), { - error: debug('libp2p:transports:err') -}) - -const pSettle = require('p-settle') -const { codes } = require('./errors') -const errCode = require('err-code') - -const { updateSelfPeerRecord } = require('./record/utils') - -/** - * @typedef {import('multiaddr').Multiaddr} Multiaddr - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - * @typedef {import('libp2p-interfaces/src/transport/types').TransportFactory} TransportFactory - * @typedef {import('libp2p-interfaces/src/transport/types').Transport} Transport - * - * @typedef {Object} TransportManagerProperties - * @property {import('./')} libp2p - * @property {import('./upgrader')} upgrader - * - * @typedef {Object} TransportManagerOptions - * @property {number} [faultTolerance = FAULT_TOLERANCE.FATAL_ALL] - Address listen error tolerance. - */ - -class TransportManager { - /** - * @class - * @param {TransportManagerProperties & TransportManagerOptions} options - */ - constructor ({ libp2p, upgrader, faultTolerance = FAULT_TOLERANCE.FATAL_ALL }) { - this.libp2p = libp2p - this.upgrader = upgrader - /** @type {Map} */ - this._transports = new Map() - this._listeners = new Map() - this._listenerOptions = new Map() - this.faultTolerance = faultTolerance - } - - /** - * Adds a `Transport` to the manager - * - * @param {string} key - * @param {TransportFactory} Transport - * @param {*} transportOptions - Additional options to pass to the transport - * @returns {void} - */ - add (key, Transport, transportOptions = {}) { - log('adding %s', key) - if (!key) { - throw errCode(new Error(`Transport must have a valid key, was given '${key}'`), codes.ERR_INVALID_KEY) - } - if (this._transports.has(key)) { - throw errCode(new Error('There is already a transport with this key'), codes.ERR_DUPLICATE_TRANSPORT) - } - - const transport = new Transport({ - ...transportOptions, - libp2p: this.libp2p, - upgrader: this.upgrader - }) - - this._transports.set(key, transport) - this._listenerOptions.set(key, transportOptions.listenerOptions || {}) - if (!this._listeners.has(key)) { - this._listeners.set(key, []) - } - } - - /** - * Stops all listeners - * - * @async - */ - async close () { - const tasks = [] - for (const [key, listeners] of this._listeners) { - log('closing listeners for %s', key) - while (listeners.length) { - const listener = listeners.pop() - listener.removeAllListeners('listening') - listener.removeAllListeners('close') - tasks.push(listener.close()) - } - } - - await Promise.all(tasks) - log('all listeners closed') - for (const key of this._listeners.keys()) { - this._listeners.set(key, []) - } - } - - /** - * Dials the given Multiaddr over it's supported transport - * - * @param {Multiaddr} ma - * @param {*} options - * @returns {Promise} - */ - async dial (ma, options) { - const transport = this.transportForMultiaddr(ma) - if (!transport) { - throw errCode(new Error(`No transport available for address ${String(ma)}`), codes.ERR_TRANSPORT_UNAVAILABLE) - } - - try { - return await transport.dial(ma, options) - } catch (/** @type {any} */ err) { - if (!err.code) err.code = codes.ERR_TRANSPORT_DIAL_FAILED - throw err - } - } - - /** - * Returns all Multiaddr's the listeners are using - * - * @returns {Multiaddr[]} - */ - getAddrs () { - /** @type {Multiaddr[]} */ - let addrs = [] - for (const listeners of this._listeners.values()) { - for (const listener of listeners) { - addrs = [...addrs, ...listener.getAddrs()] - } - } - return addrs - } - - /** - * Returns all the 
transports instances. - * - * @returns {IterableIterator} - */ - getTransports () { - return this._transports.values() - } - - /** - * Finds a transport that matches the given Multiaddr - * - * @param {Multiaddr} ma - * @returns {Transport|null} - */ - transportForMultiaddr (ma) { - for (const transport of this._transports.values()) { - const addrs = transport.filter([ma]) - if (addrs.length) return transport - } - return null - } - - /** - * Starts listeners for each listen Multiaddr. - * - * @async - * @param {Multiaddr[]} addrs - addresses to attempt to listen on - */ - async listen (addrs) { - if (!addrs || addrs.length === 0) { - log('no addresses were provided for listening, this node is dial only') - return - } - - const couldNotListen = [] - for (const [key, transport] of this._transports.entries()) { - const supportedAddrs = transport.filter(addrs) - const tasks = [] - - // For each supported multiaddr, create a listener - for (const addr of supportedAddrs) { - log('creating listener for %s on %s', key, addr) - const listener = transport.createListener(this._listenerOptions.get(key)) - this._listeners.get(key).push(listener) - - // Track listen/close events - listener.on('listening', () => updateSelfPeerRecord(this.libp2p)) - listener.on('close', () => updateSelfPeerRecord(this.libp2p)) - - // We need to attempt to listen on everything - tasks.push(listener.listen(addr)) - } - - // Keep track of transports we had no addresses for - if (tasks.length === 0) { - couldNotListen.push(key) - continue - } - - const results = await pSettle(tasks) - // If we are listening on at least 1 address, succeed. - // TODO: we should look at adding a retry (`p-retry`) here to better support - // listening on remote addresses as they may be offline. We could then potentially - // just wait for any (`p-any`) listener to succeed on each transport before returning - const isListening = results.find(r => r.isFulfilled === true) - if (!isListening && this.faultTolerance !== FAULT_TOLERANCE.NO_FATAL) { - throw errCode(new Error(`Transport (${key}) could not listen on any available address`), codes.ERR_NO_VALID_ADDRESSES) - } - } - - // If no transports were able to listen, throw an error. This likely - // means we were given addresses we do not have transports for - if (couldNotListen.length === this._transports.size) { - const message = `no valid addresses were provided for transports [${couldNotListen}]` - if (this.faultTolerance === FAULT_TOLERANCE.FATAL_ALL) { - throw errCode(new Error(message), codes.ERR_NO_VALID_ADDRESSES) - } - log(`libp2p in dial mode only: ${message}`) - } - } - - /** - * Removes the given transport from the manager. - * If a transport has any running listeners, they will be closed. - * - * @async - * @param {string} key - */ - async remove (key) { - log('removing %s', key) - if (this._listeners.has(key)) { - // Close any running listeners - for (const listener of this._listeners.get(key)) { - listener.removeAllListeners('listening') - listener.removeAllListeners('close') - await listener.close() - } - } - - this._transports.delete(key) - this._listeners.delete(key) - } - - /** - * Removes all transports from the manager. - * If any listeners are running, they will be closed. - * - * @async - */ - async removeAll () { - const tasks = [] - for (const key of this._transports.keys()) { - tasks.push(this.remove(key)) - } - - await Promise.all(tasks) - } -} - -/** - * Enum Transport Manager Fault Tolerance values. - * FATAL_ALL should be used for failing in any listen circumstance. 
- * NO_FATAL should be used for not failing when not listening. - * - * @readonly - * @enum {number} - */ -const FAULT_TOLERANCE = { - FATAL_ALL: 0, - NO_FATAL: 1 -} - -TransportManager.FaultTolerance = FAULT_TOLERANCE - -module.exports = TransportManager diff --git a/src/transport-manager.ts b/src/transport-manager.ts new file mode 100644 index 00000000..d6e43642 --- /dev/null +++ b/src/transport-manager.ts @@ -0,0 +1,279 @@ +import { logger } from '@libp2p/logger' +import pSettle from 'p-settle' +import { codes } from './errors.js' +import errCode from 'err-code' +import type { Listener, Transport, TransportManager, TransportManagerEvents } from '@libp2p/interfaces/transport' +import type { Multiaddr } from '@multiformats/multiaddr' +import type { Connection } from '@libp2p/interfaces/connection' +import { AbortOptions, CustomEvent, EventEmitter, Startable } from '@libp2p/interfaces' +import type { Components } from '@libp2p/interfaces/components' +import { trackedMap } from '@libp2p/tracked-map' + +const log = logger('libp2p:transports') + +export interface TransportManagerInit { + faultTolerance?: FAULT_TOLERANCE +} + +export class DefaultTransportManager extends EventEmitter implements TransportManager, Startable { + private readonly components: Components + private readonly transports: Map + private readonly listeners: Map + private readonly faultTolerance: FAULT_TOLERANCE + private started: boolean + + constructor (components: Components, init: TransportManagerInit = {}) { + super() + + this.components = components + this.started = false + this.transports = new Map() + this.listeners = trackedMap({ + component: 'transport-manager', + metric: 'listeners', + metrics: this.components.getMetrics() + }) + this.faultTolerance = init.faultTolerance ?? 
FAULT_TOLERANCE.FATAL_ALL + } + + /** + * Adds a `Transport` to the manager + */ + add (transport: Transport) { + const tag = transport[Symbol.toStringTag] + + if (tag == null) { + throw errCode(new Error('Transport must have a valid tag'), codes.ERR_INVALID_KEY) + } + + if (this.transports.has(tag)) { + throw errCode(new Error('There is already a transport with this tag'), codes.ERR_DUPLICATE_TRANSPORT) + } + + log('adding transport %s', tag) + + this.transports.set(tag, transport) + + if (!this.listeners.has(tag)) { + this.listeners.set(tag, []) + } + } + + isStarted () { + return this.started + } + + async start () { + // Listen on the provided transports for the provided addresses + const addrs = this.components.getAddressManager().getListenAddrs() + + await this.listen(addrs) + + this.started = true + } + + /** + * Stops all listeners + */ + async stop () { + const tasks = [] + for (const [key, listeners] of this.listeners) { + log('closing listeners for %s', key) + while (listeners.length > 0) { + const listener = listeners.pop() + + if (listener == null) { + continue + } + + tasks.push(listener.close()) + } + } + + await Promise.all(tasks) + log('all listeners closed') + for (const key of this.listeners.keys()) { + this.listeners.set(key, []) + } + + this.started = false + } + + /** + * Dials the given Multiaddr over it's supported transport + */ + async dial (ma: Multiaddr, options?: AbortOptions): Promise { + const transport = this.transportForMultiaddr(ma) + + if (transport == null) { + throw errCode(new Error(`No transport available for address ${String(ma)}`), codes.ERR_TRANSPORT_UNAVAILABLE) + } + + try { + return await transport.dial(ma, { + ...options, + upgrader: this.components.getUpgrader() + }) + } catch (err: any) { + if (err.code == null) { + err.code = codes.ERR_TRANSPORT_DIAL_FAILED + } + + throw err + } + } + + /** + * Returns all Multiaddr's the listeners are using + */ + getAddrs (): Multiaddr[] { + let addrs: Multiaddr[] = [] + for (const listeners of this.listeners.values()) { + for (const listener of listeners) { + addrs = [...addrs, ...listener.getAddrs()] + } + } + return addrs + } + + /** + * Returns all the transports instances + */ + getTransports () { + return Array.of(...this.transports.values()) + } + + /** + * Finds a transport that matches the given Multiaddr + */ + transportForMultiaddr (ma: Multiaddr) { + for (const transport of this.transports.values()) { + const addrs = transport.filter([ma]) + + if (addrs.length > 0) { + return transport + } + } + } + + /** + * Starts listeners for each listen Multiaddr + */ + async listen (addrs: Multiaddr[]) { + if (addrs == null || addrs.length === 0) { + log('no addresses were provided for listening, this node is dial only') + return + } + + const couldNotListen = [] + + for (const [key, transport] of this.transports.entries()) { + const supportedAddrs = transport.filter(addrs) + const tasks = [] + + // For each supported multiaddr, create a listener + for (const addr of supportedAddrs) { + log('creating listener for %s on %s', key, addr) + const listener = transport.createListener({ + upgrader: this.components.getUpgrader() + }) + + let listeners = this.listeners.get(key) + + if (listeners == null) { + listeners = [] + this.listeners.set(key, listeners) + } + + listeners.push(listener) + + // Track listen/close events + listener.addEventListener('listening', () => { + this.dispatchEvent(new CustomEvent('listener:listening', { + detail: listener + })) + }) + listener.addEventListener('close', () => { + 
this.dispatchEvent(new CustomEvent('listener:close', { + detail: listener + })) + }) + + // We need to attempt to listen on everything + tasks.push(listener.listen(addr)) + } + + // Keep track of transports we had no addresses for + if (tasks.length === 0) { + couldNotListen.push(key) + continue + } + + const results = await pSettle(tasks) + // If we are listening on at least 1 address, succeed. + // TODO: we should look at adding a retry (`p-retry`) here to better support + // listening on remote addresses as they may be offline. We could then potentially + // just wait for any (`p-any`) listener to succeed on each transport before returning + const isListening = results.find(r => r.isFulfilled) + if ((isListening == null) && this.faultTolerance !== FAULT_TOLERANCE.NO_FATAL) { + throw errCode(new Error(`Transport (${key}) could not listen on any available address`), codes.ERR_NO_VALID_ADDRESSES) + } + } + + // If no transports were able to listen, throw an error. This likely + // means we were given addresses we do not have transports for + if (couldNotListen.length === this.transports.size) { + const message = `no valid addresses were provided for transports [${couldNotListen.join(', ')}]` + if (this.faultTolerance === FAULT_TOLERANCE.FATAL_ALL) { + throw errCode(new Error(message), codes.ERR_NO_VALID_ADDRESSES) + } + log(`libp2p in dial mode only: ${message}`) + } + } + + /** + * Removes the given transport from the manager. + * If a transport has any running listeners, they will be closed. + */ + async remove (key: string) { + log('removing %s', key) + + // Close any running listeners + for (const listener of this.listeners.get(key) ?? []) { + await listener.close() + } + + this.transports.delete(key) + this.listeners.delete(key) + } + + /** + * Removes all transports from the manager. + * If any listeners are running, they will be closed. + * + * @async + */ + async removeAll () { + const tasks = [] + for (const key of this.transports.keys()) { + tasks.push(this.remove(key)) + } + + await Promise.all(tasks) + } +} + +/** + * Enum Transport Manager Fault Tolerance values + */ +export enum FAULT_TOLERANCE { + /** + * should be used for failing in any listen circumstance + */ + FATAL_ALL = 0, + + /** + * should be used for not failing when not listening + */ + NO_FATAL +} diff --git a/src/types.ts b/src/types.ts deleted file mode 100644 index b8af9a6f..00000000 --- a/src/types.ts +++ /dev/null @@ -1,98 +0,0 @@ -import type PeerId from 'peer-id' -import type { Multiaddr } from 'multiaddr' -import type { MultiaddrConnection } from 'libp2p-interfaces/src/transport/types' - -export interface ConnectionGater { - /** - * denyDialMultiaddr tests whether we're permitted to Dial the - * specified peer. - * - * This is called by the dialer.connectToPeer implementation before - * dialling a peer. - * - * Return true to prevent dialing the passed peer. - */ - denyDialPeer: (peerId: PeerId) => Promise - - /** - * denyDialMultiaddr tests whether we're permitted to dial the specified - * multiaddr for the given peer. - * - * This is called by the dialer.connectToPeer implementation after it has - * resolved the peer's addrs, and prior to dialling each. - * - * Return true to prevent dialing the passed peer on the passed multiaddr. - */ - denyDialMultiaddr: (peerId: PeerId, multiaddr: Multiaddr) => Promise - - /** - * denyInboundConnection tests whether an incipient inbound connection is allowed. - * - * This is called by the upgrader, or by the transport directly (e.g. 
QUIC, - * Bluetooth), straight after it has accepted a connection from its socket. - * - * Return true to deny the incoming passed connection. - */ - denyInboundConnection: (maConn: MultiaddrConnection) => Promise - - /** - * denyOutboundConnection tests whether an incipient outbound connection is allowed. - * - * This is called by the upgrader, or by the transport directly (e.g. QUIC, - * Bluetooth), straight after it has created a connection with its socket. - * - * Return true to deny the incoming passed connection. - */ - denyOutboundConnection: (peerId: PeerId, maConn: MultiaddrConnection) => Promise - - /** - * denyInboundEncryptedConnection tests whether a given connection, now encrypted, - * is allowed. - * - * This is called by the upgrader, after it has performed the security - * handshake, and before it negotiates the muxer, or by the directly by the - * transport, at the exact same checkpoint. - * - * Return true to deny the passed secured connection. - */ - denyInboundEncryptedConnection: (peerId: PeerId, maConn: MultiaddrConnection) => Promise - - /** - * denyOutboundEncryptedConnection tests whether a given connection, now encrypted, - * is allowed. - * - * This is called by the upgrader, after it has performed the security - * handshake, and before it negotiates the muxer, or by the directly by the - * transport, at the exact same checkpoint. - * - * Return true to deny the passed secured connection. - */ - denyOutboundEncryptedConnection: (peerId: PeerId, maConn: MultiaddrConnection) => Promise - - /** - * denyInboundUpgradedConnection tests whether a fully capable connection is allowed. - * - * This is called after encryption has been negotiated and the connection has been - * multiplexed, if a multiplexer is configured. - * - * Return true to deny the passed upgraded connection. - */ - denyInboundUpgradedConnection: (peerId: PeerId, maConn: MultiaddrConnection) => Promise - - /** - * denyOutboundUpgradedConnection tests whether a fully capable connection is allowed. - * - * This is called after encryption has been negotiated and the connection has been - * multiplexed, if a multiplexer is configured. - * - * Return true to deny the passed upgraded connection. - */ - denyOutboundUpgradedConnection: (peerId: PeerId, maConn: MultiaddrConnection) => Promise - - /** - * Used by the address book to filter passed addresses. - * - * Return true to allow storing the passed multiaddr for the passed peer. 
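[Editor's note, not part of the patch: every `deny*` hook documented above follows the same convention — resolve `true` to block the action and `false` to allow it — while `filterMultiaddrForPeer` is inverted (`true` means "keep the address"). As a rough, self-contained sketch of that convention (the blocklist and the simplified `SimplePeerId` type are hypothetical stand-ins, not the real interfaces from this file):]

```ts
// Editor's sketch: an allow-by-default connection gater that only refuses
// peers on a hypothetical blocklist. Types are deliberately simplified
// stand-ins for the real PeerId/Multiaddr/MultiaddrConnection interfaces.
interface SimplePeerId { toB58String: () => string }

const blockedPeers = new Set<string>() // hypothetical blocklist of peer id strings

const gater = {
  // Resolve `true` to prevent dialling the passed peer at all
  denyDialPeer: async (peerId: SimplePeerId) => blockedPeers.has(peerId.toB58String()),
  // Allow every multiaddr for peers we are willing to dial
  denyDialMultiaddr: async () => false,
  // Accept connections at every upgrade checkpoint (raw, encrypted, upgraded)
  denyInboundConnection: async () => false,
  denyOutboundConnection: async () => false,
  denyInboundEncryptedConnection: async () => false,
  denyOutboundEncryptedConnection: async () => false,
  denyInboundUpgradedConnection: async () => false,
  denyOutboundUpgradedConnection: async () => false,
  // Note the inverted sense: `true` here means the address may be stored
  filterMultiaddrForPeer: async () => true
}
```

[A stricter gater would typically only tighten the dial-time hooks (`denyDialPeer`, `denyDialMultiaddr`); the later checkpoints exist so decisions can also be made once the remote peer's identity has been proven by the encryption handshake.]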
- */ - filterMultiaddrForPeer: (peer: PeerId, multiaddr: Multiaddr) => Promise -} diff --git a/src/upgrader.js b/src/upgrader.js deleted file mode 100644 index cbfc77c3..00000000 --- a/src/upgrader.js +++ /dev/null @@ -1,492 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = Object.assign(debug('libp2p:upgrader'), { - error: debug('libp2p:upgrader:err') -}) -const errCode = require('err-code') -const Multistream = require('multistream-select') -const { Connection } = require('libp2p-interfaces/src/connection') -const PeerId = require('peer-id') -const { pipe } = require('it-pipe') -// @ts-ignore mutable-proxy does not export types -const mutableProxy = require('mutable-proxy') - -const { codes } = require('./errors') - -/** - * @typedef {import('libp2p-interfaces/src/transport/types').MultiaddrConnection} MultiaddrConnection - * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxerFactory} MuxerFactory - * @typedef {import('libp2p-interfaces/src/stream-muxer/types').Muxer} Muxer - * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxedStream} MuxedStream - * @typedef {import('libp2p-interfaces/src/crypto/types').Crypto} Crypto - * @typedef {import('libp2p-interfaces/src/connection').Connection} Connection - * @typedef {import('multiaddr').Multiaddr} Multiaddr - * @typedef {import('./types').ConnectionGater} ConnectionGater - */ - -/** - * @typedef CryptoResult - * @property {MultiaddrConnection} conn A duplex iterable - * @property {PeerId} remotePeer - * @property {string} protocol - */ - -class Upgrader { - /** - * @param {object} options - * @param {PeerId} options.localPeer - * @param {ConnectionGater} options.connectionGater - * - * @param {import('./metrics')} [options.metrics] - * @param {Map} [options.cryptos] - * @param {Map} [options.muxers] - * @param {(connection: Connection) => void} options.onConnection - Called when a connection is upgraded - * @param {(connection: Connection) => void} options.onConnectionEnd - */ - constructor ({ - localPeer, - metrics, - connectionGater, - cryptos = new Map(), - muxers = new Map(), - onConnectionEnd = () => {}, - onConnection = () => {} - }) { - this.connectionGater = connectionGater - this.localPeer = localPeer - this.metrics = metrics - this.cryptos = cryptos - this.muxers = muxers - /** @type {import("./pnet") | null} */ - this.protector = null - this.protocols = new Map() - this.onConnection = onConnection - this.onConnectionEnd = onConnectionEnd - } - - /** - * Upgrades an inbound connection - * - * @async - * @param {MultiaddrConnection} maConn - * @returns {Promise} - */ - async upgradeInbound (maConn) { - let encryptedConn - let remotePeer - let upgradedConn - let Muxer - let cryptoProtocol - let setPeer - let proxyPeer - - if (await this.connectionGater.denyInboundConnection(maConn)) { - throw errCode(new Error('The multiaddr connection is blocked by gater.acceptConnection'), codes.ERR_CONNECTION_INTERCEPTED) - } - - if (this.metrics) { - ({ setTarget: setPeer, proxy: proxyPeer } = mutableProxy()) - const idString = (Math.random() * 1e9).toString(36) + Date.now() - setPeer({ toB58String: () => idString }) - maConn = this.metrics.trackStream({ stream: maConn, remotePeer: proxyPeer }) - } - - log('Starting the inbound connection upgrade') - - // Protect - let protectedConn = maConn - if (this.protector) { - protectedConn = await this.protector.protect(maConn) - } - - try { - // Encrypt the connection - ({ - conn: encryptedConn, - remotePeer, - protocol: cryptoProtocol - } = await 
this._encryptInbound(this.localPeer, protectedConn, this.cryptos)) - - if (await this.connectionGater.denyInboundEncryptedConnection(remotePeer, encryptedConn)) { - throw errCode(new Error('The multiaddr connection is blocked by gater.acceptEncryptedConnection'), codes.ERR_CONNECTION_INTERCEPTED) - } - - // Multiplex the connection - if (this.muxers.size) { - ({ stream: upgradedConn, Muxer } = await this._multiplexInbound(encryptedConn, this.muxers)) - } else { - upgradedConn = encryptedConn - } - } catch (/** @type {any} */ err) { - log.error('Failed to upgrade inbound connection', err) - await maConn.close(err) - throw err - } - - if (await this.connectionGater.denyInboundUpgradedConnection(remotePeer, encryptedConn)) { - throw errCode(new Error('The multiaddr connection is blocked by gater.acceptEncryptedConnection'), codes.ERR_CONNECTION_INTERCEPTED) - } - - if (this.metrics) { - this.metrics.updatePlaceholder(proxyPeer, remotePeer) - setPeer(remotePeer) - } - - log('Successfully upgraded inbound connection') - - return this._createConnection({ - cryptoProtocol, - direction: 'inbound', - maConn, - upgradedConn, - Muxer, - remotePeer - }) - } - - /** - * Upgrades an outbound connection - * - * @async - * @param {MultiaddrConnection} maConn - * @returns {Promise} - */ - async upgradeOutbound (maConn) { - const idStr = maConn.remoteAddr.getPeerId() - if (!idStr) { - throw errCode(new Error('outbound connection must have a peer id'), codes.ERR_INVALID_MULTIADDR) - } - - const remotePeerId = PeerId.createFromB58String(idStr) - - if (await this.connectionGater.denyOutboundConnection(remotePeerId, maConn)) { - throw errCode(new Error('The multiaddr connection is blocked by connectionGater.denyOutboundConnection'), codes.ERR_CONNECTION_INTERCEPTED) - } - - let encryptedConn - let remotePeer - let upgradedConn - let cryptoProtocol - let Muxer - let setPeer - let proxyPeer - - if (this.metrics) { - ({ setTarget: setPeer, proxy: proxyPeer } = mutableProxy()) - const idString = (Math.random() * 1e9).toString(36) + Date.now() - setPeer({ toB58String: () => idString }) - maConn = this.metrics.trackStream({ stream: maConn, remotePeer: proxyPeer }) - } - - log('Starting the outbound connection upgrade') - - // Protect - let protectedConn = maConn - if (this.protector) { - protectedConn = await this.protector.protect(maConn) - } - - try { - // Encrypt the connection - ({ - conn: encryptedConn, - remotePeer, - protocol: cryptoProtocol - } = await this._encryptOutbound(this.localPeer, protectedConn, remotePeerId, this.cryptos)) - - if (await this.connectionGater.denyOutboundEncryptedConnection(remotePeer, encryptedConn)) { - throw errCode(new Error('The multiaddr connection is blocked by gater.acceptEncryptedConnection'), codes.ERR_CONNECTION_INTERCEPTED) - } - - // Multiplex the connection - if (this.muxers.size) { - ({ stream: upgradedConn, Muxer } = await this._multiplexOutbound(encryptedConn, this.muxers)) - } else { - upgradedConn = encryptedConn - } - } catch (/** @type {any} */ err) { - log.error('Failed to upgrade outbound connection', err) - await maConn.close(err) - throw err - } - - if (await this.connectionGater.denyOutboundUpgradedConnection(remotePeer, encryptedConn)) { - throw errCode(new Error('The multiaddr connection is blocked by gater.acceptEncryptedConnection'), codes.ERR_CONNECTION_INTERCEPTED) - } - - if (this.metrics) { - this.metrics.updatePlaceholder(proxyPeer, remotePeer) - setPeer(remotePeer) - } - - log('Successfully upgraded outbound connection') - - return 
this._createConnection({ - cryptoProtocol, - direction: 'outbound', - maConn, - upgradedConn, - Muxer, - remotePeer - }) - } - - /** - * A convenience method for generating a new `Connection` - * - * @private - * @param {object} options - * @param {string} options.cryptoProtocol - The crypto protocol that was negotiated - * @param {'inbound' | 'outbound'} options.direction - One of ['inbound', 'outbound'] - * @param {MultiaddrConnection} options.maConn - The transport layer connection - * @param {MuxedStream | MultiaddrConnection} options.upgradedConn - A duplex connection returned from multiplexer and/or crypto selection - * @param {MuxerFactory} [options.Muxer] - The muxer to be used for muxing - * @param {PeerId} options.remotePeer - The peer the connection is with - * @returns {Connection} - */ - _createConnection ({ - cryptoProtocol, - direction, - maConn, - upgradedConn, - Muxer, - remotePeer - }) { - /** @type {import("libp2p-interfaces/src/stream-muxer/types").Muxer} */ - let muxer - /** @type {import("libp2p-interfaces/src/connection/connection").CreatedMuxedStream | undefined} */ - let newStream - /** @type {Connection} */ - let connection // eslint-disable-line prefer-const - - if (Muxer) { - // Create the muxer - muxer = new Muxer({ - // Run anytime a remote stream is created - onStream: async muxedStream => { - if (!connection) return - const mss = new Multistream.Listener(muxedStream) - try { - const { stream, protocol } = await mss.handle(Array.from(this.protocols.keys())) - log('%s: incoming stream opened on %s', direction, protocol) - if (this.metrics) this.metrics.trackStream({ stream, remotePeer, protocol }) - connection.addStream(muxedStream, { protocol }) - this._onStream({ connection, stream: { ...muxedStream, ...stream }, protocol }) - } catch (/** @type {any} */ err) { - log.error(err) - } - }, - // Run anytime a stream closes - onStreamEnd: muxedStream => { - connection.removeStream(muxedStream.id) - } - }) - - newStream = async (protocols) => { - log('%s: starting new stream on %s', direction, protocols) - const muxedStream = muxer.newStream() - const mss = new Multistream.Dialer(muxedStream) - try { - const { stream, protocol } = await mss.select(protocols) - if (this.metrics) this.metrics.trackStream({ stream, remotePeer, protocol }) - return { stream: { ...muxedStream, ...stream }, protocol } - } catch (/** @type {any} */ err) { - log.error('could not create new stream', err) - throw errCode(err, codes.ERR_UNSUPPORTED_PROTOCOL) - } - } - - // Pipe all data through the muxer - pipe(upgradedConn, muxer, upgradedConn).catch(log.error) - } - - const _timeline = maConn.timeline - maConn.timeline = new Proxy(_timeline, { - set: (...args) => { - if (connection && args[1] === 'close' && args[2] && !_timeline.close) { - // Wait for close to finish before notifying of the closure - (async () => { - try { - if (connection.stat.status === 'open') { - await connection.close() - } - } catch (/** @type {any} */ err) { - log.error(err) - } finally { - this.onConnectionEnd(connection) - } - })().catch(err => { - log.error(err) - }) - } - - return Reflect.set(...args) - } - }) - maConn.timeline.upgraded = Date.now() - - const errConnectionNotMultiplexed = () => { - throw errCode(new Error('connection is not multiplexed'), codes.ERR_CONNECTION_NOT_MULTIPLEXED) - } - - // Create the connection - connection = new Connection({ - localAddr: maConn.localAddr, - remoteAddr: maConn.remoteAddr, - localPeer: this.localPeer, - remotePeer: remotePeer, - stat: { - direction, - // @ts-ignore 
- timeline: maConn.timeline, - multiplexer: Muxer && Muxer.multicodec, - encryption: cryptoProtocol - }, - newStream: newStream || errConnectionNotMultiplexed, - getStreams: () => muxer ? muxer.streams : errConnectionNotMultiplexed(), - close: async () => { - await maConn.close() - // Ensure remaining streams are aborted - if (muxer) { - muxer.streams.map(stream => stream.abort()) - } - } - }) - - this.onConnection(connection) - - return connection - } - - /** - * Routes incoming streams to the correct handler - * - * @private - * @param {object} options - * @param {Connection} options.connection - The connection the stream belongs to - * @param {MuxedStream} options.stream - * @param {string} options.protocol - */ - _onStream ({ connection, stream, protocol }) { - const handler = this.protocols.get(protocol) - handler({ connection, stream, protocol }) - } - - /** - * Attempts to encrypt the incoming `connection` with the provided `cryptos`. - * - * @private - * @async - * @param {PeerId} localPeer - The initiators PeerId - * @param {*} connection - * @param {Map} cryptos - * @returns {Promise} An encrypted connection, remote peer `PeerId` and the protocol of the `Crypto` used - */ - async _encryptInbound (localPeer, connection, cryptos) { - const mss = new Multistream.Listener(connection) - const protocols = Array.from(cryptos.keys()) - log('handling inbound crypto protocol selection', protocols) - - try { - const { stream, protocol } = await mss.handle(protocols) - const crypto = cryptos.get(protocol) - log('encrypting inbound connection...') - - if (!crypto) { - throw new Error(`no crypto module found for ${protocol}`) - } - - return { - ...await crypto.secureInbound(localPeer, stream), - protocol - } - } catch (/** @type {any} */ err) { - throw errCode(err, codes.ERR_ENCRYPTION_FAILED) - } - } - - /** - * Attempts to encrypt the given `connection` with the provided `cryptos`. - * The first `Crypto` module to succeed will be used - * - * @private - * @async - * @param {PeerId} localPeer - The initiators PeerId - * @param {MultiaddrConnection} connection - * @param {PeerId} remotePeerId - * @param {Map} cryptos - * @returns {Promise} An encrypted connection, remote peer `PeerId` and the protocol of the `Crypto` used - */ - async _encryptOutbound (localPeer, connection, remotePeerId, cryptos) { - const mss = new Multistream.Dialer(connection) - const protocols = Array.from(cryptos.keys()) - log('selecting outbound crypto protocol', protocols) - - try { - const { stream, protocol } = await mss.select(protocols) - const crypto = cryptos.get(protocol) - log('encrypting outbound connection to %j', remotePeerId) - - if (!crypto) { - throw new Error(`no crypto module found for ${protocol}`) - } - - return { - ...await crypto.secureOutbound(localPeer, stream, remotePeerId), - protocol - } - } catch (/** @type {any} */ err) { - throw errCode(err, codes.ERR_ENCRYPTION_FAILED) - } - } - - /** - * Selects one of the given muxers via multistream-select. That - * muxer will be used for all future streams on the connection. 
- * - * @private - * @async - * @param {MultiaddrConnection} connection - A basic duplex connection to multiplex - * @param {Map} muxers - The muxers to attempt multiplexing with - * @returns {Promise<{ stream: MuxedStream, Muxer?: MuxerFactory}>} A muxed connection - */ - async _multiplexOutbound (connection, muxers) { - const dialer = new Multistream.Dialer(connection) - const protocols = Array.from(muxers.keys()) - log('outbound selecting muxer %s', protocols) - try { - const { stream, protocol } = await dialer.select(protocols) - log('%s selected as muxer protocol', protocol) - const Muxer = muxers.get(protocol) - return { stream, Muxer } - } catch (/** @type {any} */ err) { - throw errCode(err, codes.ERR_MUXER_UNAVAILABLE) - } - } - - /** - * Registers support for one of the given muxers via multistream-select. The - * selected muxer will be used for all future streams on the connection. - * - * @private - * @async - * @param {MultiaddrConnection} connection - A basic duplex connection to multiplex - * @param {Map} muxers - The muxers to attempt multiplexing with - * @returns {Promise<{ stream: MuxedStream, Muxer?: MuxerFactory}>} A muxed connection - */ - async _multiplexInbound (connection, muxers) { - const listener = new Multistream.Listener(connection) - const protocols = Array.from(muxers.keys()) - log('inbound handling muxers %s', protocols) - try { - const { stream, protocol } = await listener.handle(protocols) - const Muxer = muxers.get(protocol) - return { stream, Muxer } - } catch (/** @type {any} */ err) { - throw errCode(err, codes.ERR_MUXER_UNAVAILABLE) - } - } -} - -module.exports = Upgrader diff --git a/src/upgrader.ts b/src/upgrader.ts new file mode 100644 index 00000000..12e6ee77 --- /dev/null +++ b/src/upgrader.ts @@ -0,0 +1,499 @@ +import { logger } from '@libp2p/logger' +import errCode from 'err-code' +import { Dialer, Listener } from '@libp2p/multistream-select' +import { pipe } from 'it-pipe' +// @ts-expect-error mutable-proxy does not export types +import mutableProxy from 'mutable-proxy' +import { codes } from './errors.js' +import { createConnection } from '@libp2p/connection' +import { CustomEvent, EventEmitter } from '@libp2p/interfaces' +import { peerIdFromString } from '@libp2p/peer-id' +import type { Connection, ProtocolStream, Stream } from '@libp2p/interfaces/connection' +import type { ConnectionEncrypter, SecuredConnection } from '@libp2p/interfaces/connection-encrypter' +import type { StreamMuxer, StreamMuxerFactory } from '@libp2p/interfaces/stream-muxer' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { MultiaddrConnection, Upgrader, UpgraderEvents } from '@libp2p/interfaces/transport' +import type { Duplex } from 'it-stream-types' +import type { Components } from '@libp2p/interfaces/components' + +const log = logger('libp2p:upgrader') + +interface CreateConectionOptions { + cryptoProtocol: string + direction: 'inbound' | 'outbound' + maConn: MultiaddrConnection + upgradedConn: Duplex + remotePeer: PeerId + muxerFactory?: StreamMuxerFactory +} + +interface OnStreamOptions { + connection: Connection + stream: Stream + protocol: string +} + +export interface CryptoResult extends SecuredConnection { + protocol: string +} + +export interface UpgraderInit { + connectionEncryption: ConnectionEncrypter[] + muxers: StreamMuxerFactory[] +} + +export class DefaultUpgrader extends EventEmitter implements Upgrader { + private readonly components: Components + private readonly connectionEncryption: Map + private readonly muxers: Map + + 
constructor (components: Components, init: UpgraderInit) { + super() + + this.components = components + this.connectionEncryption = new Map() + + init.connectionEncryption.forEach(encrypter => { + this.connectionEncryption.set(encrypter.protocol, encrypter) + }) + + this.muxers = new Map() + + init.muxers.forEach(muxer => { + this.muxers.set(muxer.protocol, muxer) + }) + } + + /** + * Upgrades an inbound connection + */ + async upgradeInbound (maConn: MultiaddrConnection): Promise { + let encryptedConn + let remotePeer + let upgradedConn: Duplex + let muxerFactory: StreamMuxerFactory | undefined + let cryptoProtocol + let setPeer + let proxyPeer + const metrics = this.components.getMetrics() + + if (await this.components.getConnectionGater().denyInboundConnection(maConn)) { + throw errCode(new Error('The multiaddr connection is blocked by gater.acceptConnection'), codes.ERR_CONNECTION_INTERCEPTED) + } + + if (metrics != null) { + ({ setTarget: setPeer, proxy: proxyPeer } = mutableProxy()) + const idString = `${(Math.random() * 1e9).toString(36)}${Date.now()}` + setPeer({ toString: () => idString }) + maConn = metrics.trackStream({ stream: maConn, remotePeer: proxyPeer }) + } + + log('starting the inbound connection upgrade') + + // Protect + let protectedConn = maConn + const protector = this.components.getConnectionProtector() + + if (protector != null) { + log('protecting the inbound connection') + protectedConn = await protector.protect(maConn) + } + + try { + // Encrypt the connection + ({ + conn: encryptedConn, + remotePeer, + protocol: cryptoProtocol + } = await this._encryptInbound(protectedConn)) + + if (await this.components.getConnectionGater().denyInboundEncryptedConnection(remotePeer, { + ...protectedConn, + ...encryptedConn + })) { + throw errCode(new Error('The multiaddr connection is blocked by gater.acceptEncryptedConnection'), codes.ERR_CONNECTION_INTERCEPTED) + } + + // Multiplex the connection + if (this.muxers.size > 0) { + const multiplexed = await this._multiplexInbound({ + ...protectedConn, + ...encryptedConn + }, this.muxers) + muxerFactory = multiplexed.muxerFactory + upgradedConn = multiplexed.stream + } else { + upgradedConn = encryptedConn + } + } catch (err: any) { + log.error('Failed to upgrade inbound connection', err) + await maConn.close(err) + throw err + } + + if (await this.components.getConnectionGater().denyInboundUpgradedConnection(remotePeer, { + ...protectedConn, + ...encryptedConn + })) { + throw errCode(new Error('The multiaddr connection is blocked by gater.acceptEncryptedConnection'), codes.ERR_CONNECTION_INTERCEPTED) + } + + if (metrics != null) { + metrics.updatePlaceholder(proxyPeer, remotePeer) + setPeer(remotePeer) + } + + log('Successfully upgraded inbound connection') + + return this._createConnection({ + cryptoProtocol, + direction: 'inbound', + maConn, + upgradedConn, + muxerFactory, + remotePeer + }) + } + + /** + * Upgrades an outbound connection + */ + async upgradeOutbound (maConn: MultiaddrConnection): Promise { + const idStr = maConn.remoteAddr.getPeerId() + if (idStr == null) { + throw errCode(new Error('outbound connection must have a peer id'), codes.ERR_INVALID_MULTIADDR) + } + + const remotePeerId = peerIdFromString(idStr) + + if (await this.components.getConnectionGater().denyOutboundConnection(remotePeerId, maConn)) { + throw errCode(new Error('The multiaddr connection is blocked by connectionGater.denyOutboundConnection'), codes.ERR_CONNECTION_INTERCEPTED) + } + + let encryptedConn + let remotePeer + let upgradedConn + let 
cryptoProtocol + let muxerFactory + let setPeer + let proxyPeer + const metrics = this.components.getMetrics() + + if (metrics != null) { + ({ setTarget: setPeer, proxy: proxyPeer } = mutableProxy()) + const idString = `${(Math.random() * 1e9).toString(36)}${Date.now()}` + setPeer({ toB58String: () => idString }) + maConn = metrics.trackStream({ stream: maConn, remotePeer: proxyPeer }) + } + + log('Starting the outbound connection upgrade') + + // Protect + let protectedConn = maConn + const protector = this.components.getConnectionProtector() + + if (protector != null) { + protectedConn = await protector.protect(maConn) + } + + try { + // Encrypt the connection + ({ + conn: encryptedConn, + remotePeer, + protocol: cryptoProtocol + } = await this._encryptOutbound(protectedConn, remotePeerId)) + + if (await this.components.getConnectionGater().denyOutboundEncryptedConnection(remotePeer, { + ...protectedConn, + ...encryptedConn + })) { + throw errCode(new Error('The multiaddr connection is blocked by gater.acceptEncryptedConnection'), codes.ERR_CONNECTION_INTERCEPTED) + } + + // Multiplex the connection + if (this.muxers.size > 0) { + const multiplexed = await this._multiplexOutbound({ + ...protectedConn, + ...encryptedConn + }, this.muxers) + muxerFactory = multiplexed.muxerFactory + upgradedConn = multiplexed.stream + } else { + upgradedConn = encryptedConn + } + } catch (err: any) { + log.error('Failed to upgrade outbound connection', err) + await maConn.close(err) + throw err + } + + if (await this.components.getConnectionGater().denyOutboundUpgradedConnection(remotePeer, { + ...protectedConn, + ...encryptedConn + })) { + throw errCode(new Error('The multiaddr connection is blocked by gater.acceptEncryptedConnection'), codes.ERR_CONNECTION_INTERCEPTED) + } + + if (metrics != null) { + metrics.updatePlaceholder(proxyPeer, remotePeer) + setPeer(remotePeer) + } + + log('Successfully upgraded outbound connection') + + return this._createConnection({ + cryptoProtocol, + direction: 'outbound', + maConn, + upgradedConn, + muxerFactory, + remotePeer + }) + } + + /** + * A convenience method for generating a new `Connection` + */ + _createConnection (opts: CreateConectionOptions): Connection { + const { + cryptoProtocol, + direction, + maConn, + upgradedConn, + remotePeer, + muxerFactory + } = opts + + let muxer: StreamMuxer | undefined + let newStream: ((multicodecs: string[]) => Promise) | undefined + let connection: Connection // eslint-disable-line prefer-const + + if (muxerFactory != null) { + // Create the muxer + muxer = muxerFactory.createStreamMuxer(this.components, { + // Run anytime a remote stream is created + onIncomingStream: muxedStream => { + if (connection == null) { + return + } + + void Promise.resolve() + .then(async () => { + const mss = new Listener(muxedStream) + const protocols = this.components.getRegistrar().getProtocols() + const { stream, protocol } = await mss.handle(protocols) + log('%s: incoming stream opened on %s', direction, protocol) + + const metrics = this.components.getMetrics() + + if (metrics != null) { + metrics.trackStream({ stream, remotePeer, protocol }) + } + + if (connection == null) { + return + } + + connection.addStream(muxedStream, { protocol }) + this._onStream({ connection, stream: { ...muxedStream, ...stream }, protocol }) + }) + .catch(err => { + log.error(err) + }) + }, + // Run anytime a stream closes + onStreamEnd: muxedStream => { + connection?.removeStream(muxedStream.id) + } + }) + + newStream = async (protocols: string[]): Promise => { 
+ if (muxer == null) { + throw errCode(new Error('Stream is not multiplexed'), codes.ERR_MUXER_UNAVAILABLE) + } + + log('%s: starting new stream on %s', direction, protocols) + const muxedStream = muxer.newStream() + const mss = new Dialer(muxedStream) + const metrics = this.components.getMetrics() + + try { + let { stream, protocol } = await mss.select(protocols) + + if (metrics != null) { + stream = metrics.trackStream({ stream, remotePeer, protocol }) + } + + return { stream: { ...muxedStream, ...stream }, protocol } + } catch (err: any) { + log.error('could not create new stream', err) + throw errCode(err, codes.ERR_UNSUPPORTED_PROTOCOL) + } + } + + // Pipe all data through the muxer + pipe(upgradedConn, muxer, upgradedConn).catch(log.error) + } + + const _timeline = maConn.timeline + maConn.timeline = new Proxy(_timeline, { + set: (...args) => { + if (connection != null && args[1] === 'close' && args[2] != null && _timeline.close == null) { + // Wait for close to finish before notifying of the closure + (async () => { + try { + if (connection.stat.status === 'OPEN') { + await connection.close() + } + } catch (err: any) { + log.error(err) + } finally { + this.dispatchEvent(new CustomEvent('connectionEnd', { + detail: connection + })) + } + })().catch(err => { + log.error(err) + }) + } + + return Reflect.set(...args) + } + }) + maConn.timeline.upgraded = Date.now() + + const errConnectionNotMultiplexed = () => { + throw errCode(new Error('connection is not multiplexed'), codes.ERR_CONNECTION_NOT_MULTIPLEXED) + } + + // Create the connection + connection = createConnection({ + remoteAddr: maConn.remoteAddr, + remotePeer: remotePeer, + stat: { + status: 'OPEN', + direction, + timeline: maConn.timeline, + multiplexer: muxer?.protocol, + encryption: cryptoProtocol + }, + newStream: newStream ?? errConnectionNotMultiplexed, + getStreams: () => muxer != null ? muxer.streams : errConnectionNotMultiplexed(), + close: async () => { + await maConn.close() + // Ensure remaining streams are aborted + if (muxer != null) { + muxer.streams.map(stream => stream.abort()) + } + } + }) + + this.dispatchEvent(new CustomEvent('connection', { + detail: connection + })) + + return connection + } + + /** + * Routes incoming streams to the correct handler + */ + _onStream (opts: OnStreamOptions): void { + const { connection, stream, protocol } = opts + const handler = this.components.getRegistrar().getHandler(protocol) + handler({ connection, stream, protocol }) + } + + /** + * Attempts to encrypt the incoming `connection` with the provided `cryptos` + */ + async _encryptInbound (connection: Duplex): Promise { + const mss = new Listener(connection) + const protocols = Array.from(this.connectionEncryption.keys()) + log('handling inbound crypto protocol selection', protocols) + + try { + const { stream, protocol } = await mss.handle(protocols) + const encrypter = this.connectionEncryption.get(protocol) + + if (encrypter == null) { + throw new Error(`no crypto module found for ${protocol}`) + } + + log('encrypting inbound connection...') + + return { + ...await encrypter.secureInbound(this.components.getPeerId(), stream), + protocol + } + } catch (err: any) { + throw errCode(err, codes.ERR_ENCRYPTION_FAILED) + } + } + + /** + * Attempts to encrypt the given `connection` with the provided connection encrypters. 
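+   * Encrypters are tried in configuration order via multistream-select.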
+ * The first `ConnectionEncrypter` module to succeed will be used + */ + async _encryptOutbound (connection: MultiaddrConnection, remotePeerId: PeerId): Promise { + const mss = new Dialer(connection) + const protocols = Array.from(this.connectionEncryption.keys()) + log('selecting outbound crypto protocol', protocols) + + try { + const { stream, protocol } = await mss.select(protocols) + const encrypter = this.connectionEncryption.get(protocol) + + if (encrypter == null) { + throw new Error(`no crypto module found for ${protocol}`) + } + + log('encrypting outbound connection to %p', remotePeerId) + + return { + ...await encrypter.secureOutbound(this.components.getPeerId(), stream, remotePeerId), + protocol + } + } catch (err: any) { + throw errCode(err, codes.ERR_ENCRYPTION_FAILED) + } + } + + /** + * Selects one of the given muxers via multistream-select. That + * muxer will be used for all future streams on the connection. + */ + async _multiplexOutbound (connection: MultiaddrConnection, muxers: Map): Promise<{ stream: Duplex, muxerFactory?: StreamMuxerFactory}> { + const dialer = new Dialer(connection) + const protocols = Array.from(muxers.keys()) + log('outbound selecting muxer %s', protocols) + try { + const { stream, protocol } = await dialer.select(protocols) + log('%s selected as muxer protocol', protocol) + const muxerFactory = muxers.get(protocol) + return { stream, muxerFactory } + } catch (err: any) { + log.error('error multiplexing outbound stream', err) + throw errCode(err, codes.ERR_MUXER_UNAVAILABLE) + } + } + + /** + * Registers support for one of the given muxers via multistream-select. The + * selected muxer will be used for all future streams on the connection. + */ + async _multiplexInbound (connection: MultiaddrConnection, muxers: Map): Promise<{ stream: Duplex, muxerFactory?: StreamMuxerFactory}> { + const listener = new Listener(connection) + const protocols = Array.from(muxers.keys()) + log('inbound handling muxers %s', protocols) + try { + const { stream, protocol } = await listener.handle(protocols) + const muxerFactory = muxers.get(protocol) + return { stream, muxerFactory } + } catch (err: any) { + log.error('error multiplexing inbound stream', err) + throw errCode(err, codes.ERR_MUXER_UNAVAILABLE) + } + } +} diff --git a/src/version.ts b/src/version.ts new file mode 100644 index 00000000..c77a04e1 --- /dev/null +++ b/src/version.ts @@ -0,0 +1,3 @@ + +export const version = '0.0.0' +export const name = 'libp2p' diff --git a/test/addresses/address-manager.spec.js b/test/addresses/address-manager.spec.js deleted file mode 100644 index 10747008..00000000 --- a/test/addresses/address-manager.spec.js +++ /dev/null @@ -1,146 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const { Multiaddr } = require('multiaddr') -const PeerId = require('peer-id') - -const AddressManager = require('../../src/address-manager') -const peerUtils = require('../utils/creators/peer') - -const Peers = require('../fixtures/peers') - -const listenAddresses = ['/ip4/127.0.0.1/tcp/15006/ws', '/ip4/127.0.0.1/tcp/15008/ws'] -const announceAddreses = ['/dns4/peer.io'] - -describe('Address Manager', () => { - let peerId - - before(async () => { - peerId = await PeerId.createFromJSON(Peers[0]) - }) - - it('should not need any addresses', () => { - const am = new AddressManager(peerId) - - expect(am.listen.size).to.equal(0) - expect(am.announce.size).to.equal(0) - }) - - it('should return listen multiaddrs on get', () => { - const am = new 
AddressManager(peerId, { - listen: listenAddresses - }) - - expect(am.listen.size).to.equal(listenAddresses.length) - expect(am.announce.size).to.equal(0) - - const listenMultiaddrs = am.getListenAddrs() - expect(listenMultiaddrs.length).to.equal(2) - expect(listenMultiaddrs[0].equals(new Multiaddr(listenAddresses[0]))).to.equal(true) - expect(listenMultiaddrs[1].equals(new Multiaddr(listenAddresses[1]))).to.equal(true) - }) - - it('should return announce multiaddrs on get', () => { - const am = new AddressManager(peerId, { - listen: listenAddresses, - announce: announceAddreses - }) - - expect(am.listen.size).to.equal(listenAddresses.length) - expect(am.announce.size).to.equal(announceAddreses.length) - - const announceMultiaddrs = am.getAnnounceAddrs() - expect(announceMultiaddrs.length).to.equal(1) - expect(announceMultiaddrs[0].equals(new Multiaddr(announceAddreses[0]))).to.equal(true) - }) - - it('should add observed addresses', () => { - const am = new AddressManager(peerId) - - expect(am.observed).to.be.empty() - - am.addObservedAddr('/ip4/123.123.123.123/tcp/39201') - - expect(am.observed).to.have.property('size', 1) - }) - - it('should dedupe added observed addresses', () => { - const ma = '/ip4/123.123.123.123/tcp/39201' - const am = new AddressManager(peerId) - - expect(am.observed).to.be.empty() - - am.addObservedAddr(ma) - am.addObservedAddr(ma) - am.addObservedAddr(ma) - - expect(am.observed).to.have.property('size', 1) - expect(am.observed).to.include(ma) - }) - - it('should only emit one change:addresses event', () => { - const ma = '/ip4/123.123.123.123/tcp/39201' - const am = new AddressManager(peerId) - let eventCount = 0 - - am.on('change:addresses', () => { - eventCount++ - }) - - am.addObservedAddr(ma) - am.addObservedAddr(ma) - am.addObservedAddr(ma) - am.addObservedAddr(`${ma}/p2p/${peerId}`) - am.addObservedAddr(`${ma}/p2p/${peerId.toB58String()}`) - - expect(eventCount).to.equal(1) - }) - - it('should strip our peer address from added observed addresses', () => { - const ma = '/ip4/123.123.123.123/tcp/39201' - const am = new AddressManager(peerId) - - expect(am.observed).to.be.empty() - - am.addObservedAddr(ma) - am.addObservedAddr(`${ma}/p2p/${peerId}`) - - expect(am.observed).to.have.property('size', 1) - expect(am.observed).to.include(ma) - }) - - it('should strip our peer address from added observed addresses in difference formats', () => { - const ma = '/ip4/123.123.123.123/tcp/39201' - const am = new AddressManager(peerId) - - expect(am.observed).to.be.empty() - - am.addObservedAddr(ma) - am.addObservedAddr(`${ma}/p2p/${peerId}`) // base32 CID - am.addObservedAddr(`${ma}/p2p/${peerId.toB58String()}`) // base58btc - - expect(am.observed).to.have.property('size', 1) - expect(am.observed).to.include(ma) - }) -}) - -describe('libp2p.addressManager', () => { - let libp2p - afterEach(() => libp2p && libp2p.stop()) - - it('should populate the AddressManager from the config', async () => { - [libp2p] = await peerUtils.createPeer({ - started: false, - config: { - addresses: { - listen: listenAddresses, - announce: announceAddreses - } - } - }) - - expect(libp2p.addressManager.listen.size).to.equal(listenAddresses.length) - expect(libp2p.addressManager.announce.size).to.equal(announceAddreses.length) - }) -}) diff --git a/test/addresses/address-manager.spec.ts b/test/addresses/address-manager.spec.ts new file mode 100644 index 00000000..a667e6d9 --- /dev/null +++ b/test/addresses/address-manager.spec.ts @@ -0,0 +1,188 @@ +/* eslint-env mocha */ + +import { expect } 
from 'aegir/utils/chai.js' +import { Multiaddr, protocols } from '@multiformats/multiaddr' +import { AddressFilter, DefaultAddressManager } from '../../src/address-manager/index.js' +import { createNode } from '../utils/creators/peer.js' +import { createFromJSON } from '@libp2p/peer-id-factory' +import Peers from '../fixtures/peers.js' +import { stubInterface } from 'ts-sinon' +import type { TransportManager } from '@libp2p/interfaces/transport' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { Libp2p } from '../../src/index.js' +import { Components } from '@libp2p/interfaces/components' + +const listenAddresses = ['/ip4/127.0.0.1/tcp/15006/ws', '/ip4/127.0.0.1/tcp/15008/ws'] +const announceAddreses = ['/dns4/peer.io'] + +describe('Address Manager', () => { + let peerId: PeerId + + before(async () => { + peerId = await createFromJSON(Peers[0]) + }) + + it('should not need any addresses', () => { + const am = new DefaultAddressManager(new Components({ + peerId, + transportManager: stubInterface() + }), { + announceFilter: stubInterface() + }) + + expect(am.getListenAddrs()).to.be.empty() + expect(am.getAnnounceAddrs()).to.be.empty() + }) + + it('should return listen multiaddrs on get', () => { + const am = new DefaultAddressManager(new Components({ + peerId, + transportManager: stubInterface() + }), { + announceFilter: stubInterface(), + listen: listenAddresses + }) + + expect(am.getListenAddrs()).to.have.lengthOf(listenAddresses.length) + expect(am.getAnnounceAddrs()).to.be.empty() + + const listenMultiaddrs = am.getListenAddrs() + expect(listenMultiaddrs.length).to.equal(2) + expect(listenMultiaddrs[0].equals(new Multiaddr(listenAddresses[0]))).to.equal(true) + expect(listenMultiaddrs[1].equals(new Multiaddr(listenAddresses[1]))).to.equal(true) + }) + + it('should return announce multiaddrs on get', () => { + const am = new DefaultAddressManager(new Components({ + peerId, + transportManager: stubInterface() + }), { + announceFilter: stubInterface(), + listen: listenAddresses, + announce: announceAddreses + }) + + expect(am.getListenAddrs()).to.have.lengthOf(listenAddresses.length) + expect(am.getAnnounceAddrs()).to.have.lengthOf(announceAddreses.length) + + const announceMultiaddrs = am.getAnnounceAddrs() + expect(announceMultiaddrs.length).to.equal(1) + expect(announceMultiaddrs[0].equals(new Multiaddr(announceAddreses[0]))).to.equal(true) + }) + + it('should add observed addresses', () => { + const am = new DefaultAddressManager(new Components({ + peerId, + transportManager: stubInterface() + }), { + announceFilter: stubInterface() + }) + + expect(am.getObservedAddrs()).to.be.empty() + + am.addObservedAddr('/ip4/123.123.123.123/tcp/39201') + + expect(am.getObservedAddrs()).to.have.lengthOf(1) + }) + + it('should dedupe added observed addresses', () => { + const ma = '/ip4/123.123.123.123/tcp/39201' + const am = new DefaultAddressManager(new Components({ + peerId, + transportManager: stubInterface() + }), { + announceFilter: stubInterface() + }) + + expect(am.getObservedAddrs()).to.be.empty() + + am.addObservedAddr(ma) + am.addObservedAddr(ma) + am.addObservedAddr(ma) + + expect(am.getObservedAddrs()).to.have.lengthOf(1) + expect(am.getObservedAddrs().map(ma => ma.toString())).to.include(ma) + }) + + it('should only emit one change:addresses event', () => { + const ma = '/ip4/123.123.123.123/tcp/39201' + const am = new DefaultAddressManager(new Components({ + peerId, + transportManager: stubInterface() + }), { + announceFilter: stubInterface() + }) + let eventCount 
= 0 + + am.addEventListener('change:addresses', () => { + eventCount++ + }) + + am.addObservedAddr(ma) + am.addObservedAddr(ma) + am.addObservedAddr(ma) + am.addObservedAddr(`${ma}/p2p/${peerId.toString()}`) + + expect(eventCount).to.equal(1) + }) + + it('should strip our peer address from added observed addresses', () => { + const ma = '/ip4/123.123.123.123/tcp/39201' + const am = new DefaultAddressManager(new Components({ + peerId, + transportManager: stubInterface() + }), { + announceFilter: stubInterface() + }) + + expect(am.getObservedAddrs()).to.be.empty() + + am.addObservedAddr(ma) + am.addObservedAddr(`${ma}/p2p/${peerId.toString()}`) + + expect(am.getObservedAddrs()).to.have.lengthOf(1) + expect(am.getObservedAddrs().map(ma => ma.toString())).to.include(ma) + }) + + it('should strip our peer address from added observed addresses in difference formats', () => { + const ma = '/ip4/123.123.123.123/tcp/39201' + const am = new DefaultAddressManager(new Components({ + peerId, + transportManager: stubInterface() + }), { + announceFilter: stubInterface() + }) + + expect(am.getObservedAddrs()).to.be.empty() + + am.addObservedAddr(ma) + am.addObservedAddr(`${ma}/p2p/${peerId.toString()}`) + + expect(am.getObservedAddrs()).to.have.lengthOf(1) + expect(am.getObservedAddrs().map(ma => ma.toString())).to.include(ma) + }) +}) + +describe('libp2p.addressManager', () => { + let libp2p: Libp2p + afterEach(async () => { + if (libp2p != null) { + await libp2p.stop() + } + }) + + it('should populate the AddressManager from the config', async () => { + libp2p = await createNode({ + started: false, + config: { + addresses: { + listen: listenAddresses, + announce: announceAddreses + } + } + }) + + expect(libp2p.getMultiaddrs().map(ma => ma.decapsulateCode(protocols('p2p').code).toString())).to.have.members(announceAddreses) + expect(libp2p.getMultiaddrs().map(ma => ma.decapsulateCode(protocols('p2p').code).toString())).to.not.have.members(listenAddresses) + }) +}) diff --git a/test/addresses/addresses.node.js b/test/addresses/addresses.node.ts similarity index 50% rename from test/addresses/addresses.node.js rename to test/addresses/addresses.node.ts index 00d93ca1..d656dde8 100644 --- a/test/addresses/addresses.node.js +++ b/test/addresses/addresses.node.ts @@ -1,25 +1,27 @@ -'use strict' /* eslint-env mocha */ -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') - -const { Multiaddr } = require('multiaddr') -const isLoopback = require('libp2p-utils/src/multiaddr/is-loopback') - -const { AddressesOptions } = require('./utils') -const peerUtils = require('../utils/creators/peer') +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import { Multiaddr, protocols } from '@multiformats/multiaddr' +import { isLoopback } from '@libp2p/utils/multiaddr/is-loopback' +import { AddressesOptions } from './utils.js' +import { createNode } from '../utils/creators/peer.js' +import type { Libp2pNode } from '../../src/libp2p.js' const listenAddresses = ['/ip4/127.0.0.1/tcp/0', '/ip4/127.0.0.1/tcp/8000/ws'] const announceAddreses = ['/dns4/peer.io/tcp/433/p2p/12D3KooWNvSZnPi3RrhrTwEY4LuuBeB6K6facKUCJcyWG1aoDd2p'] describe('libp2p.multiaddrs', () => { - let libp2p + let libp2p: Libp2pNode - afterEach(() => libp2p && libp2p.stop()) + afterEach(async () => { + if (libp2p != null) { + await libp2p.stop() + } + }) it('should keep listen addresses after start, even if changed', async () => { - [libp2p] = await peerUtils.createPeer({ + libp2p = await createNode({ started: false, 
config: { ...AddressesOptions, @@ -30,23 +32,23 @@ describe('libp2p.multiaddrs', () => { } }) - let listenAddrs = libp2p.addressManager.listen - expect(listenAddrs.size).to.equal(listenAddresses.length) - expect(listenAddrs.has(listenAddresses[0])).to.equal(true) - expect(listenAddrs.has(listenAddresses[1])).to.equal(true) + let listenAddrs = libp2p.components.getAddressManager().getListenAddrs().map(ma => ma.toString()) + expect(listenAddrs).to.have.lengthOf(listenAddresses.length) + expect(listenAddrs).to.include(listenAddresses[0]) + expect(listenAddrs).to.include(listenAddresses[1]) // Should not replace listen addresses after transport listen // Only transportManager has visibility of the port used await libp2p.start() - listenAddrs = libp2p.addressManager.listen - expect(listenAddrs.size).to.equal(listenAddresses.length) - expect(listenAddrs.has(listenAddresses[0])).to.equal(true) - expect(listenAddrs.has(listenAddresses[1])).to.equal(true) + listenAddrs = libp2p.components.getAddressManager().getListenAddrs().map(ma => ma.toString()) + expect(listenAddrs).to.have.lengthOf(listenAddresses.length) + expect(listenAddrs).to.include(listenAddresses[0]) + expect(listenAddrs).to.include(listenAddresses[1]) }) it('should announce transport listen addresses if announce addresses are not provided', async () => { - [libp2p] = await peerUtils.createPeer({ + libp2p = await createNode({ started: false, config: { ...AddressesOptions, @@ -58,11 +60,12 @@ describe('libp2p.multiaddrs', () => { await libp2p.start() - const tmListen = libp2p.transportManager.getAddrs().map((ma) => ma.toString()) + const tmListen = libp2p.components.getTransportManager().getAddrs().map((ma) => ma.toString()) // Announce 2 listen (transport) - const advertiseMultiaddrs = libp2p.multiaddrs.map((ma) => ma.toString()) - expect(advertiseMultiaddrs.length).to.equal(2) + const advertiseMultiaddrs = libp2p.components.getAddressManager().getAddresses().map((ma) => ma.decapsulateCode(protocols('p2p').code).toString()) + + expect(advertiseMultiaddrs).to.have.lengthOf(2) tmListen.forEach((m) => { expect(advertiseMultiaddrs).to.include(m) }) @@ -70,7 +73,7 @@ describe('libp2p.multiaddrs', () => { }) it('should only announce the given announce addresses when provided', async () => { - [libp2p] = await peerUtils.createPeer({ + libp2p = await createNode({ started: false, config: { ...AddressesOptions, @@ -83,10 +86,10 @@ describe('libp2p.multiaddrs', () => { await libp2p.start() - const tmListen = libp2p.transportManager.getAddrs().map((ma) => ma.toString()) + const tmListen = libp2p.components.getTransportManager().getAddrs().map((ma) => ma.toString()) // Announce 1 announce addr - const advertiseMultiaddrs = libp2p.multiaddrs.map((ma) => ma.toString()) + const advertiseMultiaddrs = libp2p.components.getAddressManager().getAddresses().map((ma) => ma.decapsulateCode(protocols('p2p').code).toString()) expect(advertiseMultiaddrs.length).to.equal(announceAddreses.length) advertiseMultiaddrs.forEach((m) => { expect(tmListen).to.not.include(m) @@ -95,7 +98,7 @@ describe('libp2p.multiaddrs', () => { }) it('can filter out loopback addresses by the announce filter', async () => { - [libp2p] = await peerUtils.createPeer({ + libp2p = await createNode({ started: false, config: { ...AddressesOptions, @@ -108,22 +111,22 @@ describe('libp2p.multiaddrs', () => { await libp2p.start() - expect(libp2p.multiaddrs.length).to.equal(0) + expect(libp2p.components.getAddressManager().getAddresses()).to.have.lengthOf(0) // Stub transportManager addresses to 
add a public address const stubMa = new Multiaddr('/ip4/120.220.10.1/tcp/1000') - sinon.stub(libp2p.transportManager, 'getAddrs').returns([ + sinon.stub(libp2p.components.getTransportManager(), 'getAddrs').returns([ ...listenAddresses.map((a) => new Multiaddr(a)), stubMa ]) - const multiaddrs = libp2p.multiaddrs + const multiaddrs = libp2p.components.getAddressManager().getAddresses() expect(multiaddrs.length).to.equal(1) - expect(multiaddrs[0].equals(stubMa)).to.eql(true) + expect(multiaddrs[0].decapsulateCode(protocols('p2p').code).equals(stubMa)).to.eql(true) }) it('can filter out loopback addresses to announced by the announce filter', async () => { - [libp2p] = await peerUtils.createPeer({ + libp2p = await createNode({ started: false, config: { ...AddressesOptions, @@ -135,21 +138,19 @@ describe('libp2p.multiaddrs', () => { } }) - const listenAddrs = libp2p.addressManager.listen - expect(listenAddrs.size).to.equal(listenAddresses.length) - expect(listenAddrs.has(listenAddresses[0])).to.equal(true) - expect(listenAddrs.has(listenAddresses[1])).to.equal(true) + const listenAddrs = libp2p.components.getAddressManager().getListenAddrs().map((ma) => ma.toString()) + expect(listenAddrs).to.have.lengthOf(listenAddresses.length) + expect(listenAddrs).to.include(listenAddresses[0]) + expect(listenAddrs).to.include(listenAddresses[1]) await libp2p.start() - const multiaddrs = libp2p.multiaddrs - expect(multiaddrs.length).to.equal(announceAddreses.length) - expect(multiaddrs.includes(listenAddresses[0])).to.equal(false) - expect(multiaddrs.includes(listenAddresses[1])).to.equal(false) + const loopbackAddrs = libp2p.components.getAddressManager().getAddresses().filter(ma => isLoopback(ma)) + expect(loopbackAddrs).to.be.empty() }) it('should include observed addresses in returned multiaddrs', async () => { - [libp2p] = await peerUtils.createPeer({ + libp2p = await createNode({ started: false, config: { ...AddressesOptions, @@ -162,11 +163,11 @@ describe('libp2p.multiaddrs', () => { await libp2p.start() - expect(libp2p.multiaddrs).to.have.lengthOf(listenAddresses.length) + expect(libp2p.components.getAddressManager().getAddresses()).to.have.lengthOf(listenAddresses.length) - libp2p.addressManager.addObservedAddr(ma) + libp2p.components.getAddressManager().addObservedAddr(new Multiaddr(ma)) - expect(libp2p.multiaddrs).to.have.lengthOf(listenAddresses.length + 1) - expect(libp2p.multiaddrs.map(ma => ma.toString())).to.include(ma) + expect(libp2p.components.getAddressManager().getAddresses()).to.have.lengthOf(listenAddresses.length + 1) + expect(libp2p.components.getAddressManager().getAddresses().map(ma => ma.decapsulateCode(protocols('p2p').code).toString())).to.include(ma) }) }) diff --git a/test/addresses/utils.js b/test/addresses/utils.js deleted file mode 100644 index 08295c7b..00000000 --- a/test/addresses/utils.js +++ /dev/null @@ -1,16 +0,0 @@ -'use strict' - -const Transport1 = require('libp2p-tcp') -const Transport2 = require('libp2p-websockets') -const mergeOptions = require('merge-options') -const baseOptions = require('../utils/base-options') - -module.exports.baseOptions = baseOptions - -const AddressesOptions = mergeOptions(baseOptions, { - modules: { - transport: [Transport1, Transport2] - } -}) - -module.exports.AddressesOptions = AddressesOptions diff --git a/test/addresses/utils.ts b/test/addresses/utils.ts new file mode 100644 index 00000000..ef8d95d5 --- /dev/null +++ b/test/addresses/utils.ts @@ -0,0 +1,10 @@ +import { TCP } from '@libp2p/tcp' +import { WebSockets } from 
'@libp2p/websockets' +import { createBaseOptions } from '../utils/base-options.js' + +export const AddressesOptions = createBaseOptions({ + transports: [ + new TCP(), + new WebSockets() + ] +}) diff --git a/test/configuration/protocol-prefix.node.js b/test/configuration/protocol-prefix.node.ts similarity index 61% rename from test/configuration/protocol-prefix.node.js rename to test/configuration/protocol-prefix.node.ts index aff8579e..3b9ab32c 100644 --- a/test/configuration/protocol-prefix.node.js +++ b/test/configuration/protocol-prefix.node.ts @@ -1,30 +1,30 @@ -'use strict' /* eslint-env mocha */ -const { expect } = require('aegir/utils/chai') -const mergeOptions = require('merge-options') - -const { create } = require('../../src') -const { baseOptions } = require('./utils') +import { expect } from 'aegir/utils/chai.js' +import mergeOptions from 'merge-options' +import { validateConfig } from '../../src/config.js' +import { createLibp2pNode, Libp2pNode } from '../../src/libp2p.js' +import { baseOptions } from './utils.js' describe('Protocol prefix is configurable', () => { - let libp2p + let libp2p: Libp2pNode afterEach(async () => { - libp2p && await libp2p.stop() + if (libp2p != null) { + await libp2p.stop() + } }) it('protocolPrefix is provided', async () => { const testProtocol = 'test-protocol' - libp2p = await create(mergeOptions(baseOptions, { - config: { - protocolPrefix: testProtocol - } + libp2p = await createLibp2pNode(mergeOptions(baseOptions, { + protocolPrefix: testProtocol })) await libp2p.start() const protocols = await libp2p.peerStore.protoBook.get(libp2p.peerId) expect(protocols).to.include.members([ + '/libp2p/fetch/0.0.1', '/libp2p/circuit/relay/0.1.0', `/${testProtocol}/id/1.0.0`, `/${testProtocol}/id/push/1.0.0`, @@ -33,7 +33,7 @@ describe('Protocol prefix is configurable', () => { }) it('protocolPrefix is not provided', async () => { - libp2p = await create(baseOptions) + libp2p = await createLibp2pNode(validateConfig(baseOptions)) await libp2p.start() const protocols = await libp2p.peerStore.protoBook.get(libp2p.peerId) diff --git a/test/configuration/pubsub.spec.js b/test/configuration/pubsub.spec.js deleted file mode 100644 index 46e6c8fb..00000000 --- a/test/configuration/pubsub.spec.js +++ /dev/null @@ -1,129 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const mergeOptions = require('merge-options') -const pDefer = require('p-defer') -const delay = require('delay') - -const { create } = require('../../src') -const { baseOptions, pubsubSubsystemOptions } = require('./utils') -const peerUtils = require('../utils/creators/peer') - -describe('Pubsub subsystem is configurable', () => { - let libp2p - - afterEach(async () => { - libp2p && await libp2p.stop() - }) - - it('should not exist if no module is provided', async () => { - libp2p = await create(baseOptions) - expect(libp2p.pubsub).to.not.exist() - }) - - it('should exist if the module is provided', async () => { - libp2p = await create(pubsubSubsystemOptions) - expect(libp2p.pubsub).to.exist() - }) - - it('should start and stop by default once libp2p starts', async () => { - const [peerId] = await peerUtils.createPeerId() - - const customOptions = mergeOptions(pubsubSubsystemOptions, { - peerId - }) - - libp2p = await create(customOptions) - expect(libp2p.pubsub.started).to.equal(false) - - await libp2p.start() - expect(libp2p.pubsub.started).to.equal(true) - - await libp2p.stop() - expect(libp2p.pubsub.started).to.equal(false) - }) - - it('should not 
start if disabled once libp2p starts', async () => { - const [peerId] = await peerUtils.createPeerId() - - const customOptions = mergeOptions(pubsubSubsystemOptions, { - peerId, - config: { - pubsub: { - enabled: false - } - } - }) - - libp2p = await create(customOptions) - expect(libp2p.pubsub.started).to.equal(false) - - await libp2p.start() - expect(libp2p.pubsub.started).to.equal(false) - }) - - it('should allow a manual start', async () => { - const [peerId] = await peerUtils.createPeerId() - - const customOptions = mergeOptions(pubsubSubsystemOptions, { - peerId, - config: { - pubsub: { - enabled: false - } - } - }) - - libp2p = await create(customOptions) - await libp2p.start() - expect(libp2p.pubsub.started).to.equal(false) - - await libp2p.pubsub.start() - expect(libp2p.pubsub.started).to.equal(true) - }) -}) - -describe('Pubsub subscription handlers adapter', () => { - let libp2p - - beforeEach(async () => { - const [peerId] = await peerUtils.createPeerId() - - libp2p = await create(mergeOptions(pubsubSubsystemOptions, { - peerId - })) - - await libp2p.start() - }) - - afterEach(async () => { - libp2p && await libp2p.stop() - }) - - it('extends pubsub with subscribe handler', async () => { - let countMessages = 0 - const topic = 'topic' - const defer = pDefer() - - const handler = () => { - countMessages++ - if (countMessages > 1) { - throw new Error('only one message should be received') - } - - defer.resolve() - } - - await libp2p.pubsub.subscribe(topic, handler) - - libp2p.pubsub.emit(topic, 'useless-data') - await defer.promise - - await libp2p.pubsub.unsubscribe(topic, handler) - libp2p.pubsub.emit(topic, 'useless-data') - - // wait to guarantee that the handler is not called twice - await delay(100) - }) -}) diff --git a/test/configuration/pubsub.spec.ts b/test/configuration/pubsub.spec.ts new file mode 100644 index 00000000..762a3fcc --- /dev/null +++ b/test/configuration/pubsub.spec.ts @@ -0,0 +1,106 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import mergeOptions from 'merge-options' +import pDefer from 'p-defer' +import delay from 'delay' +import { createLibp2p, Libp2p } from '../../src/index.js' +import { baseOptions, pubsubSubsystemOptions } from './utils.js' +import { createPeerId } from '../utils/creators/peer.js' +import { CustomEvent } from '@libp2p/interfaces' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { FloodSub } from '@libp2p/floodsub' +import type { PubSub } from '@libp2p/interfaces/pubsub' + +describe('Pubsub subsystem is configurable', () => { + let libp2p: Libp2p + + afterEach(async () => { + if (libp2p != null) { + await libp2p.stop() + } + }) + + it('should not exist if no module is provided', async () => { + libp2p = await createLibp2p(baseOptions) + expect(libp2p.pubsub).to.not.exist() + }) + + it('should exist if the module is provided', async () => { + libp2p = await createLibp2p(pubsubSubsystemOptions) + expect(libp2p.pubsub).to.exist() + }) + + it('should start and stop by default once libp2p starts', async () => { + const peerId = await createPeerId() + + const customOptions = mergeOptions(pubsubSubsystemOptions, { + peerId + }) + + libp2p = await createLibp2p(customOptions) + expect(libp2p.pubsub?.isStarted()).to.equal(false) + + await libp2p.start() + expect(libp2p.pubsub?.isStarted()).to.equal(true) + + await libp2p.stop() + expect(libp2p.pubsub?.isStarted()).to.equal(false) + }) +}) + +describe('Pubsub subscription handlers adapter', () => { + let libp2p: Libp2p + + 
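  // FloodSub is configured with emitSelf: true, so the node delivers its own published messages to local subscribers +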
beforeEach(async () => { + const peerId = await createPeerId() + + libp2p = await createLibp2p(mergeOptions(pubsubSubsystemOptions, { + peerId, + pubsub: new FloodSub({ + emitSelf: true + }) + })) + + await libp2p.start() + }) + + afterEach(async () => { + if (libp2p != null) { + await libp2p.stop() + } + }) + + it('extends pubsub with subscribe handler', async () => { + let countMessages = 0 + const topic = 'topic' + const defer = pDefer() + + const handler = () => { + countMessages++ + defer.resolve() + } + + const pubsub: PubSub | undefined = libp2p.pubsub + + if (pubsub == null) { + throw new Error('Pubsub was not enabled') + } + + pubsub.addEventListener(topic, handler) + pubsub.dispatchEvent(new CustomEvent(topic, { + detail: uint8ArrayFromString('useless-data') + })) + await defer.promise + + pubsub.removeEventListener(topic, handler) + pubsub.dispatchEvent(new CustomEvent(topic, { + detail: uint8ArrayFromString('useless-data') + })) + + // wait to guarantee that the handler is not called twice + await delay(100) + + expect(countMessages).to.equal(1) + }) +}) diff --git a/test/configuration/utils.js b/test/configuration/utils.js deleted file mode 100644 index 2e3ec538..00000000 --- a/test/configuration/utils.js +++ /dev/null @@ -1,52 +0,0 @@ -'use strict' - -const Pubsub = require('libp2p-interfaces/src/pubsub') -const { NOISE: Crypto } = require('@chainsafe/libp2p-noise') -const Muxer = require('libp2p-mplex') -const Transport = require('libp2p-websockets') -const filters = require('libp2p-websockets/src/filters') -const transportKey = Transport.prototype[Symbol.toStringTag] - -const { MULTIADDRS_WEBSOCKETS } = require('../fixtures/browser') -const relayAddr = MULTIADDRS_WEBSOCKETS[0] - -const mergeOptions = require('merge-options') - -const baseOptions = { - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } -} - -module.exports.baseOptions = baseOptions - -class MockPubsub extends Pubsub { - constructor (libp2p, options = {}) { - super({ - debugName: 'mock-pubsub', - multicodecs: '/mock-pubsub', - libp2p, - ...options - }) - } -} - -const pubsubSubsystemOptions = mergeOptions(baseOptions, { - modules: { - pubsub: MockPubsub - }, - addresses: { - listen: [`${relayAddr}/p2p-circuit`] - }, - config: { - transport: { - [transportKey]: { - filter: filters.all - } - } - } -}) - -module.exports.pubsubSubsystemOptions = pubsubSubsystemOptions diff --git a/test/configuration/utils.ts b/test/configuration/utils.ts new file mode 100644 index 00000000..3965d6b0 --- /dev/null +++ b/test/configuration/utils.ts @@ -0,0 +1,76 @@ +import { PubSubBaseProtocol } from '@libp2p/pubsub' +import { Plaintext } from '../../src/insecure/index.js' +import { Mplex } from '@libp2p/mplex' +import { WebSockets } from '@libp2p/websockets' +import * as filters from '@libp2p/websockets/filters' +import { MULTIADDRS_WEBSOCKETS } from '../fixtures/browser.js' +import mergeOptions from 'merge-options' +import type { Message, PubSubInit, PubSubRPC, PubSubRPCMessage } from '@libp2p/interfaces/pubsub' +import type { Libp2pInit, Libp2pOptions } from '../../src/index.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import * as cborg from 'cborg' +import { peerIdFromString } from '@libp2p/peer-id' + +const relayAddr = MULTIADDRS_WEBSOCKETS[0] + +export const baseOptions: Partial = { + peerId: peerIdFromString('12D3KooWJKCJW8Y26pRFNv78TCMGLNTfyN8oKaFswMRYXTzSbSst'), + transports: [new WebSockets()], + streamMuxers: [new Mplex()], + connectionEncryption: [new 
Plaintext()] +} + +class MockPubSub extends PubSubBaseProtocol { + constructor (init?: PubSubInit) { + super({ + multicodecs: ['/mock-pubsub'], + ...init + }) + } + + decodeRpc (bytes: Uint8Array): PubSubRPC { + return cborg.decode(bytes) + } + + encodeRpc (rpc: PubSubRPC): Uint8Array { + return cborg.encode(rpc) + } + + decodeMessage (bytes: Uint8Array): PubSubRPCMessage { + return cborg.decode(bytes) + } + + encodeMessage (rpc: PubSubRPCMessage): Uint8Array { + return cborg.encode(rpc) + } + + async publishMessage (from: PeerId, message: Message): Promise { + const peers = this.getSubscribers(message.topic) + + if (peers == null || peers.length === 0) { + return + } + + peers.forEach(id => { + if (this.components.getPeerId().equals(id)) { + return + } + + if (id.equals(from)) { + return + } + + this.send(id, { messages: [message] }) + }) + } +} + +export const pubsubSubsystemOptions: Libp2pOptions = mergeOptions(baseOptions, { + pubsub: new MockPubSub(), + addresses: { + listen: [`${relayAddr.toString()}/p2p-circuit`] + }, + transports: [ + new WebSockets({ filter: filters.all }) + ] +}) diff --git a/test/connection-manager/auto-dialler.spec.js b/test/connection-manager/auto-dialler.spec.js deleted file mode 100644 index 4b69adfd..00000000 --- a/test/connection-manager/auto-dialler.spec.js +++ /dev/null @@ -1,64 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') -const AutoDialler = require('../../src/connection-manager/auto-dialler') -const pWaitFor = require('p-wait-for') -const PeerId = require('peer-id') -const delay = require('delay') - -describe('Auto-dialler', () => { - let autoDialler - let libp2p - let options - - beforeEach(async () => { - libp2p = {} - options = {} - autoDialler = new AutoDialler(libp2p, options) - }) - - afterEach(async () => { - sinon.restore() - }) - - it('should not dial self', async () => { - // peers with protocols are dialled before peers without protocols - const self = { - id: await PeerId.create(), - protocols: [ - '/foo/bar' - ] - } - const other = { - id: await PeerId.create(), - protocols: [] - } - - autoDialler._options.minConnections = 10 - libp2p.peerId = self.id - libp2p.connections = { - size: 1 - } - libp2p.peerStore = { - getPeers: sinon.stub().returns([self, other]) - } - libp2p.connectionManager = { - get: () => {} - } - libp2p.dialer = { - connectToPeer: sinon.stub().resolves() - } - - await autoDialler.start() - - await pWaitFor(() => libp2p.dialer.connectToPeer.callCount === 1) - await delay(1000) - - await autoDialler.stop() - - expect(libp2p.dialer.connectToPeer.callCount).to.equal(1) - expect(libp2p.dialer.connectToPeer.calledWith(self.id)).to.be.false() - }) -}) diff --git a/test/connection-manager/auto-dialler.spec.ts b/test/connection-manager/auto-dialler.spec.ts new file mode 100644 index 00000000..0ed22920 --- /dev/null +++ b/test/connection-manager/auto-dialler.spec.ts @@ -0,0 +1,61 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import { AutoDialler } from '../../src/connection-manager/auto-dialler.js' +import pWaitFor from 'p-wait-for' +import delay from 'delay' +import { createEd25519PeerId } from '@libp2p/peer-id-factory' +import { Components } from '@libp2p/interfaces/components' +import { stubInterface } from 'ts-sinon' +import type { ConnectionManager } from '@libp2p/interfaces/registrar' +import type { PeerStore, Peer } from '@libp2p/interfaces/peer-store' +import type { Dialer } from '@libp2p/interfaces/dialer' + 
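+// These tests drive the auto-dialler with stubbed PeerStore, ConnectionManager and Dialer components, so no real network connections are made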
+describe('Auto-dialler', () => { + it('should not dial self', async () => { + // peers with protocols are dialled before peers without protocols + const self: Peer = { + id: await createEd25519PeerId(), + protocols: [ + '/foo/bar' + ], + addresses: [], + metadata: new Map() + } + const other: Peer = { + id: await createEd25519PeerId(), + protocols: [], + addresses: [], + metadata: new Map() + } + + const peerStore = stubInterface() + + peerStore.all.returns(Promise.resolve([ + self, other + ])) + + const connectionManager = stubInterface() + connectionManager.getConnectionList.returns([]) + const dialer = stubInterface() + + const autoDialler = new AutoDialler(new Components({ + peerId: self.id, + peerStore, + connectionManager, + dialer + }), { + minConnections: 10 + }) + + await autoDialler.start() + + await pWaitFor(() => dialer.dial.callCount === 1) + await delay(1000) + + await autoDialler.stop() + + expect(dialer.dial.callCount).to.equal(1) + expect(dialer.dial.calledWith(self.id)).to.be.false() + }) +}) diff --git a/test/connection-manager/index.node.js b/test/connection-manager/index.node.ts similarity index 53% rename from test/connection-manager/index.node.js rename to test/connection-manager/index.node.ts index 19f59bff..6eaacf87 100644 --- a/test/connection-manager/index.node.js +++ b/test/connection-manager/index.node.ts @@ -1,153 +1,184 @@ -'use strict' /* eslint-env mocha */ -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') -const { CLOSED } = require('libp2p-interfaces/src/connection/status') - -const delay = require('delay') -const pWaitFor = require('p-wait-for') -const peerUtils = require('../utils/creators/peer') -const mockConnection = require('../utils/mockConnection') -const baseOptions = require('../utils/base-options.browser') -const { codes } = require('../../src/errors') -const { Multiaddr } = require('multiaddr') +import { expect } from 'aegir/utils/chai.js' +import { createNode, createPeerId } from '../utils/creators/peer.js' +import { mockConnection, mockDuplex, mockMultiaddrConnection, mockUpgrader } from '@libp2p/interface-compliance-tests/mocks' +import { createBaseOptions } from '../utils/base-options.browser.js' +import type { Libp2p } from '../../src/index.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { DefaultConnectionManager } from '../../src/connection-manager/index.js' +import { Components } from '@libp2p/interfaces/components' +import { CustomEvent } from '@libp2p/interfaces' +import * as STATUS from '@libp2p/interfaces/connection/status' +import { stubInterface } from 'ts-sinon' +import type { KeyBook, PeerStore } from '@libp2p/interfaces/peer-store' +import sinon from 'sinon' +import pWaitFor from 'p-wait-for' +import type { Connection } from '@libp2p/interfaces/connection' +import delay from 'delay' +import type { Libp2pNode } from '../../src/libp2p.js' +import { codes } from '../../src/errors.js' describe('Connection Manager', () => { - let libp2p - let peerIds + let libp2p: Libp2p + let peerIds: PeerId[] before(async () => { - peerIds = await peerUtils.createPeerId({ number: 2 }) + peerIds = await Promise.all([ + createPeerId(), + createPeerId() + ]) }) beforeEach(async () => { - [libp2p] = await peerUtils.createPeer({ - config: { + libp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[0], addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] - }, - modules: baseOptions.modules - } + } + }) }) }) - afterEach(() => libp2p.stop()) + afterEach(async () => { + await 
libp2p.stop() + }) it('should filter connections on disconnect, removing the closed one', async () => { - const conn1 = await mockConnection({ localPeer: peerIds[0], remotePeer: peerIds[1] }) - const conn2 = await mockConnection({ localPeer: peerIds[0], remotePeer: peerIds[1] }) + const upgrader = mockUpgrader() + const peerStore = stubInterface() + peerStore.keyBook = stubInterface() - const id = peerIds[1].toB58String() + const connectionManager = new DefaultConnectionManager(new Components({ upgrader, peerStore })) + + await connectionManager.start() + + const conn1 = await mockConnection(mockMultiaddrConnection(mockDuplex(), peerIds[1])) + const conn2 = await mockConnection(mockMultiaddrConnection(mockDuplex(), peerIds[1])) + + expect(connectionManager.getConnections(peerIds[1])).to.have.lengthOf(0) // Add connection to the connectionManager - libp2p.connectionManager.onConnect(conn1) - libp2p.connectionManager.onConnect(conn2) + upgrader.dispatchEvent(new CustomEvent('connection', { detail: conn1 })) + upgrader.dispatchEvent(new CustomEvent('connection', { detail: conn2 })) - expect(libp2p.connectionManager.connections.get(id).length).to.eql(2) + expect(connectionManager.getConnections(peerIds[1])).to.have.lengthOf(2) - conn2._stat.status = 'closed' - libp2p.connectionManager.onDisconnect(conn2) + await conn2.close() + upgrader.dispatchEvent(new CustomEvent('connectionEnd', { detail: conn2 })) - const peerConnections = libp2p.connectionManager.connections.get(id) - expect(peerConnections.length).to.eql(1) - expect(peerConnections[0]._stat.status).to.eql('open') + expect(connectionManager.getConnections(peerIds[1])).to.have.lengthOf(1) + + expect(conn1).to.have.nested.property('stat.status', STATUS.OPEN) + + await connectionManager.stop() }) - it('should add connection on dial and remove on node stop', async () => { - const [remoteLibp2p] = await peerUtils.createPeer({ - config: { - peerId: peerIds[1], - addresses: { - listen: ['/ip4/127.0.0.1/tcp/15003/ws'] - }, - modules: baseOptions.modules - } - }) + it('should close connections on stop', async () => { + const upgrader = mockUpgrader() + const peerStore = stubInterface() + peerStore.keyBook = stubInterface() - // Spy on emit for easy verification - sinon.spy(libp2p.connectionManager, 'emit') - sinon.spy(remoteLibp2p.connectionManager, 'emit') + const connectionManager = new DefaultConnectionManager(new Components({ upgrader, peerStore })) - await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.multiaddrs) - await libp2p.dial(remoteLibp2p.peerId) + await connectionManager.start() - // check connect event - expect(libp2p.connectionManager.emit.callCount).to.equal(1) - const [event, connection] = libp2p.connectionManager.emit.getCall(0).args - expect(event).to.equal('peer:connect') - expect(connection.remotePeer.equals(remoteLibp2p.peerId)).to.equal(true) + const conn1 = await mockConnection(mockMultiaddrConnection(mockDuplex(), peerIds[1])) + const conn2 = await mockConnection(mockMultiaddrConnection(mockDuplex(), peerIds[1])) - const libp2pConn = libp2p.connectionManager.get(remoteLibp2p.peerId) - expect(libp2pConn).to.exist() + // Add connection to the connectionManager + upgrader.dispatchEvent(new CustomEvent('connection', { detail: conn1 })) + upgrader.dispatchEvent(new CustomEvent('connection', { detail: conn2 })) - const remoteConn = remoteLibp2p.connectionManager.get(libp2p.peerId) - expect(remoteConn).to.exist() + expect(connectionManager.getConnections(peerIds[1])).to.have.lengthOf(2) - await 
remoteLibp2p.stop() - expect(remoteLibp2p.connectionManager.size).to.eql(0) + await connectionManager.stop() + + expect(connectionManager.getConnections(peerIds[1])).to.have.lengthOf(0) }) }) describe('libp2p.connections', () => { - let peerIds + let peerIds: PeerId[] + let libp2p: Libp2p before(async () => { - peerIds = await peerUtils.createPeerId({ number: 2 }) + peerIds = await Promise.all([ + createPeerId(), + createPeerId() + ]) + }) + + afterEach(async () => { + if (libp2p != null) { + await libp2p.stop() + } }) it('libp2p.connections gets the connectionManager conns', async () => { - const [libp2p] = await peerUtils.createPeer({ - config: { + libp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[0], addresses: { listen: ['/ip4/127.0.0.1/tcp/15003/ws'] - }, - modules: baseOptions.modules - } + } + }) }) - const [remoteLibp2p] = await peerUtils.createPeer({ - config: { + const remoteLibp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[1], addresses: { listen: ['/ip4/127.0.0.1/tcp/15004/ws'] - }, - modules: baseOptions.modules - } + } + }) }) - await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.multiaddrs) - await libp2p.dial(remoteLibp2p.peerId) + await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.getMultiaddrs()) + const conn = await libp2p.dial(remoteLibp2p.peerId) - expect(libp2p.connections.size).to.eql(1) + expect(conn).to.be.ok() + expect(libp2p.getConnections()).to.have.lengthOf(1) await libp2p.stop() await remoteLibp2p.stop() }) describe('proactive connections', () => { - let nodes = [] + let libp2p: Libp2pNode + let nodes: Libp2p[] = [] beforeEach(async () => { - nodes = await peerUtils.createPeer({ - number: 2, - config: { - addresses: { - listen: ['/ip4/127.0.0.1/tcp/0/ws'] + nodes = await Promise.all([ + createNode({ + config: { + addresses: { + listen: ['/ip4/127.0.0.1/tcp/0/ws'] + } } - } - }) + }), + createNode({ + config: { + addresses: { + listen: ['/ip4/127.0.0.1/tcp/0/ws'] + } + } + }) + ]) }) afterEach(async () => { await Promise.all(nodes.map((node) => node.stop())) + + if (libp2p != null) { + await libp2p.stop() + } + sinon.reset() }) it('should connect to all the peers stored in the PeerStore, if their number is below minConnections', async () => { - const [libp2p] = await peerUtils.createPeer({ - fixture: false, + libp2p = await createNode({ started: false, config: { addresses: { @@ -160,76 +191,77 @@ describe('libp2p.connections', () => { }) // Populate PeerStore before starting - await libp2p.peerStore.addressBook.set(nodes[0].peerId, nodes[0].multiaddrs) - await libp2p.peerStore.addressBook.set(nodes[1].peerId, nodes[1].multiaddrs) + await libp2p.peerStore.addressBook.set(nodes[0].peerId, nodes[0].getMultiaddrs()) + await libp2p.peerStore.addressBook.set(nodes[1].peerId, nodes[1].getMultiaddrs()) await libp2p.start() // Wait for peers to connect - await pWaitFor(() => libp2p.connectionManager.size === 2) + await pWaitFor(() => libp2p.getConnections().length === 2) await libp2p.stop() }) it('should connect to all the peers stored in the PeerStore until reaching the minConnections', async () => { const minConnections = 1 - const [libp2p] = await peerUtils.createPeer({ - fixture: false, + libp2p = await createNode({ started: false, config: { addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] }, connectionManager: { - minConnections + minConnections, + maxConnections: 1 } } }) // Populate PeerStore before starting - await libp2p.peerStore.addressBook.set(nodes[0].peerId, 
nodes[0].multiaddrs) - await libp2p.peerStore.addressBook.set(nodes[1].peerId, nodes[1].multiaddrs) + await libp2p.peerStore.addressBook.set(nodes[0].peerId, nodes[0].getMultiaddrs()) + await libp2p.peerStore.addressBook.set(nodes[1].peerId, nodes[1].getMultiaddrs()) await libp2p.start() // Wait for peer to connect - await pWaitFor(() => libp2p.connectionManager.size === minConnections) + await pWaitFor(() => libp2p.components.getConnectionManager().getConnectionMap().size === minConnections) // Wait more time to guarantee no other connection happened await delay(200) - expect(libp2p.connectionManager.size).to.eql(minConnections) + expect(libp2p.components.getConnectionManager().getConnectionMap().size).to.eql(minConnections) await libp2p.stop() }) - it('should connect to all the peers stored in the PeerStore until reaching the minConnections sorted', async () => { + // flaky + it.skip('should connect to all the peers stored in the PeerStore until reaching the minConnections sorted', async () => { const minConnections = 1 - const [libp2p] = await peerUtils.createPeer({ - fixture: false, + libp2p = await createNode({ started: false, config: { addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] }, connectionManager: { - minConnections + minConnections, + maxConnections: 1 } } }) // Populate PeerStore before starting - await libp2p.peerStore.addressBook.set(nodes[0].peerId, nodes[0].multiaddrs) - await libp2p.peerStore.addressBook.set(nodes[1].peerId, nodes[1].multiaddrs) + await libp2p.peerStore.addressBook.set(nodes[0].peerId, nodes[0].getMultiaddrs()) + await libp2p.peerStore.addressBook.set(nodes[1].peerId, nodes[1].getMultiaddrs()) await libp2p.peerStore.protoBook.set(nodes[1].peerId, ['/protocol-min-conns']) await libp2p.start() // Wait for peer to connect - await pWaitFor(() => libp2p.connectionManager.size === minConnections) + await pWaitFor(() => libp2p.components.getConnectionManager().getConnectionMap().size === minConnections) // Should have connected to the peer with protocols - expect(libp2p.connectionManager.get(nodes[0].peerId)).to.not.exist() - expect(libp2p.connectionManager.get(nodes[1].peerId)).to.exist() + expect(libp2p.components.getConnectionManager().getConnection(nodes[0].peerId)).to.not.exist() + expect(libp2p.components.getConnectionManager().getConnection(nodes[1].peerId)).to.exist() await libp2p.stop() }) @@ -238,8 +270,7 @@ describe('libp2p.connections', () => { const minConnections = 1 const autoDialInterval = 1000 - const [libp2p] = await peerUtils.createPeer({ - fixture: false, + libp2p = await createNode({ config: { addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] @@ -252,96 +283,97 @@ describe('libp2p.connections', () => { }) // Populate PeerStore after starting (discovery) - await libp2p.peerStore.addressBook.set(nodes[0].peerId, nodes[0].multiaddrs) + await libp2p.peerStore.addressBook.set(nodes[0].peerId, nodes[0].getMultiaddrs()) // Wait for peer to connect const conn = await libp2p.dial(nodes[0].peerId) - expect(libp2p.connectionManager.get(nodes[0].peerId)).to.exist() + expect(libp2p.components.getConnectionManager().getConnection(nodes[0].peerId)).to.exist() await conn.close() // Closed - await pWaitFor(() => libp2p.connectionManager.size === 0) + await pWaitFor(() => libp2p.components.getConnectionManager().getConnectionMap().size === 0) // Connected - await pWaitFor(() => libp2p.connectionManager.size === 1) + await pWaitFor(() => libp2p.components.getConnectionManager().getConnectionMap().size === 1) - 
expect(libp2p.connectionManager.get(nodes[0].peerId)).to.exist() + expect(libp2p.components.getConnectionManager().getConnection(nodes[0].peerId)).to.exist() await libp2p.stop() }) it('should be closed status once immediately stopping', async () => { - const [libp2p] = await peerUtils.createPeer({ - config: { + libp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[0], addresses: { listen: ['/ip4/127.0.0.1/tcp/15003/ws'] - }, - modules: baseOptions.modules - } + } + }) }) - const [remoteLibp2p] = await peerUtils.createPeer({ - config: { + const remoteLibp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[1], addresses: { listen: ['/ip4/127.0.0.1/tcp/15004/ws'] - }, - modules: baseOptions.modules - } + } + }) }) - await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.multiaddrs) + await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.getMultiaddrs()) await libp2p.dial(remoteLibp2p.peerId) - const totalConns = Array.from(libp2p.connections.values()) + const totalConns = Array.from(libp2p.components.getConnectionManager().getConnectionMap().values()) expect(totalConns.length).to.eql(1) const conns = totalConns[0] expect(conns.length).to.eql(1) const conn = conns[0] await libp2p.stop() - expect(conn.stat.status).to.eql(CLOSED) + expect(conn.stat.status).to.eql(STATUS.CLOSED) await remoteLibp2p.stop() }) }) describe('connection gater', () => { - let libp2p - let remoteLibp2p + let libp2p: Libp2pNode + let remoteLibp2p: Libp2pNode beforeEach(async () => { - [remoteLibp2p] = await peerUtils.createPeer({ - config: { + remoteLibp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[1], addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] - }, - modules: baseOptions.modules - } + } + }) }) }) afterEach(async () => { - remoteLibp2p && await remoteLibp2p.stop() - libp2p && await libp2p.stop() + if (remoteLibp2p != null) { + await remoteLibp2p.stop() + } + + if (libp2p != null) { + await libp2p.stop() + } }) it('intercept peer dial', async () => { const denyDialPeer = sinon.stub().returns(true) - ;[libp2p] = await peerUtils.createPeer({ - config: { + libp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[0], addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] }, - modules: baseOptions.modules, connectionGater: { denyDialPeer } - } + }) }) - await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.multiaddrs) + await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.getMultiaddrs()) await expect(libp2p.dial(remoteLibp2p.peerId)) .to.eventually.be.rejected().with.property('code', codes.ERR_PEER_DIAL_INTERCEPTED) @@ -350,48 +382,43 @@ describe('libp2p.connections', () => { it('intercept addr dial', async () => { const denyDialMultiaddr = sinon.stub().returns(false) - ;[libp2p] = await peerUtils.createPeer({ - config: { + libp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[0], addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] }, - modules: baseOptions.modules, connectionGater: { denyDialMultiaddr } - } + }) }) - await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.multiaddrs) - await libp2p.dialer.connectToPeer(remoteLibp2p.peerId) + await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.getMultiaddrs()) + await libp2p.components.getDialer().dial(remoteLibp2p.peerId) - const peerIdMultiaddr = new Multiaddr(`/p2p/${remoteLibp2p.peerId}`) - - for (const multiaddr of remoteLibp2p.multiaddrs) { - 
expect(denyDialMultiaddr.calledWith(remoteLibp2p.peerId, multiaddr.encapsulate(peerIdMultiaddr))).to.be.true() + for (const multiaddr of remoteLibp2p.getMultiaddrs()) { + expect(denyDialMultiaddr.calledWith(remoteLibp2p.peerId, multiaddr)).to.be.true() } }) it('intercept multiaddr store during multiaddr dial', async () => { const filterMultiaddrForPeer = sinon.stub().returns(true) - ;[libp2p] = await peerUtils.createPeer({ - config: { + libp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[0], addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] }, - modules: baseOptions.modules, connectionGater: { filterMultiaddrForPeer } - } + }) }) - const peerIdMultiaddr = new Multiaddr(`/p2p/${remoteLibp2p.peerId}`) - const fullMultiaddr = remoteLibp2p.multiaddrs[0].encapsulate(peerIdMultiaddr) + const fullMultiaddr = remoteLibp2p.getMultiaddrs()[0] - await libp2p.dialer.connectToPeer(fullMultiaddr) + await libp2p.components.getDialer().dial(fullMultiaddr) expect(filterMultiaddrForPeer.callCount).to.equal(2) @@ -403,19 +430,18 @@ describe('libp2p.connections', () => { it('intercept accept inbound connection', async () => { const denyInboundConnection = sinon.stub().returns(false) - ;[libp2p] = await peerUtils.createPeer({ - config: { + libp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[0], addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] }, - modules: baseOptions.modules, connectionGater: { denyInboundConnection } - } + }) }) - await remoteLibp2p.peerStore.addressBook.set(libp2p.peerId, libp2p.multiaddrs) + await remoteLibp2p.peerStore.addressBook.set(libp2p.peerId, libp2p.getMultiaddrs()) await remoteLibp2p.dial(libp2p.peerId) expect(denyInboundConnection.called).to.be.true() @@ -424,19 +450,18 @@ describe('libp2p.connections', () => { it('intercept accept outbound connection', async () => { const denyOutboundConnection = sinon.stub().returns(false) - ;[libp2p] = await peerUtils.createPeer({ - config: { + libp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[0], addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] }, - modules: baseOptions.modules, connectionGater: { denyOutboundConnection } - } + }) }) - await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.multiaddrs) + await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.getMultiaddrs()) await libp2p.dial(remoteLibp2p.peerId) expect(denyOutboundConnection.called).to.be.true() @@ -445,89 +470,85 @@ describe('libp2p.connections', () => { it('intercept inbound encrypted', async () => { const denyInboundEncryptedConnection = sinon.stub().returns(false) - ;[libp2p] = await peerUtils.createPeer({ - config: { + libp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[0], addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] }, - modules: baseOptions.modules, connectionGater: { denyInboundEncryptedConnection } - } + }) }) - await remoteLibp2p.peerStore.addressBook.set(libp2p.peerId, libp2p.multiaddrs) + await remoteLibp2p.peerStore.addressBook.set(libp2p.peerId, libp2p.getMultiaddrs()) await remoteLibp2p.dial(libp2p.peerId) expect(denyInboundEncryptedConnection.called).to.be.true() - expect(denyInboundEncryptedConnection.getCall(0)).to.have.nested.property('args[0].id').that.equalBytes(remoteLibp2p.peerId.id) + expect(denyInboundEncryptedConnection.getCall(0)).to.have.nested.property('args[0].multihash.digest').that.equalBytes(remoteLibp2p.peerId.multihash.digest) }) it('intercept outbound encrypted', async () => { const 
denyOutboundEncryptedConnection = sinon.stub().returns(false) - ;[libp2p] = await peerUtils.createPeer({ - config: { + libp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[0], addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] }, - modules: baseOptions.modules, connectionGater: { denyOutboundEncryptedConnection } - } + }) }) - await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.multiaddrs) + await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.getMultiaddrs()) await libp2p.dial(remoteLibp2p.peerId) expect(denyOutboundEncryptedConnection.called).to.be.true() - expect(denyOutboundEncryptedConnection.getCall(0)).to.have.nested.property('args[0].id').that.equalBytes(remoteLibp2p.peerId.id) + expect(denyOutboundEncryptedConnection.getCall(0)).to.have.nested.property('args[0].multihash.digest').that.equalBytes(remoteLibp2p.peerId.multihash.digest) }) it('intercept inbound upgraded', async () => { const denyInboundUpgradedConnection = sinon.stub().returns(false) - ;[libp2p] = await peerUtils.createPeer({ - config: { + libp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[0], addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] }, - modules: baseOptions.modules, connectionGater: { denyInboundUpgradedConnection } - } + }) }) - await remoteLibp2p.peerStore.addressBook.set(libp2p.peerId, libp2p.multiaddrs) + await remoteLibp2p.peerStore.addressBook.set(libp2p.peerId, libp2p.getMultiaddrs()) await remoteLibp2p.dial(libp2p.peerId) expect(denyInboundUpgradedConnection.called).to.be.true() - expect(denyInboundUpgradedConnection.getCall(0)).to.have.nested.property('args[0].id').that.equalBytes(remoteLibp2p.peerId.id) + expect(denyInboundUpgradedConnection.getCall(0)).to.have.nested.property('args[0].multihash.digest').that.equalBytes(remoteLibp2p.peerId.multihash.digest) }) it('intercept outbound upgraded', async () => { const denyOutboundUpgradedConnection = sinon.stub().returns(false) - ;[libp2p] = await peerUtils.createPeer({ - config: { + libp2p = await createNode({ + config: createBaseOptions({ peerId: peerIds[0], addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] }, - modules: baseOptions.modules, connectionGater: { denyOutboundUpgradedConnection } - } + }) }) - await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.multiaddrs) + await libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.getMultiaddrs()) await libp2p.dial(remoteLibp2p.peerId) expect(denyOutboundUpgradedConnection.called).to.be.true() - expect(denyOutboundUpgradedConnection.getCall(0)).to.have.nested.property('args[0].id').that.equalBytes(remoteLibp2p.peerId.id) + expect(denyOutboundUpgradedConnection.getCall(0)).to.have.nested.property('args[0].multihash.digest').that.equalBytes(remoteLibp2p.peerId.multihash.digest) }) }) }) diff --git a/test/connection-manager/index.spec.js b/test/connection-manager/index.spec.js deleted file mode 100644 index d400f54b..00000000 --- a/test/connection-manager/index.spec.js +++ /dev/null @@ -1,132 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') - -const peerUtils = require('../utils/creators/peer') -const mockConnection = require('../utils/mockConnection') -const baseOptions = require('../utils/base-options.browser') - -describe('Connection Manager', () => { - let libp2p - - afterEach(async () => { - sinon.restore() - libp2p && await libp2p.stop() - }) - - it('should be able to create without metrics', async () => { - 
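The gater hunks boil down to one configuration change: `connectionGater` now sits directly in the options passed to `createBaseOptions`. Below is a condensed sketch of the `denyDialPeer` case; the other hooks named in these tests (`denyDialMultiaddr`, `denyInboundConnection`, `denyOutboundUpgradedConnection`, and so on) plug in the same way. Import paths are assumed from the surrounding specs.

```ts
import sinon from 'sinon'
import { expect } from 'aegir/utils/chai.js'
import { createNode } from '../utils/creators/peer.js'
import { createBaseOptions } from '../utils/base-options.browser.js'
import { codes } from '../../src/errors.js'

// always refuse to dial this peer; returning false would allow the dial
const denyDialPeer = sinon.stub().returns(true)

const libp2p = await createNode({
  config: createBaseOptions({
    addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] },
    connectionGater: { denyDialPeer }
  })
})
const remote = await createNode({
  config: createBaseOptions({
    addresses: { listen: ['/ip4/127.0.0.1/tcp/0/ws'] }
  })
})

await libp2p.peerStore.addressBook.set(remote.peerId, remote.getMultiaddrs())

// the gated dial rejects with the dedicated error code
await expect(libp2p.dial(remote.peerId))
  .to.eventually.be.rejected().with.property('code', codes.ERR_PEER_DIAL_INTERCEPTED)

await libp2p.stop()
await remote.stop()
```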
[libp2p] = await peerUtils.createPeer({ - config: { - modules: baseOptions.modules - }, - started: false - }) - - const spy = sinon.spy(libp2p.connectionManager, 'start') - - await libp2p.start() - expect(spy).to.have.property('callCount', 1) - expect(libp2p.connectionManager._metrics).to.not.exist() - }) - - it('should be able to create with metrics', async () => { - [libp2p] = await peerUtils.createPeer({ - config: { - modules: baseOptions.modules, - metrics: { - enabled: true - } - }, - started: false - }) - - const spy = sinon.spy(libp2p.connectionManager, 'start') - - await libp2p.start() - expect(spy).to.have.property('callCount', 1) - expect(libp2p.connectionManager._libp2p.metrics).to.exist() - }) - - it('should close lowest value peer connection when the maximum has been reached', async () => { - const max = 5 - ;[libp2p] = await peerUtils.createPeer({ - config: { - modules: baseOptions.modules, - connectionManager: { - maxConnections: max, - minConnections: 2 - } - }, - started: false - }) - - await libp2p.start() - - sinon.spy(libp2p.connectionManager, '_maybeDisconnectOne') - - // Add 1 too many connections - const spies = new Map() - await Promise.all([...new Array(max + 1)].map(async (_, index) => { - const connection = await mockConnection() - const spy = sinon.spy(connection, 'close') - // The connections have the same remote id, give them random ones - // so that we can verify the correct connection was closed - sinon.stub(connection.remotePeer, 'toB58String').returns(index) - const value = Math.random() - spies.set(value, spy) - libp2p.connectionManager.setPeerValue(connection.remotePeer, value) - await libp2p.connectionManager.onConnect(connection) - })) - - // get the lowest value - const lowest = Array.from(spies.keys()).sort()[0] - const lowestSpy = spies.get(lowest) - - expect(libp2p.connectionManager._maybeDisconnectOne).to.have.property('callCount', 1) - expect(lowestSpy).to.have.property('callCount', 1) - }) - - it('should close connection when the maximum has been reached even without peer values', async () => { - const max = 5 - ;[libp2p] = await peerUtils.createPeer({ - config: { - modules: baseOptions.modules, - connectionManager: { - maxConnections: max, - minConnections: 0 - } - }, - started: false - }) - - await libp2p.start() - - sinon.spy(libp2p.connectionManager, '_maybeDisconnectOne') - - // Add 1 too many connections - const spy = sinon.spy() - await Promise.all([...new Array(max + 1)].map(async () => { - const connection = await mockConnection() - sinon.stub(connection, 'close').callsFake(async () => spy()) // eslint-disable-line - await libp2p.connectionManager.onConnect(connection) - })) - - expect(libp2p.connectionManager._maybeDisconnectOne).to.have.property('callCount', 1) - expect(spy).to.have.property('callCount', 1) - }) - - it('should fail if the connection manager has mismatched connection limit options', async () => { - await expect(peerUtils.createPeer({ - config: { - modules: baseOptions.modules, - connectionManager: { - maxConnections: 5, - minConnections: 6 - } - }, - started: false - })).to.eventually.rejected('maxConnections must be greater') - }) -}) diff --git a/test/connection-manager/index.spec.ts b/test/connection-manager/index.spec.ts new file mode 100644 index 00000000..6f6dba91 --- /dev/null +++ b/test/connection-manager/index.spec.ts @@ -0,0 +1,143 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import { createNode } from '../utils/creators/peer.js' +import { 
createBaseOptions } from '../utils/base-options.browser.js' +import type { Libp2pNode } from '../../src/libp2p.js' +import type { DefaultConnectionManager } from '../../src/connection-manager/index.js' +import { mockConnection, mockDuplex, mockMultiaddrConnection } from '@libp2p/interface-compliance-tests/mocks' +import { createEd25519PeerId } from '@libp2p/peer-id-factory' +import { CustomEvent } from '@libp2p/interfaces' + +describe('Connection Manager', () => { + let libp2p: Libp2pNode + + afterEach(async () => { + sinon.restore() + + if (libp2p != null) { + await libp2p.stop() + } + }) + + it('should be able to create without metrics', async () => { + libp2p = await createNode({ + config: createBaseOptions(), + started: false + }) + + const spy = sinon.spy(libp2p.components.getConnectionManager() as DefaultConnectionManager, 'start') + + await libp2p.start() + expect(spy).to.have.property('callCount', 1) + expect(libp2p.components.getMetrics()).to.not.exist() + }) + + it('should be able to create with metrics', async () => { + libp2p = await createNode({ + config: createBaseOptions({ + metrics: { + enabled: true + } + }), + started: false + }) + + const spy = sinon.spy(libp2p.components.getConnectionManager() as DefaultConnectionManager, 'start') + + await libp2p.start() + expect(spy).to.have.property('callCount', 1) + expect(libp2p.components.getMetrics()).to.exist() + }) + + it('should close lowest value peer connection when the maximum has been reached', async () => { + const max = 5 + libp2p = await createNode({ + config: createBaseOptions({ + connectionManager: { + maxConnections: max, + minConnections: 2 + } + }), + started: false + }) + + await libp2p.start() + + const connectionManager = libp2p.components.getConnectionManager() as DefaultConnectionManager + const connectionManagerMaybeDisconnectOneSpy = sinon.spy(connectionManager, '_maybeDisconnectOne') + + // Add 1 too many connections + const spies = new Map>>() + await Promise.all([...new Array(max + 1)].map(async (_, index) => { + const connection = mockConnection(mockMultiaddrConnection(mockDuplex(), await createEd25519PeerId())) + const spy = sinon.spy(connection, 'close') + // The connections have the same remote id, give them random ones + // so that we can verify the correct connection was closed + // sinon.stub(connection.remotePeer, 'toString').returns(index) + const value = Math.random() + spies.set(value, spy) + connectionManager.setPeerValue(connection.remotePeer, value) + await connectionManager.onConnect(new CustomEvent('connection', { detail: connection })) + })) + + // get the lowest value + const lowest = Array.from(spies.keys()).sort((a, b) => { + if (a > b) { + return 1 + } + + if (a < b) { + return -1 + } + + return 0 + })[0] + const lowestSpy = spies.get(lowest) + + expect(connectionManagerMaybeDisconnectOneSpy.callCount).to.equal(1) + expect(lowestSpy).to.have.property('callCount', 1) + }) + + it('should close connection when the maximum has been reached even without peer values', async () => { + const max = 5 + libp2p = await createNode({ + config: createBaseOptions({ + connectionManager: { + maxConnections: max, + minConnections: 0 + } + }), + started: false + }) + + await libp2p.start() + + const connectionManager = libp2p.components.getConnectionManager() as DefaultConnectionManager + const connectionManagerMaybeDisconnectOneSpy = sinon.spy(connectionManager, '_maybeDisconnectOne') + + // Add 1 too many connections + const spy = sinon.spy() + await Promise.all([...new Array(max + 1)].map(async () => 
{ + const connection = mockConnection(mockMultiaddrConnection(mockDuplex(), await createEd25519PeerId())) + sinon.stub(connection, 'close').callsFake(async () => spy()) // eslint-disable-line + await connectionManager.onConnect(new CustomEvent('connection', { detail: connection })) + })) + + expect(connectionManagerMaybeDisconnectOneSpy.callCount).to.equal(1) + expect(spy).to.have.property('callCount', 1) + }) + + it('should fail if the connection manager has mismatched connection limit options', async () => { + await expect(createNode({ + config: createBaseOptions({ + connectionManager: { + maxConnections: 5, + minConnections: 6 + } + }), + started: false + })).to.eventually.rejected('maxConnections must be greater') + }) +}) diff --git a/test/content-routing/content-routing.node.js b/test/content-routing/content-routing.node.ts similarity index 56% rename from test/content-routing/content-routing.node.js rename to test/content-routing/content-routing.node.ts index 694d71cf..25f429dc 100644 --- a/test/content-routing/content-routing.node.js +++ b/test/content-routing/content-routing.node.ts @@ -1,30 +1,29 @@ -'use strict' /* eslint-env mocha */ -const { expect } = require('aegir/utils/chai') -const nock = require('nock') -const sinon = require('sinon') - -const pDefer = require('p-defer') -const mergeOptions = require('merge-options') - -const { CID } = require('multiformats/cid') -const ipfsHttpClient = require('ipfs-http-client') -const DelegatedContentRouter = require('libp2p-delegated-content-routing') -const { Multiaddr } = require('multiaddr') -const drain = require('it-drain') -const all = require('it-all') - -const peerUtils = require('../utils/creators/peer') -const { baseOptions, routingOptions } = require('./utils') +import { expect } from 'aegir/utils/chai.js' +import nock from 'nock' +import sinon from 'sinon' +import pDefer from 'p-defer' +import { CID } from 'multiformats/cid' +import { create as createIpfsHttpClient } from 'ipfs-http-client' +import { DelegatedContentRouting } from '@libp2p/delegated-content-routing' +import { Multiaddr } from '@multiformats/multiaddr' +import drain from 'it-drain' +import all from 'it-all' +import { createNode, createPeerId, populateAddressBooks } from '../utils/creators/peer.js' +import { createBaseOptions } from '../utils/base-options.js' +import { createRoutingOptions } from './utils.js' +import type { Libp2p } from '../../src/index.js' +import type { PeerInfo } from '@libp2p/interfaces/peer-info' +import type { Libp2pNode } from '../../src/libp2p.js' describe('content-routing', () => { describe('no routers', () => { - let node + let node: Libp2p before(async () => { - [node] = await peerUtils.createPeer({ - config: baseOptions + node = await createNode({ + config: createBaseOptions() }) }) @@ -32,15 +31,17 @@ describe('content-routing', () => { it('.findProviders should return an error', async () => { try { + // @ts-expect-error invalid params for await (const _ of node.contentRouting.findProviders('a cid')) {} // eslint-disable-line throw new Error('.findProviders should return an error') - } catch (/** @type {any} */ err) { + } catch (err: any) { expect(err).to.exist() expect(err.code).to.equal('ERR_NO_ROUTERS_AVAILABLE') } }) it('.provide should return an error', async () => { + // @ts-expect-error invalid params await expect(node.contentRouting.provide('a cid')) .to.eventually.be.rejected() .and.to.have.property('code', 'ERR_NO_ROUTERS_AVAILABLE') @@ -49,17 +50,21 @@ describe('content-routing', () => { describe('via dht router', 
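A compact sketch of the mock setup the rewritten connection-manager spec relies on: a fake connection is assembled from the compliance-test mocks and handed to the manager as a `connection` event. The `maxConnections`/`minConnections` values here are illustrative and the helper paths mirror the spec above.

```ts
import { createNode } from '../utils/creators/peer.js'
import { createBaseOptions } from '../utils/base-options.browser.js'
import { mockConnection, mockDuplex, mockMultiaddrConnection } from '@libp2p/interface-compliance-tests/mocks'
import { createEd25519PeerId } from '@libp2p/peer-id-factory'
import { CustomEvent } from '@libp2p/interfaces'
import type { DefaultConnectionManager } from '../../src/connection-manager/index.js'

const libp2p = await createNode({
  config: createBaseOptions({
    connectionManager: {
      maxConnections: 5, // pruning kicks in once this is exceeded
      minConnections: 2
    }
  }),
  started: false
})
await libp2p.start()

const connectionManager = libp2p.components.getConnectionManager() as DefaultConnectionManager

// a fake connection built from a mock duplex and a fresh peer id
const connection = mockConnection(mockMultiaddrConnection(mockDuplex(), await createEd25519PeerId()))
// an optional peer value biases which connections get pruned first
connectionManager.setPeerValue(connection.remotePeer, 0.5)
// the manager is notified via a 'connection' CustomEvent carrying the connection as detail
await connectionManager.onConnect(new CustomEvent('connection', { detail: connection }))

await libp2p.stop()
```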
() => { const number = 5 - let nodes + let nodes: Libp2pNode[] before(async () => { - nodes = await peerUtils.createPeer({ - number, - config: routingOptions - }) + nodes = await Promise.all([ + createNode({ config: createRoutingOptions() }), + createNode({ config: createRoutingOptions() }), + createNode({ config: createRoutingOptions() }), + createNode({ config: createRoutingOptions() }), + createNode({ config: createRoutingOptions() }) + ]) + await populateAddressBooks(nodes) // Ring dial await Promise.all( - nodes.map((peer, i) => peer.dial(nodes[(i + 1) % number].peerId)) + nodes.map(async (peer, i) => await peer.dial(nodes[(i + 1) % number].peerId)) ) }) @@ -67,103 +72,107 @@ describe('content-routing', () => { sinon.restore() }) - after(() => Promise.all(nodes.map((n) => n.stop()))) + after(async () => await Promise.all(nodes.map(async (n) => await n.stop()))) - it('should use the nodes dht to provide', () => { + it('should use the nodes dht to provide', async () => { const deferred = pDefer() - sinon.stub(nodes[0]._dht, 'provide').callsFake(() => { + if (nodes[0].dht == null) { + throw new Error('DHT was not configured') + } + + sinon.stub(nodes[0].dht, 'provide').callsFake(async function * () { // eslint-disable-line require-yield deferred.resolve() }) - nodes[0].contentRouting.provide() - return deferred.promise + void nodes[0].contentRouting.provide(CID.parse('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB')) + + return await deferred.promise }) it('should use the nodes dht to find providers', async () => { const deferred = pDefer() - const [providerPeerId] = await peerUtils.createPeerId({ fixture: false }) - sinon.stub(nodes[0]._dht, 'findProviders').callsFake(function * () { - deferred.resolve() + if (nodes[0].dht == null) { + throw new Error('DHT was not configured') + } + + sinon.stub(nodes[0].dht, 'findProviders').callsFake(async function * () { yield { + from: nodes[0].peerId, + type: 0, name: 'PROVIDER', providers: [{ - id: providerPeerId, - multiaddrs: [] + id: nodes[0].peerId, + multiaddrs: [], + protocols: [] }] } + deferred.resolve() }) - await nodes[0].contentRouting.findProviders().next() + await drain(nodes[0].contentRouting.findProviders(CID.parse('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB'))) - return deferred.promise + return await deferred.promise }) }) describe('via delegate router', () => { - let node - let delegate + let node: Libp2pNode + let delegate: DelegatedContentRouting beforeEach(async () => { - const [peerId] = await peerUtils.createPeerId({ fixture: true }) - - delegate = new DelegatedContentRouter(peerId, ipfsHttpClient.create({ + delegate = new DelegatedContentRouting(createIpfsHttpClient({ host: '0.0.0.0', protocol: 'http', port: 60197 })) - ;[node] = await peerUtils.createPeer({ - config: mergeOptions(baseOptions, { - modules: { - contentRouting: [delegate] - }, - config: { - dht: { - enabled: false - } - } + node = await createNode({ + config: createBaseOptions({ + contentRouters: [ + delegate + ], + dht: undefined }) }) }) - afterEach(() => { + afterEach(async () => { + if (node != null) { + await node.stop() + } + sinon.restore() }) - afterEach(() => node.stop()) - - it('should only have one router', () => { - expect(node.contentRouting.routers).to.have.lengthOf(1) - }) - - it('should use the delegate router to provide', () => { + it('should use the delegate router to provide', async () => { const deferred = pDefer() - sinon.stub(delegate, 'provide').callsFake(() => { + sinon.stub(delegate, 'provide').callsFake(async () => { 
deferred.resolve() }) - node.contentRouting.provide() - return deferred.promise + void node.contentRouting.provide(CID.parse('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB')) + + return await deferred.promise }) it('should use the delegate router to find providers', async () => { const deferred = pDefer() - const [providerPeerId] = await peerUtils.createPeerId({ fixture: false }) - sinon.stub(delegate, 'findProviders').callsFake(function * () { - deferred.resolve() + sinon.stub(delegate, 'findProviders').callsFake(async function * () { yield { - id: providerPeerId, - multiaddrs: [] + id: node.peerId, + multiaddrs: [], + protocols: [] } + deferred.resolve() }) - await node.contentRouting.findProviders().next() + await drain(node.contentRouting.findProviders(CID.parse('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB'))) - return deferred.promise + return await deferred.promise }) it('should be able to register as a provider', async () => { @@ -219,13 +228,10 @@ describe('content-routing', () => { 'X-Chunked-Output', '1' ]) - const providers = [] - for await (const provider of node.contentRouting.findProviders(cid, { timeout: 1000 })) { - providers.push(provider) - } + const providers = await all(node.contentRouting.findProviders(cid)) expect(providers).to.have.length(1) - expect(providers[0].id.toB58String()).to.equal(provider) + expect(providers[0].id.toString()).to.equal(provider) expect(mockApi.isDone()).to.equal(true) }) @@ -241,7 +247,7 @@ describe('content-routing', () => { try { for await (const _ of node.contentRouting.findProviders(cid)) { } // eslint-disable-line throw new Error('should handle errors when finding providers') - } catch (/** @type {any} */ err) { + } catch (err: any) { expect(err).to.exist() } @@ -250,23 +256,19 @@ describe('content-routing', () => { }) describe('via dht and delegate routers', () => { - let node - let delegate + let node: Libp2pNode + let delegate: DelegatedContentRouting beforeEach(async () => { - const [peerId] = await peerUtils.createPeerId({ fixture: true }) - - delegate = new DelegatedContentRouter(peerId, ipfsHttpClient.create({ + delegate = new DelegatedContentRouting(createIpfsHttpClient({ host: '0.0.0.0', protocol: 'http', port: 60197 })) - ;[node] = await peerUtils.createPeer({ - config: mergeOptions(routingOptions, { - modules: { - contentRouting: [delegate] - } + node = await createNode({ + config: createRoutingOptions({ + contentRouters: [delegate] }) }) }) @@ -275,25 +277,30 @@ describe('content-routing', () => { sinon.restore() }) - afterEach(() => node.stop()) + afterEach(async () => await node.stop()) it('should store the multiaddrs of a peer', async () => { - const [providerPeerId] = await peerUtils.createPeerId({ fixture: false }) - const result = { + const providerPeerId = await createPeerId() + const result: PeerInfo = { id: providerPeerId, multiaddrs: [ new Multiaddr('/ip4/123.123.123.123/tcp/49320') - ] + ], + protocols: [] } - sinon.stub(node._dht, 'findProviders').callsFake(function * () {}) - sinon.stub(delegate, 'findProviders').callsFake(function * () { + if (node.dht == null) { + throw new Error('DHT was not configured') + } + + sinon.stub(node.dht, 'findProviders').callsFake(async function * () {}) + sinon.stub(delegate, 'findProviders').callsFake(async function * () { yield result }) expect(await node.peerStore.has(providerPeerId)).to.not.be.ok() - await drain(node.contentRouting.findProviders('a cid')) + await drain(node.contentRouting.findProviders(CID.parse('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB'))) 
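For the delegate-router tests, the essential change is that a `DelegatedContentRouting` instance is passed through the new `contentRouters` option. A hedged sketch follows; the host and port are placeholders for whatever IPFS HTTP API the delegate should use, and the CID is simply the one reused throughout these specs.

```ts
import { createNode } from '../utils/creators/peer.js'
import { createBaseOptions } from '../utils/base-options.js'
import { DelegatedContentRouting } from '@libp2p/delegated-content-routing'
import { create as createIpfsHttpClient } from 'ipfs-http-client'
import { CID } from 'multiformats/cid'
import all from 'it-all'

// the delegate wraps an IPFS HTTP API endpoint; host/port are placeholders
const delegate = new DelegatedContentRouting(createIpfsHttpClient({
  host: '0.0.0.0',
  protocol: 'http',
  port: 60197
}))

const node = await createNode({
  config: createBaseOptions({
    contentRouters: [delegate],
    dht: undefined // no DHT, so provide/findProviders go through the delegate only
  })
})

const cid = CID.parse('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB')
await node.contentRouting.provide(cid)

const providers = await all(node.contentRouting.findProviders(cid))
console.log(providers.map(p => p.id.toString()))
```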
expect(await node.peerStore.addressBook.get(providerPeerId)).to.deep.include({ isCertified: false, @@ -302,17 +309,22 @@ describe('content-routing', () => { }) it('should not wait for routing findProviders to finish before returning results', async () => { - const [providerPeerId] = await peerUtils.createPeerId({ fixture: false }) + const providerPeerId = await createPeerId() const result = { id: providerPeerId, multiaddrs: [ new Multiaddr('/ip4/123.123.123.123/tcp/49320') - ] + ], + protocols: [] + } + + if (node.dht == null) { + throw new Error('DHT was not configured') } const defer = pDefer() - sinon.stub(node._dht, 'findProviders').callsFake(async function * () { // eslint-disable-line require-yield + sinon.stub(node.dht, 'findProviders').callsFake(async function * () { // eslint-disable-line require-yield await defer.promise }) sinon.stub(delegate, 'findProviders').callsFake(async function * () { @@ -321,50 +333,70 @@ describe('content-routing', () => { await defer.promise }) - for await (const provider of node.contentRouting.findProviders('a cid')) { + for await (const provider of node.contentRouting.findProviders(CID.parse('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB'))) { expect(provider.id).to.deep.equal(providerPeerId) defer.resolve() } }) it('should dedupe results', async () => { - const [providerPeerId] = await peerUtils.createPeerId({ fixture: false }) + const providerPeerId = await createPeerId() const result = { id: providerPeerId, multiaddrs: [ new Multiaddr('/ip4/123.123.123.123/tcp/49320') - ] + ], + protocols: [] } - sinon.stub(node._dht, 'findProviders').callsFake(async function * () { - yield result + if (node.dht == null) { + throw new Error('DHT was not configured') + } + + sinon.stub(node.dht, 'findProviders').callsFake(async function * () { + yield { + from: providerPeerId, + type: 0, + name: 'PROVIDER', + providers: [ + result + ] + } }) sinon.stub(delegate, 'findProviders').callsFake(async function * () { yield result }) - const results = await all(node.contentRouting.findProviders('a cid')) + const results = await all(node.contentRouting.findProviders(CID.parse('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB'))) expect(results).to.be.an('array').with.lengthOf(1).that.deep.equals([result]) }) it('should combine multiaddrs when different addresses are returned by different content routers', async () => { - const [providerPeerId] = await peerUtils.createPeerId({ fixture: false }) + const providerPeerId = await createPeerId() const result1 = { id: providerPeerId, multiaddrs: [ new Multiaddr('/ip4/123.123.123.123/tcp/49320') - ] + ], + protocols: [] } const result2 = { id: providerPeerId, multiaddrs: [ new Multiaddr('/ip4/213.213.213.213/tcp/2344') - ] + ], + protocols: [] } - sinon.stub(node._dht, 'findProviders').callsFake(async function * () { + if (node.dht == null) { + throw new Error('DHT was not configured') + } + + sinon.stub(node.dht, 'findProviders').callsFake(async function * () { yield { + from: providerPeerId, + type: 0, name: 'PROVIDER', providers: [ result1 @@ -375,7 +407,7 @@ describe('content-routing', () => { yield result2 }) - await drain(node.contentRouting.findProviders('a cid')) + await drain(node.contentRouting.findProviders(CID.parse('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB'))) expect(await node.peerStore.addressBook.get(providerPeerId)).to.deep.include({ isCertified: false, @@ -390,16 +422,19 @@ describe('content-routing', () => { const dhtDeferred = pDefer() const delegatedDeferred = pDefer() - sinon.stub(node._dht, 
'provide').callsFake(async function * () { - yield + if (node.dht == null) { + throw new Error('DHT was not configured') + } + + sinon.stub(node.dht, 'provide').callsFake(async function * () { // eslint-disable-line require-yield dhtDeferred.resolve() }) - sinon.stub(delegate, 'provide').callsFake(() => { + sinon.stub(delegate, 'provide').callsFake(async function () { delegatedDeferred.resolve() }) - await node.contentRouting.provide() + await node.contentRouting.provide(CID.parse('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB')) await Promise.all([ dhtDeferred.promise, @@ -408,14 +443,21 @@ describe('content-routing', () => { }) it('should use the dht if the delegate fails to find providers', async () => { - const [providerPeerId] = await peerUtils.createPeerId({ fixture: false }) + const providerPeerId = await createPeerId() const results = [{ id: providerPeerId, - multiaddrs: [] + multiaddrs: [], + protocols: [] }] - sinon.stub(node._dht, 'findProviders').callsFake(function * () { + if (node.dht == null) { + throw new Error('DHT was not configured') + } + + sinon.stub(node.dht, 'findProviders').callsFake(async function * () { yield { + from: providerPeerId, + type: 0, name: 'PROVIDER', providers: [ results[0] @@ -423,11 +465,11 @@ describe('content-routing', () => { } }) - sinon.stub(delegate, 'findProviders').callsFake(function * () { // eslint-disable-line require-yield + sinon.stub(delegate, 'findProviders').callsFake(async function * () { // eslint-disable-line require-yield }) const providers = [] - for await (const prov of node.contentRouting.findProviders('a cid')) { + for await (const prov of node.contentRouting.findProviders(CID.parse('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB'))) { providers.push(prov) } @@ -436,20 +478,25 @@ describe('content-routing', () => { }) it('should use the delegate if the dht fails to find providers', async () => { - const [providerPeerId] = await peerUtils.createPeerId({ fixture: false }) + const providerPeerId = await createPeerId() const results = [{ id: providerPeerId, - multiaddrs: [] + multiaddrs: [], + protocols: [] }] - sinon.stub(node._dht, 'findProviders').callsFake(function * () {}) + if (node.dht == null) { + throw new Error('DHT was not configured') + } - sinon.stub(delegate, 'findProviders').callsFake(function * () { + sinon.stub(node.dht, 'findProviders').callsFake(async function * () {}) + + sinon.stub(delegate, 'findProviders').callsFake(async function * () { yield results[0] }) const providers = [] - for await (const prov of node.contentRouting.findProviders('a cid')) { + for await (const prov of node.contentRouting.findProviders(CID.parse('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB'))) { providers.push(prov) } diff --git a/test/content-routing/dht/configuration.node.js b/test/content-routing/dht/configuration.node.js deleted file mode 100644 index ed22d3e9..00000000 --- a/test/content-routing/dht/configuration.node.js +++ /dev/null @@ -1,94 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const mergeOptions = require('merge-options') - -const { create } = require('../../../src') -const { baseOptions, subsystemOptions } = require('./utils') -const peerUtils = require('../../utils/creators/peer') - -const listenAddr = '/ip4/127.0.0.1/tcp/0' - -describe('DHT subsystem is configurable', () => { - let libp2p - - afterEach(async () => { - libp2p && await libp2p.stop() - }) - - it('should not exist if no module is provided', async () => { - libp2p = await create(baseOptions) - 
expect(libp2p._dht).to.not.exist() - }) - - it('should exist if the module is provided', async () => { - libp2p = await create(subsystemOptions) - expect(libp2p._dht).to.exist() - }) - - it('should start and stop by default once libp2p starts', async () => { - const [peerId] = await peerUtils.createPeerId(1) - - const customOptions = mergeOptions(subsystemOptions, { - peerId, - addresses: { - listen: [listenAddr] - } - }) - - libp2p = await create(customOptions) - expect(libp2p._dht.isStarted()).to.equal(false) - - await libp2p.start() - expect(libp2p._dht.isStarted()).to.equal(true) - - await libp2p.stop() - expect(libp2p._dht.isStarted()).to.equal(false) - }) - - it('should not start if disabled once libp2p starts', async () => { - const [peerId] = await peerUtils.createPeerId(1) - - const customOptions = mergeOptions(subsystemOptions, { - peerId, - addresses: { - listen: [listenAddr] - }, - config: { - dht: { - enabled: false - } - } - }) - - libp2p = await create(customOptions) - expect(libp2p._dht.isStarted()).to.equal(false) - - await libp2p.start() - expect(libp2p._dht.isStarted()).to.equal(false) - }) - - it('should allow a manual start', async () => { - const [peerId] = await peerUtils.createPeerId(1) - - const customOptions = mergeOptions(subsystemOptions, { - peerId, - addresses: { - listen: [listenAddr] - }, - config: { - dht: { - enabled: false - } - } - }) - - libp2p = await create(customOptions) - await libp2p.start() - expect(libp2p._dht.isStarted()).to.equal(false) - - await libp2p._dht.start() - expect(libp2p._dht.isStarted()).to.equal(true) - }) -}) diff --git a/test/content-routing/dht/configuration.node.ts b/test/content-routing/dht/configuration.node.ts new file mode 100644 index 00000000..57b58c90 --- /dev/null +++ b/test/content-routing/dht/configuration.node.ts @@ -0,0 +1,27 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import { createLibp2p, Libp2p } from '../../../src/index.js' +import { createSubsystemOptions } from './utils.js' + +describe('DHT subsystem is configurable', () => { + let libp2p: Libp2p + + afterEach(async () => { + if (libp2p != null) { + await libp2p.stop() + } + }) + + it('should not exist if no module is provided', async () => { + libp2p = await createLibp2p(createSubsystemOptions({ + dht: undefined + })) + expect(libp2p.dht).to.not.exist() + }) + + it('should exist if the module is provided', async () => { + libp2p = await createLibp2p(createSubsystemOptions()) + expect(libp2p.dht).to.exist() + }) +}) diff --git a/test/content-routing/dht/operation.node.js b/test/content-routing/dht/operation.node.js deleted file mode 100644 index e6662b20..00000000 --- a/test/content-routing/dht/operation.node.js +++ /dev/null @@ -1,146 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') - -const { Multiaddr } = require('multiaddr') -const pWaitFor = require('p-wait-for') -const mergeOptions = require('merge-options') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') - -const { create } = require('../../../src') -const { subsystemOptions, subsystemMulticodecs } = require('./utils') -const peerUtils = require('../../utils/creators/peer') - -const listenAddr = new Multiaddr('/ip4/127.0.0.1/tcp/8000') -const remoteListenAddr = new Multiaddr('/ip4/127.0.0.1/tcp/8001') - -describe('DHT subsystem operates correctly', () => { - let peerId, remotePeerId - let libp2p, remoteLibp2p - let remAddr - - beforeEach(async () => { - [peerId, remotePeerId] = await 
peerUtils.createPeerId({ number: 2 }) - }) - - describe('dht started before connect', () => { - beforeEach(async () => { - libp2p = await create(mergeOptions(subsystemOptions, { - peerId, - addresses: { - listen: [listenAddr] - } - })) - - remoteLibp2p = await create(mergeOptions(subsystemOptions, { - peerId: remotePeerId, - addresses: { - listen: [remoteListenAddr] - } - })) - - await Promise.all([ - libp2p.start(), - remoteLibp2p.start() - ]) - - await libp2p.peerStore.addressBook.set(remotePeerId, [remoteListenAddr]); - [remAddr] = await libp2p.peerStore.addressBook.getMultiaddrsForPeer(remotePeerId) - }) - - afterEach(() => Promise.all([ - libp2p && libp2p.stop(), - remoteLibp2p && remoteLibp2p.stop() - ])) - - it('should get notified of connected peers on dial', async () => { - const connection = await libp2p.dialProtocol(remAddr, subsystemMulticodecs) - - expect(connection).to.exist() - - return Promise.all([ - pWaitFor(() => libp2p._dht._lan._routingTable.size === 1), - pWaitFor(() => remoteLibp2p._dht._lan._routingTable.size === 1) - ]) - }) - - it('should put on a peer and get from the other', async () => { - const key = uint8ArrayFromString('hello') - const value = uint8ArrayFromString('world') - - await libp2p.dialProtocol(remAddr, subsystemMulticodecs) - await Promise.all([ - pWaitFor(() => libp2p._dht._lan._routingTable.size === 1), - pWaitFor(() => remoteLibp2p._dht._lan._routingTable.size === 1) - ]) - - await libp2p.contentRouting.put(key, value) - - const fetchedValue = await remoteLibp2p.contentRouting.get(key) - expect(fetchedValue).to.have.property('val').that.equalBytes(value) - }) - }) - - describe('dht started after connect', () => { - beforeEach(async () => { - libp2p = await create(mergeOptions(subsystemOptions, { - peerId, - addresses: { - listen: [listenAddr] - } - })) - - remoteLibp2p = await create(mergeOptions(subsystemOptions, { - peerId: remotePeerId, - addresses: { - listen: [remoteListenAddr] - }, - config: { - dht: { - enabled: false - } - } - })) - - await libp2p.start() - await remoteLibp2p.start() - - await libp2p.peerStore.addressBook.set(remotePeerId, [remoteListenAddr]) - remAddr = (await libp2p.peerStore.addressBook.getMultiaddrsForPeer(remotePeerId))[0] - }) - - afterEach(() => Promise.all([ - libp2p && libp2p.stop(), - remoteLibp2p && remoteLibp2p.stop() - ])) - - it('should get notified of connected peers after starting', async () => { - const connection = await libp2p.dial(remAddr) - - expect(connection).to.exist() - expect(libp2p._dht._lan._routingTable.size).to.be.eql(0) - - await remoteLibp2p._dht.start() - // should be 0 directly after start - TODO this may be susceptible to timing bugs, we should have - // the ability to report stats on the DHT routing table instead of reaching into it's heart like this - expect(remoteLibp2p._dht._lan._routingTable.size).to.be.eql(0) - return pWaitFor(() => libp2p._dht._lan._routingTable.size === 1) - }) - - it('should put on a peer and get from the other', async () => { - await libp2p.dial(remAddr) - - const key = uint8ArrayFromString('hello') - const value = uint8ArrayFromString('world') - - await remoteLibp2p._dht.start() - await pWaitFor(() => libp2p._dht._lan._routingTable.size === 1) - - await libp2p.contentRouting.put(key, value) - - const fetchedValue = await remoteLibp2p.contentRouting.get(key) - expect(fetchedValue).to.have.property('val').that.equalBytes(value) - }) - }) -}) diff --git a/test/content-routing/dht/operation.node.ts b/test/content-routing/dht/operation.node.ts new file mode 100644 
index 00000000..e3fa534b --- /dev/null +++ b/test/content-routing/dht/operation.node.ts @@ -0,0 +1,178 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import { Multiaddr } from '@multiformats/multiaddr' +import pWaitFor from 'p-wait-for' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { subsystemMulticodecs, createSubsystemOptions } from './utils.js' +import { createPeerId } from '../../utils/creators/peer.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { createLibp2pNode, Libp2pNode } from '../../../src/libp2p.js' +import { isStartable } from '@libp2p/interfaces' + +const listenAddr = new Multiaddr('/ip4/127.0.0.1/tcp/8000') +const remoteListenAddr = new Multiaddr('/ip4/127.0.0.1/tcp/8001') + +async function getRemoteAddr (remotePeerId: PeerId, libp2p: Libp2pNode) { + const addrs = await libp2p.components.getPeerStore().addressBook.get(remotePeerId) + + if (addrs.length === 0) { + throw new Error('No addrs found') + } + + const addr = addrs[0] + + return addr.multiaddr.encapsulate(`/p2p/${remotePeerId.toString()}`) +} + +describe('DHT subsystem operates correctly', () => { + let peerId: PeerId, remotePeerId: PeerId + let libp2p: Libp2pNode, remoteLibp2p: Libp2pNode + let remAddr: Multiaddr + + beforeEach(async () => { + [peerId, remotePeerId] = await Promise.all([ + createPeerId(), + createPeerId() + ]) + }) + + describe('dht started before connect', () => { + beforeEach(async () => { + libp2p = await createLibp2pNode(createSubsystemOptions({ + peerId, + addresses: { + listen: [listenAddr.toString()] + } + })) + + remoteLibp2p = await createLibp2pNode(createSubsystemOptions({ + peerId: remotePeerId, + addresses: { + listen: [remoteListenAddr.toString()] + } + })) + + await Promise.all([ + libp2p.start(), + remoteLibp2p.start() + ]) + + await libp2p.components.getPeerStore().addressBook.set(remotePeerId, [remoteListenAddr]) + remAddr = await getRemoteAddr(remotePeerId, libp2p) + }) + + afterEach(async () => { + if (libp2p != null) { + await libp2p.stop() + } + + if (remoteLibp2p != null) { + await remoteLibp2p.stop() + } + }) + + it('should get notified of connected peers on dial', async () => { + const connection = await libp2p.dialProtocol(remAddr, subsystemMulticodecs) + + expect(connection).to.exist() + + return await Promise.all([ + pWaitFor(() => libp2p.dht?.lan.routingTable.size === 1), + pWaitFor(() => remoteLibp2p.dht?.lan.routingTable.size === 1) + ]) + }) + + it('should put on a peer and get from the other', async () => { + const key = uint8ArrayFromString('hello') + const value = uint8ArrayFromString('world') + + await libp2p.dialProtocol(remAddr, subsystemMulticodecs) + await Promise.all([ + pWaitFor(() => libp2p.dht?.lan.routingTable.size === 1), + pWaitFor(() => remoteLibp2p.dht?.lan.routingTable.size === 1) + ]) + + await libp2p.components.getContentRouting().put(key, value) + + const fetchedValue = await remoteLibp2p.components.getContentRouting().get(key) + expect(fetchedValue).to.equalBytes(value) + }) + }) + + describe('dht started after connect', () => { + beforeEach(async () => { + libp2p = await createLibp2pNode(createSubsystemOptions({ + peerId, + addresses: { + listen: [listenAddr.toString()] + } + })) + + remoteLibp2p = await createLibp2pNode(createSubsystemOptions({ + peerId: remotePeerId, + addresses: { + listen: [remoteListenAddr.toString()] + } + })) + + await libp2p.start() + await remoteLibp2p.start() + + await 
libp2p.components.getPeerStore().addressBook.set(remotePeerId, [remoteListenAddr]) + remAddr = await getRemoteAddr(remotePeerId, libp2p) + }) + + afterEach(async () => { + if (libp2p != null) { + await libp2p.stop() + } + + if (remoteLibp2p != null) { + await remoteLibp2p.stop() + } + }) + + // TODO: we pre-fill the routing tables on dht startup with artificial peers so this test + // doesn't really work as intended. We should be testing that a connected peer can change + // it's supported protocols and we should notice that change so there may be something to + // salvage from here, though it could be better as identify protocol tests. + it.skip('should get notified of connected peers after starting', async () => { + const connection = await libp2p.dial(remAddr) + + expect(connection).to.exist() + expect(libp2p.dht?.lan.routingTable).to.be.empty() + + const dht = remoteLibp2p.dht + + if (isStartable(dht)) { + await dht.start() + } + + // should be 0 directly after start - TODO this may be susceptible to timing bugs, we should have + // the ability to report stats on the DHT routing table instead of reaching into it's heart like this + expect(remoteLibp2p.dht?.lan.routingTable).to.be.empty() + + return await pWaitFor(() => libp2p.dht?.lan.routingTable.size === 1) + }) + + it('should put on a peer and get from the other', async () => { + await libp2p.dial(remAddr) + + const key = uint8ArrayFromString('hello') + const value = uint8ArrayFromString('world') + + const dht = remoteLibp2p.dht + + if (isStartable(dht)) { + await dht.start() + } + + await pWaitFor(() => libp2p.dht?.lan.routingTable.size === 1) + await libp2p.components.getContentRouting().put(key, value) + + const fetchedValue = await remoteLibp2p.components.getContentRouting().get(key) + expect(fetchedValue).to.equalBytes(value) + }) + }) +}) diff --git a/test/content-routing/dht/utils.js b/test/content-routing/dht/utils.js deleted file mode 100644 index 0a37de41..00000000 --- a/test/content-routing/dht/utils.js +++ /dev/null @@ -1,35 +0,0 @@ -'use strict' - -const KadDht = require('libp2p-kad-dht') -const Crypto = require('../../../src/insecure/plaintext') -const Muxer = require('libp2p-mplex') -const Transport = require('libp2p-tcp') - -const mergeOptions = require('merge-options') - -const baseOptions = { - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } -} - -module.exports.baseOptions = baseOptions - -const subsystemOptions = mergeOptions(baseOptions, { - modules: { - dht: KadDht - }, - config: { - dht: { - kBucketSize: 20, - enabled: true - } - } -}) - -module.exports.subsystemOptions = subsystemOptions -module.exports.subsystemMulticodecs = [ - '/ipfs/lan/kad/1.0.0' -] diff --git a/test/content-routing/dht/utils.ts b/test/content-routing/dht/utils.ts new file mode 100644 index 00000000..e3a91061 --- /dev/null +++ b/test/content-routing/dht/utils.ts @@ -0,0 +1,15 @@ +import { KadDHT } from '@libp2p/kad-dht' +import type { Libp2pOptions } from '../../../src/index.js' +import { createBaseOptions } from '../../utils/base-options.js' + +export function createSubsystemOptions (...overrides: Libp2pOptions[]) { + return createBaseOptions({ + dht: new KadDHT({ + kBucketSize: 20 + }) + }, ...overrides) +} + +export const subsystemMulticodecs = [ + '/ipfs/lan/kad/1.0.0' +] diff --git a/test/content-routing/utils.js b/test/content-routing/utils.js deleted file mode 100644 index 7b43d050..00000000 --- a/test/content-routing/utils.js +++ /dev/null @@ -1,21 +0,0 @@ -'use strict' - -const KadDht 
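For the DHT operation spec, the updated flow is: build both nodes from `createSubsystemOptions`, dial over the kad-dht protocols, wait for the LAN routing tables to converge, then use the content routing component for put/get. Listen addresses below are arbitrary and dialling the first reported multiaddr is a simplification of the address-book lookup used in the spec above.

```ts
import { createLibp2pNode } from '../../../src/libp2p.js'
import { createSubsystemOptions, subsystemMulticodecs } from './utils.js'
import { createPeerId } from '../../utils/creators/peer.js'
import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string'
import pWaitFor from 'p-wait-for'

const libp2p = await createLibp2pNode(createSubsystemOptions({
  peerId: await createPeerId(),
  addresses: { listen: ['/ip4/127.0.0.1/tcp/0'] }
}))
const remote = await createLibp2pNode(createSubsystemOptions({
  peerId: await createPeerId(),
  addresses: { listen: ['/ip4/127.0.0.1/tcp/0'] }
}))
await libp2p.start()
await remote.start()

// connect over the kad-dht protocol and wait for the LAN routing tables to see each other
await libp2p.dialProtocol(remote.getMultiaddrs()[0], subsystemMulticodecs)
await pWaitFor(() => libp2p.dht?.lan.routingTable.size === 1)
await pWaitFor(() => remote.dht?.lan.routingTable.size === 1)

// values put through one node's content routing are readable from the other
const key = uint8ArrayFromString('hello')
await libp2p.components.getContentRouting().put(key, uint8ArrayFromString('world'))
const value = await remote.components.getContentRouting().get(key)

await libp2p.stop()
await remote.stop()
```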
= require('libp2p-kad-dht') -const mergeOptions = require('merge-options') -const baseOptions = require('../utils/base-options') - -module.exports.baseOptions = baseOptions - -const routingOptions = mergeOptions(baseOptions, { - modules: { - dht: KadDht - }, - config: { - dht: { - kBucketSize: 20, - enabled: true - } - } -}) - -module.exports.routingOptions = routingOptions diff --git a/test/content-routing/utils.ts b/test/content-routing/utils.ts new file mode 100644 index 00000000..25ae8b23 --- /dev/null +++ b/test/content-routing/utils.ts @@ -0,0 +1,11 @@ +import { KadDHT } from '@libp2p/kad-dht' +import type { Libp2pOptions } from '../../src/index.js' +import { createBaseOptions } from '../utils/base-options.js' + +export function createRoutingOptions (...overrides: Libp2pOptions[]): Libp2pOptions { + return createBaseOptions({ + dht: new KadDHT({ + kBucketSize: 20 + }) + }, ...overrides) +} diff --git a/test/core/consume-peer-record.spec.js b/test/core/consume-peer-record.spec.js deleted file mode 100644 index 609a6c29..00000000 --- a/test/core/consume-peer-record.spec.js +++ /dev/null @@ -1,46 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const Transport = require('libp2p-websockets') -const { NOISE: Crypto } = require('@chainsafe/libp2p-noise') - -const Libp2p = require('../../src') -const { createPeerId } = require('../utils/creators/peer') - -describe('Consume peer record', () => { - let libp2p - - beforeEach(async () => { - const [peerId] = await createPeerId() - const config = { - peerId, - modules: { - transport: [Transport], - connEncryption: [Crypto] - } - } - libp2p = await Libp2p.create(config) - }) - - afterEach(async () => { - await libp2p.stop() - }) - - it('should consume peer record when observed addrs are added', async () => { - let done - - libp2p.peerStore.addressBook.consumePeerRecord = () => { - done() - } - - const p = new Promise(resolve => { - done = resolve - }) - - libp2p.addressManager.addObservedAddr('/ip4/123.123.123.123/tcp/3983') - - await p - - libp2p.stop() - }) -}) diff --git a/test/core/consume-peer-record.spec.ts b/test/core/consume-peer-record.spec.ts new file mode 100644 index 00000000..70dad598 --- /dev/null +++ b/test/core/consume-peer-record.spec.ts @@ -0,0 +1,51 @@ +/* eslint-env mocha */ + +import { WebSockets } from '@libp2p/websockets' +import { NOISE } from '@chainsafe/libp2p-noise' +import { createPeerId } from '../utils/creators/peer.js' +import { Multiaddr } from '@multiformats/multiaddr' +import { createLibp2pNode, Libp2pNode } from '../../src/libp2p.js' +import type { Libp2pOptions } from '../../src/index.js' + +describe('Consume peer record', () => { + let libp2p: Libp2pNode + + beforeEach(async () => { + const peerId = await createPeerId() + const config: Libp2pOptions = { + peerId, + transports: [ + new WebSockets() + ], + connectionEncryption: [ + NOISE + ] + } + libp2p = await createLibp2pNode(config) + }) + + afterEach(async () => { + await libp2p.stop() + }) + + it('should consume peer record when observed addrs are added', async () => { + let done: () => void + + libp2p.components.getPeerStore().addressBook.consumePeerRecord = async () => { + done() + return true + } + + const p = new Promise(resolve => { + done = resolve + }) + + await libp2p.start() + + libp2p.components.getAddressManager().addObservedAddr(new Multiaddr('/ip4/123.123.123.123/tcp/3983')) + + await p + + await libp2p.stop() + }) +}) diff --git a/test/core/encryption.spec.js b/test/core/encryption.spec.js deleted file mode 100644 index da5c6e11..00000000 --- 
a/test/core/encryption.spec.js +++ /dev/null @@ -1,54 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') - -const Transport = require('libp2p-websockets') -const { NOISE: Crypto } = require('@chainsafe/libp2p-noise') - -const Libp2p = require('../../src') -const { codes: ErrorCodes } = require('../../src/errors') -const { createPeerId } = require('../utils/creators/peer') - -describe('Connection encryption configuration', () => { - let peerId - - before(async () => { - [peerId] = await createPeerId() - }) - - it('is required', async () => { - const config = { - peerId, - modules: { - transport: [Transport] - } - } - - await expect(Libp2p.create(config)).to.eventually.be.rejected() - .and.to.have.property('code', ErrorCodes.CONN_ENCRYPTION_REQUIRED) - }) - - it('is required and needs at least one module', async () => { - const config = { - peerId, - modules: { - transport: [Transport], - connEncryption: [] - } - } - await expect(Libp2p.create(config)).to.eventually.be.rejected() - .and.to.have.property('code', ErrorCodes.CONN_ENCRYPTION_REQUIRED) - }) - - it('can be created', async () => { - const config = { - peerId, - modules: { - transport: [Transport], - connEncryption: [Crypto] - } - } - await Libp2p.create(config) - }) -}) diff --git a/test/core/encryption.spec.ts b/test/core/encryption.spec.ts new file mode 100644 index 00000000..6d2ac817 --- /dev/null +++ b/test/core/encryption.spec.ts @@ -0,0 +1,54 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import { WebSockets } from '@libp2p/websockets' +import { NOISE } from '@chainsafe/libp2p-noise' +import { createLibp2p, Libp2pOptions } from '../../src/index.js' +import { codes as ErrorCodes } from '../../src/errors.js' +import { createPeerId } from '../utils/creators/peer.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' + +describe('Connection encryption configuration', () => { + let peerId: PeerId + + before(async () => { + peerId = await createPeerId() + }) + + it('is required', async () => { + const config = { + peerId, + transports: [ + new WebSockets() + ] + } + + await expect(createLibp2p(config)).to.eventually.be.rejected() + .and.to.have.property('code', ErrorCodes.CONN_ENCRYPTION_REQUIRED) + }) + + it('is required and needs at least one module', async () => { + const config = { + peerId, + transports: [ + new WebSockets() + ], + connectionEncryption: [] + } + await expect(createLibp2p(config)).to.eventually.be.rejected() + .and.to.have.property('code', ErrorCodes.CONN_ENCRYPTION_REQUIRED) + }) + + it('can be created', async () => { + const config: Libp2pOptions = { + peerId, + transports: [ + new WebSockets() + ], + connectionEncryption: [ + NOISE + ] + } + await createLibp2p(config) + }) +}) diff --git a/test/core/listening.node.js b/test/core/listening.node.ts similarity index 57% rename from test/core/listening.node.js rename to test/core/listening.node.ts index c83c6d89..04f81c91 100644 --- a/test/core/listening.node.js +++ b/test/core/listening.node.ts @@ -1,22 +1,20 @@ -'use strict' /* eslint-env mocha */ -const { expect } = require('aegir/utils/chai') - -const Transport = require('libp2p-tcp') -const { NOISE: Crypto } = require('@chainsafe/libp2p-noise') - -const { create } = require('../../src') -const peerUtils = require('../utils/creators/peer') +import { expect } from 'aegir/utils/chai.js' +import { TCP } from '@libp2p/tcp' +import { NOISE } from '@chainsafe/libp2p-noise' +import { createPeerId } from '../utils/creators/peer.js' 
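A minimal sketch of the options shape the encryption and peer-record specs now construct: transport and encrypter instances are passed directly at the root of the options. The peer-id helper and relative paths are taken from the specs above.

```ts
import { WebSockets } from '@libp2p/websockets'
import { NOISE } from '@chainsafe/libp2p-noise'
import { createLibp2p, Libp2pOptions } from '../../src/index.js'
import { createPeerId } from '../utils/creators/peer.js'

const peerId = await createPeerId()

// a transport instance plus at least one connection encrypter
const options: Libp2pOptions = {
  peerId,
  transports: [new WebSockets()],
  connectionEncryption: [NOISE]
}

const libp2p = await createLibp2p(options)

// omitting connectionEncryption (or passing an empty array) makes createLibp2p
// reject with ErrorCodes.CONN_ENCRYPTION_REQUIRED, as the spec above asserts
```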
+import type { PeerId } from '@libp2p/interfaces/peer-id' +import { createLibp2pNode, Libp2pNode } from '../../src/libp2p.js' const listenAddr = '/ip4/0.0.0.0/tcp/0' describe('Listening', () => { - let peerId - let libp2p + let peerId: PeerId + let libp2p: Libp2pNode before(async () => { - [peerId] = await peerUtils.createPeerId() + peerId = await createPeerId() }) after(async () => { @@ -24,20 +22,22 @@ describe('Listening', () => { }) it('should replace wildcard host and port with actual host and port on startup', async () => { - libp2p = await create({ + libp2p = await createLibp2pNode({ peerId, addresses: { listen: [listenAddr] }, - modules: { - transport: [Transport], - connEncryption: [Crypto] - } + transports: [ + new TCP() + ], + connectionEncryption: [ + NOISE + ] }) await libp2p.start() - const addrs = libp2p.transportManager.getAddrs() + const addrs = libp2p.components.getTransportManager().getAddrs() // Should get something like: // /ip4/127.0.0.1/tcp/50866 diff --git a/test/core/ping.node.js b/test/core/ping.node.js deleted file mode 100644 index 63ebec01..00000000 --- a/test/core/ping.node.js +++ /dev/null @@ -1,84 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') - -const pTimes = require('p-times') -const pipe = require('it-pipe') - -const peerUtils = require('../utils/creators/peer') -const baseOptions = require('../utils/base-options') -const { PROTOCOL } = require('../../src/ping/constants') - -describe('ping', () => { - let nodes - - beforeEach(async () => { - nodes = await peerUtils.createPeer({ - number: 3, - config: baseOptions - }) - - await nodes[0].peerStore.addressBook.set(nodes[1].peerId, nodes[1].multiaddrs) - await nodes[1].peerStore.addressBook.set(nodes[0].peerId, nodes[0].multiaddrs) - }) - - afterEach(() => Promise.all(nodes.map(n => n.stop()))) - - it('ping once from peer0 to peer1 using a multiaddr', async () => { - const ma = `${nodes[2].multiaddrs[0]}/p2p/${nodes[2].peerId.toB58String()}` - const latency = await nodes[0].ping(ma) - - expect(latency).to.be.a('Number') - }) - - it('ping once from peer0 to peer1 using a peerId', async () => { - const latency = await nodes[0].ping(nodes[1].peerId) - - expect(latency).to.be.a('Number') - }) - - it('ping several times for getting an average', async () => { - const latencies = await pTimes(5, () => nodes[1].ping(nodes[0].peerId)) - - const averageLatency = latencies.reduce((p, c) => p + c, 0) / latencies.length - expect(averageLatency).to.be.a('Number') - }) - - it('only waits for the first response to arrive', async () => { - nodes[1].handle(PROTOCOL, async ({ connection, stream }) => { - let firstInvocation = true - - await pipe( - stream, - function (stream) { - const output = { - [Symbol.asyncIterator]: () => output, - next: async () => { - if (firstInvocation) { - firstInvocation = false - - // eslint-disable-next-line no-unreachable-loop - for await (const data of stream) { - return { - value: data, - done: false - } - } - } else { - return new Promise() // never resolve - } - } - } - - return output - }, - stream - ) - }) - - const latency = await nodes[0].ping(nodes[1].peerId) - - expect(latency).to.be.a('Number') - }) -}) diff --git a/test/core/ping.node.ts b/test/core/ping.node.ts new file mode 100644 index 00000000..45d5d519 --- /dev/null +++ b/test/core/ping.node.ts @@ -0,0 +1,75 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import pTimes from 'p-times' +import { pipe } from 'it-pipe' +import { createNode, 
populateAddressBooks } from '../utils/creators/peer.js' +import { createBaseOptions } from '../utils/base-options.js' +import { PROTOCOL } from '../../src/ping/constants.js' +import { Multiaddr } from '@multiformats/multiaddr' +import pDefer from 'p-defer' +import type { Libp2pNode } from '../../src/libp2p.js' + +describe('ping', () => { + let nodes: Libp2pNode[] + + beforeEach(async () => { + nodes = await Promise.all([ + createNode({ config: createBaseOptions() }), + createNode({ config: createBaseOptions() }), + createNode({ config: createBaseOptions() }) + ]) + await populateAddressBooks(nodes) + + await nodes[0].components.getPeerStore().addressBook.set(nodes[1].peerId, nodes[1].getMultiaddrs()) + await nodes[1].components.getPeerStore().addressBook.set(nodes[0].peerId, nodes[0].getMultiaddrs()) + }) + + afterEach(async () => await Promise.all(nodes.map(async n => await n.stop()))) + + it('ping once from peer0 to peer1 using a multiaddr', async () => { + const ma = new Multiaddr(`${nodes[2].getMultiaddrs()[0].toString()}/p2p/${nodes[2].peerId.toString()}`) + const latency = await nodes[0].ping(ma) + + expect(latency).to.be.a('Number') + }) + + it('ping once from peer0 to peer1 using a peerId', async () => { + const latency = await nodes[0].ping(nodes[1].peerId) + + expect(latency).to.be.a('Number') + }) + + it('ping several times for getting an average', async () => { + const latencies = await pTimes(5, async () => await nodes[1].ping(nodes[0].peerId)) + + const averageLatency = latencies.reduce((p, c) => p + c, 0) / latencies.length + expect(averageLatency).to.be.a('Number') + }) + + it('only waits for the first response to arrive', async () => { + const defer = pDefer() + + await nodes[1].unhandle(PROTOCOL) + await nodes[1].handle(PROTOCOL, ({ stream }) => { + void pipe( + stream, + async function * (stream) { + for await (const data of stream) { + yield data + + // something longer than the test timeout + await defer.promise + } + }, + stream + ) + }) + + const latency = await nodes[0].ping(nodes[1].peerId) + + expect(latency).to.be.a('Number') + + defer.resolve() + }) +}) diff --git a/test/dialing/dial-request.spec.js b/test/dialing/dial-request.spec.js deleted file mode 100644 index 3ec2eba8..00000000 --- a/test/dialing/dial-request.spec.js +++ /dev/null @@ -1,224 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') - -const { AbortError } = require('libp2p-interfaces/src/transport/errors') -const AggregateError = require('aggregate-error') -const pDefer = require('p-defer') -const delay = require('delay') - -const DialRequest = require('../../src/dialer/dial-request') -const createMockConnection = require('../utils/mockConnection') -const error = new Error('dial failes') - -describe('Dial Request', () => { - it('should end when a single multiaddr dials succeeds', async () => { - const mockConnection = await createMockConnection() - const actions = { - 1: () => Promise.reject(error), - 2: () => Promise.resolve(mockConnection), - 3: () => Promise.reject(error) - } - const dialAction = (num) => actions[num]() - const tokens = ['a', 'b'] - const controller = new AbortController() - const dialer = { - getTokens: () => [...tokens], - releaseToken: () => {} - } - - const dialRequest = new DialRequest({ - addrs: Object.keys(actions), - dialer, - dialAction - }) - - sinon.spy(actions, 1) - sinon.spy(actions, 2) - sinon.spy(actions, 3) - sinon.spy(dialer, 'releaseToken') - const result = await dialRequest.run({ 
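The ping spec reduces to the usage below, with helper paths as in the file above and the second node's addresses learned via the address book; `ping` resolves to a numeric latency.

```ts
import { createNode } from '../utils/creators/peer.js'
import { createBaseOptions } from '../utils/base-options.js'

const a = await createNode({ config: createBaseOptions() })
const b = await createNode({ config: createBaseOptions() })

// make a aware of b's addresses, then ping by peer id
await a.components.getPeerStore().addressBook.set(b.peerId, b.getMultiaddrs())
const latency = await a.ping(b.peerId) // a number; a full multiaddr is also accepted

await a.stop()
await b.stop()
```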
signal: controller.signal }) - expect(result).to.equal(mockConnection) - expect(actions[1]).to.have.property('callCount', 1) - expect(actions[2]).to.have.property('callCount', 1) - expect(actions[3]).to.have.property('callCount', 0) - expect(dialer.releaseToken).to.have.property('callCount', tokens.length) - }) - - it('should release tokens when all addr dials have started', async () => { - const mockConnection = await createMockConnection() - const firstDials = pDefer() - const deferred = pDefer() - const actions = { - 1: () => firstDials.promise, - 2: () => firstDials.promise, - 3: () => deferred.promise - } - const dialAction = (num) => actions[num]() - const tokens = ['a', 'b'] - const controller = new AbortController() - const dialer = { - getTokens: () => [...tokens], - releaseToken: () => {} - } - - const dialRequest = new DialRequest({ - addrs: Object.keys(actions), - dialer, - dialAction - }) - - sinon.spy(actions, 1) - sinon.spy(actions, 2) - sinon.spy(actions, 3) - sinon.spy(dialer, 'releaseToken') - dialRequest.run({ signal: controller.signal }) - // Let the first dials run - await delay(0) - - // Finish the first 2 dials - firstDials.reject(error) - await delay(0) - - // Only 1 dial should remain, so 1 token should have been released - expect(actions[1]).to.have.property('callCount', 1) - expect(actions[2]).to.have.property('callCount', 1) - expect(actions[3]).to.have.property('callCount', 1) - expect(dialer.releaseToken).to.have.property('callCount', 1) - - // Finish the dial and release the 2nd token - deferred.resolve(mockConnection) - await delay(0) - expect(dialer.releaseToken).to.have.property('callCount', 2) - }) - - it('should throw an AggregateError if all dials fail', async () => { - const actions = { - 1: () => Promise.reject(error), - 2: () => Promise.reject(error), - 3: () => Promise.reject(error) - } - const dialAction = (num) => actions[num]() - const addrs = Object.keys(actions) - const tokens = ['a', 'b'] - const controller = new AbortController() - const dialer = { - getTokens: () => [...tokens], - releaseToken: () => {} - } - - const dialRequest = new DialRequest({ - addrs, - dialer, - dialAction - }) - - sinon.spy(actions, 1) - sinon.spy(actions, 2) - sinon.spy(actions, 3) - sinon.spy(dialer, 'getTokens') - sinon.spy(dialer, 'releaseToken') - - try { - await dialRequest.run({ signal: controller.signal }) - expect.fail('Should have thrown') - } catch (/** @type {any} */ err) { - expect(err).to.be.an.instanceof(AggregateError) - } - - expect(actions[1]).to.have.property('callCount', 1) - expect(actions[2]).to.have.property('callCount', 1) - expect(actions[3]).to.have.property('callCount', 1) - expect(dialer.getTokens.calledWith(addrs.length)).to.equal(true) - expect(dialer.releaseToken).to.have.property('callCount', tokens.length) - }) - - it('should handle a large number of addrs', async () => { - const reject = sinon.stub().callsFake(() => Promise.reject(error)) - const actions = {} - const addrs = [...new Array(25)].map((_, index) => index + 1) - addrs.forEach(addr => { - actions[addr] = reject - }) - - const dialAction = (addr) => actions[addr]() - const tokens = ['a', 'b'] - const controller = new AbortController() - const dialer = { - getTokens: () => [...tokens], - releaseToken: () => {} - } - - const dialRequest = new DialRequest({ - addrs, - dialer, - dialAction - }) - - sinon.spy(dialer, 'releaseToken') - try { - await dialRequest.run({ signal: controller.signal }) - expect.fail('Should have thrown') - } catch (/** @type {any} */ err) { - 
expect(err).to.be.an.instanceof(AggregateError) - } - - expect(reject).to.have.property('callCount', addrs.length) - expect(dialer.releaseToken).to.have.property('callCount', tokens.length) - }) - - it('should abort all dials when its signal is aborted', async () => { - const deferToAbort = ({ signal }) => { - if (signal.aborted) throw new Error('already aborted') - const deferred = pDefer() - const onAbort = () => { - deferred.reject(new AbortError()) - signal.removeEventListener('abort', onAbort) - } - signal.addEventListener('abort', onAbort) - return deferred.promise - } - - const actions = { - 1: deferToAbort, - 2: deferToAbort, - 3: deferToAbort - } - const dialAction = (num, opts) => actions[num](opts) - const addrs = Object.keys(actions) - const tokens = ['a', 'b'] - const controller = new AbortController() - const dialer = { - getTokens: () => [...tokens], - releaseToken: () => {} - } - - const dialRequest = new DialRequest({ - addrs, - dialer, - dialAction - }) - - sinon.spy(actions, 1) - sinon.spy(actions, 2) - sinon.spy(actions, 3) - sinon.spy(dialer, 'getTokens') - sinon.spy(dialer, 'releaseToken') - - try { - setTimeout(() => controller.abort(), 100) - await dialRequest.run({ signal: controller.signal }) - expect.fail('dial should have failed') - } catch (/** @type {any} */ err) { - expect(err).to.be.an.instanceof(AggregateError) - } - - expect(actions[1]).to.have.property('callCount', 1) - expect(actions[2]).to.have.property('callCount', 1) - expect(actions[3]).to.have.property('callCount', 1) - expect(dialer.getTokens.calledWith(addrs.length)).to.equal(true) - expect(dialer.releaseToken).to.have.property('callCount', tokens.length) - }) -}) diff --git a/test/dialing/dial-request.spec.ts b/test/dialing/dial-request.spec.ts new file mode 100644 index 00000000..c41d8636 --- /dev/null +++ b/test/dialing/dial-request.spec.ts @@ -0,0 +1,218 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import { AbortError } from '@libp2p/interfaces/errors' +import pDefer from 'p-defer' +import delay from 'delay' +import { DialAction, DialRequest } from '../../src/dialer/dial-request.js' +import { mockConnection, mockDuplex, mockMultiaddrConnection } from '@libp2p/interface-compliance-tests/mocks' +import { createEd25519PeerId } from '@libp2p/peer-id-factory' +import { Multiaddr } from '@multiformats/multiaddr' +import { DefaultDialer } from '../../src/dialer/index.js' +import { Components } from '@libp2p/interfaces/components' +const error = new Error('dial failure') + +describe('Dial Request', () => { + it('should end when a single multiaddr dials succeeds', async () => { + const connection = mockConnection(mockMultiaddrConnection(mockDuplex(), await createEd25519PeerId())) + const actions: Record Promise> = { + '/ip4/127.0.0.1/tcp/1231': async () => await Promise.reject(error), + '/ip4/127.0.0.1/tcp/1232': async () => await Promise.resolve(connection), + '/ip4/127.0.0.1/tcp/1233': async () => await Promise.reject(error) + } + const dialAction: DialAction = async (num) => await actions[num.toString()]() + const controller = new AbortController() + const dialer = new DefaultDialer(new Components(), { + maxParallelDials: 2 + }) + const dialerReleaseTokenSpy = sinon.spy(dialer, 'releaseToken') + const dialRequest = new DialRequest({ + addrs: Object.keys(actions).map(str => new Multiaddr(str)), + dialer, + dialAction + }) + + sinon.spy(actions, '/ip4/127.0.0.1/tcp/1231') + sinon.spy(actions, '/ip4/127.0.0.1/tcp/1232') + sinon.spy(actions, 
'/ip4/127.0.0.1/tcp/1233') + + const result = await dialRequest.run({ signal: controller.signal }) + expect(result).to.equal(connection) + expect(actions['/ip4/127.0.0.1/tcp/1231']).to.have.property('callCount', 1) + expect(actions['/ip4/127.0.0.1/tcp/1232']).to.have.property('callCount', 1) + expect(actions['/ip4/127.0.0.1/tcp/1233']).to.have.property('callCount', 0) + expect(dialerReleaseTokenSpy.callCount).to.equal(2) + }) + + it('should release tokens when all addr dials have started', async () => { + const connection = mockConnection(mockMultiaddrConnection(mockDuplex(), await createEd25519PeerId())) + const firstDials = pDefer() + const deferred = pDefer() + const actions: Record Promise> = { + '/ip4/127.0.0.1/tcp/1231': async () => await firstDials.promise, + '/ip4/127.0.0.1/tcp/1232': async () => await firstDials.promise, + '/ip4/127.0.0.1/tcp/1233': async () => await deferred.promise + } + const dialAction: DialAction = async (num) => await actions[num.toString()]() + const controller = new AbortController() + const dialer = new DefaultDialer(new Components(), { + maxParallelDials: 2 + }) + const dialerReleaseTokenSpy = sinon.spy(dialer, 'releaseToken') + const dialRequest = new DialRequest({ + addrs: Object.keys(actions).map(str => new Multiaddr(str)), + dialer, + dialAction + }) + + sinon.spy(actions, '/ip4/127.0.0.1/tcp/1231') + sinon.spy(actions, '/ip4/127.0.0.1/tcp/1232') + sinon.spy(actions, '/ip4/127.0.0.1/tcp/1233') + + void dialRequest.run({ signal: controller.signal }) + // Let the first dials run + await delay(0) + + // Finish the first 2 dials + firstDials.reject(error) + await delay(0) + + // Only 1 dial should remain, so 1 token should have been released + expect(actions['/ip4/127.0.0.1/tcp/1231']).to.have.property('callCount', 1) + expect(actions['/ip4/127.0.0.1/tcp/1232']).to.have.property('callCount', 1) + expect(actions['/ip4/127.0.0.1/tcp/1233']).to.have.property('callCount', 1) + expect(dialerReleaseTokenSpy.callCount).to.equal(1) + + // Finish the dial and release the 2nd token + deferred.resolve(connection) + await delay(0) + expect(dialerReleaseTokenSpy.callCount).to.equal(2) + }) + + it('should throw an AggregateError if all dials fail', async () => { + const actions: Record Promise> = { + '/ip4/127.0.0.1/tcp/1231': async () => await Promise.reject(error), + '/ip4/127.0.0.1/tcp/1232': async () => await Promise.reject(error), + '/ip4/127.0.0.1/tcp/1233': async () => await Promise.reject(error) + } + const dialAction: DialAction = async (num) => await actions[num.toString()]() + const addrs = Object.keys(actions) + const controller = new AbortController() + const dialer = new DefaultDialer(new Components(), { + maxParallelDials: 2 + }) + const dialerReleaseTokenSpy = sinon.spy(dialer, 'releaseToken') + const dialerGetTokensSpy = sinon.spy(dialer, 'getTokens') + const dialRequest = new DialRequest({ + addrs: Object.keys(actions).map(str => new Multiaddr(str)), + dialer, + dialAction + }) + + sinon.spy(actions, '/ip4/127.0.0.1/tcp/1231') + sinon.spy(actions, '/ip4/127.0.0.1/tcp/1232') + sinon.spy(actions, '/ip4/127.0.0.1/tcp/1233') + + try { + await dialRequest.run({ signal: controller.signal }) + expect.fail('Should have thrown') + } catch (err: any) { + expect(err).to.have.property('name', 'AggregateError') + } + + expect(actions['/ip4/127.0.0.1/tcp/1231']).to.have.property('callCount', 1) + expect(actions['/ip4/127.0.0.1/tcp/1232']).to.have.property('callCount', 1) + expect(actions['/ip4/127.0.0.1/tcp/1233']).to.have.property('callCount', 1) + + 
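These assertions lean on the dialer's token accounting. The following is a minimal, standalone sketch of that model, assuming only the semantics these specs check: the pool starts with `maxParallelDials` tokens, `getTokens()` is capped per peer, and releasing the same token twice does not grow the pool. It is not the library implementation.

```ts
// Standalone illustration of the token pool behaviour asserted in these specs
class TokenPool {
  private readonly tokens: number[]
  private readonly maxPerPeer: number

  constructor (maxParallelDials: number, maxDialsPerPeer: number) {
    this.tokens = [...new Array(maxParallelDials)].map((_, i) => i)
    this.maxPerPeer = maxDialsPerPeer
  }

  getTokens (count: number): number[] {
    // hand out at most `maxDialsPerPeer` tokens, fewer if the pool runs low
    return this.tokens.splice(0, Math.min(count, this.maxPerPeer, this.tokens.length))
  }

  releaseToken (token: number): void {
    // releasing the same token twice must not grow the pool
    if (!this.tokens.includes(token)) {
      this.tokens.push(token)
    }
  }
}

// mirrors the "should NOT be able to return a token twice" expectation
const pool = new TokenPool(2, 4)
const [token] = pool.getTokens(1)
pool.releaseToken(token)
pool.releaseToken(token)
console.log(pool.getTokens(4).length) // 2
```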
expect(dialerGetTokensSpy.calledWith(addrs.length)).to.equal(true) + expect(dialerReleaseTokenSpy.callCount).to.equal(2) + }) + + it('should handle a large number of addrs', async () => { + const reject = sinon.stub().callsFake(async () => await Promise.reject(error)) + const actions: Record Promise> = {} + const addrs = [...new Array(25)].map((_, index) => `/ip4/127.0.0.1/tcp/12${index + 1}`) + addrs.forEach(addr => { + actions[addr] = reject + }) + + const dialAction: DialAction = async (num) => await actions[num.toString()]() + const controller = new AbortController() + const dialer = new DefaultDialer(new Components(), { + maxParallelDials: 2 + }) + const dialerReleaseTokenSpy = sinon.spy(dialer, 'releaseToken') + const dialRequest = new DialRequest({ + addrs: Object.keys(actions).map(str => new Multiaddr(str)), + dialer, + dialAction + }) + + try { + await dialRequest.run({ signal: controller.signal }) + expect.fail('Should have thrown') + } catch (err: any) { + expect(err).to.have.property('name', 'AggregateError') + } + + expect(reject).to.have.property('callCount', addrs.length) + expect(dialerReleaseTokenSpy.callCount).to.equal(2) + }) + + it('should abort all dials when its signal is aborted', async () => { + const deferToAbort = async (args: { signal: AbortSignal }) => { + const { signal } = args + + if (signal.aborted) { + throw new Error('already aborted') + } + + const deferred = pDefer() + const onAbort = () => { + deferred.reject(new AbortError()) + signal.removeEventListener('abort', onAbort) + } + signal.addEventListener('abort', onAbort) + return await deferred.promise + } + + const actions: Record Promise> = { + '/ip4/127.0.0.1/tcp/1231': deferToAbort, + '/ip4/127.0.0.1/tcp/1232': deferToAbort, + '/ip4/127.0.0.1/tcp/1233': deferToAbort + } + const dialAction: DialAction = async (num) => await actions[num.toString()]() + const addrs = Object.keys(actions) + const controller = new AbortController() + const dialer = new DefaultDialer(new Components(), { + maxParallelDials: 2 + }) + const dialerReleaseTokenSpy = sinon.spy(dialer, 'releaseToken') + const dialerGetTokensSpy = sinon.spy(dialer, 'getTokens') + const dialRequest = new DialRequest({ + addrs: Object.keys(actions).map(str => new Multiaddr(str)), + dialer, + dialAction + }) + + sinon.spy(actions, '/ip4/127.0.0.1/tcp/1231') + sinon.spy(actions, '/ip4/127.0.0.1/tcp/1232') + sinon.spy(actions, '/ip4/127.0.0.1/tcp/1233') + + try { + setTimeout(() => controller.abort(), 100) + await dialRequest.run({ signal: controller.signal }) + expect.fail('dial should have failed') + } catch (err: any) { + expect(err).to.have.property('name', 'AggregateError') + } + + expect(actions['/ip4/127.0.0.1/tcp/1231']).to.have.property('callCount', 1) + expect(actions['/ip4/127.0.0.1/tcp/1232']).to.have.property('callCount', 1) + expect(actions['/ip4/127.0.0.1/tcp/1233']).to.have.property('callCount', 1) + + expect(dialerGetTokensSpy.calledWith(addrs.length)).to.equal(true) + expect(dialerReleaseTokenSpy.callCount).to.equal(2) + }) +}) diff --git a/test/dialing/direct.node.js b/test/dialing/direct.node.js deleted file mode 100644 index 81d8205d..00000000 --- a/test/dialing/direct.node.js +++ /dev/null @@ -1,572 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') -const Transport = require('libp2p-tcp') -const Muxer = require('libp2p-mplex') -const { NOISE: Crypto } = require('@chainsafe/libp2p-noise') -const { Multiaddr } = require('multiaddr') -const PeerId = 
require('peer-id') -const delay = require('delay') -const pDefer = require('p-defer') -const pSettle = require('p-settle') -const pWaitFor = require('p-wait-for') -const pipe = require('it-pipe') -const pushable = require('it-pushable') -const AggregateError = require('aggregate-error') -const { Connection } = require('libp2p-interfaces/src/connection') -const { AbortError } = require('libp2p-interfaces/src/transport/errors') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const { MemoryDatastore } = require('datastore-core/memory') -const Libp2p = require('../../src') -const Dialer = require('../../src/dialer') -const AddressManager = require('../../src/address-manager') -const PeerStore = require('../../src/peer-store') -const TransportManager = require('../../src/transport-manager') -const { codes: ErrorCodes } = require('../../src/errors') -const Protector = require('../../src/pnet') -const swarmKeyBuffer = uint8ArrayFromString(require('../fixtures/swarm.key')) -const { mockConnectionGater } = require('../utils/mock-connection-gater') -const mockUpgrader = require('../utils/mockUpgrader') -const createMockConnection = require('../utils/mockConnection') -const Peers = require('../fixtures/peers') -const { createPeerId } = require('../utils/creators/peer') - -const listenAddr = new Multiaddr('/ip4/127.0.0.1/tcp/0') -const unsupportedAddr = new Multiaddr('/ip4/127.0.0.1/tcp/9999/ws/p2p/QmckxVrJw1Yo8LqvmDJNUmdAsKtSbiKWmrXJFyKmUraBoN') - -describe('Dialing (direct, TCP)', () => { - const connectionGater = mockConnectionGater() - let remoteTM - let localTM - let peerStore - let remoteAddr - - beforeEach(async () => { - const [localPeerId, remotePeerId] = await Promise.all([ - PeerId.createFromJSON(Peers[0]), - PeerId.createFromJSON(Peers[1]) - ]) - - peerStore = new PeerStore({ - peerId: remotePeerId, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - remoteTM = new TransportManager({ - libp2p: { - addressManager: new AddressManager(remotePeerId, { listen: [listenAddr] }), - peerId: remotePeerId, - peerStore - }, - upgrader: mockUpgrader - }) - remoteTM.add(Transport.prototype[Symbol.toStringTag], Transport) - - localTM = new TransportManager({ - libp2p: { - peerId: localPeerId, - peerStore: new PeerStore({ - peerId: localPeerId, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - }, - upgrader: mockUpgrader - }) - localTM.add(Transport.prototype[Symbol.toStringTag], Transport) - - await remoteTM.listen([listenAddr]) - - remoteAddr = remoteTM.getAddrs()[0].encapsulate(`/p2p/${remotePeerId.toB58String()}`) - }) - - afterEach(() => remoteTM.close()) - - afterEach(() => { - sinon.restore() - }) - - it('should be able to connect to a remote node via its multiaddr', async () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore, - connectionGater - }) - - const connection = await dialer.connectToPeer(remoteAddr) - expect(connection).to.exist() - await connection.close() - }) - - it('should be able to connect to a remote node via its stringified multiaddr', async () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore, - connectionGater - }) - const connection = await dialer.connectToPeer(remoteAddr.toString()) - expect(connection).to.exist() - await connection.close() - }) - - it('should fail to connect to an unsupported multiaddr', async () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore, - 
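The rewritten TypeScript specs later in this patch take a different approach to address discovery than the peer-store stubs used here: known addresses are written to the real address book and the dial is made by peer id. A hedged sketch of that pattern, assuming a `Components` instance wired up as in the new `beforeEach` blocks and the same test directory for the relative import:

```ts
import type { Components } from '@libp2p/interfaces/components'
import type { PeerId } from '@libp2p/interfaces/peer-id'
import type { Multiaddr } from '@multiformats/multiaddr'
import { DefaultDialer } from '../../src/dialer/index.js'

async function dialKnownPeer (components: Components, peer: PeerId, addrs: Multiaddr[]) {
  // record where the peer can be reached, then dial by id only
  await components.getPeerStore().addressBook.set(peer, addrs)

  const dialer = new DefaultDialer(components)
  const connection = await dialer.dial(peer)

  return connection
}
```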
connectionGater - }) - - await expect(dialer.connectToPeer(unsupportedAddr)) - .to.eventually.be.rejectedWith(Error) - .and.to.have.nested.property('.code', ErrorCodes.ERR_NO_VALID_ADDRESSES) - }) - - it('should fail to connect if peer has no known addresses', async () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore, - connectionGater - }) - const peerId = await PeerId.createFromJSON(Peers[1]) - - await expect(dialer.connectToPeer(peerId)) - .to.eventually.be.rejectedWith(Error) - .and.to.have.nested.property('.code', ErrorCodes.ERR_NO_VALID_ADDRESSES) - }) - - it('should be able to connect to a given peer id', async () => { - const peerId = await PeerId.createFromJSON(Peers[0]) - const peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - const dialer = new Dialer({ - transportManager: localTM, - peerStore, - connectionGater - }) - - await peerStore.addressBook.set(peerId, remoteTM.getAddrs()) - - const connection = await dialer.connectToPeer(peerId) - expect(connection).to.exist() - await connection.close() - }) - - it('should fail to connect to a given peer with unsupported addresses', async () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore: { - addressBook: { - add: () => {}, - getMultiaddrsForPeer: () => [unsupportedAddr] - } - }, - connectionGater - }) - const peerId = await PeerId.createFromJSON(Peers[0]) - - await expect(dialer.connectToPeer(peerId)) - .to.eventually.be.rejectedWith(Error) - .and.to.have.nested.property('.code', ErrorCodes.ERR_NO_VALID_ADDRESSES) - }) - - it('should only try to connect to addresses supported by the transports configured', async () => { - const remoteAddrs = remoteTM.getAddrs() - const dialer = new Dialer({ - transportManager: localTM, - peerStore: { - addressBook: { - add: () => { }, - getMultiaddrsForPeer: () => [...remoteAddrs, unsupportedAddr] - } - }, - connectionGater - }) - const peerId = await PeerId.createFromJSON(Peers[0]) - - sinon.spy(localTM, 'dial') - const connection = await dialer.connectToPeer(peerId) - expect(localTM.dial.callCount).to.equal(remoteAddrs.length) - expect(connection).to.exist() - await connection.close() - }) - - it('should abort dials on queue task timeout', async () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore, - dialTimeout: 50, - connectionGater - }) - sinon.stub(localTM, 'dial').callsFake(async (addr, options) => { - expect(options.signal).to.exist() - expect(options.signal.aborted).to.equal(false) - expect(addr.toString()).to.eql(remoteAddr.toString()) - await delay(60) - expect(options.signal.aborted).to.equal(true) - throw new AbortError() - }) - - await expect(dialer.connectToPeer(remoteAddr)) - .to.eventually.be.rejectedWith(Error) - .and.to.have.property('code', ErrorCodes.ERR_TIMEOUT) - }) - - it('should dial to the max concurrency', async () => { - const addrs = [ - new Multiaddr('/ip4/0.0.0.0/tcp/8000'), - new Multiaddr('/ip4/0.0.0.0/tcp/8001'), - new Multiaddr('/ip4/0.0.0.0/tcp/8002') - ] - const dialer = new Dialer({ - transportManager: localTM, - maxParallelDials: 2, - peerStore: { - addressBook: { - add: () => {}, - getMultiaddrsForPeer: () => addrs - } - }, - connectionGater - }) - - expect(dialer.tokens).to.have.length(2) - - const deferredDial = pDefer() - sinon.stub(localTM, 'dial').callsFake(() => deferredDial.promise) - - const [peerId] = await createPeerId() - - // Perform 3 multiaddr dials - dialer.connectToPeer(peerId) - - 
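For readers comparing the two suites, the construction of the dialer itself also changes shape between the removed and the added tests. A small illustrative sketch follows; the option values are chosen arbitrarily and are not defaults.

```ts
import { Components } from '@libp2p/interfaces/components'
import { DefaultDialer } from '../../src/dialer/index.js'

// the removed JavaScript tests built the dialer from an options bag:
//   new Dialer({ transportManager, peerStore, connectionGater, maxParallelDials: 2 })
// the TypeScript suite pulls those dependencies from a Components container
// and keeps only behavioural options in the second argument
const dialer = new DefaultDialer(new Components(), {
  maxParallelDials: 2,
  dialTimeout: 50
})

console.log(dialer.tokens.length) // expected to equal maxParallelDials
```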
// Let the call stack run - await delay(0) - - // We should have 2 in progress, and 1 waiting - expect(dialer.tokens).to.have.length(0) - - deferredDial.resolve(await createMockConnection()) - - // Let the call stack run - await delay(0) - - // Only two dials should be executed, as the first dial will succeed - expect(localTM.dial.callCount).to.equal(2) - expect(dialer.tokens).to.have.length(2) - }) - - describe('libp2p.dialer', () => { - let peerId, remotePeerId - let libp2p - let remoteLibp2p - let remoteAddr - - before(async () => { - [peerId, remotePeerId] = await Promise.all([ - PeerId.createFromJSON(Peers[0]), - PeerId.createFromJSON(Peers[1]) - ]) - - remoteLibp2p = new Libp2p({ - peerId: remotePeerId, - addresses: { - listen: [listenAddr] - }, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - await remoteLibp2p.handle('/echo/1.0.0', ({ stream }) => pipe(stream, stream)) - - await remoteLibp2p.start() - remoteAddr = remoteLibp2p.transportManager.getAddrs()[0].encapsulate(`/p2p/${remotePeerId.toB58String()}`) - }) - - afterEach(async () => { - sinon.restore() - libp2p && await libp2p.stop() - libp2p = null - }) - - after(() => remoteLibp2p.stop()) - - it('should fail if no peer id is provided', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - - await libp2p.start() - - sinon.spy(libp2p.dialer, 'connectToPeer') - - try { - await libp2p.dial(remoteLibp2p.transportManager.getAddrs()[0]) - } catch (/** @type {any} */ err) { - expect(err).to.have.property('code', ErrorCodes.ERR_INVALID_MULTIADDR) - return - } - - expect.fail('dial should have failed') - }) - - it('should use the dialer for connecting to a multiaddr', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - - await libp2p.start() - - sinon.spy(libp2p.dialer, 'connectToPeer') - - const connection = await libp2p.dial(remoteAddr) - expect(connection).to.exist() - const { stream, protocol } = await connection.newStream('/echo/1.0.0') - expect(stream).to.exist() - expect(protocol).to.equal('/echo/1.0.0') - await connection.close() - expect(libp2p.dialer.connectToPeer.callCount).to.be.greaterThan(0) - }) - - it('should use the dialer for connecting to a peer', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - - await libp2p.start() - - sinon.spy(libp2p.dialer, 'connectToPeer') - await libp2p.peerStore.addressBook.set(remotePeerId, remoteLibp2p.multiaddrs) - - const connection = await libp2p.dial(remotePeerId) - expect(connection).to.exist() - const { stream, protocol } = await connection.newStream('/echo/1.0.0') - expect(stream).to.exist() - expect(protocol).to.equal('/echo/1.0.0') - await connection.close() - expect(libp2p.dialer.connectToPeer.callCount).to.be.greaterThan(0) - }) - - it('should close all streams when the connection closes', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - - await libp2p.start() - - // register some stream handlers to simulate several protocols - await libp2p.handle('/stream-count/1', ({ stream }) => pipe(stream, stream)) - await libp2p.handle('/stream-count/2', ({ stream }) => pipe(stream, stream)) - await remoteLibp2p.handle('/stream-count/3', ({ stream }) => 
pipe(stream, stream)) - await remoteLibp2p.handle('/stream-count/4', ({ stream }) => pipe(stream, stream)) - - await libp2p.peerStore.addressBook.set(remotePeerId, remoteLibp2p.multiaddrs) - const connection = await libp2p.dial(remotePeerId) - - // Create local to remote streams - const { stream } = await connection.newStream('/echo/1.0.0') - await connection.newStream('/stream-count/3') - await libp2p.dialProtocol(remoteLibp2p.peerId, '/stream-count/4') - - // Partially write to the echo stream - const source = pushable() - stream.sink(source) - source.push('hello') - - // Create remote to local streams - await remoteLibp2p.dialProtocol(libp2p.peerId, '/stream-count/1') - await remoteLibp2p.dialProtocol(libp2p.peerId, '/stream-count/2') - - // Verify stream count - const remoteConn = remoteLibp2p.connectionManager.get(libp2p.peerId) - expect(connection.streams).to.have.length(5) - expect(remoteConn.streams).to.have.length(5) - - // Close the connection and verify all streams have been closed - await connection.close() - await pWaitFor(() => connection.streams.length === 0) - await pWaitFor(() => remoteConn.streams.length === 0) - }) - - it('should throw when using dialProtocol with no protocols', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - - await libp2p.start() - - await expect(libp2p.dialProtocol(remotePeerId)) - .to.eventually.be.rejectedWith(Error) - .and.to.have.property('code', ErrorCodes.ERR_INVALID_PROTOCOLS_FOR_STREAM) - - await expect(libp2p.dialProtocol(remotePeerId, [])) - .to.eventually.be.rejectedWith(Error) - .and.to.have.property('code', ErrorCodes.ERR_INVALID_PROTOCOLS_FOR_STREAM) - }) - - it('should be able to use hangup to close connections', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - - await libp2p.start() - - const connection = await libp2p.dial(remoteAddr) - expect(connection).to.exist() - expect(connection.stat.timeline.close).to.not.exist() - await libp2p.hangUp(connection.remotePeer) - expect(connection.stat.timeline.close).to.exist() - }) - - it('should be able to use hangup by address string to close connections', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - - await libp2p.start() - - const connection = await libp2p.dial(`${remoteAddr.toString()}`) - expect(connection).to.exist() - expect(connection.stat.timeline.close).to.not.exist() - await libp2p.hangUp(connection.remotePeer) - expect(connection.stat.timeline.close).to.exist() - }) - - it('should use the protectors when provided for connecting', async () => { - const protector = new Protector(swarmKeyBuffer) - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto], - connProtector: protector - } - }) - - sinon.spy(libp2p.upgrader.protector, 'protect') - sinon.stub(remoteLibp2p.upgrader, 'protector').value(new Protector(swarmKeyBuffer)) - - await libp2p.start() - - const connection = await libp2p.dialer.connectToPeer(remoteAddr) - expect(connection).to.exist() - const { stream, protocol } = await connection.newStream('/echo/1.0.0') - expect(stream).to.exist() - expect(protocol).to.equal('/echo/1.0.0') - await connection.close() - expect(libp2p.upgrader.protector.protect.callCount).to.equal(1) - }) - - it('should coalesce parallel dials 
to the same peer (id in multiaddr)', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - - await libp2p.start() - - const dials = 10 - const fullAddress = remoteAddr.encapsulate(`/p2p/${remoteLibp2p.peerId.toB58String()}`) - - await libp2p.peerStore.addressBook.set(remotePeerId, remoteLibp2p.multiaddrs) - const dialResults = await Promise.all([...new Array(dials)].map((_, index) => { - if (index % 2 === 0) return libp2p.dial(remoteLibp2p.peerId) - return libp2p.dial(fullAddress) - })) - - // All should succeed and we should have ten results - expect(dialResults).to.have.length(10) - for (const connection of dialResults) { - expect(Connection.isConnection(connection)).to.equal(true) - } - - // 1 connection, because we know the peer in the multiaddr - expect(libp2p.connectionManager.size).to.equal(1) - expect(remoteLibp2p.connectionManager.size).to.equal(1) - }) - - it('should coalesce parallel dials to the same error on failure', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - - await libp2p.start() - - const dials = 10 - const error = new Error('Boom') - sinon.stub(libp2p.transportManager, 'dial').callsFake(() => Promise.reject(error)) - - await libp2p.peerStore.addressBook.set(remotePeerId, remoteLibp2p.multiaddrs) - const dialResults = await pSettle([...new Array(dials)].map((_, index) => { - if (index % 2 === 0) return libp2p.dial(remoteLibp2p.peerId) - return libp2p.dial(remoteAddr) - })) - - // All should succeed and we should have ten results - expect(dialResults).to.have.length(10) - for (const result of dialResults) { - expect(result).to.have.property('isRejected', true) - expect(result.reason).to.be.an.instanceof(AggregateError) - // All errors should be the exact same as `error` - for (const err of result.reason) { - expect(err).to.equal(error) - } - } - - // 1 connection, because we know the peer in the multiaddr - expect(libp2p.connectionManager.size).to.equal(0) - expect(remoteLibp2p.connectionManager.size).to.equal(0) - }) - }) -}) diff --git a/test/dialing/direct.node.ts b/test/dialing/direct.node.ts new file mode 100644 index 00000000..e9298500 --- /dev/null +++ b/test/dialing/direct.node.ts @@ -0,0 +1,572 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { NOISE } from '@chainsafe/libp2p-noise' +import { Multiaddr } from '@multiformats/multiaddr' + +import delay from 'delay' +import pDefer from 'p-defer' +import pSettle, { PromiseResult } from 'p-settle' +import pWaitFor from 'p-wait-for' +import { pipe } from 'it-pipe' +import { pushable } from 'it-pushable' +import { Connection, isConnection } from '@libp2p/interfaces/connection' +import { AbortError } from '@libp2p/interfaces/errors' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { MemoryDatastore } from 'datastore-core/memory' +import { DefaultDialer } from '../../src/dialer/index.js' +import { DefaultAddressManager } from '../../src/address-manager/index.js' +import { PersistentPeerStore } from '@libp2p/peer-store' +import { DefaultTransportManager } from '../../src/transport-manager.js' +import { codes as ErrorCodes } from '../../src/errors.js' +import { mockConnectionGater, mockDuplex, mockMultiaddrConnection, mockUpgrader, mockConnection } from 
'@libp2p/interface-compliance-tests/mocks' +import Peers from '../fixtures/peers.js' +import { Components } from '@libp2p/interfaces/components' +import type { PeerStore } from '@libp2p/interfaces/peer-store' +import { createFromJSON } from '@libp2p/peer-id-factory' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { createLibp2pNode, Libp2pNode } from '../../src/libp2p.js' +import { PreSharedKeyConnectionProtector } from '../../src/pnet/index.js' +import swarmKey from '../fixtures/swarm.key.js' +import { DefaultConnectionManager } from '../../src/connection-manager/index.js' + +const swarmKeyBuffer = uint8ArrayFromString(swarmKey) +const listenAddr = new Multiaddr('/ip4/127.0.0.1/tcp/0') +const unsupportedAddr = new Multiaddr('/ip4/127.0.0.1/tcp/9999/ws/p2p/QmckxVrJw1Yo8LqvmDJNUmdAsKtSbiKWmrXJFyKmUraBoN') + +describe('Dialing (direct, TCP)', () => { + let remoteTM: DefaultTransportManager + let localTM: DefaultTransportManager + let peerStore: PeerStore + let remoteAddr: Multiaddr + let remoteComponents: Components + let localComponents: Components + + beforeEach(async () => { + const [localPeerId, remotePeerId] = await Promise.all([ + createFromJSON(Peers[0]), + createFromJSON(Peers[1]) + ]) + + remoteComponents = new Components({ + peerId: remotePeerId, + datastore: new MemoryDatastore(), + upgrader: mockUpgrader(), + connectionGater: mockConnectionGater() + }) + remoteComponents.setAddressManager(new DefaultAddressManager(remoteComponents, { + listen: [ + listenAddr.toString() + ] + })) + peerStore = new PersistentPeerStore(remoteComponents, { + addressFilter: remoteComponents.getConnectionGater().filterMultiaddrForPeer + }) + remoteComponents.setPeerStore(peerStore) + remoteTM = new DefaultTransportManager(remoteComponents) + remoteTM.add(new TCP()) + + localComponents = new Components({ + peerId: localPeerId, + datastore: new MemoryDatastore(), + upgrader: mockUpgrader(), + connectionGater: mockConnectionGater() + }) + localComponents.setPeerStore(new PersistentPeerStore(localComponents, { + addressFilter: localComponents.getConnectionGater().filterMultiaddrForPeer + })) + localComponents.setConnectionManager(new DefaultConnectionManager(localComponents)) + + localTM = new DefaultTransportManager(localComponents) + localTM.add(new TCP()) + + localComponents.setTransportManager(localTM) + + await remoteTM.listen([listenAddr]) + + remoteAddr = remoteTM.getAddrs()[0].encapsulate(`/p2p/${remotePeerId.toString()}`) + }) + + afterEach(async () => await remoteTM.stop()) + + afterEach(() => { + sinon.restore() + }) + + it('should be able to connect to a remote node via its multiaddr', async () => { + const dialer = new DefaultDialer(localComponents) + + const connection = await dialer.dial(remoteAddr) + expect(connection).to.exist() + await connection.close() + }) + + it('should fail to connect to an unsupported multiaddr', async () => { + const dialer = new DefaultDialer(localComponents) + + await expect(dialer.dial(unsupportedAddr)) + .to.eventually.be.rejectedWith(Error) + .and.to.have.nested.property('.code', ErrorCodes.ERR_NO_VALID_ADDRESSES) + }) + + it('should fail to connect if peer has no known addresses', async () => { + const dialer = new DefaultDialer(localComponents) + const peerId = await createFromJSON(Peers[1]) + + await expect(dialer.dial(peerId)) + .to.eventually.be.rejectedWith(Error) + .and.to.have.nested.property('.code', ErrorCodes.ERR_NO_VALID_ADDRESSES) + }) + + it('should be able to connect to a given peer id', async () => { + await 
localComponents.getPeerStore().addressBook.set(remoteComponents.getPeerId(), remoteTM.getAddrs()) + + const dialer = new DefaultDialer(localComponents) + + const connection = await dialer.dial(remoteComponents.getPeerId()) + expect(connection).to.exist() + await connection.close() + }) + + it('should fail to connect to a given peer with unsupported addresses', async () => { + await localComponents.getPeerStore().addressBook.add(remoteComponents.getPeerId(), [unsupportedAddr]) + + const dialer = new DefaultDialer(localComponents) + + await expect(dialer.dial(remoteComponents.getPeerId())) + .to.eventually.be.rejectedWith(Error) + .and.to.have.nested.property('.code', ErrorCodes.ERR_NO_VALID_ADDRESSES) + }) + + it('should only try to connect to addresses supported by the transports configured', async () => { + const remoteAddrs = remoteTM.getAddrs() + + const peerId = await createFromJSON(Peers[1]) + await localComponents.getPeerStore().addressBook.add(peerId, [...remoteAddrs, unsupportedAddr]) + + const dialer = new DefaultDialer(localComponents) + + sinon.spy(localTM, 'dial') + const connection = await dialer.dial(peerId) + expect(localTM.dial).to.have.property('callCount', remoteAddrs.length) + expect(connection).to.exist() + + await connection.close() + }) + + it('should abort dials on queue task timeout', async () => { + const dialer = new DefaultDialer(localComponents, { + dialTimeout: 50 + }) + + sinon.stub(localTM, 'dial').callsFake(async (addr, options = {}) => { + expect(options.signal).to.exist() + expect(options.signal?.aborted).to.equal(false) + expect(addr.toString()).to.eql(remoteAddr.toString()) + await delay(60) + expect(options.signal?.aborted).to.equal(true) + throw new AbortError() + }) + + await expect(dialer.dial(remoteAddr)) + .to.eventually.be.rejectedWith(Error) + .and.to.have.property('code', ErrorCodes.ERR_TIMEOUT) + }) + + it('should dial to the max concurrency', async () => { + const addrs = [ + new Multiaddr('/ip4/0.0.0.0/tcp/8000'), + new Multiaddr('/ip4/0.0.0.0/tcp/8001'), + new Multiaddr('/ip4/0.0.0.0/tcp/8002') + ] + const peerId = await createFromJSON(Peers[1]) + + await localComponents.getPeerStore().addressBook.add(peerId, addrs) + + const dialer = new DefaultDialer(localComponents, { + maxParallelDials: 2 + }) + + expect(dialer.tokens).to.have.lengthOf(2) + + const deferredDial = pDefer() + sinon.stub(localTM, 'dial').callsFake(async () => await deferredDial.promise) + + // Perform 3 multiaddr dials + void dialer.dial(peerId) + + // Let the call stack run + await delay(0) + + // We should have 2 in progress, and 1 waiting + expect(dialer.tokens).to.have.lengthOf(0) + + deferredDial.resolve(mockConnection(mockMultiaddrConnection(mockDuplex(), peerId))) + + // Let the call stack run + await delay(0) + + // Only two dials should be executed, as the first dial will succeed + expect(localTM.dial).to.have.property('callCount', 2) + expect(dialer.tokens).to.have.lengthOf(2) + }) +}) + +describe('libp2p.dialer (direct, TCP)', () => { + let peerId: PeerId, remotePeerId: PeerId + let libp2p: Libp2pNode + let remoteLibp2p: Libp2pNode + let remoteAddr: Multiaddr + + beforeEach(async () => { + [peerId, remotePeerId] = await Promise.all([ + createFromJSON(Peers[0]), + createFromJSON(Peers[1]) + ]) + + remoteLibp2p = await createLibp2pNode({ + peerId: remotePeerId, + addresses: { + listen: [listenAddr.toString()] + }, + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + await 
remoteLibp2p.handle('/echo/1.0.0', ({ stream }) => { + void pipe(stream, stream) + }) + + await remoteLibp2p.start() + remoteAddr = remoteLibp2p.components.getTransportManager().getAddrs()[0].encapsulate(`/p2p/${remotePeerId.toString()}`) + }) + + afterEach(async () => { + sinon.restore() + + if (libp2p != null) { + await libp2p.stop() + } + + if (remoteLibp2p != null) { + await remoteLibp2p.stop() + } + }) + + it('should fail if no peer id is provided', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + + await expect(libp2p.dial(remoteLibp2p.components.getTransportManager().getAddrs()[0])).to.eventually.be.rejected() + .with.property('code', ErrorCodes.ERR_INVALID_MULTIADDR) + }) + + it('should use the dialer for connecting to a multiaddr', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + + const dialerDialSpy = sinon.spy(libp2p.components.getDialer(), 'dial') + + const connection = await libp2p.dial(remoteAddr) + expect(connection).to.exist() + const { stream, protocol } = await connection.newStream(['/echo/1.0.0']) + expect(stream).to.exist() + expect(protocol).to.equal('/echo/1.0.0') + expect(dialerDialSpy.callCount).to.be.greaterThan(0) + await connection.close() + }) + + it('should use the dialer for connecting to a peer', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + + const dialerDialSpy = sinon.spy(libp2p.components.getDialer(), 'dial') + + await libp2p.components.getPeerStore().addressBook.set(remotePeerId, remoteLibp2p.getMultiaddrs()) + + const connection = await libp2p.dial(remotePeerId) + expect(connection).to.exist() + const { stream, protocol } = await connection.newStream('/echo/1.0.0') + expect(stream).to.exist() + expect(protocol).to.equal('/echo/1.0.0') + await connection.close() + expect(dialerDialSpy.callCount).to.be.greaterThan(0) + }) + + it('should close all streams when the connection closes', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + + // register some stream handlers to simulate several protocols + await libp2p.handle('/stream-count/1', ({ stream }) => { + void pipe(stream, stream) + }) + await libp2p.handle('/stream-count/2', ({ stream }) => { + void pipe(stream, stream) + }) + await remoteLibp2p.handle('/stream-count/3', ({ stream }) => { + void pipe(stream, stream) + }) + await remoteLibp2p.handle('/stream-count/4', ({ stream }) => { + void pipe(stream, stream) + }) + + await libp2p.components.getPeerStore().addressBook.set(remotePeerId, remoteLibp2p.getMultiaddrs()) + const connection = await libp2p.dial(remotePeerId) + + // Create local to remote streams + const { stream } = await connection.newStream('/echo/1.0.0') + await connection.newStream('/stream-count/3') + await libp2p.dialProtocol(remoteLibp2p.peerId, '/stream-count/4') + + // Partially write to the echo stream + const source = pushable() + void stream.sink(source) + source.push(uint8ArrayFromString('hello')) + + // Create remote to local streams + await remoteLibp2p.dialProtocol(libp2p.peerId, 
'/stream-count/1') + await remoteLibp2p.dialProtocol(libp2p.peerId, '/stream-count/2') + + // Verify stream count + const remoteConn = remoteLibp2p.getConnections(libp2p.peerId) + + if (remoteConn == null) { + throw new Error('No remote connection found') + } + + expect(connection.streams).to.have.length(5) + expect(remoteConn).to.have.lengthOf(1) + expect(remoteConn).to.have.nested.property('[0].streams').with.lengthOf(5) + + // Close the connection and verify all streams have been closed + await connection.close() + await pWaitFor(() => connection.streams.length === 0) + await pWaitFor(() => remoteConn[0].streams.length === 0) + }) + + it('should throw when using dialProtocol with no protocols', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + + // @ts-expect-error invalid params + await expect(libp2p.dialProtocol(remoteAddr)) + .to.eventually.be.rejectedWith(Error) + .and.to.have.property('code', ErrorCodes.ERR_INVALID_PROTOCOLS_FOR_STREAM) + + await expect(libp2p.dialProtocol(remoteAddr, [])) + .to.eventually.be.rejectedWith(Error) + .and.to.have.property('code', ErrorCodes.ERR_INVALID_PROTOCOLS_FOR_STREAM) + }) + + it('should be able to use hangup to close connections', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + + const connection = await libp2p.dial(remoteAddr) + expect(connection).to.exist() + expect(connection.stat.timeline.close).to.not.exist() + await libp2p.hangUp(connection.remotePeer) + expect(connection.stat.timeline.close).to.exist() + }) + + it('should use the protectors when provided for connecting', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ], + connectionProtector: new PreSharedKeyConnectionProtector({ + psk: swarmKeyBuffer + }) + }) + + const protector = libp2p.components.getConnectionProtector() + + if (protector == null) { + throw new Error('No protector was configured') + } + + const protectorProtectSpy = sinon.spy(protector, 'protect') + + remoteLibp2p.components.setConnectionProtector(new PreSharedKeyConnectionProtector({ enabled: true, psk: swarmKeyBuffer })) + + await libp2p.start() + + const connection = await libp2p.dial(remoteAddr) + expect(connection).to.exist() + const { stream, protocol } = await connection.newStream('/echo/1.0.0') + expect(stream).to.exist() + expect(protocol).to.equal('/echo/1.0.0') + await connection.close() + expect(protectorProtectSpy.callCount).to.equal(1) + }) + + it('should coalesce parallel dials to the same peer (id in multiaddr)', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + + const dials = 10 + const fullAddress = remoteAddr.encapsulate(`/p2p/${remoteLibp2p.peerId.toString()}`) + + await libp2p.components.getPeerStore().addressBook.set(remotePeerId, remoteLibp2p.getMultiaddrs()) + const dialResults = await Promise.all([...new Array(dials)].map(async (_, index) => { + if (index % 2 === 0) return await libp2p.dial(remoteLibp2p.peerId) + return await libp2p.dial(fullAddress) + })) + + // All should succeed and we should have ten results + 
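The coalescing behaviour asserted in this test can be summarised in a few lines. A minimal sketch, assuming a started node and a remote peer whose addresses are already in the address book:

```ts
import type { Libp2pNode } from '../../src/libp2p.js'
import type { PeerId } from '@libp2p/interfaces/peer-id'

export async function dialManyTimes (libp2p: Libp2pNode, remotePeer: PeerId) {
  // ten concurrent dials to the same peer...
  const connections = await Promise.all(
    [...new Array(10)].map(async () => await libp2p.dial(remotePeer))
  )

  // ...all resolve, but they are coalesced onto a single underlying connection
  console.log(connections.length) // 10
  console.log(libp2p.getConnections(remotePeer).length) // 1
}
```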
expect(dialResults).to.have.length(10) + for (const connection of dialResults) { + expect(isConnection(connection)).to.equal(true) + } + + // 1 connection, because we know the peer in the multiaddr + expect(libp2p.getConnections()).to.have.lengthOf(1) + expect(remoteLibp2p.getConnections()).to.have.lengthOf(1) + }) + + it('should coalesce parallel dials to the same error on failure', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + + const dials = 10 + const error = new Error('Boom') + sinon.stub(libp2p.components.getTransportManager(), 'dial').callsFake(async () => await Promise.reject(error)) + + await libp2p.components.getPeerStore().addressBook.set(remotePeerId, remoteLibp2p.getMultiaddrs()) + const dialResults: Array> = await pSettle([...new Array(dials)].map(async (_, index) => { + if (index % 2 === 0) return await libp2p.dial(remoteLibp2p.peerId) + return await libp2p.dial(remoteAddr) + })) + + // All should succeed and we should have ten results + expect(dialResults).to.have.length(10) + + for (const result of dialResults) { + expect(result).to.have.property('isRejected', true) + expect(result).to.have.property('reason').that.has.property('name', 'AggregateError') + + // All errors should be the exact same as `error` + // @ts-expect-error reason is any + for (const err of result.reason.errors) { + expect(err).to.equal(error) + } + } + + // 1 connection, because we know the peer in the multiaddr + expect(libp2p.getConnections()).to.have.lengthOf(0) + expect(remoteLibp2p.getConnections()).to.have.lengthOf(0) + }) +}) diff --git a/test/dialing/direct.spec.js b/test/dialing/direct.spec.js deleted file mode 100644 index aa0e45bb..00000000 --- a/test/dialing/direct.spec.js +++ /dev/null @@ -1,637 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') -const pDefer = require('p-defer') -const pWaitFor = require('p-wait-for') -const delay = require('delay') -const Transport = require('libp2p-websockets') -const filters = require('libp2p-websockets/src/filters') -const Muxer = require('libp2p-mplex') -const { NOISE: Crypto } = require('@chainsafe/libp2p-noise') -const { Multiaddr } = require('multiaddr') -const AggregateError = require('aggregate-error') -const { AbortError } = require('libp2p-interfaces/src/transport/errors') -const { MemoryDatastore } = require('datastore-core/memory') -const { codes: ErrorCodes } = require('../../src/errors') -const Constants = require('../../src/constants') -const Dialer = require('../../src/dialer') -const addressSort = require('libp2p-utils/src/address-sort') -const PeerStore = require('../../src/peer-store') -const TransportManager = require('../../src/transport-manager') -const Libp2p = require('../../src') -const { mockConnectionGater } = require('../utils/mock-connection-gater') - -const { MULTIADDRS_WEBSOCKETS } = require('../fixtures/browser') -const mockUpgrader = require('../utils/mockUpgrader') -const createMockConnection = require('../utils/mockConnection') -const { createPeerId } = require('../utils/creators/peer') -const unsupportedAddr = new Multiaddr('/ip4/127.0.0.1/tcp/9999/ws/p2p/QmckxVrJw1Yo8LqvmDJNUmdAsKtSbiKWmrXJFyKmUraBoN') -const remoteAddr = MULTIADDRS_WEBSOCKETS[0] - -describe('Dialing (direct, WebSockets)', () => { - const connectionGater = mockConnectionGater() - let localTM - let peerStore - let peerId - - 
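The settled results checked in the test above come from `p-settle`. A short, hedged sketch of inspecting them, with `dials` standing in for any list of dial promises:

```ts
import pSettle from 'p-settle'

async function reportFailures (dials: Array<Promise<unknown>>) {
  const results = await pSettle(dials)

  for (const result of results) {
    if (result.isRejected) {
      // with coalesced dials every rejected entry carries the same AggregateError
      console.error('dial failed:', result.reason)
    }
  }
}
```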
before(async () => { - [peerId] = await createPeerId() - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - localTM = new TransportManager({ - libp2p: {}, - upgrader: mockUpgrader, - onConnection: () => {} - }) - localTM.add(Transport.prototype[Symbol.toStringTag], Transport, { filter: filters.all }) - }) - - afterEach(async () => { - await peerStore.delete(peerId) - sinon.restore() - }) - - it('should have appropriate defaults', () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore, - connectionGater - }) - expect(dialer.maxParallelDials).to.equal(Constants.MAX_PARALLEL_DIALS) - expect(dialer.timeout).to.equal(Constants.DIAL_TIMEOUT) - }) - - it('should limit the number of tokens it provides', () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore, - connectionGater - }) - const maxPerPeer = Constants.MAX_PER_PEER_DIALS - expect(dialer.tokens).to.have.length(Constants.MAX_PARALLEL_DIALS) - const tokens = dialer.getTokens(maxPerPeer + 1) - expect(tokens).to.have.length(maxPerPeer) - expect(dialer.tokens).to.have.length(Constants.MAX_PARALLEL_DIALS - maxPerPeer) - }) - - it('should not return tokens if non are left', () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore, - connectionGater - }) - sinon.stub(dialer, 'tokens').value([]) - const tokens = dialer.getTokens(1) - expect(tokens.length).to.equal(0) - }) - - it('should NOT be able to return a token twice', () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore, - connectionGater - }) - const tokens = dialer.getTokens(1) - expect(tokens).to.have.length(1) - expect(dialer.tokens).to.have.length(Constants.MAX_PARALLEL_DIALS - 1) - dialer.releaseToken(tokens[0]) - dialer.releaseToken(tokens[0]) - expect(dialer.tokens).to.have.length(Constants.MAX_PARALLEL_DIALS) - }) - - it('should be able to connect to a remote node via its multiaddr', async () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore: { - addressBook: { - add: () => {}, - getMultiaddrsForPeer: () => [remoteAddr] - } - }, - connectionGater - }) - - const connection = await dialer.connectToPeer(remoteAddr) - expect(connection).to.exist() - await connection.close() - }) - - it('should be able to connect to a remote node via its stringified multiaddr', async () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore: { - addressBook: { - add: () => {}, - getMultiaddrsForPeer: () => [remoteAddr] - } - }, - connectionGater - }) - - const connection = await dialer.connectToPeer(remoteAddr.toString()) - expect(connection).to.exist() - await connection.close() - }) - - it('should fail to connect to an unsupported multiaddr', async () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore, - connectionGater - }) - - await expect(dialer.connectToPeer(unsupportedAddr)) - .to.eventually.be.rejectedWith(AggregateError) - }) - - it('should be able to connect to a given peer', async () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore: { - addressBook: { - add: () => {}, - getMultiaddrsForPeer: () => [remoteAddr] - } - }, - connectionGater - }) - - const connection = await dialer.connectToPeer(peerId) - expect(connection).to.exist() - await connection.close() - }) - - it('should fail to connect to a given peer with unsupported addresses', async () => { - const dialer = new Dialer({ - transportManager: localTM, - 
peerStore: { - addressBook: { - set: () => {}, - getMultiaddrsForPeer: () => [unsupportedAddr] - } - }, - connectionGater - }) - - await expect(dialer.connectToPeer(peerId)) - .to.eventually.be.rejectedWith(AggregateError) - }) - - it('should abort dials on queue task timeout', async () => { - const dialer = new Dialer({ - transportManager: localTM, - dialTimeout: 50, - peerStore: { - addressBook: { - add: () => {}, - getMultiaddrsForPeer: () => [remoteAddr] - } - }, - connectionGater - }) - sinon.stub(localTM, 'dial').callsFake(async (addr, options) => { - expect(options.signal).to.exist() - expect(options.signal.aborted).to.equal(false) - expect(addr.toString()).to.eql(remoteAddr.toString()) - await delay(60) - expect(options.signal.aborted).to.equal(true) - throw new AbortError() - }) - - await expect(dialer.connectToPeer(remoteAddr)) - .to.eventually.be.rejected() - .and.to.have.property('code', ErrorCodes.ERR_TIMEOUT) - }) - - it('should throw when a peer advertises more than the allowed number of peers', async () => { - const spy = sinon.spy() - const dialer = new Dialer({ - transportManager: localTM, - maxAddrsToDial: 10, - peerStore: { - delete: spy, - addressBook: { - add: () => { }, - getMultiaddrsForPeer: () => Array.from({ length: 11 }, (_, i) => new Multiaddr(`/ip4/127.0.0.1/tcp/1500${i}/ws/p2p/12D3KooWHFKTMzwerBtsVmtz4ZZEQy2heafxzWw6wNn5PPYkBxJ5`)) - } - }, - connectionGater - }) - - await expect(dialer.connectToPeer(remoteAddr)) - .to.eventually.be.rejected() - .and.to.have.property('code', ErrorCodes.ERR_TOO_MANY_ADDRESSES) - expect(spy.calledOnce).to.be.true() - }) - - it('should sort addresses on dial', async () => { - const peerMultiaddrs = [ - new Multiaddr('/ip4/127.0.0.1/tcp/15001/ws'), - new Multiaddr('/ip4/20.0.0.1/tcp/15001/ws'), - new Multiaddr('/ip4/30.0.0.1/tcp/15001/ws') - ] - - sinon.spy(addressSort, 'publicAddressesFirst') - sinon.stub(localTM, 'dial').callsFake(createMockConnection) - - const dialer = new Dialer({ - transportManager: localTM, - addressSorter: addressSort.publicAddressesFirst, - maxParallelDials: 3, - peerStore, - connectionGater - }) - - // Inject data in the AddressBook - await peerStore.addressBook.add(peerId, peerMultiaddrs) - - // Perform 3 multiaddr dials - await dialer.connectToPeer(peerId) - - expect(addressSort.publicAddressesFirst.callCount).to.eql(1) - - const sortedAddresses = addressSort.publicAddressesFirst(peerMultiaddrs.map((m) => ({ multiaddr: m }))) - expect(localTM.dial.getCall(0).args[0].equals(sortedAddresses[0].multiaddr)) - expect(localTM.dial.getCall(1).args[0].equals(sortedAddresses[1].multiaddr)) - expect(localTM.dial.getCall(2).args[0].equals(sortedAddresses[2].multiaddr)) - }) - - it('should dial to the max concurrency', async () => { - const dialer = new Dialer({ - transportManager: localTM, - maxParallelDials: 2, - peerStore: { - addressBook: { - set: () => {}, - getMultiaddrsForPeer: () => [remoteAddr, remoteAddr, remoteAddr] - } - }, - connectionGater - }) - - expect(dialer.tokens).to.have.length(2) - - const deferredDial = pDefer() - sinon.stub(localTM, 'dial').callsFake(() => deferredDial.promise) - - // Perform 3 multiaddr dials - dialer.connectToPeer(peerId) - - // Let the call stack run - await delay(0) - - // We should have 2 in progress, and 1 waiting - expect(dialer.tokens).to.have.length(0) - expect(dialer._pendingDials.size).to.equal(1) // 1 dial request - - deferredDial.resolve(await createMockConnection()) - - // Let the call stack run - await delay(0) - - // Only two dials will be run, as the first 
two succeeded - expect(localTM.dial.callCount).to.equal(2) - expect(dialer.tokens).to.have.length(2) - expect(dialer._pendingDials.size).to.equal(0) - }) - - it('.destroy should abort pending dials', async () => { - const dialer = new Dialer({ - transportManager: localTM, - maxParallelDials: 2, - peerStore: { - addressBook: { - set: () => {}, - getMultiaddrsForPeer: () => [remoteAddr, remoteAddr, remoteAddr] - } - }, - connectionGater - }) - - expect(dialer.tokens).to.have.length(2) - - sinon.stub(localTM, 'dial').callsFake((_, options) => { - const deferredDial = pDefer() - const onAbort = () => { - options.signal.removeEventListener('abort', onAbort) - deferredDial.reject(new AbortError()) - } - options.signal.addEventListener('abort', onAbort) - return deferredDial.promise - }) - - // Perform 3 multiaddr dials - const dialPromise = dialer.connectToPeer(peerId) - - // Let the call stack run - await delay(0) - - // We should have 2 in progress, and 1 waiting - expect(dialer.tokens).to.have.length(0) - expect(dialer._pendingDials.size).to.equal(1) // 1 dial request - - try { - dialer.destroy() - await dialPromise - expect.fail('should have failed') - } catch (/** @type {any} */ err) { - expect(err).to.be.an.instanceof(AggregateError) - expect(dialer._pendingDials.size).to.equal(0) // 1 dial request - } - }) - - it('should cancel pending dial targets before proceeding', async () => { - const dialer = new Dialer({ - transportManager: localTM, - peerStore: { - addressBook: { - set: () => { } - } - }, - connectionGater - }) - - sinon.stub(dialer, '_createDialTarget').callsFake(() => { - const deferredDial = pDefer() - return deferredDial.promise - }) - - // Perform dial - const dialPromise = dialer.connectToPeer(peerId) - - // Let the call stack run - await delay(0) - - dialer.destroy() - - await expect(dialPromise) - .to.eventually.be.rejected() - .and.to.have.property('code', 'ABORT_ERR') - }) - - describe('libp2p.dialer', () => { - const transportKey = Transport.prototype[Symbol.toStringTag] - let libp2p - - afterEach(async () => { - sinon.restore() - libp2p && await libp2p.stop() - libp2p = null - }) - - it('should create a dialer', () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - }, - config: { - transport: { - [transportKey]: { - filter: filters.all - } - } - }, - connectionGater - }) - - expect(libp2p.dialer).to.exist() - expect(libp2p.dialer.maxParallelDials).to.equal(Constants.MAX_PARALLEL_DIALS) - expect(libp2p.dialer.maxDialsPerPeer).to.equal(Constants.MAX_PER_PEER_DIALS) - expect(libp2p.dialer.timeout).to.equal(Constants.DIAL_TIMEOUT) - // Ensure the dialer also has the transport manager - expect(libp2p.transportManager).to.equal(libp2p.dialer.transportManager) - }) - - it('should be able to override dialer options', async () => { - const config = { - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - }, - dialer: { - maxParallelDials: 10, - maxDialsPerPeer: 1, - dialTimeout: 1e3 // 30 second dial timeout per peer - }, - config: { - transport: { - [transportKey]: { - filter: filters.all - } - } - } - } - libp2p = await Libp2p.create(config) - - expect(libp2p.dialer).to.exist() - expect(libp2p.dialer.maxParallelDials).to.equal(config.dialer.maxParallelDials) - expect(libp2p.dialer.maxDialsPerPeer).to.equal(config.dialer.maxDialsPerPeer) - expect(libp2p.dialer.timeout).to.equal(config.dialer.dialTimeout) - }) - - it('should use the dialer for 
connecting', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - }, - config: { - transport: { - [transportKey]: { - filter: filters.all - } - } - } - }) - - sinon.spy(libp2p.dialer, 'connectToPeer') - sinon.spy(libp2p.peerStore.addressBook, 'add') - - await libp2p.start() - - const connection = await libp2p.dial(remoteAddr) - expect(connection).to.exist() - const { stream, protocol } = await connection.newStream('/echo/1.0.0') - expect(stream).to.exist() - expect(protocol).to.equal('/echo/1.0.0') - await connection.close() - expect(libp2p.dialer.connectToPeer.callCount).to.be.at.least(1) - expect(libp2p.peerStore.addressBook.add.callCount).to.be.at.least(1) - - await libp2p.stop() - }) - - it('should run identify automatically after connecting', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - }, - config: { - transport: { - [transportKey]: { - filter: filters.all - } - } - } - }) - - sinon.spy(libp2p.identifyService, 'identify') - sinon.spy(libp2p.upgrader, 'onConnection') - - await libp2p.start() - - const connection = await libp2p.dial(remoteAddr) - expect(connection).to.exist() - - sinon.spy(libp2p.peerStore.protoBook, 'set') - - // Wait for onConnection to be called - await pWaitFor(() => libp2p.upgrader.onConnection.callCount === 1) - - expect(libp2p.identifyService.identify.callCount).to.equal(1) - await libp2p.identifyService.identify.firstCall.returnValue - - expect(libp2p.peerStore.protoBook.set.callCount).to.equal(1) - - await libp2p.stop() - }) - - it('should be able to use hangup to close connections', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - }, - config: { - transport: { - [transportKey]: { - filter: filters.all - } - } - } - }) - - await libp2p.start() - - const connection = await libp2p.dial(remoteAddr) - expect(connection).to.exist() - expect(connection.stat.timeline.close).to.not.exist() - await libp2p.hangUp(connection.remotePeer) - expect(connection.stat.timeline.close).to.exist() - - await libp2p.stop() - }) - - it('should be able to use hangup when no connection exists', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - }, - config: { - transport: { - [transportKey]: { - filter: filters.all - } - } - } - }) - - await libp2p.hangUp(remoteAddr) - }) - - it('should cancel pending dial targets and stop', async () => { - const [, remotePeerId] = await createPeerId({ number: 2 }) - - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - }, - config: { - transport: { - [transportKey]: { - filter: filters.all - } - } - } - }) - - sinon.stub(libp2p.dialer, '_createDialTarget').callsFake(() => { - const deferredDial = pDefer() - return deferredDial.promise - }) - - // Perform dial - const dialPromise = libp2p.dial(remotePeerId) - - // Let the call stack run - await delay(0) - - await libp2p.stop() - await expect(dialPromise) - .to.eventually.be.rejected() - .and.to.have.property('code', 'ABORT_ERR') - }) - - it('should abort pending dials on stop', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - }, - config: { - transport: { - [transportKey]: { - filter: 
filters.all - } - } - } - }) - - sinon.spy(libp2p.dialer, 'destroy') - - await libp2p.stop() - - expect(libp2p.dialer.destroy).to.have.property('callCount', 1) - }) - - it('should fail to dial self', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - - await expect(libp2p.dial(peerId)) - .to.eventually.be.rejected() - .and.to.have.property('code', ErrorCodes.ERR_DIALED_SELF) - }) - }) -}) diff --git a/test/dialing/direct.spec.ts b/test/dialing/direct.spec.ts new file mode 100644 index 00000000..b6e698da --- /dev/null +++ b/test/dialing/direct.spec.ts @@ -0,0 +1,589 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import pDefer from 'p-defer' +import delay from 'delay' +import { WebSockets } from '@libp2p/websockets' +import * as filters from '@libp2p/websockets/filters' +import { Mplex } from '@libp2p/mplex' +import { NOISE } from '@chainsafe/libp2p-noise' +import { Multiaddr } from '@multiformats/multiaddr' +import { AbortError } from '@libp2p/interfaces/errors' +import { MemoryDatastore } from 'datastore-core/memory' +import { codes as ErrorCodes } from '../../src/errors.js' +import * as Constants from '../../src/constants.js' +import { DefaultDialer, DialTarget } from '../../src/dialer/index.js' +import { publicAddressesFirst } from '@libp2p/utils/address-sort' +import { PersistentPeerStore } from '@libp2p/peer-store' +import { DefaultTransportManager } from '../../src/transport-manager.js' +import { mockConnectionGater, mockDuplex, mockMultiaddrConnection, mockUpgrader, mockConnection } from '@libp2p/interface-compliance-tests/mocks' +import { createPeerId } from '../utils/creators/peer.js' +import type { TransportManager } from '@libp2p/interfaces/transport' +import { Components } from '@libp2p/interfaces/components' +import { peerIdFromString } from '@libp2p/peer-id' +import type { Connection } from '@libp2p/interfaces/connection' +import { createLibp2pNode, Libp2pNode } from '../../src/libp2p.js' +import { DefaultConnectionManager } from '../../src/connection-manager/index.js' +import { createFromJSON } from '@libp2p/peer-id-factory' +import Peers from '../fixtures/peers.js' +import { MULTIADDRS_WEBSOCKETS } from '../fixtures/browser.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' + +const unsupportedAddr = new Multiaddr('/ip4/127.0.0.1/tcp/9999') + +describe('Dialing (direct, WebSockets)', () => { + let localTM: TransportManager + let localComponents: Components + let remoteAddr: Multiaddr + let remoteComponents: Components + + beforeEach(async () => { + localComponents = new Components({ + peerId: await createFromJSON(Peers[0]), + datastore: new MemoryDatastore(), + upgrader: mockUpgrader(), + connectionGater: mockConnectionGater() + }) + localComponents.setPeerStore(new PersistentPeerStore(localComponents, { + addressFilter: localComponents.getConnectionGater().filterMultiaddrForPeer + })) + localComponents.setConnectionManager(new DefaultConnectionManager(localComponents)) + + localTM = new DefaultTransportManager(localComponents) + localTM.add(new WebSockets({ filter: filters.all })) + localComponents.setTransportManager(localTM) + + // this peer is spun up in .aegir.cjs + remoteAddr = MULTIADDRS_WEBSOCKETS[0] + remoteComponents = new Components({ + peerId: peerIdFromString(remoteAddr.getPeerId() ?? 
'') + }) + }) + + afterEach(async () => { + sinon.restore() + }) + + it('should limit the number of tokens it provides', () => { + const dialer = new DefaultDialer(localComponents) + + const maxPerPeer = Constants.MAX_PER_PEER_DIALS + expect(dialer.tokens).to.have.lengthOf(Constants.MAX_PARALLEL_DIALS) + const tokens = dialer.getTokens(maxPerPeer + 1) + expect(tokens).to.have.length(maxPerPeer) + expect(dialer.tokens).to.have.lengthOf(Constants.MAX_PARALLEL_DIALS - maxPerPeer) + }) + + it('should not return tokens if none are left', () => { + const dialer = new DefaultDialer(localComponents, { + maxDialsPerPeer: Infinity + }) + + const maxTokens = dialer.tokens.length + + const tokens = dialer.getTokens(maxTokens) + + expect(tokens).to.have.lengthOf(maxTokens) + expect(dialer.getTokens(1)).to.be.empty() + }) + + it('should NOT be able to return a token twice', () => { + const dialer = new DefaultDialer(localComponents) + + const tokens = dialer.getTokens(1) + expect(tokens).to.have.length(1) + expect(dialer.tokens).to.have.lengthOf(Constants.MAX_PARALLEL_DIALS - 1) + dialer.releaseToken(tokens[0]) + dialer.releaseToken(tokens[0]) + expect(dialer.tokens).to.have.lengthOf(Constants.MAX_PARALLEL_DIALS) + }) + + it('should be able to connect to a remote node via its multiaddr', async () => { + const dialer = new DefaultDialer(localComponents) + const remotePeerId = peerIdFromString(remoteAddr.getPeerId() ?? '') + await localComponents.getPeerStore().addressBook.set(remotePeerId, [remoteAddr]) + + const connection = await dialer.dial(remoteAddr) + expect(connection).to.exist() + await connection.close() + }) + + it('should fail to connect to an unsupported multiaddr', async () => { + const dialer = new DefaultDialer(localComponents) + + await expect(dialer.dial(unsupportedAddr.encapsulate(`/p2p/${remoteComponents.getPeerId().toString()}`))) + .to.eventually.be.rejectedWith(Error) + .and.to.have.nested.property('.code', ErrorCodes.ERR_NO_VALID_ADDRESSES) + }) + + it('should be able to connect to a given peer', async () => { + const dialer = new DefaultDialer(localComponents) + const remotePeerId = peerIdFromString(remoteAddr.getPeerId() ?? '') + await localComponents.getPeerStore().addressBook.set(remotePeerId, [remoteAddr]) + + const connection = await dialer.dial(remotePeerId) + expect(connection).to.exist() + await connection.close() + }) + + it('should fail to connect to a given peer with unsupported addresses', async () => { + const dialer = new DefaultDialer(localComponents) + const remotePeerId = peerIdFromString(remoteAddr.getPeerId() ?? '') + await localComponents.getPeerStore().addressBook.set(remotePeerId, [unsupportedAddr]) + + await expect(dialer.dial(remotePeerId)) + .to.eventually.be.rejectedWith(Error) + .and.to.have.nested.property('.code', ErrorCodes.ERR_NO_VALID_ADDRESSES) + }) + + it('should abort dials on queue task timeout', async () => { + const dialer = new DefaultDialer(localComponents, { + dialTimeout: 50 + }) + const remotePeerId = peerIdFromString(remoteAddr.getPeerId() ?? 
'') + await localComponents.getPeerStore().addressBook.set(remotePeerId, [remoteAddr]) + + sinon.stub(localTM, 'dial').callsFake(async (addr, options) => { + expect(options.signal).to.exist() + expect(options.signal.aborted).to.equal(false) + expect(addr.toString()).to.eql(remoteAddr.toString()) + await delay(60) + expect(options.signal.aborted).to.equal(true) + throw new AbortError() + }) + + await expect(dialer.dial(remoteAddr)) + .to.eventually.be.rejected() + .and.to.have.property('code', ErrorCodes.ERR_TIMEOUT) + }) + + it('should throw when a peer advertises more than the allowed number of peers', async () => { + const dialer = new DefaultDialer(localComponents, { + maxAddrsToDial: 10 + }) + const remotePeerId = peerIdFromString(remoteAddr.getPeerId() ?? '') + await localComponents.getPeerStore().addressBook.set(remotePeerId, Array.from({ length: 11 }, (_, i) => new Multiaddr(`/ip4/127.0.0.1/tcp/1500${i}/ws/p2p/12D3KooWHFKTMzwerBtsVmtz4ZZEQy2heafxzWw6wNn5PPYkBxJ5`))) + + await expect(dialer.dial(remoteAddr)) + .to.eventually.be.rejected() + .and.to.have.property('code', ErrorCodes.ERR_TOO_MANY_ADDRESSES) + }) + + it('should sort addresses on dial', async () => { + const peerMultiaddrs = [ + new Multiaddr('/ip4/127.0.0.1/tcp/15001/ws'), + new Multiaddr('/ip4/20.0.0.1/tcp/15001/ws'), + new Multiaddr('/ip4/30.0.0.1/tcp/15001/ws') + ] + + const publicAddressesFirstSpy = sinon.spy(publicAddressesFirst) + const localTMDialStub = sinon.stub(localTM, 'dial').callsFake(async (ma) => mockConnection(mockMultiaddrConnection(mockDuplex(), peerIdFromString(ma.getPeerId() ?? '')))) + + const dialer = new DefaultDialer(localComponents, { + addressSorter: publicAddressesFirstSpy, + maxParallelDials: 3 + }) + + // Inject data in the AddressBook + await localComponents.getPeerStore().addressBook.add(remoteComponents.getPeerId(), peerMultiaddrs) + + // Perform 3 multiaddr dials + await dialer.dial(remoteComponents.getPeerId()) + + const sortedAddresses = peerMultiaddrs + .map((m) => ({ multiaddr: m, isCertified: false })) + .sort(publicAddressesFirst) + + expect(localTMDialStub.getCall(0).args[0].equals(sortedAddresses[0].multiaddr)) + expect(localTMDialStub.getCall(1).args[0].equals(sortedAddresses[1].multiaddr)) + expect(localTMDialStub.getCall(2).args[0].equals(sortedAddresses[2].multiaddr)) + }) + + it('should dial to the max concurrency', async () => { + const addrs = [ + new Multiaddr('/ip4/0.0.0.0/tcp/8000/ws'), + new Multiaddr('/ip4/0.0.0.0/tcp/8001/ws'), + new Multiaddr('/ip4/0.0.0.0/tcp/8002/ws') + ] + const remotePeerId = peerIdFromString(remoteAddr.getPeerId() ?? 
'') + + const dialer = new DefaultDialer(localComponents, { + maxParallelDials: 2 + }) + + // Inject data in the AddressBook + await localComponents.getPeerStore().addressBook.add(remotePeerId, addrs) + + expect(dialer.tokens).to.have.lengthOf(2) + + const deferredDial = pDefer() + const localTMDialStub = sinon.stub(localTM, 'dial').callsFake(async () => await deferredDial.promise) + + // Perform 3 multiaddr dials + void dialer.dial(remotePeerId) + + // Let the call stack run + await delay(0) + + // We should have 2 in progress, and 1 waiting + expect(dialer.tokens).to.have.lengthOf(0) + + deferredDial.resolve(mockConnection(mockMultiaddrConnection(mockDuplex(), remotePeerId))) + + // Let the call stack run + await delay(0) + + // Only two dials will be run, as the first two succeeded + expect(localTMDialStub.callCount).to.equal(2) + expect(dialer.tokens).to.have.lengthOf(2) + expect(dialer.pendingDials.size).to.equal(0) + }) + + it('.destroy should abort pending dials', async () => { + const addrs = [ + new Multiaddr('/ip4/0.0.0.0/tcp/8000/ws'), + new Multiaddr('/ip4/0.0.0.0/tcp/8001/ws'), + new Multiaddr('/ip4/0.0.0.0/tcp/8002/ws') + ] + const dialer = new DefaultDialer(localComponents, { + maxParallelDials: 2 + }) + + // Inject data in the AddressBook + await localComponents.getPeerStore().addressBook.add(remoteComponents.getPeerId(), addrs) + + expect(dialer.tokens).to.have.lengthOf(2) + + sinon.stub(localTM, 'dial').callsFake(async (_, options) => { + const deferredDial = pDefer() + const onAbort = () => { + options.signal.removeEventListener('abort', onAbort) + deferredDial.reject(new AbortError()) + } + options.signal.addEventListener('abort', onAbort) + return await deferredDial.promise + }) + + // Perform 3 multiaddr dials + const dialPromise = dialer.dial(remoteComponents.getPeerId()) + + // Let the call stack run + await delay(0) + + // We should have 2 in progress, and 1 waiting + expect(dialer.tokens).to.have.length(0) + expect(dialer.pendingDials.size).to.equal(1) // 1 dial request + + try { + await dialer.stop() + await dialPromise + expect.fail('should have failed') + } catch (err: any) { + expect(err).to.have.property('name', 'AggregateError') + expect(dialer.pendingDials.size).to.equal(0) // 1 dial request + } + }) + + it('should cancel pending dial targets before proceeding', async () => { + const dialer = new DefaultDialer(localComponents) + + sinon.stub(dialer, '_createDialTarget').callsFake(async () => { + const deferredDial = pDefer() + return await deferredDial.promise + }) + + // Perform dial + const dialPromise = dialer.dial(remoteComponents.getPeerId()) + + // Let the call stack run + await delay(0) + + await dialer.stop() + + await expect(dialPromise) + .to.eventually.be.rejected() + .and.to.have.property('code', 'ABORT_ERR') + }) +}) + +describe('libp2p.dialer (direct, WebSockets)', () => { + const connectionGater = mockConnectionGater() + let libp2p: Libp2pNode + let peerId: PeerId + + beforeEach(async () => { + peerId = await createPeerId() + }) + + afterEach(async () => { + sinon.restore() + + if (libp2p != null) { + await libp2p.stop() + } + }) + + it('should create a dialer', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new WebSockets({ + filter: filters.all + }) + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ], + connectionGater + }) + + expect(libp2p.components.getDialer()).to.exist() + 
expect(libp2p.components.getDialer()).to.have.property('tokens').with.lengthOf(Constants.MAX_PARALLEL_DIALS) + expect(libp2p.components.getDialer()).to.have.property('maxDialsPerPeer', Constants.MAX_PER_PEER_DIALS) + expect(libp2p.components.getDialer()).to.have.property('timeout', Constants.DIAL_TIMEOUT) + }) + + it('should be able to override dialer options', async () => { + const config = { + peerId, + transports: [ + new WebSockets({ + filter: filters.all + }) + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ], + dialer: { + maxParallelDials: 10, + maxDialsPerPeer: 1, + dialTimeout: 1e3 // 1 second dial timeout per peer + } + } + libp2p = await createLibp2pNode(config) + + expect(libp2p.components.getDialer()).to.exist() + expect(libp2p.components.getDialer()).to.have.property('tokens').with.lengthOf(config.dialer.maxParallelDials) + expect(libp2p.components.getDialer()).to.have.property('maxDialsPerPeer', config.dialer.maxDialsPerPeer) + expect(libp2p.components.getDialer()).to.have.property('timeout', config.dialer.dialTimeout) + }) + + it('should use the dialer for connecting', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new WebSockets({ + filter: filters.all + }) + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + const dialerDialSpy = sinon.spy(libp2p.components.getDialer(), 'dial') + const addressBookAddSpy = sinon.spy(libp2p.components.getPeerStore().addressBook, 'add') + + await libp2p.start() + + const connection = await libp2p.dial(MULTIADDRS_WEBSOCKETS[0]) + expect(connection).to.exist() + const { stream, protocol } = await connection.newStream('/echo/1.0.0') + expect(stream).to.exist() + expect(protocol).to.equal('/echo/1.0.0') + await connection.close() + expect(dialerDialSpy.callCount).to.be.at.least(1) + expect(addressBookAddSpy.callCount).to.be.at.least(1) + + await libp2p.stop() + }) + + it('should run identify automatically after connecting', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new WebSockets({ + filter: filters.all + }) + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + if (libp2p.identifyService == null) { + throw new Error('Identify service missing') + } + + const identifySpy = sinon.spy(libp2p.identifyService, 'identify') + const protobookSetSpy = sinon.spy(libp2p.components.getPeerStore().protoBook, 'set') + const connectionPromise = pDefer() + + await libp2p.start() + + libp2p.components.getUpgrader().addEventListener('connection', () => { + connectionPromise.resolve() + }, { + once: true + }) + + const connection = await libp2p.dial(MULTIADDRS_WEBSOCKETS[0]) + expect(connection).to.exist() + + // Wait for connection event to be emitted + await connectionPromise.promise + + expect(identifySpy.callCount).to.equal(1) + await identifySpy.firstCall.returnValue + + expect(protobookSetSpy.callCount).to.equal(1) + + await libp2p.stop() + }) + + it('should be able to use hangup to close connections', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new WebSockets({ + filter: filters.all + }) + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + + const connection = await libp2p.dial(MULTIADDRS_WEBSOCKETS[0]) + expect(connection).to.exist() + expect(connection.stat.timeline.close).to.not.exist() + + await libp2p.hangUp(connection.remotePeer) + expect(connection.stat.timeline.close).to.exist() + + await 
libp2p.stop() + }) + + it('should be able to use hangup when no connection exists', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new WebSockets({ + filter: filters.all + }) + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.hangUp(MULTIADDRS_WEBSOCKETS[0]) + }) + + it('should cancel pending dial targets and stop', async () => { + const remotePeerId = await createPeerId() + + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new WebSockets({ + filter: filters.all + }) + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + sinon.stub(libp2p.components.getDialer() as DefaultDialer, '_createDialTarget').callsFake(async () => { + const deferredDial = pDefer() + return await deferredDial.promise + }) + + await libp2p.start() + + // Perform dial + const dialPromise = libp2p.dial(remotePeerId) + + // Let the call stack run + await delay(0) + + await libp2p.stop() + + await expect(dialPromise) + .to.eventually.be.rejected() + .and.to.have.property('code', 'ABORT_ERR') + }) + + it('should abort pending dials on stop', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new WebSockets({ + filter: filters.all + }) + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + + const dialerDestroyStub = sinon.spy(libp2p.components.getDialer() as DefaultDialer, 'stop') + + await libp2p.stop() + + expect(dialerDestroyStub.callCount).to.equal(1) + }) + + it('should fail to dial self', async () => { + libp2p = await createLibp2pNode({ + peerId, + transports: [ + new WebSockets({ + filter: filters.all + }) + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + + await expect(libp2p.dial(new Multiaddr(`/ip4/127.0.0.1/tcp/1234/ws/p2p/${peerId.toString()}`))) + .to.eventually.be.rejected() + .and.to.have.property('code', ErrorCodes.ERR_DIALED_SELF) + }) +}) diff --git a/test/dialing/resolver.spec.js b/test/dialing/resolver.spec.js deleted file mode 100644 index ab36881f..00000000 --- a/test/dialing/resolver.spec.js +++ /dev/null @@ -1,180 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') - -const { Multiaddr } = require('multiaddr') -const Resolver = require('multiaddr/src/resolvers/dns') - -const { codes: ErrorCodes } = require('../../src/errors') - -const peerUtils = require('../utils/creators/peer') -const baseOptions = require('../utils/base-options.browser') - -const { MULTIADDRS_WEBSOCKETS } = require('../fixtures/browser') -const relayAddr = MULTIADDRS_WEBSOCKETS[0] - -const getDnsaddrStub = (peerId) => [ - [`dnsaddr=/dnsaddr/ams-1.bootstrap.libp2p.io/p2p/${peerId}`], - [`dnsaddr=/dnsaddr/ams-2.bootstrap.libp2p.io/p2p/${peerId}`], - [`dnsaddr=/dnsaddr/lon-1.bootstrap.libp2p.io/p2p/${peerId}`], - [`dnsaddr=/dnsaddr/nrt-1.bootstrap.libp2p.io/p2p/${peerId}`], - [`dnsaddr=/dnsaddr/nyc-1.bootstrap.libp2p.io/p2p/${peerId}`], - [`dnsaddr=/dnsaddr/sfo-2.bootstrap.libp2p.io/p2p/${peerId}`] -] - -const relayedAddr = (peerId) => `${relayAddr}/p2p-circuit/p2p/${peerId}` - -const getDnsRelayedAddrStub = (peerId) => [ - [`dnsaddr=${relayedAddr(peerId)}`] -] - -describe('Dialing (resolvable addresses)', () => { - let libp2p, remoteLibp2p - - beforeEach(async () => { - [libp2p, remoteLibp2p] = await peerUtils.createPeer({ - number: 2, - config: { - ...baseOptions, - addresses: { - 
listen: [new Multiaddr(`${relayAddr}/p2p-circuit`)] - }, - config: { - ...baseOptions.config, - peerDiscovery: { - autoDial: false - } - } - }, - started: true, - populateAddressBooks: false - }) - - await libp2p.start() - await remoteLibp2p.start() - }) - - afterEach(async () => { - sinon.restore() - await Promise.all([libp2p, remoteLibp2p].map(n => n.stop())) - }) - - it('resolves dnsaddr to ws local address', async () => { - const remoteId = remoteLibp2p.peerId.toB58String() - const dialAddr = new Multiaddr(`/dnsaddr/remote.libp2p.io/p2p/${remoteId}`) - const relayedAddrFetched = new Multiaddr(relayedAddr(remoteId)) - - // Transport spy - const transport = libp2p.transportManager._transports.get('Circuit') - sinon.spy(transport, 'dial') - - // Resolver stub - const stub = sinon.stub(Resolver.prototype, 'resolveTxt') - stub.onCall(0).returns(Promise.resolve(getDnsRelayedAddrStub(remoteId))) - - // Dial with address resolve - const connection = await libp2p.dial(dialAddr) - expect(connection).to.exist() - expect(connection.remoteAddr.equals(relayedAddrFetched)) - - const dialArgs = transport.dial.firstCall.args - expect(dialArgs[0].equals(relayedAddrFetched)).to.eql(true) - }) - - it('resolves a dnsaddr recursively', async () => { - const remoteId = remoteLibp2p.peerId.toB58String() - const dialAddr = new Multiaddr(`/dnsaddr/remote.libp2p.io/p2p/${remoteId}`) - const relayedAddrFetched = new Multiaddr(relayedAddr(remoteId)) - - // Transport spy - const transport = libp2p.transportManager._transports.get('Circuit') - sinon.spy(transport, 'dial') - - // Resolver stub - const stub = sinon.stub(Resolver.prototype, 'resolveTxt') - let firstCall = false - stub.callsFake(() => { - if (!firstCall) { - firstCall = true - // Return an array of dnsaddr - return Promise.resolve(getDnsaddrStub(remoteId)) - } - return Promise.resolve(getDnsRelayedAddrStub(remoteId)) - }) - - // Dial with address resolve - const connection = await libp2p.dial(dialAddr) - expect(connection).to.exist() - expect(connection.remoteAddr.equals(relayedAddrFetched)) - - const dialArgs = transport.dial.firstCall.args - expect(dialArgs[0].equals(relayedAddrFetched)).to.eql(true) - }) - - // TODO: Temporary solution does not resolve dns4/dns6 - // Resolver just returns the received multiaddrs - it('stops recursive resolve if finds dns4/dns6 and dials it', async () => { - const remoteId = remoteLibp2p.peerId.toB58String() - const dialAddr = new Multiaddr(`/dnsaddr/remote.libp2p.io/p2p/${remoteId}`) - - // Stub resolver - const dnsMa = new Multiaddr(`/dns4/ams-1.remote.libp2p.io/tcp/443/wss/p2p/${remoteId}`) - const stubResolve = sinon.stub(Resolver.prototype, 'resolveTxt') - stubResolve.returns(Promise.resolve([ - [`dnsaddr=${dnsMa}`] - ])) - - // Stub transport - const transport = libp2p.transportManager._transports.get('WebSockets') - const stubTransport = sinon.stub(transport, 'dial') - stubTransport.callsFake((multiaddr) => { - expect(multiaddr.equals(dnsMa)).to.eql(true) - }) - - await libp2p.dial(dialAddr) - }) - - it('resolves a dnsaddr recursively not failing if one address fails to resolve', async () => { - const remoteId = remoteLibp2p.peerId.toB58String() - const dialAddr = new Multiaddr(`/dnsaddr/remote.libp2p.io/p2p/${remoteId}`) - const relayedAddrFetched = new Multiaddr(relayedAddr(remoteId)) - - // Transport spy - const transport = libp2p.transportManager._transports.get('Circuit') - sinon.spy(transport, 'dial') - - // Resolver stub - const stub = sinon.stub(Resolver.prototype, 'resolveTxt') - 
stub.onCall(0).callsFake(() => Promise.resolve(getDnsaddrStub(remoteId))) - stub.onCall(1).callsFake(() => Promise.reject(new Error())) - stub.callsFake(() => Promise.resolve(getDnsRelayedAddrStub(remoteId))) - - // Dial with address resolve - const connection = await libp2p.dial(dialAddr) - expect(connection).to.exist() - expect(connection.remoteAddr.equals(relayedAddrFetched)) - - const dialArgs = transport.dial.firstCall.args - expect(dialArgs[0].equals(relayedAddrFetched)).to.eql(true) - }) - - it('fails to dial if resolve fails and there are no addresses to dial', async () => { - const remoteId = remoteLibp2p.peerId.toB58String() - const dialAddr = new Multiaddr(`/dnsaddr/remote.libp2p.io/p2p/${remoteId}`) - - // Stub resolver - const stubResolve = sinon.stub(Resolver.prototype, 'resolveTxt') - stubResolve.returns(Promise.reject(new Error())) - - // Stub transport - const transport = libp2p.transportManager._transports.get('WebSockets') - const spy = sinon.spy(transport, 'dial') - - await expect(libp2p.dial(dialAddr)) - .to.eventually.be.rejectedWith(Error) - .and.to.have.nested.property('.code', ErrorCodes.ERR_NO_VALID_ADDRESSES) - expect(spy.callCount).to.eql(0) - }) -}) diff --git a/test/dialing/resolver.spec.ts b/test/dialing/resolver.spec.ts new file mode 100644 index 00000000..9c0fc270 --- /dev/null +++ b/test/dialing/resolver.spec.ts @@ -0,0 +1,226 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import { Multiaddr } from '@multiformats/multiaddr' +import { codes as ErrorCodes } from '../../src/errors.js' +import { createNode } from '../utils/creators/peer.js' +import { createBaseOptions } from '../utils/base-options.browser.js' +import { MULTIADDRS_WEBSOCKETS } from '../fixtures/browser.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { Libp2pNode } from '../../src/libp2p.js' +import { Circuit } from '../../src/circuit/transport.js' +import pDefer from 'p-defer' +import { mockConnection, mockDuplex, mockMultiaddrConnection } from '@libp2p/interface-compliance-tests/mocks' +import { peerIdFromString } from '@libp2p/peer-id' +import { WebSockets } from '@libp2p/websockets' + +const relayAddr = MULTIADDRS_WEBSOCKETS[0] + +const getDnsaddrStub = (peerId: PeerId) => [ + `/dnsaddr/ams-1.bootstrap.libp2p.io/p2p/${peerId.toString()}`, + `/dnsaddr/ams-2.bootstrap.libp2p.io/p2p/${peerId.toString()}`, + `/dnsaddr/lon-1.bootstrap.libp2p.io/p2p/${peerId.toString()}`, + `/dnsaddr/nrt-1.bootstrap.libp2p.io/p2p/${peerId.toString()}`, + `/dnsaddr/nyc-1.bootstrap.libp2p.io/p2p/${peerId.toString()}`, + `/dnsaddr/sfo-2.bootstrap.libp2p.io/p2p/${peerId.toString()}` +] + +const relayedAddr = (peerId: PeerId) => `${relayAddr.toString()}/p2p-circuit/p2p/${peerId.toString()}` + +const getDnsRelayedAddrStub = (peerId: PeerId) => [ + `${relayedAddr(peerId)}` +] + +describe('Dialing (resolvable addresses)', () => { + let libp2p: Libp2pNode, remoteLibp2p: Libp2pNode + let resolver: sinon.SinonStub<[Multiaddr], Promise<string[]>> + + beforeEach(async () => { + resolver = sinon.stub<[Multiaddr], Promise<string[]>>(); + + [libp2p, remoteLibp2p] = await Promise.all([ + createNode({ + config: createBaseOptions({ + addresses: { + listen: [`${relayAddr.toString()}/p2p-circuit`] + }, + connectionManager: { + autoDial: false + }, + relay: { + enabled: true, + hop: { + enabled: false + } + }, + dialer: { + resolvers: { + dnsaddr: resolver + } + } + }), + started: true + }), + createNode({ + config: createBaseOptions({ + addresses: { + listen: 
[`${relayAddr.toString()}/p2p-circuit`] + }, + connectionManager: { + autoDial: false + }, + relay: { + enabled: true, + hop: { + enabled: false + } + }, + dialer: { + resolvers: { + dnsaddr: resolver + } + } + }), + started: true + }) + ]) + }) + + afterEach(async () => { + sinon.restore() + await Promise.all([libp2p, remoteLibp2p].map(async n => await n.stop())) + }) + + it('resolves dnsaddr to ws local address', async () => { + const remoteId = remoteLibp2p.peerId + const dialAddr = new Multiaddr(`/dnsaddr/remote.libp2p.io/p2p/${remoteId.toString()}`) + const relayedAddrFetched = new Multiaddr(relayedAddr(remoteId)) + + // Transport spy + const transport = getTransport(libp2p, Circuit.prototype[Symbol.toStringTag]) + const transportDialSpy = sinon.spy(transport, 'dial') + + // Resolver stub + resolver.onCall(0).returns(Promise.resolve(getDnsRelayedAddrStub(remoteId))) + + // Dial with address resolve + const connection = await libp2p.dial(dialAddr) + expect(connection).to.exist() + expect(connection.remoteAddr.equals(relayedAddrFetched)) + + const dialArgs = transportDialSpy.firstCall.args + expect(dialArgs[0].equals(relayedAddrFetched)).to.eql(true) + }) + + it('resolves a dnsaddr recursively', async () => { + const remoteId = remoteLibp2p.peerId + const dialAddr = new Multiaddr(`/dnsaddr/remote.libp2p.io/p2p/${remoteId.toString()}`) + const relayedAddrFetched = new Multiaddr(relayedAddr(remoteId)) + + // Transport spy + const transport = getTransport(libp2p, Circuit.prototype[Symbol.toStringTag]) + const transportDialSpy = sinon.spy(transport, 'dial') + + // Resolver stub + let firstCall = false + resolver.callsFake(async () => { + if (!firstCall) { + firstCall = true + // Return an array of dnsaddr + return await Promise.resolve(getDnsaddrStub(remoteId)) + } + return await Promise.resolve(getDnsRelayedAddrStub(remoteId)) + }) + + // Dial with address resolve + const connection = await libp2p.dial(dialAddr) + expect(connection).to.exist() + expect(connection.remoteAddr.equals(relayedAddrFetched)) + + const dialArgs = transportDialSpy.firstCall.args + expect(dialArgs[0].equals(relayedAddrFetched)).to.eql(true) + }) + + // TODO: Temporary solution does not resolve dns4/dns6 + // Resolver just returns the received multiaddrs + it('stops recursive resolve if finds dns4/dns6 and dials it', async () => { + const remoteId = remoteLibp2p.peerId + const dialAddr = new Multiaddr(`/dnsaddr/remote.libp2p.io/p2p/${remoteId.toString()}`) + + // Stub resolver + const dnsMa = new Multiaddr(`/dns4/ams-1.remote.libp2p.io/tcp/443/wss/p2p/${remoteId.toString()}`) + resolver.returns(Promise.resolve([ + `${dnsMa.toString()}` + ])) + + const deferred = pDefer() + + // Stub transport + const transport = getTransport(libp2p, WebSockets.prototype[Symbol.toStringTag]) + const stubTransport = sinon.stub(transport, 'dial') + stubTransport.callsFake(async (multiaddr) => { + expect(multiaddr.equals(dnsMa)).to.equal(true) + + deferred.resolve() + + return mockConnection(mockMultiaddrConnection(mockDuplex(), peerIdFromString(multiaddr.getPeerId() ?? 
''))) + }) + + void libp2p.dial(dialAddr) + + await deferred.promise + }) + + it('resolves a dnsaddr recursively not failing if one address fails to resolve', async () => { + const remoteId = remoteLibp2p.peerId + const dialAddr = new Multiaddr(`/dnsaddr/remote.libp2p.io/p2p/${remoteId.toString()}`) + const relayedAddrFetched = new Multiaddr(relayedAddr(remoteId)) + + // Transport spy + const transport = getTransport(libp2p, Circuit.prototype[Symbol.toStringTag]) + const transportDialSpy = sinon.spy(transport, 'dial') + + // Resolver stub + resolver.onCall(0).callsFake(async () => await Promise.resolve(getDnsaddrStub(remoteId))) + resolver.onCall(1).callsFake(async () => await Promise.reject(new Error())) + resolver.callsFake(async () => await Promise.resolve(getDnsRelayedAddrStub(remoteId))) + + // Dial with address resolve + const connection = await libp2p.dial(dialAddr) + expect(connection).to.exist() + expect(connection.remoteAddr.equals(relayedAddrFetched)) + + const dialArgs = transportDialSpy.firstCall.args + expect(dialArgs[0].equals(relayedAddrFetched)).to.eql(true) + }) + + it('fails to dial if resolve fails and there are no addresses to dial', async () => { + const remoteId = remoteLibp2p.peerId + const dialAddr = new Multiaddr(`/dnsaddr/remote.libp2p.io/p2p/${remoteId.toString()}`) + + // Stub resolver + resolver.returns(Promise.reject(new Error())) + + // Stub transport + const transport = getTransport(libp2p, WebSockets.prototype[Symbol.toStringTag]) + const spy = sinon.spy(transport, 'dial') + + await expect(libp2p.dial(dialAddr)) + .to.eventually.be.rejectedWith(Error) + .and.to.have.nested.property('.code', ErrorCodes.ERR_NO_VALID_ADDRESSES) + expect(spy.callCount).to.eql(0) + }) +}) + +function getTransport (libp2p: Libp2pNode, tag: string) { + const transport = libp2p.components.getTransportManager().getTransports().find(t => { + return t[Symbol.toStringTag] === tag + }) + + if (transport != null) { + return transport + } + + throw new Error(`No transport found for ${tag}`) +} diff --git a/test/fetch/fetch.node.js b/test/fetch/fetch.node.ts similarity index 72% rename from test/fetch/fetch.node.js rename to test/fetch/fetch.node.ts index da784959..2da2cd81 100644 --- a/test/fetch/fetch.node.js +++ b/test/fetch/fetch.node.ts @@ -1,46 +1,45 @@ -'use strict' /* eslint-env mocha */ -const { expect } = require('aegir/utils/chai') -const Libp2p = require('../../src') -const TCP = require('libp2p-tcp') -const Mplex = require('libp2p-mplex') -const { NOISE } = require('@chainsafe/libp2p-noise') -const MDNS = require('libp2p-mdns') -const { createPeerId } = require('../utils/creators/peer') -const { codes } = require('../../src/errors') -const { Multiaddr } = require('multiaddr') +import { expect } from 'aegir/utils/chai.js' +import { createLibp2pNode, Libp2pNode } from '../../src/libp2p.js' +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { NOISE } from '@chainsafe/libp2p-noise' +import { createPeerId } from '../utils/creators/peer.js' +import { codes } from '../../src/errors.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' -async function createLibp2pNode (peerId) { - return await Libp2p.create({ +async function createNode (peerId: PeerId) { + return await createLibp2pNode({ peerId, addresses: { listen: ['/ip4/0.0.0.0/tcp/0'] }, - modules: { - transport: [TCP], - streamMuxer: [Mplex], - connEncryption: [NOISE], - peerDiscovery: [MDNS] - } + transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ 
+ NOISE + ] }) } describe('Fetch', () => { - /** @type {Libp2p} */ - let sender - /** @type {Libp2p} */ - let receiver + let sender: Libp2pNode + let receiver: Libp2pNode const PREFIX_A = '/moduleA/' const PREFIX_B = '/moduleB/' const DATA_A = { foobar: 'hello world' } const DATA_B = { foobar: 'goodnight moon' } - const generateLookupFunction = function (prefix, data) { - return async function (key) { + const generateLookupFunction = function (prefix: string, data: Record<string, string>) { + return async function (key: string): Promise<Uint8Array | null> { key = key.slice(prefix.length) // strip prefix from key const val = data[key] - if (val) { + if (val != null) { return (new TextEncoder()).encode(val) } return null @@ -48,16 +47,17 @@ describe('Fetch', () => { } beforeEach(async () => { - const [peerIdA, peerIdB] = await createPeerId({ number: 2 }) - sender = await createLibp2pNode(peerIdA) - receiver = await createLibp2pNode(peerIdB) + const peerIdA = await createPeerId() + const peerIdB = await createPeerId() + sender = await createNode(peerIdA) + receiver = await createNode(peerIdB) await sender.start() await receiver.start() await Promise.all([ - ...sender.multiaddrs.map(addr => receiver.dial(addr.encapsulate(new Multiaddr(`/p2p/${sender.peerId}`)))), - ...receiver.multiaddrs.map(addr => sender.dial(addr.encapsulate(new Multiaddr(`/p2p/${receiver.peerId}`)))) + ...sender.getMultiaddrs().map(async addr => await receiver.dial(addr)), + ...receiver.getMultiaddrs().map(async addr => await sender.dial(addr)) ]) }) @@ -73,6 +73,11 @@ describe('Fetch', () => { receiver.fetchService.registerLookupFunction(PREFIX_A, generateLookupFunction(PREFIX_A, DATA_A)) const rawData = await sender.fetch(receiver.peerId, '/moduleA/foobar') + + if (rawData == null) { + throw new Error('Value was not found') + } + const value = (new TextDecoder()).decode(rawData) expect(value).to.equal('hello world') }) @@ -82,12 +87,22 @@ describe('Fetch', () => { receiver.fetchService.registerLookupFunction(PREFIX_A, generateLookupFunction(PREFIX_A, DATA_A)) receiver.fetchService.registerLookupFunction(PREFIX_B, generateLookupFunction(PREFIX_B, DATA_B)) const rawDataA = await sender.fetch(receiver.peerId, '/moduleA/foobar') + + if (rawDataA == null) { + throw new Error('Value was not found') + } + const valueA = (new TextDecoder()).decode(rawDataA) expect(valueA).to.equal('hello world') // Different lookup functions can be registered on different prefixes, and have different // values for the same key underneath the different prefix. 
const rawDataB = await sender.fetch(receiver.peerId, '/moduleB/foobar') + + if (rawDataB == null) { + throw new Error('Value was not found') + } + const valueB = (new TextDecoder()).decode(rawDataB) expect(valueB).to.equal('goodnight moon') }) @@ -117,6 +132,11 @@ describe('Fetch', () => { const lookupFunction = generateLookupFunction(PREFIX_A, DATA_A) receiver.fetchService.registerLookupFunction(PREFIX_A, lookupFunction) const rawDataA = await sender.fetch(receiver.peerId, '/moduleA/foobar') + + if (rawDataA == null) { + throw new Error('Value was not found') + } + const valueA = (new TextDecoder()).decode(rawDataA) expect(valueA).to.equal('hello world') @@ -130,6 +150,11 @@ describe('Fetch', () => { const lookupFunction = generateLookupFunction(PREFIX_A, DATA_A) receiver.fetchService.registerLookupFunction(PREFIX_A, lookupFunction) const rawDataA = await sender.fetch(receiver.peerId, '/moduleA/foobar') + + if (rawDataA == null) { + throw new Error('Value was not found') + } + const valueA = (new TextDecoder()).decode(rawDataA) expect(valueA).to.equal('hello world') @@ -143,12 +168,22 @@ describe('Fetch', () => { const lookupFunction = generateLookupFunction(PREFIX_A, DATA_A) receiver.fetchService.registerLookupFunction(PREFIX_A, lookupFunction) const rawDataA = await sender.fetch(receiver.peerId, '/moduleA/foobar') + + if (rawDataA == null) { + throw new Error('Value was not found') + } + const valueA = (new TextDecoder()).decode(rawDataA) expect(valueA).to.equal('hello world') - receiver.fetchService.unregisterLookupFunction(PREFIX_A, () => {}) + receiver.fetchService.unregisterLookupFunction(PREFIX_A, async () => { return null }) const rawDataB = await sender.fetch(receiver.peerId, '/moduleA/foobar') + + if (rawDataB == null) { + throw new Error('Value was not found') + } + const valueB = (new TextDecoder()).decode(rawDataB) expect(valueB).to.equal('hello world') }) diff --git a/test/fixtures/browser.js b/test/fixtures/browser.ts similarity index 52% rename from test/fixtures/browser.js rename to test/fixtures/browser.ts index 8bcec6a1..6e0a5652 100644 --- a/test/fixtures/browser.js +++ b/test/fixtures/browser.ts @@ -1,7 +1,6 @@ -'use strict' -const { Multiaddr } = require('multiaddr') +import { Multiaddr } from '@multiformats/multiaddr' -module.exports.MULTIADDRS_WEBSOCKETS = [ +export const MULTIADDRS_WEBSOCKETS = [ new Multiaddr('/ip4/127.0.0.1/tcp/15001/ws/p2p/12D3KooWHFKTMzwerBtsVmtz4ZZEQy2heafxzWw6wNn5PPYkBxJ5') ] diff --git a/test/fixtures/peers.js b/test/fixtures/peers.ts similarity index 98% rename from test/fixtures/peers.js rename to test/fixtures/peers.ts index 27150e5e..656e7910 100644 --- a/test/fixtures/peers.js +++ b/test/fixtures/peers.ts @@ -1,6 +1,4 @@ -'use strict' - -module.exports = [{ +export default [{ id: '12D3KooWNvSZnPi3RrhrTwEY4LuuBeB6K6facKUCJcyWG1aoDd2p', privKey: 'CAESYHyCgD+3HtEHm6kzPO6fuwP+BAr/PxfJKlvAOWhc/IqAwrZjCNn0jz93sSl81cP6R6x/g+iVYmR5Wxmn4ZtzJFnCtmMI2fSPP3exKXzVw/pHrH+D6JViZHlbGafhm3MkWQ==', pubKey: 'CAESIMK2YwjZ9I8/d7EpfNXD+kesf4PolWJkeVsZp+GbcyRZ' diff --git a/test/fixtures/swarm.key.js b/test/fixtures/swarm.key.ts similarity index 60% rename from test/fixtures/swarm.key.js rename to test/fixtures/swarm.key.ts index 184f47b6..71331801 100644 --- a/test/fixtures/swarm.key.js +++ b/test/fixtures/swarm.key.ts @@ -1,5 +1,4 @@ -'use strict' -module.exports = '/key/swarm/psk/1.0.0/\n' + +export default '/key/swarm/psk/1.0.0/\n' + '/base16/\n' + '411f0a244cbbc25ecbb2b070d00a1832516ded521eb3ee3aa13189efe2e2b9a2' diff --git a/test/identify/index.spec.js 
b/test/identify/index.spec.js deleted file mode 100644 index 8b8269d5..00000000 --- a/test/identify/index.spec.js +++ /dev/null @@ -1,605 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') - -const { EventEmitter } = require('events') -const PeerId = require('peer-id') -const duplexPair = require('it-pair/duplex') -const { Multiaddr } = require('multiaddr') -const pWaitFor = require('p-wait-for') -const { toString: unit8ArrayToString } = require('uint8arrays/to-string') -const { codes: Errors } = require('../../src/errors') -const IdentifyService = require('../../src/identify') -const multicodecs = IdentifyService.multicodecs -const Peers = require('../fixtures/peers') -const Libp2p = require('../../src') -const Envelope = require('../../src/record/envelope') -const PeerStore = require('../../src/peer-store') -const baseOptions = require('../utils/base-options.browser') -const { updateSelfPeerRecord } = require('../../src/record/utils') -const pkg = require('../../package.json') -const AddressManager = require('../../src/address-manager') -const { MemoryDatastore } = require('datastore-core/memory') -const { MULTIADDRS_WEBSOCKETS } = require('../fixtures/browser') -const { mockConnectionGater } = require('../utils/mock-connection-gater') -const remoteAddr = MULTIADDRS_WEBSOCKETS[0] -const listenMaddrs = [new Multiaddr('/ip4/127.0.0.1/tcp/15002/ws')] - -describe('Identify', () => { - const connectionGater = mockConnectionGater() - let localPeer, localPeerStore, localAddressManager - let remotePeer, remotePeerStore, remoteAddressManager - const protocols = [multicodecs.IDENTIFY, multicodecs.IDENTIFY_PUSH] - - before(async () => { - [localPeer, remotePeer] = (await Promise.all([ - PeerId.createFromJSON(Peers[0]), - PeerId.createFromJSON(Peers[1]) - ])) - - localPeerStore = new PeerStore({ - peerId: localPeer, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - await localPeerStore.protoBook.set(localPeer, protocols) - - remotePeerStore = new PeerStore({ - peerId: remotePeer, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - await remotePeerStore.protoBook.set(remotePeer, protocols) - - localAddressManager = new AddressManager(localPeer) - remoteAddressManager = new AddressManager(remotePeer) - }) - - afterEach(() => { - sinon.restore() - }) - - it('should be able to identify another peer', async () => { - const localIdentify = new IdentifyService({ - libp2p: { - peerId: localPeer, - connectionManager: new EventEmitter(), - peerStore: localPeerStore, - multiaddrs: listenMaddrs, - isStarted: () => true, - _options: { host: {} }, - _config: { protocolPrefix: 'ipfs' } - } - }) - const remoteIdentify = new IdentifyService({ - libp2p: { - peerId: remotePeer, - connectionManager: new EventEmitter(), - peerStore: remotePeerStore, - multiaddrs: listenMaddrs, - isStarted: () => true, - _options: { host: {} }, - _config: { protocolPrefix: 'ipfs' } - } - }) - - const observedAddr = new Multiaddr('/ip4/127.0.0.1/tcp/1234') - const localConnectionMock = { newStream: () => {}, remotePeer } - const remoteConnectionMock = { remoteAddr: observedAddr } - - const [local, remote] = duplexPair() - sinon.stub(localConnectionMock, 'newStream').returns({ stream: local, protocol: multicodecs.IDENTIFY }) - - sinon.spy(localIdentify.peerStore.addressBook, 'consumePeerRecord') - sinon.spy(localIdentify.peerStore.protoBook, 'set') - - // Transport 
Manager creates signed peer record - await updateSelfPeerRecord(remoteIdentify._libp2p) - - // Run identify - await Promise.all([ - localIdentify.identify(localConnectionMock), - remoteIdentify.handleMessage({ - connection: remoteConnectionMock, - stream: remote, - protocol: multicodecs.IDENTIFY - }) - ]) - - expect(localIdentify.peerStore.addressBook.consumePeerRecord.callCount).to.equal(1) - expect(localIdentify.peerStore.protoBook.set.callCount).to.equal(1) - - // Validate the remote peer gets updated in the peer store - const addresses = await localIdentify.peerStore.addressBook.get(remotePeer) - expect(addresses).to.exist() - expect(addresses).have.lengthOf(listenMaddrs.length) - expect(addresses.map((a) => a.multiaddr)[0].equals(listenMaddrs[0])) - expect(addresses.map((a) => a.isCertified)[0]).to.eql(true) - }) - - // LEGACY - it('should be able to identify another peer with no certified peer records support', async () => { - const agentVersion = `js-libp2p/${pkg.version}` - const localIdentify = new IdentifyService({ - libp2p: { - peerId: localPeer, - connectionManager: new EventEmitter(), - addressManager: localAddressManager, - peerStore: localPeerStore, - multiaddrs: listenMaddrs, - isStarted: () => true, - _options: { host: { agentVersion } }, - _config: { protocolPrefix: 'ipfs' } - } - }) - - const remoteIdentify = new IdentifyService({ - libp2p: { - peerId: remotePeer, - connectionManager: new EventEmitter(), - addressManager: remoteAddressManager, - peerStore: remotePeerStore, - multiaddrs: listenMaddrs, - isStarted: () => true, - _options: { host: { agentVersion } }, - _config: { protocolPrefix: 'ipfs' } - } - }) - - const observedAddr = new Multiaddr('/ip4/127.0.0.1/tcp/1234') - const localConnectionMock = { newStream: () => {}, remotePeer } - const remoteConnectionMock = { remoteAddr: observedAddr } - - const [local, remote] = duplexPair() - sinon.stub(localConnectionMock, 'newStream').returns({ stream: local, protocol: multicodecs.IDENTIFY }) - sinon.stub(Envelope, 'openAndCertify').throws() - - sinon.spy(localIdentify.peerStore.addressBook, 'set') - sinon.spy(localIdentify.peerStore.protoBook, 'set') - sinon.spy(localIdentify.peerStore.metadataBook, 'setValue') - - // Run identify - await Promise.all([ - localIdentify.identify(localConnectionMock), - remoteIdentify.handleMessage({ - connection: remoteConnectionMock, - stream: remote, - protocol: multicodecs.IDENTIFY - }) - ]) - - expect(localIdentify.peerStore.addressBook.set.callCount).to.equal(1) - expect(localIdentify.peerStore.protoBook.set.callCount).to.equal(1) - - const metadataArgs = localIdentify.peerStore.metadataBook.setValue.firstCall.args - expect(metadataArgs[0].id.bytes).to.equal(remotePeer.bytes) - expect(metadataArgs[1]).to.equal('AgentVersion') - expect(unit8ArrayToString(metadataArgs[2])).to.equal(agentVersion) - - // Validate the remote peer gets updated in the peer store - const call = localIdentify.peerStore.addressBook.set.firstCall - expect(call.args[0].id.bytes).to.equal(remotePeer.bytes) - expect(call.args[1]).to.exist() - expect(call.args[1]).have.lengthOf(listenMaddrs.length) - expect(call.args[1][0].equals(listenMaddrs[0])) - }) - - it('should throw if identified peer is the wrong peer', async () => { - const localIdentify = new IdentifyService({ - libp2p: { - peerId: localPeer, - connectionManager: new EventEmitter(), - peerStore: localPeerStore, - multiaddrs: [], - _options: { host: {} }, - _config: { protocolPrefix: 'ipfs' } - } - }) - const remoteIdentify = new IdentifyService({ - 
libp2p: { - peerId: remotePeer, - connectionManager: new EventEmitter(), - peerStore: remotePeerStore, - multiaddrs: [], - _options: { host: {} }, - _config: { protocolPrefix: 'ipfs' } - } - }) - - const observedAddr = new Multiaddr('/ip4/127.0.0.1/tcp/1234') - const localConnectionMock = { newStream: () => {}, remotePeer: localPeer } - const remoteConnectionMock = { remoteAddr: observedAddr } - - const [local, remote] = duplexPair() - sinon.stub(localConnectionMock, 'newStream').returns({ stream: local, protocol: multicodecs.IDENTIFY }) - - // Run identify - const identifyPromise = Promise.all([ - localIdentify.identify(localConnectionMock, localPeer), - remoteIdentify.handleMessage({ - connection: remoteConnectionMock, - stream: remote, - protocol: multicodecs.IDENTIFY - }) - ]) - - await expect(identifyPromise) - .to.eventually.be.rejected() - .and.to.have.property('code', Errors.ERR_INVALID_PEER) - }) - - it('should store host data and protocol version into metadataBook', async () => { - const agentVersion = 'js-project/1.0.0' - const peerStore = new PeerStore({ - peerId: localPeer, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - - sinon.spy(peerStore.metadataBook, 'setValue') - - const service = new IdentifyService({ // eslint-disable-line no-new - libp2p: { - peerId: localPeer, - connectionManager: new EventEmitter(), - peerStore, - multiaddrs: listenMaddrs, - _options: { - host: { - agentVersion - } - }, - _config: { protocolPrefix: 'ipfs' } - }, - protocols - }) - - await service.start() - - expect(peerStore.metadataBook.setValue.callCount).to.eql(2) - - const storedAgentVersion = await peerStore.metadataBook.getValue(localPeer, 'AgentVersion') - const storedProtocolVersion = await peerStore.metadataBook.getValue(localPeer, 'ProtocolVersion') - - expect(agentVersion).to.eql(unit8ArrayToString(storedAgentVersion)) - expect(storedProtocolVersion).to.exist() - - await service.stop() - }) - - describe('push', () => { - it('should be able to push identify updates to another peer', async () => { - const storedProtocols = [multicodecs.IDENTIFY, multicodecs.IDENTIFY_PUSH, '/echo/1.0.0'].sort() - const connectionManager = new EventEmitter() - connectionManager.getConnection = () => { } - - const localPeerStore = new PeerStore({ - peerId: localPeer, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - await localPeerStore.protoBook.set(localPeer, storedProtocols) - - const localIdentify = new IdentifyService({ - libp2p: { - peerId: localPeer, - connectionManager: new EventEmitter(), - peerStore: localPeerStore, - multiaddrs: listenMaddrs, - isStarted: () => true, - _options: { host: {} }, - _config: { protocolPrefix: 'ipfs' } - } - }) - - const remotePeerStore = new PeerStore({ - peerId: remotePeer, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - await remotePeerStore.protoBook.set(remotePeer, storedProtocols) - - const remoteIdentify = new IdentifyService({ - libp2p: { - peerId: remotePeer, - connectionManager, - peerStore: remotePeerStore, - multiaddrs: [], - isStarted: () => true, - _options: { host: {} }, - _config: { protocolPrefix: 'ipfs' } - } - }) - - // Setup peer protocols and multiaddrs - const localProtocols = new Set(storedProtocols) - const localConnectionMock = { newStream: () => { } } - const remoteConnectionMock = { remotePeer: localPeer } - - const [local, remote] = duplexPair() - sinon.stub(localConnectionMock, 
'newStream').returns({ stream: local, protocol: multicodecs.IDENTIFY_PUSH }) - - sinon.spy(remoteIdentify.peerStore.addressBook, 'consumePeerRecord') - sinon.spy(remoteIdentify.peerStore.protoBook, 'set') - - // Transport Manager creates signed peer record - await updateSelfPeerRecord(localIdentify._libp2p) - await updateSelfPeerRecord(remoteIdentify._libp2p) - - // Run identify - await Promise.all([ - localIdentify.push([localConnectionMock]), - remoteIdentify.handleMessage({ - connection: remoteConnectionMock, - stream: remote, - protocol: multicodecs.IDENTIFY_PUSH - }) - ]) - - expect(remoteIdentify.peerStore.addressBook.consumePeerRecord.callCount).to.equal(2) - expect(remoteIdentify.peerStore.protoBook.set.callCount).to.equal(1) - - const addresses = await localIdentify.peerStore.addressBook.get(localPeer) - expect(addresses).to.exist() - expect(addresses).have.lengthOf(listenMaddrs.length) - expect(addresses.map((a) => a.multiaddr)).to.eql(listenMaddrs) - - const [peerId2, protocols] = remoteIdentify.peerStore.protoBook.set.firstCall.args - expect(peerId2.bytes).to.eql(localPeer.bytes) - expect(protocols).to.eql(Array.from(localProtocols)) - }) - - // LEGACY - it('should be able to push identify updates to another peer with no certified peer records support', async () => { - const storedProtocols = [multicodecs.IDENTIFY, multicodecs.IDENTIFY_PUSH, '/echo/1.0.0'].sort() - const connectionManager = new EventEmitter() - connectionManager.getConnection = () => { } - - const localPeerStore = new PeerStore({ - peerId: localPeer, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - await localPeerStore.protoBook.set(localPeer, storedProtocols) - - const localIdentify = new IdentifyService({ - libp2p: { - peerId: localPeer, - connectionManager: new EventEmitter(), - peerStore: localPeerStore, - multiaddrs: listenMaddrs, - isStarted: () => true, - _options: { host: {} }, - _config: { protocolPrefix: 'ipfs' } - } - }) - - const remotePeerStore = new PeerStore({ - peerId: remotePeer, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - await remotePeerStore.protoBook.set(remotePeer, storedProtocols) - - const remoteIdentify = new IdentifyService({ - libp2p: { - peerId: remotePeer, - connectionManager, - peerStore: remotePeerStore, - multiaddrs: [], - _options: { host: {} }, - _config: { protocolPrefix: 'ipfs' }, - isStarted: () => true - } - }) - - // Setup peer protocols and multiaddrs - const localProtocols = new Set(storedProtocols) - const localConnectionMock = { newStream: () => {} } - const remoteConnectionMock = { remotePeer: localPeer } - - const [local, remote] = duplexPair() - sinon.stub(localConnectionMock, 'newStream').returns({ stream: local, protocol: multicodecs.IDENTIFY_PUSH }) - sinon.stub(Envelope, 'openAndCertify').throws() - - sinon.spy(remoteIdentify.peerStore.addressBook, 'set') - sinon.spy(remoteIdentify.peerStore.protoBook, 'set') - - // Run identify - await Promise.all([ - localIdentify.push([localConnectionMock]), - remoteIdentify.handleMessage({ - connection: remoteConnectionMock, - stream: remote, - protocol: multicodecs.IDENTIFY_PUSH - }) - ]) - - expect(remoteIdentify.peerStore.addressBook.set.callCount).to.equal(1) - expect(remoteIdentify.peerStore.protoBook.set.callCount).to.equal(1) - - const [peerId, multiaddrs] = remoteIdentify.peerStore.addressBook.set.firstCall.args - expect(peerId.bytes).to.eql(localPeer.bytes) - expect(multiaddrs).to.eql(listenMaddrs) - - const 
[peerId2, protocols] = remoteIdentify.peerStore.protoBook.set.firstCall.args - expect(peerId2.bytes).to.eql(localPeer.bytes) - expect(protocols).to.eql(Array.from(localProtocols)) - }) - }) - - describe('libp2p.dialer.identifyService', () => { - let peerId - let libp2p - let remoteLibp2p - - before(async () => { - peerId = await PeerId.createFromJSON(Peers[0]) - }) - - afterEach(async () => { - sinon.restore() - libp2p && await libp2p.stop() - libp2p = null - }) - - after(async () => { - remoteLibp2p && await remoteLibp2p.stop() - }) - - it('should run identify automatically after connecting', async () => { - libp2p = new Libp2p({ - ...baseOptions, - peerId - }) - - await libp2p.start() - - sinon.spy(libp2p.identifyService, 'identify') - const peerStoreSpyConsumeRecord = sinon.spy(libp2p.peerStore.addressBook, 'consumePeerRecord') - const peerStoreSpyAdd = sinon.spy(libp2p.peerStore.addressBook, 'add') - - const connection = await libp2p.dialer.connectToPeer(remoteAddr) - expect(connection).to.exist() - - // Wait for peer store to be updated - // Dialer._createDialTarget (add), Identify (consume) - await pWaitFor(() => peerStoreSpyConsumeRecord.callCount === 1 && peerStoreSpyAdd.callCount === 1) - expect(libp2p.identifyService.identify.callCount).to.equal(1) - - // The connection should have no open streams - await pWaitFor(() => connection.streams.length === 0) - await connection.close() - }) - - it('should store remote agent and protocol versions in metadataBook after connecting', async () => { - libp2p = new Libp2p({ - ...baseOptions, - peerId - }) - - await libp2p.start() - - sinon.spy(libp2p.identifyService, 'identify') - const peerStoreSpyConsumeRecord = sinon.spy(libp2p.peerStore.addressBook, 'consumePeerRecord') - const peerStoreSpyAdd = sinon.spy(libp2p.peerStore.addressBook, 'add') - - const connection = await libp2p.dialer.connectToPeer(remoteAddr) - expect(connection).to.exist() - - // Wait for peer store to be updated - // Dialer._createDialTarget (add), Identify (consume) - await pWaitFor(() => peerStoreSpyConsumeRecord.callCount === 1 && peerStoreSpyAdd.callCount === 1) - expect(libp2p.identifyService.identify.callCount).to.equal(1) - - // The connection should have no open streams - await pWaitFor(() => connection.streams.length === 0) - await connection.close() - - const remotePeer = PeerId.createFromB58String(remoteAddr.getPeerId()) - - const storedAgentVersion = libp2p.peerStore.metadataBook.getValue(remotePeer, 'AgentVersion') - const storedProtocolVersion = libp2p.peerStore.metadataBook.getValue(remotePeer, 'ProtocolVersion') - - expect(storedAgentVersion).to.exist() - expect(storedProtocolVersion).to.exist() - }) - - it('should push protocol updates to an already connected peer', async () => { - libp2p = new Libp2p({ - ...baseOptions, - peerId - }) - - await libp2p.start() - - sinon.spy(libp2p.identifyService, 'identify') - sinon.spy(libp2p.identifyService, 'push') - - const connection = await libp2p.dialer.connectToPeer(remoteAddr) - expect(connection).to.exist() - - // Wait for identify to finish - await libp2p.identifyService.identify.firstCall.returnValue - sinon.stub(libp2p, 'isStarted').returns(true) - - await libp2p.handle('/echo/2.0.0', () => {}) - await libp2p.unhandle('/echo/2.0.0') - - // the protocol change event listener in the identity service is async - await pWaitFor(() => libp2p.identifyService.push.callCount === 2) - - // Verify the remote peer is notified of both changes - expect(libp2p.identifyService.push.callCount).to.equal(2) - - for (const call 
of libp2p.identifyService.push.getCalls()) { - const [connections] = call.args - expect(connections.length).to.equal(1) - expect(connections[0].remotePeer.toB58String()).to.equal(remoteAddr.getPeerId()) - const results = await call.returnValue - expect(results.length).to.equal(1) - } - - // Verify the streams close - await pWaitFor(() => connection.streams.length === 0) - }) - - it('should store host data and protocol version into metadataBook', async () => { - const agentVersion = 'js-project/1.0.0' - - libp2p = new Libp2p({ - ...baseOptions, - peerId, - host: { - agentVersion - } - }) - await libp2p.start() - - const storedAgentVersion = await libp2p.peerStore.metadataBook.getValue(localPeer, 'AgentVersion') - const storedProtocolVersion = await libp2p.peerStore.metadataBook.getValue(localPeer, 'ProtocolVersion') - - expect(agentVersion).to.eql(unit8ArrayToString(storedAgentVersion)) - expect(storedProtocolVersion).to.exist() - }) - - it('should push multiaddr updates to an already connected peer', async () => { - libp2p = new Libp2p({ - ...baseOptions, - peerId - }) - - await libp2p.start() - - sinon.spy(libp2p.identifyService, 'identify') - sinon.spy(libp2p.identifyService, 'push') - - const connection = await libp2p.dialer.connectToPeer(remoteAddr) - expect(connection).to.exist() - - // Wait for identify to finish - await libp2p.identifyService.identify.firstCall.returnValue - sinon.stub(libp2p, 'isStarted').returns(true) - - await libp2p.peerStore.addressBook.add(libp2p.peerId, [new Multiaddr('/ip4/180.0.0.1/tcp/15001/ws')]) - - // the protocol change event listener in the identity service is async - await pWaitFor(() => libp2p.identifyService.push.callCount === 1) - - // Verify the remote peer is notified of change - expect(libp2p.identifyService.push.callCount).to.equal(1) - for (const call of libp2p.identifyService.push.getCalls()) { - const [connections] = call.args - expect(connections.length).to.equal(1) - expect(connections[0].remotePeer.toB58String()).to.equal(remoteAddr.getPeerId()) - const results = await call.returnValue - expect(results.length).to.equal(1) - } - - // Verify the streams close - await pWaitFor(() => connection.streams.length === 0) - }) - }) -}) diff --git a/test/identify/index.spec.ts b/test/identify/index.spec.ts new file mode 100644 index 00000000..0297a5af --- /dev/null +++ b/test/identify/index.spec.ts @@ -0,0 +1,619 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import { Multiaddr } from '@multiformats/multiaddr' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { codes } from '../../src/errors.js' +import { IdentifyService, Message } from '../../src/identify/index.js' +import Peers from '../fixtures/peers.js' +import { createLibp2pNode } from '../../src/libp2p.js' +import { PersistentPeerStore } from '@libp2p/peer-store' +import { createBaseOptions } from '../utils/base-options.browser.js' +import { DefaultAddressManager } from '../../src/address-manager/index.js' +import { MemoryDatastore } from 'datastore-core/memory' +import { MULTIADDRS_WEBSOCKETS } from '../fixtures/browser.js' +import * as lp from 'it-length-prefixed' +import drain from 'it-drain' +import { pipe } from 'it-pipe' +import { mockConnectionGater, mockRegistrar, mockUpgrader, connectionPair } from '@libp2p/interface-compliance-tests/mocks' +import { createFromJSON } from '@libp2p/peer-id-factory' +import { Components } 
from '@libp2p/interfaces/components' +import { PeerRecordUpdater } from '../../src/peer-record-updater.js' +import { + MULTICODEC_IDENTIFY, + MULTICODEC_IDENTIFY_PUSH +} from '../../src/identify/consts.js' +import { DefaultConnectionManager } from '../../src/connection-manager/index.js' +import { DefaultTransportManager } from '../../src/transport-manager.js' +import { CustomEvent, Startable } from '@libp2p/interfaces' +import delay from 'delay' +import pWaitFor from 'p-wait-for' +import { peerIdFromString } from '@libp2p/peer-id' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import type { Libp2pNode } from '../../src/libp2p.js' +import { pEvent } from 'p-event' + +const listenMaddrs = [new Multiaddr('/ip4/127.0.0.1/tcp/15002/ws')] + +const defaultInit = { + protocolPrefix: 'ipfs', + host: { + agentVersion: 'v1.0.0' + } +} + +const protocols = [MULTICODEC_IDENTIFY, MULTICODEC_IDENTIFY_PUSH] + +async function createComponents (index: number, services: Startable[]) { + const peerId = await createFromJSON(Peers[index]) + + const components = new Components({ + peerId, + datastore: new MemoryDatastore(), + registrar: mockRegistrar(), + upgrader: mockUpgrader(), + connectionGater: mockConnectionGater() + }) + const peerStore = new PersistentPeerStore(components, { + addressFilter: components.getConnectionGater().filterMultiaddrForPeer + }) + components.setPeerStore(peerStore) + components.setAddressManager(new DefaultAddressManager(components, { + announce: listenMaddrs.map(ma => ma.toString()) + })) + + const connectionManager = new DefaultConnectionManager(components) + services.push(connectionManager) + components.setConnectionManager(connectionManager) + + const transportManager = new DefaultTransportManager(components) + services.push(transportManager) + components.setTransportManager(transportManager) + + await peerStore.protoBook.set(peerId, protocols) + + return components +} + +describe('Identify', () => { + let localComponents: Components + let remoteComponents: Components + + let localPeerRecordUpdater: PeerRecordUpdater + let remotePeerRecordUpdater: PeerRecordUpdater + let services: Startable[] + + beforeEach(async () => { + services = [] + + localComponents = await createComponents(0, services) + remoteComponents = await createComponents(1, services) + + localPeerRecordUpdater = new PeerRecordUpdater(localComponents) + remotePeerRecordUpdater = new PeerRecordUpdater(remoteComponents) + + await Promise.all( + services.map(s => s.start()) + ) + }) + + afterEach(async () => { + sinon.restore() + + await Promise.all( + services.map(s => s.stop()) + ) + }) + + it('should be able to identify another peer', async () => { + const localIdentify = new IdentifyService(localComponents, defaultInit) + const remoteIdentify = new IdentifyService(remoteComponents, defaultInit) + + await localIdentify.start() + await remoteIdentify.start() + + const [localToRemote] = connectionPair({ + peerId: localComponents.getPeerId(), + registrar: localComponents.getRegistrar() + }, { + peerId: remoteComponents.getPeerId(), + registrar: remoteComponents.getRegistrar() + }) + + const localAddressBookConsumePeerRecordSpy = sinon.spy(localComponents.getPeerStore().addressBook, 'consumePeerRecord') + const localProtoBookSetSpy = sinon.spy(localComponents.getPeerStore().protoBook, 'set') + + // Make sure the remote peer has a peer record to share during identify + await remotePeerRecordUpdater.update() + + // Run identify + await localIdentify.identify(localToRemote) + + 
expect(localAddressBookConsumePeerRecordSpy.callCount).to.equal(1) + expect(localProtoBookSetSpy.callCount).to.equal(1) + + // Validate the remote peer gets updated in the peer store + const addresses = await localComponents.getPeerStore().addressBook.get(remoteComponents.getPeerId()) + expect(addresses).to.exist() + + expect(addresses).have.lengthOf(listenMaddrs.length) + expect(addresses.map((a) => a.multiaddr)[0].equals(listenMaddrs[0])) + expect(addresses.map((a) => a.isCertified)[0]).to.be.true() + }) + + // LEGACY + it('should be able to identify another peer with no certified peer records support', async () => { + const agentVersion = 'js-libp2p/5.0.0' + const localIdentify = new IdentifyService(localComponents, { + protocolPrefix: 'ipfs', + host: { + agentVersion: agentVersion + } + }) + await localIdentify.start() + const remoteIdentify = new IdentifyService(remoteComponents, { + protocolPrefix: 'ipfs', + host: { + agentVersion: agentVersion + } + }) + await remoteIdentify.start() + + const [localToRemote] = connectionPair({ + peerId: localComponents.getPeerId(), + registrar: localComponents.getRegistrar() + }, { + peerId: remoteComponents.getPeerId(), + registrar: remoteComponents.getRegistrar() + }) + + sinon.stub(localComponents.getPeerStore().addressBook, 'consumePeerRecord').throws() + + const localProtoBookSetSpy = sinon.spy(localComponents.getPeerStore().protoBook, 'set') + + // Run identify + await localIdentify.identify(localToRemote) + + expect(localProtoBookSetSpy.callCount).to.equal(1) + + // Validate the remote peer gets updated in the peer store + const addresses = await localComponents.getPeerStore().addressBook.get(remoteComponents.getPeerId()) + expect(addresses).to.exist() + + expect(addresses).have.lengthOf(listenMaddrs.length) + expect(addresses.map((a) => a.multiaddr)[0].equals(listenMaddrs[0])) + expect(addresses.map((a) => a.isCertified)[0]).to.be.false() + }) + + it('should throw if identified peer is the wrong peer', async () => { + const localIdentify = new IdentifyService(localComponents, defaultInit) + const remoteIdentify = new IdentifyService(remoteComponents, defaultInit) + + await localIdentify.start() + await remoteIdentify.start() + + const [localToRemote] = connectionPair({ + peerId: localComponents.getPeerId(), + registrar: localComponents.getRegistrar() + }, { + peerId: remoteComponents.getPeerId(), + registrar: remoteComponents.getRegistrar() + }) + + // send an invalid message + await remoteComponents.getRegistrar().unhandle(MULTICODEC_IDENTIFY) + await remoteComponents.getRegistrar().handle(MULTICODEC_IDENTIFY, (data) => { + void Promise.resolve().then(async () => { + const { connection, stream } = data + const signedPeerRecord = await remoteComponents.getPeerStore().addressBook.getRawEnvelope(remoteComponents.getPeerId()) + + const message = Message.Identify.encode({ + protocolVersion: '123', + agentVersion: '123', + // send bad public key + publicKey: localComponents.getPeerId().publicKey ?? 
new Uint8Array(0), + listenAddrs: [], + signedPeerRecord, + observedAddr: connection.remoteAddr.bytes, + protocols: [] + }).finish() + + await pipe( + [message], + lp.encode(), + stream, + drain + ) + }) + }) + + // Run identify + await expect(localIdentify.identify(localToRemote)) + .to.eventually.be.rejected() + .and.to.have.property('code', codes.ERR_INVALID_PEER) + }) + + it('should store own host data and protocol version into metadataBook on start', async () => { + const agentVersion = 'js-project/1.0.0' + const localIdentify = new IdentifyService(localComponents, { + protocolPrefix: 'ipfs', + host: { + agentVersion + } + }) + + await expect(localComponents.getPeerStore().metadataBook.getValue(localComponents.getPeerId(), 'AgentVersion')) + .to.eventually.be.undefined() + await expect(localComponents.getPeerStore().metadataBook.getValue(localComponents.getPeerId(), 'ProtocolVersion')) + .to.eventually.be.undefined() + + await localIdentify.start() + + await expect(localComponents.getPeerStore().metadataBook.getValue(localComponents.getPeerId(), 'AgentVersion')) + .to.eventually.deep.equal(uint8ArrayFromString(agentVersion)) + await expect(localComponents.getPeerStore().metadataBook.getValue(localComponents.getPeerId(), 'ProtocolVersion')) + .to.eventually.be.ok() + + await localIdentify.stop() + }) + + describe('push', () => { + it('should be able to push identify updates to another peer', async () => { + const localIdentify = new IdentifyService(localComponents, defaultInit) + const remoteIdentify = new IdentifyService(remoteComponents, defaultInit) + + await localIdentify.start() + await remoteIdentify.start() + + const [localToRemote, remoteToLocal] = connectionPair({ + peerId: localComponents.getPeerId(), + registrar: localComponents.getRegistrar() + }, { + peerId: remoteComponents.getPeerId(), + registrar: remoteComponents.getRegistrar() + }) + + // ensure connections are registered by connection manager + localComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', { + detail: localToRemote + })) + remoteComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', { + detail: remoteToLocal + })) + + // identify both ways + await localIdentify.identify(localToRemote) + await remoteIdentify.identify(remoteToLocal) + + const updatedProtocol = '/special-new-protocol/1.0.0' + const updatedAddress = new Multiaddr('/ip4/127.0.0.1/tcp/48322') + + // should have protocols but not our new one + const identifiedProtocols = await remoteComponents.getPeerStore().protoBook.get(localComponents.getPeerId()) + expect(identifiedProtocols).to.not.be.empty() + expect(identifiedProtocols).to.not.include(updatedProtocol) + + // should have addresses but not our new one + const identifiedAddresses = await remoteComponents.getPeerStore().addressBook.get(localComponents.getPeerId()) + expect(identifiedAddresses).to.not.be.empty() + expect(identifiedAddresses.map(a => a.multiaddr.toString())).to.not.include(updatedAddress.toString()) + + // update local data - change event will trigger push + await localComponents.getPeerStore().protoBook.add(localComponents.getPeerId(), [updatedProtocol]) + await localComponents.getPeerStore().addressBook.add(localComponents.getPeerId(), [updatedAddress]) + + // needed to update the peer record and send our supported addresses + const addressManager = localComponents.getAddressManager() + addressManager.getAddresses = () => { + return [updatedAddress] + } + + // ensure sequence number of peer record we are about to create is different + await 
delay(1000) + + // make sure we have a peer record to send + await localPeerRecordUpdater.update() + + // wait for the remote peer store to notice the changes + const eventPromise = pEvent(remoteComponents.getPeerStore(), 'change:multiaddrs') + + // push updated peer record to connections + await localIdentify.pushToPeerStore() + + await eventPromise + + // should have new protocol + const updatedProtocols = await remoteComponents.getPeerStore().protoBook.get(localComponents.getPeerId()) + expect(updatedProtocols).to.not.be.empty() + expect(updatedProtocols).to.include(updatedProtocol) + + // should have new address + const updatedAddresses = await remoteComponents.getPeerStore().addressBook.get(localComponents.getPeerId()) + expect(updatedAddresses.map(a => { + return { + multiaddr: a.multiaddr.toString(), + isCertified: a.isCertified + } + })).to.deep.equal([{ + multiaddr: updatedAddress.toString(), + isCertified: true + }]) + + await localIdentify.stop() + await remoteIdentify.stop() + }) + + // LEGACY + it('should be able to push identify updates to another peer with no certified peer records support', async () => { + const localIdentify = new IdentifyService(localComponents, defaultInit) + const remoteIdentify = new IdentifyService(remoteComponents, defaultInit) + + await localIdentify.start() + await remoteIdentify.start() + + const [localToRemote, remoteToLocal] = connectionPair({ + peerId: localComponents.getPeerId(), + registrar: localComponents.getRegistrar() + }, { + peerId: remoteComponents.getPeerId(), + registrar: remoteComponents.getRegistrar() + }) + + // ensure connections are registered by connection manager + localComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', { + detail: localToRemote + })) + remoteComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', { + detail: remoteToLocal + })) + + // identify both ways + await localIdentify.identify(localToRemote) + await remoteIdentify.identify(remoteToLocal) + + const updatedProtocol = '/special-new-protocol/1.0.0' + const updatedAddress = new Multiaddr('/ip4/127.0.0.1/tcp/48322') + + // should have protocols but not our new one + const identifiedProtocols = await remoteComponents.getPeerStore().protoBook.get(localComponents.getPeerId()) + expect(identifiedProtocols).to.not.be.empty() + expect(identifiedProtocols).to.not.include(updatedProtocol) + + // should have addresses but not our new one + const identifiedAddresses = await remoteComponents.getPeerStore().addressBook.get(localComponents.getPeerId()) + expect(identifiedAddresses).to.not.be.empty() + expect(identifiedAddresses.map(a => a.multiaddr.toString())).to.not.include(updatedAddress.toString()) + + // update local data - change event will trigger push + await localComponents.getPeerStore().protoBook.add(localComponents.getPeerId(), [updatedProtocol]) + await localComponents.getPeerStore().addressBook.add(localComponents.getPeerId(), [updatedAddress]) + + // needed to send our supported addresses + const addressManager = localComponents.getAddressManager() + addressManager.getAddresses = () => { + return [updatedAddress] + } + + // wait until remote peer store notices protocol list update + const waitForUpdate = pEvent(remoteComponents.getPeerStore(), 'change:protocols') + + await localIdentify.pushToPeerStore() + + await waitForUpdate + + // should have new protocol + const updatedProtocols = await remoteComponents.getPeerStore().protoBook.get(localComponents.getPeerId()) + expect(updatedProtocols).to.not.be.empty() + 
expect(updatedProtocols).to.include(updatedProtocol) + + // should have new address + const updatedAddresses = await remoteComponents.getPeerStore().addressBook.get(localComponents.getPeerId()) + expect(updatedAddresses.map(a => { + return { + multiaddr: a.multiaddr.toString(), + isCertified: a.isCertified + } + })).to.deep.equal([{ + multiaddr: updatedAddress.toString(), + isCertified: false + }]) + + await localIdentify.stop() + await remoteIdentify.stop() + }) + }) + + describe('libp2p.dialer.identifyService', () => { + let peerId: PeerId + let libp2p: Libp2pNode + let remoteLibp2p: Libp2pNode + const remoteAddr = MULTIADDRS_WEBSOCKETS[0] + + before(async () => { + peerId = await createFromJSON(Peers[0]) + }) + + afterEach(async () => { + sinon.restore() + + if (libp2p != null) { + await libp2p.stop() + } + }) + + after(async () => { + if (remoteLibp2p != null) { + await remoteLibp2p.stop() + } + }) + + it('should run identify automatically after connecting', async () => { + libp2p = await createLibp2pNode(createBaseOptions({ + peerId + })) + + await libp2p.start() + + if (libp2p.identifyService == null) { + throw new Error('Identity service was not configured') + } + + const identityServiceIdentifySpy = sinon.spy(libp2p.identifyService, 'identify') + const peerStoreSpyConsumeRecord = sinon.spy(libp2p.peerStore.addressBook, 'consumePeerRecord') + const peerStoreSpyAdd = sinon.spy(libp2p.peerStore.addressBook, 'add') + + const connection = await libp2p.dial(remoteAddr) + expect(connection).to.exist() + + // Wait for peer store to be updated + // Dialer._createDialTarget (add), Identify (consume) + await pWaitFor(() => peerStoreSpyConsumeRecord.callCount === 1 && peerStoreSpyAdd.callCount === 1) + expect(identityServiceIdentifySpy.callCount).to.equal(1) + + // The connection should have no open streams + await pWaitFor(() => connection.streams.length === 0) + await connection.close() + }) + + it('should store remote agent and protocol versions in metadataBook after connecting', async () => { + libp2p = await createLibp2pNode(createBaseOptions({ + peerId + })) + + await libp2p.start() + + if (libp2p.identifyService == null) { + throw new Error('Identity service was not configured') + } + + const identityServiceIdentifySpy = sinon.spy(libp2p.identifyService, 'identify') + const peerStoreSpyConsumeRecord = sinon.spy(libp2p.peerStore.addressBook, 'consumePeerRecord') + const peerStoreSpyAdd = sinon.spy(libp2p.peerStore.addressBook, 'add') + + const connection = await libp2p.dial(remoteAddr) + expect(connection).to.exist() + + // Wait for peer store to be updated + // Dialer._createDialTarget (add), Identify (consume) + await pWaitFor(() => peerStoreSpyConsumeRecord.callCount === 1 && peerStoreSpyAdd.callCount === 1) + expect(identityServiceIdentifySpy.callCount).to.equal(1) + + // The connection should have no open streams + await pWaitFor(() => connection.streams.length === 0) + await connection.close() + + const remotePeer = peerIdFromString(remoteAddr.getPeerId() ?? 
'') + + const storedAgentVersion = await libp2p.peerStore.metadataBook.getValue(remotePeer, 'AgentVersion') + const storedProtocolVersion = await libp2p.peerStore.metadataBook.getValue(remotePeer, 'ProtocolVersion') + + expect(storedAgentVersion).to.exist() + expect(storedProtocolVersion).to.exist() + }) + + it('should push protocol updates to an already connected peer', async () => { + libp2p = await createLibp2pNode(createBaseOptions({ + peerId + })) + + await libp2p.start() + + if (libp2p.identifyService == null) { + throw new Error('Identity service was not configured') + } + + const identityServiceIdentifySpy = sinon.spy(libp2p.identifyService, 'identify') + const identityServicePushSpy = sinon.spy(libp2p.identifyService, 'push') + + const connection = await libp2p.dial(remoteAddr) + expect(connection).to.exist() + + // Wait for identify to finish + await identityServiceIdentifySpy.firstCall.returnValue + sinon.stub(libp2p, 'isStarted').returns(true) + + await libp2p.handle('/echo/2.0.0', () => {}) + await libp2p.unhandle('/echo/2.0.0') + + // the protocol change event listener in the identity service is async + await pWaitFor(() => identityServicePushSpy.callCount === 2) + + // Verify the remote peer is notified of both changes + expect(identityServicePushSpy.callCount).to.equal(2) + + for (const call of identityServicePushSpy.getCalls()) { + const [connections] = call.args + expect(connections.length).to.equal(1) + expect(connections[0].remotePeer.toString()).to.equal(remoteAddr.getPeerId()) + await call.returnValue + } + + // Verify the streams close + await pWaitFor(() => connection.streams.length === 0) + }) + + it('should store host data and protocol version into metadataBook', async () => { + const agentVersion = 'js-project/1.0.0' + + libp2p = await createLibp2pNode(createBaseOptions({ + peerId, + host: { + agentVersion + } + })) + + await libp2p.start() + + if (libp2p.identifyService == null) { + throw new Error('Identity service was not configured') + } + + const storedAgentVersion = await libp2p.peerStore.metadataBook.getValue(peerId, 'AgentVersion') + const storedProtocolVersion = await libp2p.peerStore.metadataBook.getValue(peerId, 'ProtocolVersion') + + expect(agentVersion).to.equal(uint8ArrayToString(storedAgentVersion ?? 
new Uint8Array())) + expect(storedProtocolVersion).to.exist() + }) + + it('should push multiaddr updates to an already connected peer', async () => { + libp2p = await createLibp2pNode(createBaseOptions({ + peerId + })) + + await libp2p.start() + + if (libp2p.identifyService == null) { + throw new Error('Identity service was not configured') + } + + const identityServiceIdentifySpy = sinon.spy(libp2p.identifyService, 'identify') + const identityServicePushSpy = sinon.spy(libp2p.identifyService, 'push') + + const connection = await libp2p.dial(remoteAddr) + expect(connection).to.exist() + + // Wait for identify to finish + await identityServiceIdentifySpy.firstCall.returnValue + sinon.stub(libp2p, 'isStarted').returns(true) + + await libp2p.peerStore.addressBook.add(libp2p.peerId, [new Multiaddr('/ip4/180.0.0.1/tcp/15001/ws')]) + + // the protocol change event listener in the identity service is async + await pWaitFor(() => identityServicePushSpy.callCount === 1) + + // Verify the remote peer is notified of change + expect(identityServicePushSpy.callCount).to.equal(1) + for (const call of identityServicePushSpy.getCalls()) { + const [connections] = call.args + expect(connections.length).to.equal(1) + expect(connections[0].remotePeer.toString()).to.equal(remoteAddr.getPeerId()) + await call.returnValue + } + + // Verify the streams close + await pWaitFor(() => connection.streams.length === 0) + }) + }) +}) diff --git a/test/insecure/compliance.spec.js b/test/insecure/compliance.spec.js deleted file mode 100644 index f124dde9..00000000 --- a/test/insecure/compliance.spec.js +++ /dev/null @@ -1,13 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const tests = require('libp2p-interfaces-compliance-tests/src/crypto') -const plaintext = require('../../src/insecure/plaintext') - -describe('plaintext compliance', () => { - tests({ - setup () { - return plaintext - } - }) -}) diff --git a/test/insecure/compliance.spec.ts b/test/insecure/compliance.spec.ts new file mode 100644 index 00000000..2c919f2d --- /dev/null +++ b/test/insecure/compliance.spec.ts @@ -0,0 +1,15 @@ +/* eslint-env mocha */ + +import suite from '@libp2p/interface-compliance-tests/connection-encrypter' +import { Plaintext } from '../../src/insecure/index.js' + +describe('plaintext compliance', () => { + suite({ + async setup () { + return new Plaintext() + }, + async teardown () { + + } + }) +}) diff --git a/test/insecure/plaintext.spec.js b/test/insecure/plaintext.spec.js deleted file mode 100644 index 41f10c5a..00000000 --- a/test/insecure/plaintext.spec.js +++ /dev/null @@ -1,67 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') - -const PeerId = require('peer-id') -const duplexPair = require('it-pair/duplex') - -const peers = require('../fixtures/peers') -const plaintext = require('../../src/insecure/plaintext') -const { - InvalidCryptoExchangeError, - UnexpectedPeerError -} = require('libp2p-interfaces/src/crypto/errors') - -describe('plaintext', () => { - let localPeer - let remotePeer - let wrongPeer - - before(async () => { - [localPeer, remotePeer, wrongPeer] = await Promise.all([ - PeerId.createFromJSON(peers[0]), - PeerId.createFromJSON(peers[1]), - PeerId.createFromJSON(peers[2]) - ]) - }) - - afterEach(() => { - sinon.restore() - }) - - it('should verify the public key and id match', () => { - const [localConn, remoteConn] = duplexPair() - - // When we attempt to get the remote peer key, return the wrong peers pub key - sinon.stub(remotePeer, 
'marshalPubKey').callsFake(() => { - return wrongPeer.marshalPubKey() - }) - - return Promise.all([ - plaintext.secureInbound(remotePeer, localConn), - plaintext.secureOutbound(localPeer, remoteConn, remotePeer) - ]).then(() => expect.fail('should have failed'), (err) => { - expect(err).to.exist() - expect(err).to.have.property('code', UnexpectedPeerError.code) - }) - }) - - it('should fail if the peer does not provide its public key', () => { - const [localConn, remoteConn] = duplexPair() - - // When we attempt to get the remote peer key, return the wrong peers pub key - sinon.stub(remotePeer, 'marshalPubKey').callsFake(() => { - return new Uint8Array(0) - }) - - return Promise.all([ - plaintext.secureInbound(remotePeer, localConn), - plaintext.secureOutbound(localPeer, remoteConn, remotePeer) - ]).then(() => expect.fail('should have failed'), (err) => { - expect(err).to.exist() - expect(err).to.have.property('code', InvalidCryptoExchangeError.code) - }) - }) -}) diff --git a/test/insecure/plaintext.spec.ts b/test/insecure/plaintext.spec.ts new file mode 100644 index 00000000..d4198dab --- /dev/null +++ b/test/insecure/plaintext.spec.ts @@ -0,0 +1,74 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import Peers from '../fixtures/peers.js' +import { Plaintext } from '../../src/insecure/index.js' +import { + InvalidCryptoExchangeError, + UnexpectedPeerError +} from '@libp2p/interfaces/connection-encrypter/errors' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { createFromJSON, createRSAPeerId } from '@libp2p/peer-id-factory' +import type { ConnectionEncrypter } from '@libp2p/interfaces/connection-encrypter' +import { mockMultiaddrConnPair } from '@libp2p/interface-compliance-tests/mocks' +import { Multiaddr } from '@multiformats/multiaddr' +import { peerIdFromBytes } from '@libp2p/peer-id' + +describe('plaintext', () => { + let localPeer: PeerId + let remotePeer: PeerId + let wrongPeer: PeerId + let plaintext: ConnectionEncrypter + + beforeEach(async () => { + [localPeer, remotePeer, wrongPeer] = await Promise.all([ + createFromJSON(Peers[0]), + createFromJSON(Peers[1]), + createFromJSON(Peers[2]) + ]) + + plaintext = new Plaintext() + }) + + afterEach(() => { + sinon.restore() + }) + + it('should verify the public key and id match', async () => { + const { inbound, outbound } = mockMultiaddrConnPair({ + remotePeer, + addrs: [ + new Multiaddr('/ip4/127.0.0.1/tcp/1234'), + new Multiaddr('/ip4/127.0.0.1/tcp/1235') + ] + }) + + await Promise.all([ + plaintext.secureInbound(remotePeer, inbound), + plaintext.secureOutbound(localPeer, outbound, wrongPeer) + ]).then(() => expect.fail('should have failed'), (err) => { + expect(err).to.exist() + expect(err).to.have.property('code', UnexpectedPeerError.code) + }) + }) + + it('should fail if the peer does not provide its public key', async () => { + const peer = await createRSAPeerId() + remotePeer = peerIdFromBytes(peer.toBytes()) + + const { inbound, outbound } = mockMultiaddrConnPair({ + remotePeer, + addrs: [ + new Multiaddr('/ip4/127.0.0.1/tcp/1234'), + new Multiaddr('/ip4/127.0.0.1/tcp/1235') + ] + }) + + await expect(Promise.all([ + plaintext.secureInbound(localPeer, inbound), + plaintext.secureOutbound(remotePeer, outbound, localPeer) + ])) + .to.eventually.be.rejected.with.property('code', InvalidCryptoExchangeError.code) + }) +}) diff --git a/test/interop.ts b/test/interop.ts new file mode 100644 index 00000000..e457dc36 --- /dev/null +++ b/test/interop.ts @@ -0,0 
+1,159 @@ +import { interopTests } from '@libp2p/interop' +import type { SpawnOptions, Daemon, DaemonFactory } from '@libp2p/interop' +import { createServer } from '@libp2p/daemon-server' +import { createClient } from '@libp2p/daemon-client' +import { createLibp2p, Libp2pOptions } from '../src/index.js' +import { Noise } from '@chainsafe/libp2p-noise' +import { TCP } from '@libp2p/tcp' +import { Multiaddr } from '@multiformats/multiaddr' +import { KadDHT } from '@libp2p/kad-dht' +import { path as p2pd } from 'go-libp2p' +import execa from 'execa' +import pDefer from 'p-defer' +import { logger } from '@libp2p/logger' +import { Mplex } from '@libp2p/mplex' +import fs from 'fs' +import { unmarshalPrivateKey } from '@libp2p/crypto/keys' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { peerIdFromKeys } from '@libp2p/peer-id' +import { FloodSub } from '@libp2p/floodsub' +import { Gossipsub } from '@achingbrain/libp2p-gossipsub' + +// IPFS_LOGGING=debug DEBUG=libp2p*,go-libp2p:* npm run test:interop + +async function createGoPeer (options: SpawnOptions): Promise { + const controlPort = Math.floor(Math.random() * (50000 - 10000 + 1)) + 10000 + const apiAddr = new Multiaddr(`/ip4/0.0.0.0/tcp/${controlPort}`) + + const log = logger(`go-libp2p:${controlPort}`) + + const opts = [ + `-listen=${apiAddr.toString()}`, + '-hostAddrs=/ip4/0.0.0.0/tcp/0' + ] + + if (options.noise === true) { + opts.push('-noise=true') + } + + if (options.dht === true) { + opts.push('-dhtServer') + } + + if (options.pubsub === true) { + opts.push('-pubsub') + } + + if (options.pubsubRouter != null) { + opts.push(`-pubsubRouter=${options.pubsubRouter}`) + } + + if (options.key != null) { + opts.push(`-id=${options.key}`) + } + + const deferred = pDefer() + const proc = execa(p2pd(), opts) + + proc.stdout?.on('data', (buf: Buffer) => { + const str = buf.toString() + log(str) + + // daemon has started + if (str.includes('Control socket:')) { + deferred.resolve() + } + }) + + proc.stderr?.on('data', (buf) => { + log.error(buf.toString()) + }) + + await deferred.promise + + return { + client: createClient(apiAddr), + stop: async () => { + await proc.kill() + } + } +} + +async function createJsPeer (options: SpawnOptions): Promise { + let peerId: PeerId | undefined + + if (options.key != null) { + const keyFile = fs.readFileSync(options.key) + const privateKey = await unmarshalPrivateKey(keyFile) + peerId = await peerIdFromKeys(privateKey.public.bytes, privateKey.bytes) + } + + const opts: Libp2pOptions = { + peerId, + addresses: { + listen: ['/ip4/0.0.0.0/tcp/0'] + }, + transports: [new TCP()], + streamMuxers: [new Mplex()], + connectionEncryption: [new Noise()] + } + + if (options.dht === true) { + // go-libp2p-daemon only has the older single-table DHT instead of the dual + // lan/wan version found in recent go-ipfs versions. 
unfortunately it's been + // abandoned so here we simulate the older config with the js implementation + const dht = new KadDHT({ + clientMode: false + }) + const lan = dht.lan + + const protocol = '/ipfs/kad/1.0.0' + lan.protocol = protocol + // @ts-expect-error + lan.network.protocol = protocol + // @ts-expect-error + lan.topologyListener.protocol = protocol + + // @ts-expect-error + opts.dht = lan + } + + if (options.pubsub === true) { + if (options.pubsubRouter === 'floodsub') { + opts.pubsub = new FloodSub() + } else { + opts.pubsub = new Gossipsub() + } + } + + const node = await createLibp2p(opts) + const server = await createServer(new Multiaddr('/ip4/0.0.0.0/tcp/0'), node) + await server.start() + + return { + client: createClient(server.getMultiaddr()), + stop: async () => { + await server.stop() + await node.stop() + } + } +} + +async function main () { + const factory: DaemonFactory = { + async spawn (options: SpawnOptions) { + if (options.type === 'go') { + return await createGoPeer(options) + } + + return await createJsPeer(options) + } + } + + await interopTests(factory) +} + +main().catch(err => { + console.error(err) // eslint-disable-line no-console + process.exit(1) +}) diff --git a/test/keychain/cms-interop.spec.js b/test/keychain/cms-interop.spec.ts similarity index 84% rename from test/keychain/cms-interop.spec.js rename to test/keychain/cms-interop.spec.ts index bd2f09e7..32748acc 100644 --- a/test/keychain/cms-interop.spec.js +++ b/test/keychain/cms-interop.spec.ts @@ -1,21 +1,21 @@ /* eslint max-nested-callbacks: ["error", 8] */ /* eslint-env mocha */ -'use strict' -const { expect } = require('aegir/utils/chai') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') -const { MemoryDatastore } = require('datastore-core/memory') -const Keychain = require('../../src/keychain') +import { expect } from 'aegir/utils/chai.js' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { MemoryDatastore } from 'datastore-core/memory' +import { KeyChain } from '../../src/keychain/index.js' +import { Components } from '@libp2p/interfaces/components' describe('cms interop', () => { const passPhrase = 'this is not a secure phrase' const aliceKeyName = 'cms-interop-alice' - let ks + let ks: KeyChain before(() => { const datastore = new MemoryDatastore() - ks = new Keychain(datastore, { passPhrase: passPhrase }) + ks = new KeyChain(new Components({ datastore }), { pass: passPhrase }) }) const plainData = uint8ArrayFromString('This is a message from Alice to Bob') diff --git a/test/keychain/keychain.spec.js b/test/keychain/keychain.spec.ts similarity index 52% rename from test/keychain/keychain.spec.js rename to test/keychain/keychain.spec.ts index 070a233d..6a176ea4 100644 --- a/test/keychain/keychain.spec.js +++ b/test/keychain/keychain.spec.ts @@ -1,35 +1,35 @@ /* eslint max-nested-callbacks: ["error", 8] */ /* eslint-env mocha */ -'use strict' -const { expect } = require('aegir/utils/chai') -const fail = expect.fail -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') - -const peerUtils = require('../utils/creators/peer') - -const { Key } = require('interface-datastore/key') -const { MemoryDatastore } = require('datastore-core/memory') -const Keychain = 
require('../../src/keychain') -const PeerId = require('peer-id') -const crypto = require('libp2p-crypto') +import { expect } from 'aegir/utils/chai.js' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { createNode } from '../utils/creators/peer.js' +import { Key } from 'interface-datastore/key' +import { MemoryDatastore } from 'datastore-core/memory' +import { KeyChain, KeyChainInit, KeyInfo } from '../../src/keychain/index.js' +import { pbkdf2 } from '@libp2p/crypto' +import { Components } from '@libp2p/interfaces/components' +import type { Datastore } from 'interface-datastore' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { createFromPrivKey } from '@libp2p/peer-id-factory' +import { unmarshalPrivateKey } from '@libp2p/crypto/keys' describe('keychain', () => { const passPhrase = 'this is not a secure phrase' const rsaKeyName = 'tajné jméno' const renamedRsaKeyName = 'ชื่อลับ' - let rsaKeyInfo - let emptyKeystore - let ks - let datastore1, datastore2 + let rsaKeyInfo: KeyInfo + let emptyKeystore: KeyChain + let ks: KeyChain + let datastore1: Datastore, datastore2: Datastore before(async () => { datastore1 = new MemoryDatastore() datastore2 = new MemoryDatastore() - ks = new Keychain(datastore2, { pass: passPhrase }) - emptyKeystore = new Keychain(datastore1, { pass: passPhrase }) + ks = new KeyChain(new Components({ datastore: datastore2 }), { pass: passPhrase }) + emptyKeystore = new KeyChain(new Components({ datastore: datastore1 }), { pass: passPhrase }) await datastore1.open() await datastore2.open() @@ -41,78 +41,77 @@ describe('keychain', () => { }) it('can start without a password', () => { - expect(() => new Keychain(datastore2)).to.not.throw() + expect(() => new KeyChain(new Components({ datastore: datastore2 }), {})).to.not.throw() }) it('needs a NIST SP 800-132 non-weak pass phrase', () => { - expect(() => new Keychain(datastore2, { pass: '< 20 character' })).to.throw() - }) - - it('needs a store to persist a key', () => { - expect(() => new Keychain(null, { pass: passPhrase })).to.throw() + expect(() => new KeyChain(new Components({ datastore: datastore2 }), { pass: '< 20 character' })).to.throw() }) it('has default options', () => { - expect(Keychain.options).to.exist() + expect(KeyChain.options).to.exist() }) it('supports supported hashing alorithms', () => { - const ok = new Keychain(datastore2, { pass: passPhrase, dek: { hash: 'sha2-256' } }) + const ok = new KeyChain(new Components({ datastore: datastore2 }), { pass: passPhrase, dek: { hash: 'sha2-256', salt: 'salt-salt-salt-salt', iterationCount: 1000, keyLength: 14 } }) expect(ok).to.exist() }) it('does not support unsupported hashing alorithms', () => { - expect(() => new Keychain(datastore2, { pass: passPhrase, dek: { hash: 'my-hash' } })).to.throw() + expect(() => new KeyChain(new Components({ datastore: datastore2 }), { pass: passPhrase, dek: { hash: 'my-hash', salt: 'salt-salt-salt-salt', iterationCount: 1000, keyLength: 14 } })).to.throw() }) it('can list keys without a password', async () => { - const keychain = new Keychain(datastore2) + const keychain = new KeyChain(new Components({ datastore: datastore2 }), {}) expect(await keychain.listKeys()).to.have.lengthOf(0) }) it('can find a key without a password', async () => { - const keychain = new Keychain(datastore2) - const keychainWithPassword = new Keychain(datastore2, { pass: `hello-${Date.now()}-${Date.now()}` }) + const keychain = 
new KeyChain(new Components({ datastore: datastore2 }), {}) + const keychainWithPassword = new KeyChain(new Components({ datastore: datastore2 }), { pass: `hello-${Date.now()}-${Date.now()}` }) const name = `key-${Math.random()}` - const { id } = await keychainWithPassword.createKey(name, 'ed25519') + const { id } = await keychainWithPassword.createKey(name, 'Ed25519') await expect(keychain.findKeyById(id)).to.eventually.be.ok() }) it('can remove a key without a password', async () => { - const keychainWithoutPassword = new Keychain(datastore2) - const keychainWithPassword = new Keychain(datastore2, { pass: `hello-${Date.now()}-${Date.now()}` }) + const keychainWithoutPassword = new KeyChain(new Components({ datastore: datastore2 }), {}) + const keychainWithPassword = new KeyChain(new Components({ datastore: datastore2 }), { pass: `hello-${Date.now()}-${Date.now()}` }) const name = `key-${Math.random()}` - expect(await keychainWithPassword.createKey(name, 'ed25519')).to.have.property('name', name) + expect(await keychainWithPassword.createKey(name, 'Ed25519')).to.have.property('name', name) expect(await keychainWithoutPassword.findKeyByName(name)).to.have.property('name', name) await keychainWithoutPassword.removeKey(name) await expect(keychainWithoutPassword.findKeyByName(name)).to.be.rejectedWith(/does not exist/) }) - it('requires a key to create a password', async () => { - const keychain = new Keychain(datastore2) + it('requires a name to create a password', async () => { + const keychain = new KeyChain(new Components({ datastore: datastore2 }), {}) - await expect(keychain.createKey('derp')).to.be.rejected() + // @ts-expect-error invalid parameters + await expect(keychain.createKey(undefined, 'derp')).to.be.rejected() }) it('can generate options', () => { - const options = Keychain.generateOptions() + const options = KeyChain.generateOptions() options.pass = passPhrase - const chain = new Keychain(datastore2, options) + const chain = new KeyChain(new Components({ datastore: datastore2 }), options) expect(chain).to.exist() }) describe('key name', () => { it('is a valid filename and non-ASCII', async () => { const errors = await Promise.all([ - ks.removeKey('../../nasty').then(fail, err => err), - ks.removeKey('').then(fail, err => err), - ks.removeKey(' ').then(fail, err => err), - ks.removeKey(null).then(fail, err => err), - ks.removeKey(undefined).then(fail, err => err) + ks.removeKey('../../nasty').catch(err => err), + ks.removeKey('').catch(err => err), + ks.removeKey(' ').catch(err => err), + // @ts-expect-error invalid parameters + ks.removeKey(null).catch(err => err), + // @ts-expect-error invalid parameters + ks.removeKey(undefined).catch(err => err) ]) expect(errors).to.have.length(5) @@ -124,85 +123,73 @@ describe('keychain', () => { describe('key', () => { it('can be an RSA key', async () => { - rsaKeyInfo = await ks.createKey(rsaKeyName, 'rsa', 2048) + rsaKeyInfo = await ks.createKey(rsaKeyName, 'RSA', 2048) expect(rsaKeyInfo).to.exist() expect(rsaKeyInfo).to.have.property('name', rsaKeyName) expect(rsaKeyInfo).to.have.property('id') }) it('is encrypted PEM encoded PKCS #8', async () => { - const pem = await ks._getPrivateKey(rsaKeyName) + const pem = await ks.getPrivateKey(rsaKeyName) return expect(pem).to.startsWith('-----BEGIN ENCRYPTED PRIVATE KEY-----') }) it('throws if an invalid private key name is given', async () => { - const err = await ks._getPrivateKey(undefined).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 
'ERR_INVALID_KEY_NAME') + // @ts-expect-error invalid parameters + await expect(ks.getPrivateKey(undefined)).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_KEY_NAME') }) it('throws if a private key cant be found', async () => { - const err = await ks._getPrivateKey('not real').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_KEY_NOT_FOUND') + await expect(ks.getPrivateKey('not real')).to.eventually.be.rejected.with.property('code', 'ERR_KEY_NOT_FOUND') }) it('does not overwrite existing key', async () => { - const err = await ks.createKey(rsaKeyName, 'rsa', 2048).then(fail, err => err) - expect(err).to.have.property('code', 'ERR_KEY_ALREADY_EXISTS') + await expect(ks.createKey(rsaKeyName, 'RSA', 2048)).to.eventually.be.rejected.with.property('code', 'ERR_KEY_ALREADY_EXISTS') }) it('cannot create the "self" key', async () => { - const err = await ks.createKey('self', 'rsa', 2048).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME') + await expect(ks.createKey('self', 'RSA', 2048)).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_KEY_NAME') }) it('should validate name is string', async () => { - const err = await ks.createKey(5, 'rsa', 2048).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME') + // @ts-expect-error invalid parameters + await expect(ks.createKey(5, 'rsa', 2048)).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_KEY_NAME') }) it('should validate type is string', async () => { - const err = await ks.createKey('TEST' + Date.now(), null, 2048).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_KEY_TYPE') + // @ts-expect-error invalid parameters + await expect(ks.createKey(`TEST-${Date.now()}`, null, 2048)).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_KEY_TYPE') }) it('should validate size is integer', async () => { - const err = await ks.createKey('TEST' + Date.now(), 'rsa', 'string').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_KEY_SIZE') + // @ts-expect-error invalid parameters + await expect(ks.createKey(`TEST-${Date.now()}`, 'RSA', 'string')).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_KEY_SIZE') }) describe('implements NIST SP 800-131A', () => { it('disallows RSA length < 2048', async () => { - const err = await ks.createKey('bad-nist-rsa', 'rsa', 1024).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_KEY_SIZE') + await expect(ks.createKey('bad-nist-rsa', 'RSA', 1024)).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_KEY_SIZE') }) }) }) - describe('ed25519 keys', () => { + describe('Ed25519 keys', () => { const keyName = 'my custom key' - it('can be an ed25519 key', async () => { - const keyInfo = await ks.createKey(keyName, 'ed25519') + it('can be an Ed25519 key', async () => { + const keyInfo = await ks.createKey(keyName, 'Ed25519') expect(keyInfo).to.exist() expect(keyInfo).to.have.property('name', keyName) expect(keyInfo).to.have.property('id') }) it('does not overwrite existing key', async () => { - const err = await ks.createKey(keyName, 'ed25519').then(fail, err => err) - expect(err).to.have.property('code', 'ERR_KEY_ALREADY_EXISTS') + await expect(ks.createKey(keyName, 'Ed25519')).to.eventually.be.rejected.with.property('code', 'ERR_KEY_ALREADY_EXISTS') }) it('can export/import 
a key', async () => { const keyName = 'a new key' const password = 'my sneaky password' - const keyInfo = await ks.createKey(keyName, 'ed25519') + const keyInfo = await ks.createKey(keyName, 'Ed25519') const exportedKey = await ks.exportKey(keyName, password) // remove it so we can import it await ks.removeKey(keyName) @@ -211,41 +198,7 @@ describe('keychain', () => { }) it('cannot create the "self" key', async () => { - const err = await ks.createKey('self', 'ed25519').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME') - }) - }) - - describe('secp256k1 keys', () => { - const keyName = 'my secp256k1 key' - it('can be an secp256k1 key', async () => { - const keyInfo = await ks.createKey(keyName, 'secp256k1') - expect(keyInfo).to.exist() - expect(keyInfo).to.have.property('name', keyName) - expect(keyInfo).to.have.property('id') - }) - - it('does not overwrite existing key', async () => { - const err = await ks.createKey(keyName, 'secp256k1').then(fail, err => err) - expect(err).to.have.property('code', 'ERR_KEY_ALREADY_EXISTS') - }) - - it('can export/import a key', async () => { - const keyName = 'a new secp256k1 key' - const password = 'my sneaky password' - const keyInfo = await ks.createKey(keyName, 'secp256k1') - const exportedKey = await ks.exportKey(keyName, password) - // remove it so we can import it - await ks.removeKey(keyName) - const importedKey = await ks.importKey(keyName, exportedKey, password) - expect(importedKey.id).to.eql(keyInfo.id) - }) - - it('cannot create the "self" key', async () => { - const err = await ks.createKey('self', 'secp256k1').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME') + await expect(ks.createKey('self', 'Ed25519')).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_KEY_NAME') }) }) @@ -281,22 +234,19 @@ describe('keychain', () => { describe('CMS protected data', () => { const plainData = uint8ArrayFromString('This is a message from Alice to Bob') - let cms + let cms: Uint8Array it('service is available', () => { expect(ks).to.have.property('cms') }) it('requires a key', async () => { - const err = await ks.cms.encrypt('no-key', plainData).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_KEY_NOT_FOUND') + await expect(ks.cms.encrypt('no-key', plainData)).to.eventually.be.rejected.with.property('code', 'ERR_KEY_NOT_FOUND') }) it('requires plain data as a Uint8Array', async () => { - const err = await ks.cms.encrypt(rsaKeyName, 'plain data').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_PARAMETERS') + // @ts-expect-error invalid parameters + await expect(ks.cms.encrypt(rsaKeyName, 'plain data')).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_PARAMETERS') }) it('encrypts', async () => { @@ -306,23 +256,16 @@ describe('keychain', () => { }) it('is a PKCS #7 message', async () => { - const err = await ks.cms.decrypt('not CMS').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_PARAMETERS') + // @ts-expect-error invalid parameters + await expect(ks.cms.decrypt('not CMS')).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_PARAMETERS') }) it('is a PKCS #7 binary message', async () => { - const err = await ks.cms.decrypt(plainData).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_CMS') + await 
expect(ks.cms.decrypt(plainData)).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_CMS') }) it('cannot be read without the key', async () => { - const err = await emptyKeystore.cms.decrypt(cms).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('missingKeys') - expect(err.missingKeys).to.eql([rsaKeyInfo.id]) - expect(err).to.have.property('code', 'ERR_MISSING_KEYS') + await expect(emptyKeystore.cms.decrypt(cms)).to.eventually.be.rejected.with.property('code', 'ERR_MISSING_KEYS') }) it('can be read with the key', async () => { @@ -333,18 +276,16 @@ describe('keychain', () => { }) describe('exported key', () => { - let pemKey + let pemKey: string it('requires the password', async () => { - const err = await ks.exportKey(rsaKeyName).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_PASSWORD_REQUIRED') + // @ts-expect-error invalid parameters + await expect(ks.exportKey(rsaKeyName)).to.eventually.be.rejected.with.property('code', 'ERR_PASSWORD_REQUIRED') }) it('requires the key name', async () => { - const err = await ks.exportKey(undefined, 'password').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME') + // @ts-expect-error invalid parameters + await expect(ks.exportKey(undefined, 'password')).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_KEY_NAME') }) it('is a PKCS #8 encrypted pem', async () => { @@ -359,89 +300,75 @@ describe('keychain', () => { }) it('requires the pem', async () => { - const err = await ks.importKey('imported-key', undefined, 'password').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_PEM_REQUIRED') + // @ts-expect-error invalid parameters + await expect(ks.importKey('imported-key', undefined, 'password')).to.eventually.be.rejected.with.property('code', 'ERR_PEM_REQUIRED') }) it('cannot be imported as an existing key name', async () => { - const err = await ks.importKey(rsaKeyName, pemKey, 'password').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_KEY_ALREADY_EXISTS') + await expect(ks.importKey(rsaKeyName, pemKey, 'password')).to.eventually.be.rejected.with.property('code', 'ERR_KEY_ALREADY_EXISTS') }) it('cannot be imported with the wrong password', async () => { - const err = await ks.importKey('a-new-name-for-import', pemKey, 'not the password').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_CANNOT_READ_KEY') + await expect(ks.importKey('a-new-name-for-import', pemKey, 'not the password')).to.eventually.be.rejected.with.property('code', 'ERR_CANNOT_READ_KEY') }) }) describe('peer id', () => { const alicePrivKey = 
'CAASpgkwggSiAgEAAoIBAQC2SKo/HMFZeBml1AF3XijzrxrfQXdJzjePBZAbdxqKR1Mc6juRHXij6HXYPjlAk01BhF1S3Ll4Lwi0cAHhggf457sMg55UWyeGKeUv0ucgvCpBwlR5cQ020i0MgzjPWOLWq1rtvSbNcAi2ZEVn6+Q2EcHo3wUvWRtLeKz+DZSZfw2PEDC+DGPJPl7f8g7zl56YymmmzH9liZLNrzg/qidokUv5u1pdGrcpLuPNeTODk0cqKB+OUbuKj9GShYECCEjaybJDl9276oalL9ghBtSeEv20kugatTvYy590wFlJkkvyl+nPxIH0EEYMKK9XRWlu9XYnoSfboiwcv8M3SlsjAgMBAAECggEAZtju/bcKvKFPz0mkHiaJcpycy9STKphorpCT83srBVQi59CdFU6Mj+aL/xt0kCPMVigJw8P3/YCEJ9J+rS8BsoWE+xWUEsJvtXoT7vzPHaAtM3ci1HZd302Mz1+GgS8Epdx+7F5p80XAFLDUnELzOzKftvWGZmWfSeDnslwVONkL/1VAzwKy7Ce6hk4SxRE7l2NE2OklSHOzCGU1f78ZzVYKSnS5Ag9YrGjOAmTOXDbKNKN/qIorAQ1bovzGoCwx3iGIatQKFOxyVCyO1PsJYT7JO+kZbhBWRRE+L7l+ppPER9bdLFxs1t5CrKc078h+wuUr05S1P1JjXk68pk3+kQKBgQDeK8AR11373Mzib6uzpjGzgNRMzdYNuExWjxyxAzz53NAR7zrPHvXvfIqjDScLJ4NcRO2TddhXAfZoOPVH5k4PJHKLBPKuXZpWlookCAyENY7+Pd55S8r+a+MusrMagYNljb5WbVTgN8cgdpim9lbbIFlpN6SZaVjLQL3J8TWH6wKBgQDSChzItkqWX11CNstJ9zJyUE20I7LrpyBJNgG1gtvz3ZMUQCn3PxxHtQzN9n1P0mSSYs+jBKPuoSyYLt1wwe10/lpgL4rkKWU3/m1Myt0tveJ9WcqHh6tzcAbb/fXpUFT/o4SWDimWkPkuCb+8j//2yiXk0a/T2f36zKMuZvujqQKBgC6B7BAQDG2H2B/ijofp12ejJU36nL98gAZyqOfpLJ+FeMz4TlBDQ+phIMhnHXA5UkdDapQ+zA3SrFk+6yGk9Vw4Hf46B+82SvOrSbmnMa+PYqKYIvUzR4gg34rL/7AhwnbEyD5hXq4dHwMNsIDq+l2elPjwm/U9V0gdAl2+r50HAoGALtsKqMvhv8HucAMBPrLikhXP/8um8mMKFMrzfqZ+otxfHzlhI0L08Bo3jQrb0Z7ByNY6M8epOmbCKADsbWcVre/AAY0ZkuSZK/CaOXNX/AhMKmKJh8qAOPRY02LIJRBCpfS4czEdnfUhYV/TYiFNnKRj57PPYZdTzUsxa/yVTmECgYBr7slQEjb5Onn5mZnGDh+72BxLNdgwBkhO0OCdpdISqk0F0Pxby22DFOKXZEpiyI9XYP1C8wPiJsShGm2yEwBPWXnrrZNWczaVuCbXHrZkWQogBDG3HGXNdU4MAWCyiYlyinIBpPpoAJZSzpGLmWbMWh28+RJS6AQX6KHrK1o2uw==' - let alice + let alice: PeerId before(async function () { const encoded = uint8ArrayFromString(alicePrivKey, 'base64pad') - alice = await PeerId.createFromPrivKey(encoded) + const privateKey = await unmarshalPrivateKey(encoded) + alice = await createFromPrivKey(privateKey) }) it('private key can be imported', async () => { const key = await ks.importPeer('alice', alice) expect(key.name).to.equal('alice') - expect(key.id).to.equal(alice.toB58String()) + expect(key.id).to.equal(alice.toString()) }) it('private key import requires a valid name', async () => { - const err = await ks.importPeer(undefined, alice).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME') + // @ts-expect-error invalid parameters + await expect(ks.importPeer(undefined, alice)).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_KEY_NAME') }) it('private key import requires the peer', async () => { - const err = await ks.importPeer('alice').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_MISSING_PRIVATE_KEY') + // @ts-expect-error invalid parameters + await expect(ks.importPeer('alice')).to.eventually.be.rejected.with.property('code', 'ERR_MISSING_PRIVATE_KEY') }) it('key id exists', async () => { - const key = await ks.findKeyById(alice.toB58String()) + const key = await ks.findKeyById(alice.toString()) expect(key).to.exist() expect(key).to.have.property('name', 'alice') - expect(key).to.have.property('id', alice.toB58String()) + expect(key).to.have.property('id', alice.toString()) }) it('key name exists', async () => { const key = await ks.findKeyByName('alice') expect(key).to.exist() expect(key).to.have.property('name', 'alice') - expect(key).to.have.property('id', alice.toB58String()) + expect(key).to.have.property('id', alice.toString()) }) }) describe('rename', () => { it('requires an existing key name', async () => { - const err = await 
ks.renameKey('not-there', renamedRsaKeyName).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_NOT_FOUND') + await expect(ks.renameKey('not-there', renamedRsaKeyName)).to.eventually.be.rejected.with.property('code', 'ERR_NOT_FOUND') }) it('requires a valid new key name', async () => { - const err = await ks.renameKey(rsaKeyName, '..\not-valid').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_NEW_KEY_NAME_INVALID') + await expect(ks.renameKey(rsaKeyName, '..\not-valid')).to.eventually.be.rejected.with.property('code', 'ERR_NEW_KEY_NAME_INVALID') }) it('does not overwrite existing key', async () => { - const err = await ks.renameKey(rsaKeyName, rsaKeyName).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_KEY_ALREADY_EXISTS') + await expect(ks.renameKey(rsaKeyName, rsaKeyName)).to.eventually.be.rejected.with.property('code', 'ERR_KEY_ALREADY_EXISTS') }) it('cannot create the "self" key', async () => { - const err = await ks.renameKey(rsaKeyName, 'self').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_NEW_KEY_NAME_INVALID') + await expect(ks.renameKey(rsaKeyName, 'self')).to.eventually.be.rejected.with.property('code', 'ERR_NEW_KEY_NAME_INVALID') }) it('removes the existing key name', async () => { @@ -450,8 +377,7 @@ describe('keychain', () => { expect(key).to.have.property('name', renamedRsaKeyName) expect(key).to.have.property('id', rsaKeyInfo.id) // Try to find the changed key - const err = await ks.findKeyByName(rsaKeyName).then(fail, err => err) - expect(err).to.exist() + await expect(ks.findKeyByName(rsaKeyName)).to.eventually.be.rejected() }) it('creates the new key name', async () => { @@ -468,23 +394,18 @@ describe('keychain', () => { }) it('throws with invalid key names', async () => { - const err = await ks.findKeyByName(undefined).then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME') + // @ts-expect-error invalid parameters + await expect(ks.findKeyByName(undefined)).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_KEY_NAME') }) }) describe('key removal', () => { it('cannot remove the "self" key', async () => { - const err = await ks.removeKey('self').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME') + await expect(ks.removeKey('self')).to.eventually.be.rejected.with.property('code', 'ERR_INVALID_KEY_NAME') }) it('cannot remove an unknown key', async () => { - const err = await ks.removeKey('not-there').then(fail, err => err) - expect(err).to.exist() - expect(err).to.have.property('code', 'ERR_KEY_NOT_FOUND') + await expect(ks.removeKey('not-there')).to.eventually.be.rejected.with.property('code', 'ERR_KEY_NOT_FOUND') }) it('can remove a known key', async () => { @@ -496,10 +417,10 @@ describe('keychain', () => { }) describe('rotate keychain passphrase', () => { - let oldPass - let kc - let options - let ds + let oldPass: string + let kc: KeyChain + let options: KeyChainInit + let ds: Datastore before(async () => { ds = new MemoryDatastore() oldPass = `hello-${Date.now()}-${Date.now()}` @@ -512,36 +433,30 @@ describe('keychain', () => { hash: 'sha2-512' } } - kc = new Keychain(ds, options) + kc = new KeyChain(new Components({ datastore: ds }), options) await ds.open() }) it('should validate newPass is a string', async () => { - try { - await kc.rotateKeychainPass(oldPass, 1234567890) 
- } catch (/** @type {any} */ err) { - expect(err).to.exist() - } + // @ts-expect-error invalid parameters + await expect(kc.rotateKeychainPass(oldPass, 1234567890)).to.eventually.be.rejected() }) it('should validate oldPass is a string', async () => { - try { - await kc.rotateKeychainPass(1234, 'newInsecurePassword1') - } catch (/** @type {any} */ err) { - expect(err).to.exist() - } + // @ts-expect-error invalid parameters + await expect(kc.rotateKeychainPass(1234, 'newInsecurePassword1')).to.eventually.be.rejected() }) it('should validate newPass is at least 20 characters', async () => { try { await kc.rotateKeychainPass(oldPass, 'not20Chars') - } catch (/** @type {any} */ err) { + } catch (err: any) { expect(err).to.exist() } }) it('can rotate keychain passphrase', async () => { - await kc.createKey('keyCreatedWithOldPassword', 'rsa', 2048) + await kc.createKey('keyCreatedWithOldPassword', 'RSA', 2048) await kc.rotateKeychainPass(oldPass, 'newInsecurePassphrase') // Get Key PEM from datastore @@ -549,24 +464,23 @@ describe('keychain', () => { const res = await ds.get(dsname) const pem = uint8ArrayToString(res) - const oldDek = options.pass - ? crypto.pbkdf2( + const oldDek = options.pass != null + ? pbkdf2( options.pass, - options.dek.salt, - options.dek.iterationCount, - options.dek.keyLength, - options.dek.hash) + options.dek?.salt ?? 'salt', + options.dek?.iterationCount ?? 0, + options.dek?.keyLength ?? 0, + options.dek?.hash ?? 'sha2-256' + ) : '' - // eslint-disable-next-line no-constant-condition - const newDek = 'newInsecurePassphrase' - ? crypto.pbkdf2( - 'newInsecurePassphrase', - options.dek.salt, - options.dek.iterationCount, - options.dek.keyLength, - options.dek.hash) - : '' + const newDek = pbkdf2( + 'newInsecurePassphrase', + options.dek?.salt ?? 'salt', + options.dek?.iterationCount ?? 0, + options.dek?.keyLength ?? 0, + options.dek?.hash ?? 
'sha2-256' + ) // Dek with old password should not work: await expect(kc.importKey('keyWhosePassChanged', pem, oldDek)) @@ -579,26 +493,20 @@ describe('keychain', () => { }) describe('libp2p.keychain', () => { - it('needs a passphrase to be used, otherwise throws an error', async () => { - const [libp2p] = await peerUtils.createPeer({ + it.skip('needs a passphrase to be used, otherwise throws an error', async () => { + const libp2p = await createNode({ started: false }) - try { - await libp2p.keychain.createKey('keyName', 'rsa', 2048) - } catch (/** @type {any} */ err) { - expect(err).to.exist() - return - } - throw new Error('should throw an error using the keychain if no passphrase provided') + await expect(libp2p.keychain.createKey('keyName', 'RSA', 2048)).to.be.rejected() }) it('can be used when a passphrase is provided', async () => { - const [libp2p] = await peerUtils.createPeer({ + const libp2p = await createNode({ started: false, config: { + datastore: new MemoryDatastore(), keychain: { - datastore: new MemoryDatastore(), pass: '12345678901234567890' } } @@ -606,47 +514,45 @@ describe('libp2p.keychain', () => { await libp2p.loadKeychain() - const kInfo = await libp2p.keychain.createKey('keyName', 'ed25519') + const kInfo = await libp2p.keychain.createKey('keyName', 'Ed25519') expect(kInfo).to.exist() }) it('does not require a keychain passphrase', async () => { - const [libp2p] = await peerUtils.createPeer({ + const libp2p = await createNode({ started: false, config: { - keychain: { - datastore: new MemoryDatastore() - } + datastore: new MemoryDatastore() } }) await libp2p.loadKeychain() - const kInfo = await libp2p.keychain.createKey('keyName', 'ed25519') + const kInfo = await libp2p.keychain.createKey('keyName', 'Ed25519') expect(kInfo).to.exist() }) it('can reload keys', async () => { const datastore = new MemoryDatastore() - const [libp2p] = await peerUtils.createPeer({ + const libp2p = await createNode({ started: false, config: { + datastore, keychain: { - datastore, pass: '12345678901234567890' } } }) await libp2p.loadKeychain() - const kInfo = await libp2p.keychain.createKey('keyName', 'ed25519') + const kInfo = await libp2p.keychain.createKey('keyName', 'Ed25519') expect(kInfo).to.exist() - const [libp2p2] = await peerUtils.createPeer({ + const libp2p2 = await createNode({ started: false, config: { + datastore, keychain: { - datastore, pass: '12345678901234567890' } } diff --git a/test/keychain/peerid.spec.js b/test/keychain/peerid.spec.ts similarity index 71% rename from test/keychain/peerid.spec.js rename to test/keychain/peerid.spec.ts index 9d9592f3..6d5d1354 100644 --- a/test/keychain/peerid.spec.js +++ b/test/keychain/peerid.spec.ts @@ -1,13 +1,11 @@ /* eslint-env mocha */ -'use strict' -const { expect } = require('aegir/utils/chai') -const PeerId = require('peer-id') -const { base58btc } = require('multiformats/bases/base58') -const crypto = require('libp2p-crypto') -const rsaUtils = require('libp2p-crypto/src/keys/rsa-utils') -const rsaClass = require('libp2p-crypto/src/keys/rsa-class') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') +import { expect } from 'aegir/utils/chai.js' +import { base58btc } from 'multiformats/bases/base58' +import { supportedKeys, unmarshalPrivateKey, unmarshalPublicKey } from '@libp2p/crypto/keys' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { createFromPrivKey } from '@libp2p/peer-id-factory' const sample = 
{ id: '122019318b6e5e0cf93a2314bf01269a2cc23cd3dcd452d742cdb9379d8646f6e4a9', @@ -16,32 +14,39 @@ const sample = { } describe('peer ID', () => { - let peer - let publicKeyDer // a buffer + let peer: PeerId + let publicKeyDer: Uint8Array // a buffer before(async () => { const encoded = uint8ArrayFromString(sample.privKey, 'base64pad') - peer = await PeerId.createFromPrivKey(encoded) + peer = await createFromPrivKey(await unmarshalPrivateKey(encoded)) }) it('decoded public key', async () => { + if (peer.publicKey == null) { + throw new Error('PublicKey missing from PeerId') + } + + if (peer.privateKey == null) { + throw new Error('PrivateKey missing from PeerId') + } + // get protobuf version of the public key - const publicKeyProtobuf = peer.marshalPubKey() - const publicKey = crypto.keys.unmarshalPublicKey(publicKeyProtobuf) + const publicKeyProtobuf = peer.publicKey + const publicKey = unmarshalPublicKey(publicKeyProtobuf) publicKeyDer = publicKey.marshal() // get protobuf version of the private key - const privateKeyProtobuf = peer.marshalPrivKey() - const key = await crypto.keys.unmarshalPrivateKey(privateKeyProtobuf) + const privateKeyProtobuf = peer.privateKey + const key = await unmarshalPrivateKey(privateKeyProtobuf) expect(key).to.exist() }) it('encoded public key with DER', async () => { - const jwk = rsaUtils.pkixToJwk(publicKeyDer) - const rsa = new rsaClass.RsaPublicKey(jwk) + const rsa = await supportedKeys.rsa.unmarshalRsaPublicKey(publicKeyDer) const keyId = await rsa.hash() const kids = base58btc.encode(keyId).substring(1) - expect(kids).to.equal(peer.toB58String()) + expect(kids).to.equal(peer.toString()) }) it('encoded public key with JWT', async () => { @@ -52,16 +57,20 @@ describe('peer ID', () => { alg: 'RS256', kid: '2011-04-29' } - const rsa = new rsaClass.RsaPublicKey(jwk) + const rsa = new supportedKeys.rsa.RsaPublicKey(jwk) const keyId = await rsa.hash() const kids = base58btc.encode(keyId).substring(1) - expect(kids).to.equal(peer.toB58String()) + expect(kids).to.equal(peer.toString()) }) it('decoded private key', async () => { + if (peer.privateKey == null) { + throw new Error('PrivateKey missing from PeerId') + } + // get protobuf version of the private key - const privateKeyProtobuf = peer.marshalPrivKey() - const key = await crypto.keys.unmarshalPrivateKey(privateKeyProtobuf) + const privateKeyProtobuf = peer.privateKey + const key = await unmarshalPrivateKey(privateKeyProtobuf) expect(key).to.exist() }) }) diff --git a/test/metrics/index.node.js b/test/metrics/index.node.js deleted file mode 100644 index cdd43e7e..00000000 --- a/test/metrics/index.node.js +++ /dev/null @@ -1,146 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') - -const { randomBytes } = require('libp2p-crypto') -const pipe = require('it-pipe') -const concat = require('it-concat') -const delay = require('delay') - -const { createPeer } = require('../utils/creators/peer') -const baseOptions = require('../utils/base-options') - -describe('libp2p.metrics', () => { - let libp2p - - afterEach(async () => { - libp2p && await libp2p.stop() - }) - - it('should disable metrics by default', async () => { - [libp2p] = await createPeer({ - config: { - modules: baseOptions.modules - } - }) - - expect(libp2p.metrics).to.not.exist() - }) - - it('should start/stop metrics on startup/shutdown when enabled', async () => { - const config = { - ...baseOptions, - connectionManager: { - movingAverageIntervals: [10] - }, - metrics: { - 
enabled: true, - computeThrottleMaxQueueSize: 1, // compute after every message - movingAverageIntervals: [10] - } - } - ;[libp2p] = await createPeer({ started: false, config }) - - expect(libp2p.metrics).to.exist() - sinon.spy(libp2p.metrics, 'start') - sinon.spy(libp2p.metrics, 'stop') - - await libp2p.start() - expect(libp2p.metrics.start).to.have.property('callCount', 1) - - await libp2p.stop() - expect(libp2p.metrics.stop).to.have.property('callCount', 1) - }) - - it('should record metrics on connections and streams when enabled', async () => { - const config = { - ...baseOptions, - connectionManager: { - movingAverageIntervals: [10] - }, - metrics: { - enabled: true, - computeThrottleMaxQueueSize: 1, // compute after every message - movingAverageIntervals: [10] - } - } - let remoteLibp2p - ;[libp2p, remoteLibp2p] = await createPeer({ number: 2, config }) - - remoteLibp2p.handle('/echo/1.0.0', ({ stream }) => pipe(stream, stream)) - - const connection = await libp2p.dial(remoteLibp2p.peerId) - const { stream } = await connection.newStream('/echo/1.0.0') - - const bytes = randomBytes(512) - const result = await pipe( - [bytes], - stream, - concat - ) - - // Flush the call stack - await delay(0) - - expect(result).to.have.length(bytes.length) - // Protocol stats should equal the echo size - const protocolStats = libp2p.metrics.forProtocol('/echo/1.0.0').toJSON() - expect(Number(protocolStats.dataReceived)).to.equal(bytes.length) - expect(Number(protocolStats.dataSent)).to.equal(bytes.length) - - // A lot more traffic will be sent over the wire for the peer - const peerStats = libp2p.metrics.forPeer(connection.remotePeer).toJSON() - expect(Number(peerStats.dataReceived)).to.be.at.least(bytes.length) - await remoteLibp2p.stop() - }) - - it('should move disconnected peers to the old peers list', async () => { - const config = { - ...baseOptions, - connectionManager: { - movingAverageIntervals: [10] - }, - metrics: { - enabled: true, - computeThrottleMaxQueueSize: 1, // compute after every message - movingAverageIntervals: [10] - }, - config: { - peerDiscovery: { - autoDial: false - } - } - } - let remoteLibp2p - ;[libp2p, remoteLibp2p] = await createPeer({ number: 2, config }) - - remoteLibp2p.handle('/echo/1.0.0', ({ stream }) => pipe(stream, stream)) - - const connection = await libp2p.dial(remoteLibp2p.peerId) - const { stream } = await connection.newStream('/echo/1.0.0') - - const bytes = randomBytes(512) - await pipe( - [bytes], - stream, - concat - ) - - sinon.spy(libp2p.metrics, 'onPeerDisconnected') - await libp2p.hangUp(connection.remotePeer) - - // Flush call stack - await delay(0) - - expect(libp2p.metrics.onPeerDisconnected).to.have.property('callCount', 1) - expect(libp2p.metrics.peers).to.have.length(0) - - // forPeer should still give us the old peer stats, - // even though its not in the active peer list - const peerStats = libp2p.metrics.forPeer(connection.remotePeer).toJSON() - expect(Number(peerStats.dataReceived)).to.be.at.least(bytes.length) - await remoteLibp2p.stop() - }) -}) diff --git a/test/metrics/index.node.ts b/test/metrics/index.node.ts new file mode 100644 index 00000000..96871db4 --- /dev/null +++ b/test/metrics/index.node.ts @@ -0,0 +1,187 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import { randomBytes } from '@libp2p/crypto' +import { pipe } from 'it-pipe' +import toBuffer from 'it-to-buffer' +import delay from 'delay' +import { createNode, populateAddressBooks } from '../utils/creators/peer.js' 
+import { createBaseOptions } from '../utils/base-options.js' +import type { Libp2pNode } from '../../src/libp2p.js' +import type { Libp2pOptions } from '../../src/index.js' +import type { DefaultMetrics } from '../../src/metrics/index.js' + +describe('libp2p.metrics', () => { + let libp2p: Libp2pNode + + afterEach(async () => { + if (libp2p != null) { + await libp2p.stop() + } + }) + + it('should disable metrics by default', async () => { + libp2p = await createNode({ + config: createBaseOptions() + }) + + expect(libp2p.components.getMetrics()).to.be.undefined() + }) + + it('should start/stop metrics on startup/shutdown when enabled', async () => { + const config: Libp2pOptions = createBaseOptions({ + metrics: { + enabled: true, + computeThrottleMaxQueueSize: 1, // compute after every message + movingAverageIntervals: [10] + } + }) + libp2p = await createNode({ started: false, config }) + + const metrics = libp2p.components.getMetrics() as DefaultMetrics + + if (metrics == null) { + throw new Error('Metrics not configured') + } + + const metricsStartSpy = sinon.spy(metrics, 'start') + const metricsStopSpy = sinon.spy(metrics, 'stop') + + await libp2p.start() + expect(metricsStartSpy).to.have.property('callCount', 1) + + await libp2p.stop() + expect(metricsStopSpy).to.have.property('callCount', 1) + }) + + it('should record metrics on connections and streams when enabled', async () => { + let remoteLibp2p: Libp2pNode + ;[libp2p, remoteLibp2p] = await Promise.all([ + createNode({ + config: createBaseOptions({ + metrics: { + enabled: true, + computeThrottleMaxQueueSize: 1, // compute after every message + movingAverageIntervals: [10] + } + }) + }), + createNode({ + config: createBaseOptions({ + metrics: { + enabled: true, + computeThrottleMaxQueueSize: 1, // compute after every message + movingAverageIntervals: [10] + } + }) + }) + ]) + + await populateAddressBooks([libp2p, remoteLibp2p]) + + void remoteLibp2p.handle('/echo/1.0.0', ({ stream }) => { + void pipe(stream, stream) + }) + + const connection = await libp2p.dial(remoteLibp2p.peerId) + const { stream } = await connection.newStream('/echo/1.0.0') + + const bytes = randomBytes(512) + const result = await pipe( + [bytes], + stream, + async (source) => await toBuffer(source) + ) + + // Flush the call stack + await delay(0) + + expect(result).to.have.length(bytes.length) + + const metrics = libp2p.components.getMetrics() + + if (metrics == null) { + throw new Error('Metrics not configured') + } + + // Protocol stats should equal the echo size + const protocolStats = metrics.forProtocol('/echo/1.0.0')?.getSnapshot() + expect(protocolStats?.dataReceived).to.equal(BigInt(bytes.length)) + expect(protocolStats?.dataSent).to.equal(BigInt(bytes.length)) + + // A lot more traffic will be sent over the wire for the peer + const peerStats = metrics.forPeer(connection.remotePeer)?.getSnapshot() + expect(parseInt(peerStats?.dataReceived.toString() ?? 
'0')).to.be.at.least(bytes.length) + await remoteLibp2p.stop() + }) + + it('should move disconnected peers to the old peers list', async () => { + let remoteLibp2p + ;[libp2p, remoteLibp2p] = await Promise.all([ + createNode({ + config: createBaseOptions({ + metrics: { + enabled: true, + computeThrottleMaxQueueSize: 1, // compute after every message + movingAverageIntervals: [10] + }, + connectionManager: { + autoDial: false + } + }) + }), + createNode({ + config: createBaseOptions({ + metrics: { + enabled: true, + computeThrottleMaxQueueSize: 1, // compute after every message + movingAverageIntervals: [10] + }, + connectionManager: { + autoDial: false + } + }) + }) + ]) + await populateAddressBooks([libp2p, remoteLibp2p]) + + void remoteLibp2p.handle('/echo/1.0.0', ({ stream }) => { + void pipe(stream, stream) + }) + + const connection = await libp2p.dial(remoteLibp2p.peerId) + const { stream } = await connection.newStream('/echo/1.0.0') + + const bytes = randomBytes(512) + await pipe( + [bytes], + stream, + async (source) => await toBuffer(source) + ) + + const metrics = libp2p.components.getMetrics() + + if (metrics == null) { + throw new Error('Metrics not configured') + } + + const peerStats = metrics.forPeer(connection.remotePeer)?.getSnapshot() + expect(parseInt(peerStats?.dataReceived.toString() ?? '0')).to.be.at.least(bytes.length) + + const metricsOnPeerDisconnectedSpy = sinon.spy(metrics, 'onPeerDisconnected') + await libp2p.hangUp(connection.remotePeer) + + // Flush call stack + await delay(0) + + expect(metricsOnPeerDisconnectedSpy).to.have.property('callCount', 1) + + // forPeer should still give us the old peer stats, + // even though its not in the active peer list + const peerStatsAfterHangup = metrics.forPeer(connection.remotePeer)?.getSnapshot() + expect(parseInt(peerStatsAfterHangup?.dataReceived.toString() ?? 
'0')).to.be.at.least(bytes.length) + + await remoteLibp2p.stop() + }) +}) diff --git a/test/metrics/index.spec.js b/test/metrics/index.spec.js deleted file mode 100644 index 7107e97b..00000000 --- a/test/metrics/index.spec.js +++ /dev/null @@ -1,275 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') -const { randomBytes } = require('libp2p-crypto') -const duplexPair = require('it-pair/duplex') -const pipe = require('it-pipe') -const concat = require('it-concat') -const pushable = require('it-pushable') -const { consume } = require('streaming-iterables') -const delay = require('delay') - -const Metrics = require('../../src/metrics') -const Stats = require('../../src/metrics/stats') -const { createPeerId } = require('../utils/creators/peer') - -describe('Metrics', () => { - let peerId - let peerId2 - - before(async () => { - [peerId, peerId2] = await createPeerId({ number: 2 }) - }) - - afterEach(() => { - sinon.restore() - }) - - it('should not track data if not started', async () => { - const [local, remote] = duplexPair() - const metrics = new Metrics({ - computeThrottleMaxQueueSize: 1, // compute after every message - movingAverageIntervals: [10, 100, 1000] - }) - - metrics.trackStream({ - stream: local, - remotePeer: peerId - }) - - // Echo back - pipe(remote, remote) - - const bytes = randomBytes(1024) - - const results = await pipe( - [bytes], - local, - concat - ) - - // Flush the call stack - await delay(0) - - expect(results.length).to.eql(bytes.length) - - expect(metrics.forPeer(peerId)).to.equal(undefined) - expect(metrics.peers).to.eql([]) - const globalStats = metrics.global - expect(globalStats.snapshot.dataReceived.toNumber()).to.equal(0) - expect(globalStats.snapshot.dataSent.toNumber()).to.equal(0) - }) - - it('should be able to track a duplex stream', async () => { - const [local, remote] = duplexPair() - const metrics = new Metrics({ - computeThrottleMaxQueueSize: 1, // compute after every message - movingAverageIntervals: [10, 100, 1000] - }) - - metrics.trackStream({ - stream: local, - remotePeer: peerId - }) - metrics.start() - - // Echo back - pipe(remote, remote) - - const bytes = randomBytes(1024) - const input = (async function * () { - let i = 0 - while (i < 10) { - await delay(10) - yield bytes - i++ - } - })() - - const results = await pipe( - input, - local, - concat - ) - - // Flush the call stack - await delay(0) - - expect(results.length).to.eql(bytes.length * 10) - - const stats = metrics.forPeer(peerId) - expect(metrics.peers).to.eql([peerId.toB58String()]) - expect(stats.snapshot.dataReceived.toNumber()).to.equal(results.length) - expect(stats.snapshot.dataSent.toNumber()).to.equal(results.length) - - const globalStats = metrics.global - expect(globalStats.snapshot.dataReceived.toNumber()).to.equal(results.length) - expect(globalStats.snapshot.dataSent.toNumber()).to.equal(results.length) - }) - - it('should properly track global stats', async () => { - const [local, remote] = duplexPair() - const [local2, remote2] = duplexPair() - const metrics = new Metrics({ - computeThrottleMaxQueueSize: 1, // compute after every message - movingAverageIntervals: [10, 100, 1000] - }) - const protocol = '/echo/1.0.0' - metrics.start() - - // Echo back remotes - pipe(remote, remote) - pipe(remote2, remote2) - - metrics.trackStream({ - stream: local, - remotePeer: peerId, - protocol - }) - metrics.trackStream({ - stream: local2, - remotePeer: peerId2, - protocol - }) - - const bytes = 
randomBytes(1024) - - await Promise.all([ - pipe([bytes], local, consume), - pipe([bytes], local2, consume) - ]) - - // Flush the call stack - await delay(0) - - expect(metrics.peers).to.eql([peerId.toB58String(), peerId2.toB58String()]) - // Verify global metrics - const globalStats = metrics.global - expect(globalStats.snapshot.dataReceived.toNumber()).to.equal(bytes.length * 2) - expect(globalStats.snapshot.dataSent.toNumber()).to.equal(bytes.length * 2) - - // Verify individual metrics - for (const peer of [peerId, peerId2]) { - const stats = metrics.forPeer(peer) - - expect(stats.snapshot.dataReceived.toNumber()).to.equal(bytes.length) - expect(stats.snapshot.dataSent.toNumber()).to.equal(bytes.length) - } - - // Verify protocol metrics - const protocolStats = metrics.forProtocol(protocol) - expect(metrics.protocols).to.eql([protocol]) - expect(protocolStats.snapshot.dataReceived.toNumber()).to.equal(bytes.length * 2) - expect(protocolStats.snapshot.dataSent.toNumber()).to.equal(bytes.length * 2) - }) - - it('should be able to replace an existing peer', async () => { - const [local, remote] = duplexPair() - const metrics = new Metrics({ - computeThrottleMaxQueueSize: 1, // compute after every message - movingAverageIntervals: [10, 100, 1000] - }) - metrics.start() - - // Echo back remotes - pipe(remote, remote) - - const mockPeer = { - toB58String: () => 'a temporary id' - } - metrics.trackStream({ - stream: local, - remotePeer: mockPeer - }) - - const bytes = randomBytes(1024) - const input = pushable() - - const deferredPromise = pipe(input, local, consume) - - input.push(bytes) - - await delay(0) - - metrics.updatePlaceholder(mockPeer, peerId) - mockPeer.toB58String = peerId.toB58String.bind(peerId) - - input.push(bytes) - input.end() - - await deferredPromise - await delay(0) - - expect(metrics.peers).to.eql([peerId.toB58String()]) - // Verify global metrics - const globalStats = metrics.global - expect(globalStats.snapshot.dataReceived.toNumber()).to.equal(bytes.length * 2) - expect(globalStats.snapshot.dataSent.toNumber()).to.equal(bytes.length * 2) - - // Verify individual metrics - const stats = metrics.forPeer(peerId) - - expect(stats.snapshot.dataReceived.toNumber()).to.equal(bytes.length * 2) - expect(stats.snapshot.dataSent.toNumber()).to.equal(bytes.length * 2) - }) - - it('should only keep track of a set number of disconnected peers', () => { - const spies = [] - const trackedPeers = new Map([...new Array(50)].map((_, index) => { - const stat = new Stats([], { movingAverageIntervals: [] }) - spies.push(sinon.spy(stat, 'stop')) - return [String(index), stat] - })) - - const metrics = new Metrics({ - maxOldPeersRetention: 5 // Only keep track of 5 - }) - - // Clone so trackedPeers isn't modified - metrics._peerStats = new Map(trackedPeers) - - // Disconnect every peer - for (const id of trackedPeers.keys()) { - metrics.onPeerDisconnected({ - toB58String: () => id - }) - } - - // Verify only the last 5 have been retained - expect(metrics.peers).to.have.length(0) - const retainedPeers = [] - for (const id of trackedPeers.keys()) { - const stat = metrics.forPeer({ - toB58String: () => id - }) - if (stat) retainedPeers.push(id) - } - expect(retainedPeers).to.eql(['45', '46', '47', '48', '49']) - - // Verify all stats were stopped - expect(spies).to.have.length(50) - for (const spy of spies) { - expect(spy).to.have.property('callCount', 1) - } - }) - - it('should allow components to track metrics', () => { - const metrics = new Metrics({ - maxOldPeersRetention: 5 // Only keep 
track of 5 - }) - - expect(metrics.getComponentMetrics()).to.be.empty() - - const component = 'my-component' - const metric = 'some-metric' - const value = 1 - - metrics.updateComponentMetric({ component, metric, value }) - - expect(metrics.getComponentMetrics()).to.have.lengthOf(1) - expect(metrics.getComponentMetrics().get('libp2p').get(component)).to.have.lengthOf(1) - expect(metrics.getComponentMetrics().get('libp2p').get(component).get(metric)).to.equal(value) - }) -}) diff --git a/test/metrics/index.spec.ts b/test/metrics/index.spec.ts new file mode 100644 index 00000000..c63fe14d --- /dev/null +++ b/test/metrics/index.spec.ts @@ -0,0 +1,301 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import { randomBytes } from '@libp2p/crypto' +import { duplexPair } from 'it-pair/duplex' +import { pipe } from 'it-pipe' +import { pushable } from 'it-pushable' +import drain from 'it-drain' +import delay from 'delay' +import { DefaultMetrics } from '../../src/metrics/index.js' +import { DefaultStats } from '../../src/metrics/stats.js' +import { createPeerId } from '../utils/creators/peer.js' +import toBuffer from 'it-to-buffer' +import { createEd25519PeerId } from '@libp2p/peer-id-factory' +import { peerIdFromString } from '@libp2p/peer-id' +import type { PeerId } from '@libp2p/interfaces/peer-id' + +describe('Metrics', () => { + let peerId: PeerId + let peerId2: PeerId + + before(async () => { + peerId = await createPeerId() + peerId2 = await createPeerId() + }) + + afterEach(() => { + sinon.restore() + }) + + it('should not track data if not started', async () => { + const [local, remote] = duplexPair() + const metrics = new DefaultMetrics({ + enabled: true, + computeThrottleMaxQueueSize: 1, // compute after every message + movingAverageIntervals: [10, 100, 1000], + computeThrottleTimeout: 1000, + maxOldPeersRetention: 50 + }) + + metrics.trackStream({ + stream: local, + remotePeer: peerId + }) + + // Echo back + void pipe(remote, remote) + + const bytes = randomBytes(1024) + + const results = await pipe( + [bytes], + local, + async (source) => await toBuffer(source) + ) + + // Flush the call stack + await delay(0) + + expect(results.length).to.equal(bytes.length) + expect(metrics.getPeers()).to.be.empty() + + expect(metrics.forPeer(peerId)).to.equal(undefined) + const snapshot = metrics.globalStats.getSnapshot() + expect(snapshot.dataReceived).to.equal(0n) + expect(snapshot.dataSent).to.equal(0n) + }) + + it('should be able to track a duplex stream', async () => { + const [local, remote] = duplexPair() + const metrics = new DefaultMetrics({ + enabled: true, + computeThrottleMaxQueueSize: 1, // compute after every message + movingAverageIntervals: [10, 100, 1000], + computeThrottleTimeout: 1000, + maxOldPeersRetention: 50 + }) + + await metrics.start() + + metrics.trackStream({ + stream: local, + remotePeer: peerId + }) + + // Echo back + void pipe(remote, remote) + + const bytes = randomBytes(1024) + const input = (async function * () { + let i = 0 + while (i < 10) { + await delay(10) + yield bytes + i++ + } + })() + + const results = await pipe( + input, + local, + async (source) => await toBuffer(source) + ) + + // Flush the call stack + await delay(0) + + expect(results.length).to.eql(bytes.length * 10) + expect(metrics.getPeers()).to.include(peerId.toString()) + + const snapshot = metrics.forPeer(peerId)?.getSnapshot() + expect(snapshot?.dataReceived).to.equal(BigInt(results.length)) + 
expect(snapshot?.dataSent).to.equal(BigInt(results.length)) + + const globalSnapshot = metrics.globalStats.getSnapshot() + expect(globalSnapshot.dataReceived).to.equal(BigInt(results.length)) + expect(globalSnapshot.dataSent).to.equal(BigInt(results.length)) + }) + + it('should properly track global stats', async () => { + const [local, remote] = duplexPair() + const [local2, remote2] = duplexPair() + const metrics = new DefaultMetrics({ + enabled: true, + computeThrottleMaxQueueSize: 1, // compute after every message + movingAverageIntervals: [10, 100, 1000], + computeThrottleTimeout: 1000, + maxOldPeersRetention: 50 + }) + const protocol = '/echo/1.0.0' + await metrics.start() + + // Echo back remotes + void pipe(remote, remote) + void pipe(remote2, remote2) + + metrics.trackStream({ + stream: local, + remotePeer: peerId, + protocol + }) + metrics.trackStream({ + stream: local2, + remotePeer: peerId2, + protocol + }) + + const bytes = randomBytes(1024) + + await Promise.all([ + pipe([bytes], local, drain), + pipe([bytes], local2, drain) + ]) + + // Flush the call stack + await delay(0) + + expect(metrics.getPeers()).to.eql([peerId.toString(), peerId2.toString()]) + // Verify global metrics + const globalStats = metrics.globalStats.getSnapshot() + expect(globalStats.dataReceived).to.equal(BigInt(bytes.length * 2)) + expect(globalStats.dataSent).to.equal(BigInt(bytes.length * 2)) + + // Verify individual metrics + for (const peer of [peerId, peerId2]) { + const stats = metrics.forPeer(peer)?.getSnapshot() + + expect(stats?.dataReceived).to.equal(BigInt(bytes.length)) + expect(stats?.dataSent).to.equal(BigInt(bytes.length)) + } + + // Verify protocol metrics + const protocolStats = metrics.forProtocol(protocol)?.getSnapshot() + expect(metrics.getProtocols()).to.eql([protocol]) + expect(protocolStats?.dataReceived).to.equal(BigInt(bytes.length * 2)) + expect(protocolStats?.dataSent).to.equal(BigInt(bytes.length * 2)) + }) + + it('should be able to replace an existing peer', async () => { + const [local, remote] = duplexPair() + const metrics = new DefaultMetrics({ + enabled: true, + computeThrottleMaxQueueSize: 1, // compute after every message + movingAverageIntervals: [10, 100, 1000], + computeThrottleTimeout: 1000, + maxOldPeersRetention: 50 + }) + await metrics.start() + + // Echo back remotes + void pipe(remote, remote) + + const mockPeer = await createEd25519PeerId() + + metrics.trackStream({ + stream: local, + remotePeer: mockPeer + }) + + const bytes = randomBytes(1024) + const input = pushable() + + const deferredPromise = pipe(input, local, drain) + + input.push(bytes) + + await delay(0) + + metrics.updatePlaceholder(mockPeer, peerId) + mockPeer.toString = peerId.toString.bind(peerId) + + input.push(bytes) + input.end() + + await deferredPromise + await delay(0) + + expect(metrics.getPeers()).to.eql([peerId.toString()]) + // Verify global metrics + const globalStats = metrics.globalStats.getSnapshot() + expect(globalStats.dataReceived).to.equal(BigInt(bytes.length * 2)) + expect(globalStats.dataSent).to.equal(BigInt(bytes.length * 2)) + + // Verify individual metrics + const stats = metrics.forPeer(peerId)?.getSnapshot() + + expect(stats?.dataReceived).to.equal(BigInt(bytes.length * 2)) + expect(stats?.dataSent).to.equal(BigInt(bytes.length * 2)) + }) + + it.skip('should only keep track of a set number of disconnected peers', async () => { + const spies: sinon.SinonSpy[] = [] + const peerIds = await Promise.all( + new Array(50).fill(0).map(async () => await createEd25519PeerId()) + 
) + + const trackedPeers = new Map([...new Array(50)].fill(0).map((_, index) => { + const stat = new DefaultStats({ + enabled: true, + initialCounters: ['dataReceived', 'dataSent'], + computeThrottleMaxQueueSize: 1000, + computeThrottleTimeout: 5000, + movingAverageIntervals: [] + }) + spies.push(sinon.spy(stat, 'stop')) + return [peerIds[index].toString(), stat] + })) + + const metrics = new DefaultMetrics({ + enabled: true, + computeThrottleMaxQueueSize: 1, // compute after every message + movingAverageIntervals: [10, 100, 1000], + computeThrottleTimeout: 1000, + maxOldPeersRetention: 5 // Only keep track of 5 + }) + + // Disconnect every peer + for (const id of trackedPeers.keys()) { + metrics.onPeerDisconnected(peerIdFromString(id)) + } + + // Verify only the last 5 have been retained + expect(metrics.getPeers()).to.have.length(0) + const retainedPeers = [] + for (const id of trackedPeers.keys()) { + const stat = metrics.forPeer(peerIdFromString(id)) + if (stat != null) retainedPeers.push(id) + } + expect(retainedPeers).to.eql(['45', '46', '47', '48', '49']) + + // Verify all stats were stopped + expect(spies).to.have.length(50) + for (const spy of spies) { + expect(spy).to.have.property('callCount', 1) + } + }) + + it('should allow components to track metrics', () => { + const metrics = new DefaultMetrics({ + enabled: true, + computeThrottleMaxQueueSize: 1, // compute after every message + movingAverageIntervals: [10, 100, 1000], + computeThrottleTimeout: 1000, + maxOldPeersRetention: 50 + }) + + expect(metrics.getComponentMetrics()).to.be.empty() + + const system = 'libp2p' + const component = 'my-component' + const metric = 'some-metric' + const value = 1 + + metrics.updateComponentMetric({ system, component, metric, value }) + + expect(metrics.getComponentMetrics()).to.have.lengthOf(1) + expect(metrics.getComponentMetrics().get('libp2p')?.get(component)).to.have.lengthOf(1) + expect(metrics.getComponentMetrics().get('libp2p')?.get(component)?.get(metric)).to.equal(value) + }) +}) diff --git a/test/nat-manager/nat-manager.node.js b/test/nat-manager/nat-manager.node.js deleted file mode 100644 index 9dcf2b57..00000000 --- a/test/nat-manager/nat-manager.node.js +++ /dev/null @@ -1,295 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') -const { networkInterfaces } = require('os') -const AddressManager = require('../../src/address-manager') -const TransportManager = require('../../src/transport-manager') -const Transport = require('libp2p-tcp') -const mockUpgrader = require('../utils/mockUpgrader') -const NatManager = require('../../src/nat-manager') -const delay = require('delay') -const peers = require('../fixtures/peers') -const PeerId = require('peer-id') -const { - codes: { ERR_INVALID_PARAMETERS } -} = require('../../src/errors') - -const DEFAULT_ADDRESSES = [ - '/ip4/127.0.0.1/tcp/0', - '/ip4/0.0.0.0/tcp/0' -] - -describe('Nat Manager (TCP)', () => { - const teardown = [] - - async function createNatManager (addrs = DEFAULT_ADDRESSES, natManagerOptions = {}) { - const peerId = await PeerId.createFromJSON(peers[0]) - const addressManager = new AddressManager(peerId, { listen: addrs }) - const transportManager = new TransportManager({ - libp2p: { - peerId, - addressManager, - peerStore: { - addressBook: { - consumePeerRecord: sinon.stub() - } - } - }, - upgrader: mockUpgrader, - onConnection: () => {}, - faultTolerance: TransportManager.FaultTolerance.NO_FATAL - }) - const natManager = new NatManager({ - 
peerId, - addressManager, - transportManager, - enabled: true, - ...natManagerOptions - }) - - natManager._client = { - externalIp: sinon.stub().resolves('82.3.1.5'), - map: sinon.stub(), - destroy: sinon.stub() - } - - transportManager.add(Transport.prototype[Symbol.toStringTag], Transport) - await transportManager.listen(addressManager.getListenAddrs()) - - teardown.push(async () => { - await natManager.stop() - await transportManager.removeAll() - expect(transportManager._transports.size).to.equal(0) - }) - - return { - natManager, - addressManager, - transportManager - } - } - - afterEach(() => Promise.all(teardown.map(t => t()))) - - it('should map TCP connections to external ports', async () => { - const { - natManager, - addressManager, - transportManager - } = await createNatManager() - - let addressChangedEventFired = false - - addressManager.on('change:addresses', () => { - addressChangedEventFired = true - }) - - natManager._client = { - externalIp: sinon.stub().resolves('82.3.1.5'), - map: sinon.stub(), - destroy: sinon.stub() - } - - let observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.be.empty() - - await natManager._start() - - observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.not.be.empty() - - const internalPorts = transportManager.getAddrs() - .filter(ma => ma.isThinWaistAddress()) - .map(ma => ma.toOptions()) - .filter(({ host, transport }) => host !== '127.0.0.1' && transport === 'tcp') - .map(({ port }) => port) - - expect(natManager._client.map.called).to.be.true() - - internalPorts.forEach(port => { - expect(natManager._client.map.getCall(0).args[0]).to.include({ - privatePort: port, - protocol: 'TCP' - }) - }) - - expect(addressChangedEventFired).to.be.true() - }) - - it('should not map TCP connections when double-natted', async () => { - const { - natManager, - addressManager - } = await createNatManager() - - natManager._client.externalIp = sinon.stub().resolves('192.168.1.1') - - let observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.be.empty() - - await expect(natManager._start()).to.eventually.be.rejectedWith(/double NAT/) - - observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.be.empty() - - expect(natManager._client.map.called).to.be.false() - }) - - it('should do nothing when disabled', async () => { - const { - natManager - } = await createNatManager(DEFAULT_ADDRESSES, { - enabled: false - }) - - natManager.start() - - await delay(100) - - expect(natManager._client.externalIp.called).to.be.false() - expect(natManager._client.map.called).to.be.false() - }) - - it('should not map non-ipv4 connections to external ports', async () => { - const { - natManager, - addressManager - } = await createNatManager([ - '/ip6/::/tcp/0' - ]) - - let observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.be.empty() - - await natManager._start() - - observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.be.empty() - }) - - it('should not map non-ipv6 loopback connections to external ports', async () => { - const { - natManager, - addressManager - } = await createNatManager([ - '/ip6/::1/tcp/0' - ]) - - let observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.be.empty() - - await natManager._start() - - observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - 
expect(observed).to.be.empty() - }) - - it('should not map non-TCP connections to external ports', async () => { - const { - natManager, - addressManager - } = await createNatManager([ - '/ip4/0.0.0.0/utp' - ]) - - let observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.be.empty() - - await natManager._start() - - observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.be.empty() - }) - - it('should not map loopback connections to external ports', async () => { - const { - natManager, - addressManager - } = await createNatManager([ - '/ip4/127.0.0.1/tcp/0' - ]) - - let observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.be.empty() - - await natManager._start() - - observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.be.empty() - }) - - it('should not map non-thin-waist connections to external ports', async () => { - const { - natManager, - addressManager - } = await createNatManager([ - '/ip4/0.0.0.0/tcp/0/sctp/0' - ]) - - let observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.be.empty() - - await natManager._start() - - observed = addressManager.getObservedAddrs().map(ma => ma.toString()) - expect(observed).to.be.empty() - }) - - it('should specify large enough TTL', () => { - expect(() => { - new NatManager({ ttl: 5 }) // eslint-disable-line no-new - }).to.throw().with.property('code', ERR_INVALID_PARAMETERS) - }) - - it('shuts the nat api down when stopping', async function () { - if (process.env.CI) { - return this.skip('CI environments will not let us map external ports') - } - - function findRoutableAddress () { - const interfaces = networkInterfaces() - - for (const name of Object.keys(interfaces)) { - for (const iface of interfaces[name]) { - // Skip over non-IPv4 and internal (i.e. 
127.0.0.1) addresses - if (iface.family === 'IPv4' && !iface.internal) { - return iface.address - } - } - } - } - - const addr = findRoutableAddress() - - if (!addr) { - // skip test if no non-loopback address is found - return this.skip() - } - - const { - natManager - } = await createNatManager([ - `/ip4/${addr}/tcp/0` - ], { - // so we don't try to look up the current computer's external address - externalIp: '184.12.31.4' - }) - - // use the actual nat manager client not the stub - delete natManager._client - - await natManager._start() - - const client = natManager._client - expect(client).to.be.ok() - - // ensure the client was stopped - const spy = sinon.spy(client, 'destroy') - - await natManager.stop() - - expect(spy.called).to.be.true() - }) -}) diff --git a/test/nat-manager/nat-manager.node.ts b/test/nat-manager/nat-manager.node.ts new file mode 100644 index 00000000..55b98849 --- /dev/null +++ b/test/nat-manager/nat-manager.node.ts @@ -0,0 +1,231 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import { DefaultAddressManager } from '../../src/address-manager/index.js' +import { DefaultTransportManager, FAULT_TOLERANCE } from '../../src/transport-manager.js' +import { TCP } from '@libp2p/tcp' +import { mockUpgrader } from '@libp2p/interface-compliance-tests/mocks' +import { NatManager } from '../../src/nat-manager.js' +import delay from 'delay' +import Peers from '../fixtures/peers.js' +import { codes } from '../../src/errors.js' +import { createFromJSON } from '@libp2p/peer-id-factory' +import { Components } from '@libp2p/interfaces/components' +import type { NatAPI } from '@achingbrain/nat-port-mapper' +import { StubbedInstance, stubInterface } from 'ts-sinon' + +const DEFAULT_ADDRESSES = [ + '/ip4/127.0.0.1/tcp/0', + '/ip4/0.0.0.0/tcp/0' +] + +describe('Nat Manager (TCP)', () => { + const teardown: Array<() => Promise> = [] + let client: StubbedInstance + + async function createNatManager (addrs = DEFAULT_ADDRESSES, natManagerOptions = {}) { + const components = new Components({ + peerId: await createFromJSON(Peers[0]), + upgrader: mockUpgrader() + }) + components.setAddressManager(new DefaultAddressManager(components, { listen: addrs })) + components.setTransportManager(new DefaultTransportManager(components, { + faultTolerance: FAULT_TOLERANCE.NO_FATAL + })) + + const natManager = new NatManager(components, { + enabled: true, + keepAlive: true, + ...natManagerOptions + }) + + client = stubInterface() + + natManager._getClient = async () => { + return client + } + + components.getTransportManager().add(new TCP()) + await components.getTransportManager().listen(components.getAddressManager().getListenAddrs()) + + teardown.push(async () => { + await natManager.stop() + await components.getTransportManager().removeAll() + }) + + return { + natManager, + components + } + } + + afterEach(async () => await Promise.all(teardown.map(async t => await t()))) + + it('should map TCP connections to external ports', async () => { + const { + natManager, + components + } = await createNatManager() + + let addressChangedEventFired = false + + components.getAddressManager().addEventListener('change:addresses', () => { + addressChangedEventFired = true + }) + + client.externalIp.resolves('82.3.1.5') + + let observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + + await natManager._start() + + observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + 
expect(observed).to.not.be.empty() + + const internalPorts = components.getTransportManager().getAddrs() + .filter(ma => ma.isThinWaistAddress()) + .map(ma => ma.toOptions()) + .filter(({ host, transport }) => host !== '127.0.0.1' && transport === 'tcp') + .map(({ port }) => port) + + expect(client.map.called).to.be.true() + + internalPorts.forEach(port => { + expect(client.map.getCall(0).args[0]).to.include({ + localPort: port, + protocol: 'TCP' + }) + }) + + expect(addressChangedEventFired).to.be.true() + }) + + it('should not map TCP connections when double-natted', async () => { + const { + natManager, + components + } = await createNatManager() + + client.externalIp.resolves('192.168.1.1') + + let observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + + await expect(natManager._start()).to.eventually.be.rejectedWith(/double NAT/) + + observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + + expect(client.map.called).to.be.false() + }) + + it('should do nothing when disabled', async () => { + const { + natManager + } = await createNatManager(DEFAULT_ADDRESSES, { + enabled: false + }) + + natManager.start() + + await delay(100) + + expect(client.externalIp.called).to.be.false() + expect(client.map.called).to.be.false() + }) + + it('should not map non-ipv4 connections to external ports', async () => { + const { + natManager, + components + } = await createNatManager([ + '/ip6/::/tcp/0' + ]) + + let observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + + await natManager._start() + + observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + }) + + it('should not map non-ipv6 loopback connections to external ports', async () => { + const { + natManager, + components + } = await createNatManager([ + '/ip6/::1/tcp/0' + ]) + + let observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + + await natManager._start() + + observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + }) + + it('should not map non-TCP connections to external ports', async () => { + const { + natManager, + components + } = await createNatManager([ + '/ip4/0.0.0.0/utp' + ]) + + let observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + + await natManager._start() + + observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + }) + + it('should not map loopback connections to external ports', async () => { + const { + natManager, + components + } = await createNatManager([ + '/ip4/127.0.0.1/tcp/0' + ]) + + let observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + + await natManager._start() + + observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + }) + + it('should not map non-thin-waist connections to external ports', async () => { + const { + natManager, + components + } = await createNatManager([ + '/ip4/0.0.0.0/tcp/0/sctp/0' + ]) + + let observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + + await 
natManager._start() + + observed = components.getAddressManager().getObservedAddrs().map(ma => ma.toString()) + expect(observed).to.be.empty() + }) + + it('should specify large enough TTL', async () => { + const peerId = await createFromJSON(Peers[0]) + + expect(() => { + // @ts-expect-error invalid parameters + new NatManager(new Components({ peerId }), { ttl: 5 }) // eslint-disable-line no-new + }).to.throw().with.property('code', codes.ERR_INVALID_PARAMETERS) + }) +}) diff --git a/test/peer-discovery/index.node.js b/test/peer-discovery/index.node.js deleted file mode 100644 index cc5618bb..00000000 --- a/test/peer-discovery/index.node.js +++ /dev/null @@ -1,206 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') -const defer = require('p-defer') -const mergeOptions = require('merge-options') - -const Bootstrap = require('libp2p-bootstrap') -const crypto = require('libp2p-crypto') -const KadDht = require('libp2p-kad-dht') -const MulticastDNS = require('libp2p-mdns') -const { Multiaddr } = require('multiaddr') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') - -const Libp2p = require('../../src') -const baseOptions = require('../utils/base-options') -const { createPeerId } = require('../utils/creators/peer') - -const listenAddr = new Multiaddr('/ip4/127.0.0.1/tcp/0') - -describe('peer discovery scenarios', () => { - let peerId, remotePeerId1, remotePeerId2 - let libp2p - - before(async () => { - [peerId, remotePeerId1, remotePeerId2] = await createPeerId({ number: 3 }) - }) - - afterEach(async () => { - libp2p && await libp2p.stop() - }) - - it('should ignore self on discovery', async () => { - libp2p = new Libp2p(mergeOptions(baseOptions, { - peerId, - modules: { - peerDiscovery: [MulticastDNS] - } - })) - - await libp2p.start() - const discoverySpy = sinon.spy() - libp2p.on('peer:discovery', discoverySpy) - libp2p._discovery.get('mdns').emit('peer', { id: libp2p.peerId }) - - expect(discoverySpy.called).to.eql(false) - }) - - it('bootstrap should discover all peers in the list', async () => { - const deferred = defer() - - const bootstrappers = [ - `${listenAddr}/p2p/${remotePeerId1.toB58String()}`, - `${listenAddr}/p2p/${remotePeerId2.toB58String()}` - ] - - libp2p = new Libp2p(mergeOptions(baseOptions, { - peerId, - addresses: { - listen: [listenAddr] - }, - modules: { - peerDiscovery: [Bootstrap] - }, - config: { - peerDiscovery: { - autoDial: false, - bootstrap: { - enabled: true, - list: bootstrappers - } - } - } - })) - - const expectedPeers = new Set([ - remotePeerId1.toB58String(), - remotePeerId2.toB58String() - ]) - - libp2p.on('peer:discovery', (peerId) => { - expectedPeers.delete(peerId.toB58String()) - if (expectedPeers.size === 0) { - libp2p.removeAllListeners('peer:discovery') - deferred.resolve() - } - }) - - await libp2p.start() - - return deferred.promise - }) - - it('MulticastDNS should discover all peers on the local network', async () => { - const deferred = defer() - - const getConfig = (peerId) => mergeOptions(baseOptions, { - peerId, - addresses: { - listen: [listenAddr] - }, - modules: { - peerDiscovery: [MulticastDNS] - }, - config: { - peerDiscovery: { - autoDial: false, - mdns: { - enabled: true, - interval: 200, // discover quickly - // use a random tag to prevent CI collision - serviceTag: uint8ArrayToString(crypto.randomBytes(10), 'base16') - } - } - } - }) - - libp2p = new Libp2p(getConfig(peerId)) - const remoteLibp2p1 = new 
Libp2p(getConfig(remotePeerId1)) - const remoteLibp2p2 = new Libp2p(getConfig(remotePeerId2)) - - const expectedPeers = new Set([ - remotePeerId1.toB58String(), - remotePeerId2.toB58String() - ]) - - libp2p.on('peer:discovery', (peerId) => { - expectedPeers.delete(peerId.toB58String()) - if (expectedPeers.size === 0) { - libp2p.removeAllListeners('peer:discovery') - deferred.resolve() - } - }) - - await Promise.all([ - remoteLibp2p1.start(), - remoteLibp2p2.start(), - libp2p.start() - ]) - - await deferred.promise - - await remoteLibp2p1.stop() - await remoteLibp2p2.stop() - }) - - it('kad-dht should discover other peers', async () => { - const deferred = defer() - - const getConfig = (peerId) => mergeOptions(baseOptions, { - peerId, - addresses: { - listen: [listenAddr] - }, - modules: { - dht: KadDht - }, - config: { - peerDiscovery: { - autoDial: false - }, - dht: { - enabled: true - } - } - }) - - const localConfig = getConfig(peerId) - - libp2p = new Libp2p(localConfig) - - const remoteLibp2p1 = new Libp2p(getConfig(remotePeerId1)) - const remoteLibp2p2 = new Libp2p(getConfig(remotePeerId2)) - - libp2p.on('peer:discovery', (peerId) => { - if (peerId.toB58String() === remotePeerId1.toB58String()) { - libp2p.removeAllListeners('peer:discovery') - deferred.resolve() - } - }) - - await Promise.all([ - libp2p.start(), - remoteLibp2p1.start(), - remoteLibp2p2.start() - ]) - - await libp2p.peerStore.addressBook.set(remotePeerId1, remoteLibp2p1.multiaddrs) - await remoteLibp2p2.peerStore.addressBook.set(remotePeerId1, remoteLibp2p1.multiaddrs) - - // Topology: - // A -> B - // C -> B - await Promise.all([ - libp2p.dial(remotePeerId1), - remoteLibp2p2.dial(remotePeerId1) - ]) - - await deferred.promise - return Promise.all([ - remoteLibp2p1.stop(), - remoteLibp2p2.stop() - ]) - }) -}) diff --git a/test/peer-discovery/index.node.ts b/test/peer-discovery/index.node.ts new file mode 100644 index 00000000..23af1eb7 --- /dev/null +++ b/test/peer-discovery/index.node.ts @@ -0,0 +1,217 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import defer from 'p-defer' +import { Bootstrap } from '@libp2p/bootstrap' +import { randomBytes } from '@libp2p/crypto' +import { KadDHT } from '@libp2p/kad-dht' +import { MulticastDNS } from '@libp2p/mdns' +import { Multiaddr } from '@multiformats/multiaddr' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { createBaseOptions } from '../utils/base-options.js' +import { createPeerId } from '../utils/creators/peer.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { createLibp2pNode, Libp2pNode } from '../../src/libp2p.js' +import { CustomEvent } from '@libp2p/interfaces' +import type { PeerInfo } from '@libp2p/interfaces/peer-info' + +const listenAddr = new Multiaddr('/ip4/127.0.0.1/tcp/0') + +describe('peer discovery scenarios', () => { + let peerId: PeerId, remotePeerId1: PeerId, remotePeerId2: PeerId + let libp2p: Libp2pNode + + before(async () => { + [peerId, remotePeerId1, remotePeerId2] = await Promise.all([ + createPeerId(), + createPeerId(), + createPeerId() + ]) + }) + + afterEach(async () => { + if (libp2p != null) { + await libp2p.stop() + } + }) + + it('should ignore self on discovery', async () => { + libp2p = await createLibp2pNode(createBaseOptions({ + peerId, + peerDiscovery: [ + new MulticastDNS() + ] + })) + + await libp2p.start() + const discoverySpy = sinon.spy() + libp2p.addEventListener('peer:discovery', discoverySpy) + libp2p.onDiscoveryPeer(new 
CustomEvent('peer', { + detail: { + id: libp2p.peerId, + multiaddrs: [], + protocols: [] + } + })) + + expect(discoverySpy.called).to.eql(false) + }) + + it('bootstrap should discover all peers in the list', async () => { + const deferred = defer() + + const bootstrappers = [ + `${listenAddr.toString()}/p2p/${remotePeerId1.toString()}`, + `${listenAddr.toString()}/p2p/${remotePeerId2.toString()}` + ] + + libp2p = await createLibp2pNode(createBaseOptions({ + peerId, + addresses: { + listen: [ + listenAddr.toString() + ] + }, + connectionManager: { + autoDial: false + }, + peerDiscovery: [ + new Bootstrap({ + list: bootstrappers + }) + ] + })) + + const expectedPeers = new Set([ + remotePeerId1.toString(), + remotePeerId2.toString() + ]) + + libp2p.addEventListener('peer:discovery', (evt) => { + const { id } = evt.detail + + expectedPeers.delete(id.toString()) + if (expectedPeers.size === 0) { + libp2p.removeEventListener('peer:discovery') + deferred.resolve() + } + }) + + await libp2p.start() + + return await deferred.promise + }) + + it('MulticastDNS should discover all peers on the local network', async () => { + const deferred = defer() + + // use a random tag to prevent CI collision + const serviceTag = `libp2p-test-${uint8ArrayToString(randomBytes(4), 'base16')}.local` + + const getConfig = (peerId: PeerId) => createBaseOptions({ + peerId, + addresses: { + listen: [ + listenAddr.toString() + ] + }, + peerDiscovery: [ + new MulticastDNS({ + interval: 200, // discover quickly + serviceTag + }) + ], + connectionManager: { + autoDial: false + } + }) + + libp2p = await createLibp2pNode(getConfig(peerId)) + const remoteLibp2p1 = await createLibp2pNode(getConfig(remotePeerId1)) + const remoteLibp2p2 = await createLibp2pNode(getConfig(remotePeerId2)) + + const expectedPeers = new Set([ + remotePeerId1.toString(), + remotePeerId2.toString() + ]) + + libp2p.addEventListener('peer:discovery', (evt) => { + const { id } = evt.detail + + expectedPeers.delete(id.toString()) + + if (expectedPeers.size === 0) { + libp2p.removeEventListener('peer:discovery') + deferred.resolve() + } + }) + + await Promise.all([ + remoteLibp2p1.start(), + remoteLibp2p2.start(), + libp2p.start() + ]) + + await deferred.promise + + await remoteLibp2p1.stop() + await remoteLibp2p2.stop() + }) + + it('kad-dht should discover other peers', async () => { + const deferred = defer() + + const getConfig = (peerId: PeerId) => createBaseOptions({ + peerId, + addresses: { + listen: [ + listenAddr.toString() + ] + }, + connectionManager: { + autoDial: false + }, + dht: new KadDHT() + }) + + const localConfig = getConfig(peerId) + + libp2p = await createLibp2pNode(localConfig) + + const remoteLibp2p1 = await createLibp2pNode(getConfig(remotePeerId1)) + const remoteLibp2p2 = await createLibp2pNode(getConfig(remotePeerId2)) + + libp2p.addEventListener('peer:discovery', (evt) => { + const { id } = evt.detail + + if (id.equals(remotePeerId1)) { + libp2p.removeEventListener('peer:discovery') + deferred.resolve() + } + }) + + await Promise.all([ + libp2p.start(), + remoteLibp2p1.start(), + remoteLibp2p2.start() + ]) + + await libp2p.peerStore.addressBook.set(remotePeerId1, remoteLibp2p1.getMultiaddrs()) + await remoteLibp2p2.peerStore.addressBook.set(remotePeerId1, remoteLibp2p1.getMultiaddrs()) + + // Topology: + // A -> B + // C -> B + await Promise.all([ + libp2p.dial(remotePeerId1), + remoteLibp2p2.dial(remotePeerId1) + ]) + + await deferred.promise + return await Promise.all([ + remoteLibp2p1.stop(), + remoteLibp2p2.stop() + ]) + }) 
+}) diff --git a/test/peer-discovery/index.spec.js b/test/peer-discovery/index.spec.js deleted file mode 100644 index 1a43d9f7..00000000 --- a/test/peer-discovery/index.spec.js +++ /dev/null @@ -1,137 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') - -const defer = require('p-defer') -const mergeOptions = require('merge-options') - -const { Multiaddr } = require('multiaddr') -const WebRTCStar = require('libp2p-webrtc-star') - -const Libp2p = require('../../src') -const baseOptions = require('../utils/base-options.browser') -const { createPeerId } = require('../utils/creators/peer') - -describe('peer discovery', () => { - describe('basic functions', () => { - let peerId - let remotePeerId - let libp2p - - before(async () => { - [peerId, remotePeerId] = await createPeerId({ number: 2 }) - }) - - afterEach(async () => { - libp2p && await libp2p.stop() - sinon.reset() - }) - - it('should dial know peers on startup below the minConnections watermark', async () => { - libp2p = new Libp2p({ - ...baseOptions, - peerId, - connectionManager: { - minConnections: 2 - } - }) - - await libp2p.peerStore.addressBook.set(remotePeerId, [new Multiaddr('/ip4/165.1.1.1/tcp/80')]) - - const deferred = defer() - sinon.stub(libp2p.dialer, 'connectToPeer').callsFake((remotePeerId) => { - expect(remotePeerId).to.equal(remotePeerId) - deferred.resolve() - }) - const spy = sinon.spy() - libp2p.on('peer:discovery', spy) - - libp2p.start() - await deferred.promise - - expect(spy.calledOnce).to.eql(true) - expect(spy.getCall(0).args[0].toString()).to.eql(remotePeerId.toString()) - }) - - it('should stop discovery on libp2p start/stop', async () => { - const mockDiscovery = { - tag: 'mock', - start: () => {}, - stop: () => {}, - on: () => {}, - removeListener: () => {} - } - const startSpy = sinon.spy(mockDiscovery, 'start') - const stopSpy = sinon.spy(mockDiscovery, 'stop') - - libp2p = new Libp2p(mergeOptions(baseOptions, { - peerId, - modules: { - peerDiscovery: [mockDiscovery] - } - })) - - await libp2p.start() - expect(startSpy).to.have.property('callCount', 1) - expect(stopSpy).to.have.property('callCount', 0) - await libp2p.stop() - expect(startSpy).to.have.property('callCount', 1) - expect(stopSpy).to.have.property('callCount', 1) - }) - }) - - describe('discovery modules from transports', () => { - let peerId, libp2p - - before(async () => { - [peerId] = await createPeerId() - }) - - afterEach(async () => { - libp2p && await libp2p.stop() - }) - - it('should add discovery module if present in transports and enabled', async () => { - libp2p = new Libp2p(mergeOptions(baseOptions, { - peerId, - modules: { - transport: [WebRTCStar] - }, - config: { - peerDiscovery: { - webRTCStar: { - enabled: true - } - } - } - })) - - await libp2p.start() - - expect(libp2p._discovery.size).to.eql(1) - expect(libp2p._discovery.has('webRTCStar')).to.eql(true) - }) - - it('should not add discovery module if present in transports but disabled', async () => { - libp2p = new Libp2p(mergeOptions(baseOptions, { - peerId, - modules: { - transport: [WebRTCStar] - }, - config: { - peerDiscovery: { - webRTCStar: { - enabled: false - } - } - } - })) - - await libp2p.start() - - expect(libp2p._discovery.size).to.eql(0) - }) - }) -}) diff --git a/test/peer-discovery/index.spec.ts b/test/peer-discovery/index.spec.ts new file mode 100644 index 00000000..f58fded8 --- /dev/null +++ b/test/peer-discovery/index.spec.ts @@ -0,0 +1,101 @@ +/* eslint-env mocha */ + +import { 
expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import defer from 'p-defer' +import { Multiaddr } from '@multiformats/multiaddr' +import { createBaseOptions } from '../utils/base-options.browser.js' +import { createPeerId } from '../utils/creators/peer.js' +import { isPeerId, PeerId } from '@libp2p/interfaces/peer-id' +import { createLibp2pNode, Libp2pNode } from '../../src/libp2p.js' +import { mockConnection, mockDuplex, mockMultiaddrConnection } from '@libp2p/interface-compliance-tests/mocks' + +describe('peer discovery', () => { + describe('basic functions', () => { + let peerId: PeerId + let remotePeerId: PeerId + let libp2p: Libp2pNode + + before(async () => { + [peerId, remotePeerId] = await Promise.all([ + createPeerId(), + createPeerId() + ]) + }) + + afterEach(async () => { + if (libp2p != null) { + await libp2p.stop() + } + + sinon.reset() + }) + + it('should dial known peers on startup below the minConnections watermark', async () => { + libp2p = await createLibp2pNode(createBaseOptions({ + peerId, + connectionManager: { + minConnections: 2 + } + })) + + await libp2p.peerStore.addressBook.set(remotePeerId, [new Multiaddr('/ip4/165.1.1.1/tcp/80')]) + + const deferred = defer() + sinon.stub(libp2p.components.getDialer(), 'dial').callsFake(async (id) => { + if (!isPeerId(id)) { + throw new Error('Tried to dial something that was not a peer ID') + } + + if (!remotePeerId.equals(id)) { + throw new Error('Tried to dial wrong peer ID') + } + + deferred.resolve() + return mockConnection(mockMultiaddrConnection(mockDuplex(), id)) + }) + + const spy = sinon.spy() + libp2p.addEventListener('peer:discovery', spy) + + await libp2p.start() + await deferred.promise + + expect(spy.calledOnce).to.equal(true) + expect(spy.getCall(0).args[0].detail.id.toString()).to.equal(remotePeerId.toString()) + }) + + it('should stop discovery on libp2p start/stop', async () => { + let started = 0 + let stopped = 0 + + class MockDiscovery { + static tag = 'mock' + start () { + started++ + } + + stop () { + stopped++ + } + + addEventListener () {} + removeEventListener () {} + } + + libp2p = await createLibp2pNode(createBaseOptions({ + peerId, + peerDiscovery: [ + new MockDiscovery() + ] + })) + + await libp2p.start() + expect(started).to.equal(1) + expect(stopped).to.equal(0) + await libp2p.stop() + expect(started).to.equal(1) + expect(stopped).to.equal(1) + }) + }) +}) diff --git a/test/peer-routing/peer-routing.node.js b/test/peer-routing/peer-routing.node.js deleted file mode 100644 index 29492bd6..00000000 --- a/test/peer-routing/peer-routing.node.js +++ /dev/null @@ -1,665 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const nock = require('nock') -const sinon = require('sinon') -const intoStream = require('into-stream') - -const delay = require('delay') -const pDefer = require('p-defer') -const pWaitFor = require('p-wait-for') -const mergeOptions = require('merge-options') -const drain = require('it-drain') -const all = require('it-all') - -const ipfsHttpClient = require('ipfs-http-client') -const DelegatedPeerRouter = require('libp2p-delegated-peer-routing') -const { Multiaddr } = require('multiaddr') -const PeerId = require('peer-id') - -const peerUtils = require('../utils/creators/peer') -const { baseOptions, routingOptions } = require('./utils') - -describe('peer-routing', () => { - describe('no routers', () => { - let node - - before(async () => { - [node] = await peerUtils.createPeer({ - config: baseOptions - }) - }) - - after(() => 
node.stop()) - - it('.findPeer should return an error', async () => { - await expect(node.peerRouting.findPeer('a cid')) - .to.eventually.be.rejected() - .and.to.have.property('code', 'ERR_NO_ROUTERS_AVAILABLE') - }) - - it('.getClosestPeers should return an error', async () => { - try { - for await (const _ of node.peerRouting.getClosestPeers('a cid')) { } // eslint-disable-line - throw new Error('.getClosestPeers should return an error') - } catch (/** @type {any} */ err) { - expect(err).to.exist() - expect(err.code).to.equal('ERR_NO_ROUTERS_AVAILABLE') - } - }) - }) - - describe('via dht router', () => { - const number = 5 - let nodes - - before(async () => { - nodes = await peerUtils.createPeer({ - number, - config: routingOptions - }) - - // Ring dial - await Promise.all( - nodes.map((peer, i) => peer.dial(nodes[(i + 1) % number].peerId)) - ) - }) - - after(() => { - sinon.restore() - }) - - after(() => Promise.all(nodes.map((n) => n.stop()))) - - it('should use the nodes dht', async () => { - sinon.stub(nodes[0]._dht, 'findPeer').callsFake(async function * () { - yield { - name: 'FINAL_PEER', - peer: { - id: nodes[1].peerId, - multiaddrs: [] - } - } - }) - - expect(nodes[0]._dht.findPeer.called).to.be.false() - await nodes[0].peerRouting.findPeer(nodes[1].peerId) - expect(nodes[0]._dht.findPeer.called).to.be.true() - nodes[0]._dht.findPeer.restore() - }) - - it('should use the nodes dht to get the closest peers', async () => { - sinon.stub(nodes[0]._dht, 'getClosestPeers').callsFake(async function * () { - yield { - name: 'PEER_RESPONSE', - closer: [{ - id: nodes[1].peerId, - multiaddrs: [] - }] - } - }) - - expect(nodes[0]._dht.getClosestPeers.called).to.be.false() - await drain(nodes[0].peerRouting.getClosestPeers(nodes[1].peerId)) - expect(nodes[0]._dht.getClosestPeers.called).to.be.true() - nodes[0]._dht.getClosestPeers.restore() - }) - - it('should error when peer tries to find itself', async () => { - await expect(nodes[0].peerRouting.findPeer(nodes[0].peerId)) - .to.eventually.be.rejected() - .and.to.have.property('code', 'ERR_FIND_SELF') - }) - - it('should handle error thrown synchronously during find peer', async () => { - const unknownPeers = await peerUtils.createPeerId({ number: 1, fixture: false }) - - nodes[0].peerRouting._routers = [{ - findPeer () { - throw new Error('Thrown sync') - } - }] - - await expect(nodes[0].peerRouting.findPeer(unknownPeers[0])) - .to.eventually.be.rejected() - .and.to.have.property('code', 'ERR_NOT_FOUND') - }) - - it('should handle error thrown asynchronously during find peer', async () => { - const unknownPeers = await peerUtils.createPeerId({ number: 1, fixture: false }) - - nodes[0].peerRouting._routers = [{ - async findPeer () { - throw new Error('Thrown async') - } - }] - - await expect(nodes[0].peerRouting.findPeer(unknownPeers[0])) - .to.eventually.be.rejected() - .and.to.have.property('code', 'ERR_NOT_FOUND') - }) - - it('should handle error thrown asynchronously after delay during find peer', async () => { - const unknownPeers = await peerUtils.createPeerId({ number: 1, fixture: false }) - - nodes[0].peerRouting._routers = [{ - async findPeer () { - await delay(100) - throw new Error('Thrown async after delay') - } - }] - - await expect(nodes[0].peerRouting.findPeer(unknownPeers[0])) - .to.eventually.be.rejected() - .and.to.have.property('code', 'ERR_NOT_FOUND') - }) - - it('should return value when one router errors synchronously and another returns a value', async () => { - const [peer] = await peerUtils.createPeerId({ number: 
1, fixture: false }) - - nodes[0].peerRouting._routers = [{ - findPeer () { - throw new Error('Thrown sync') - } - }, { - async findPeer () { - return Promise.resolve({ - id: peer, - multiaddrs: [] - }) - } - }] - - await expect(nodes[0].peerRouting.findPeer(peer)) - .to.eventually.deep.equal({ - id: peer, - multiaddrs: [] - }) - }) - - it('should return value when one router errors asynchronously and another returns a value', async () => { - const [peer] = await peerUtils.createPeerId({ number: 1, fixture: false }) - - nodes[0].peerRouting._routers = [{ - async findPeer () { - throw new Error('Thrown sync') - } - }, { - async findPeer () { - return Promise.resolve({ - id: peer, - multiaddrs: [] - }) - } - }] - - await expect(nodes[0].peerRouting.findPeer(peer)) - .to.eventually.deep.equal({ - id: peer, - multiaddrs: [] - }) - }) - }) - - describe('via delegate router', () => { - let node - let delegate - - beforeEach(async () => { - delegate = new DelegatedPeerRouter(ipfsHttpClient.create({ - host: '0.0.0.0', - protocol: 'http', - port: 60197 - })) - - ;[node] = await peerUtils.createPeer({ - config: mergeOptions(baseOptions, { - modules: { - peerRouting: [delegate] - }, - config: { - dht: { - enabled: false - } - } - }) - }) - }) - - afterEach(() => { - nock.cleanAll() - sinon.restore() - }) - - afterEach(() => node.stop()) - - it('should only have one router', () => { - expect(node.peerRouting._routers).to.have.lengthOf(1) - }) - - it('should use the delegate router to find peers', async () => { - const [remotePeerId] = await peerUtils.createPeerId({ fixture: false }) - - sinon.stub(delegate, 'findPeer').callsFake(() => { - return { - id: remotePeerId, - multiaddrs: [] - } - }) - - expect(delegate.findPeer.called).to.be.false() - await node.peerRouting.findPeer(remotePeerId) - expect(delegate.findPeer.called).to.be.true() - delegate.findPeer.restore() - }) - - it('should use the delegate router to get the closest peers', async () => { - const [remotePeerId] = await peerUtils.createPeerId({ fixture: false }) - - sinon.stub(delegate, 'getClosestPeers').callsFake(function * () { - yield { - id: remotePeerId, - multiaddrs: [] - } - }) - - expect(delegate.getClosestPeers.called).to.be.false() - await drain(node.peerRouting.getClosestPeers(remotePeerId)) - expect(delegate.getClosestPeers.called).to.be.true() - delegate.getClosestPeers.restore() - }) - - it('should be able to find a peer', async () => { - const peerKey = PeerId.createFromB58String('QmTp9VkYvnHyrqKQuFPiuZkiX9gPcqj6x5LJ1rmWuSySnL') - const mockApi = nock('http://0.0.0.0:60197') - .post('/api/v0/dht/findpeer') - .query(true) - .reply(200, `{"Extra":"","ID":"some other id","Responses":null,"Type":0}\n{"Extra":"","ID":"","Responses":[{"Addrs":["/ip4/127.0.0.1/tcp/4001"],"ID":"${peerKey}"}],"Type":2}\n`, [ - 'Content-Type', 'application/json', - 'X-Chunked-Output', '1' - ]) - - const peer = await node.peerRouting.findPeer(peerKey) - - expect(peer.id).to.equal(peerKey) - expect(mockApi.isDone()).to.equal(true) - }) - - it('should error when peer tries to find itself', async () => { - await expect(node.peerRouting.findPeer(node.peerId)) - .to.eventually.be.rejected() - .and.to.have.property('code', 'ERR_FIND_SELF') - }) - - it('should error when a peer cannot be found', async () => { - const peerId = await PeerId.create({ keyType: 'ed25519' }) - const mockApi = nock('http://0.0.0.0:60197') - .post('/api/v0/dht/findpeer') - .query(true) - .reply(200, '{"Extra":"","ID":"some other id","Responses":null,"Type":6}\n{"Extra":"","ID":"yet 
another id","Responses":null,"Type":0}\n{"Extra":"routing:not found","ID":"","Responses":null,"Type":3}\n', [ - 'Content-Type', 'application/json', - 'X-Chunked-Output', '1' - ]) - - await expect(node.peerRouting.findPeer(peerId)) - .to.eventually.be.rejected() - - expect(mockApi.isDone()).to.equal(true) - }) - - it('should handle errors from the api', async () => { - const peerId = await PeerId.create({ keyType: 'ed25519' }) - const mockApi = nock('http://0.0.0.0:60197') - .post('/api/v0/dht/findpeer') - .query(true) - .reply(502) - - await expect(node.peerRouting.findPeer(peerId)) - .to.eventually.be.rejected() - - expect(mockApi.isDone()).to.equal(true) - }) - - it('should be able to get the closest peers', async () => { - const peerId = await PeerId.create({ keyType: 'ed25519' }) - const closest1 = '12D3KooWLewYMMdGWAtuX852n4rgCWkK7EBn4CWbwwBzhsVoKxk3' - const closest2 = '12D3KooWDtoQbpKhtnWddfj72QmpFvvLDTsBLTFkjvgQm6cde2AK' - - const mockApi = nock('http://0.0.0.0:60197') - .post('/api/v0/dht/query') - .query(true) - .reply(200, - () => intoStream([ - `{"extra":"","id":"${closest1}","responses":[{"ID":"${closest1}","Addrs":["/ip4/127.0.0.1/tcp/63930","/ip4/127.0.0.1/tcp/63930"]}],"type":1}\n`, - `{"extra":"","id":"${closest2}","responses":[{"ID":"${closest2}","Addrs":["/ip4/127.0.0.1/tcp/63506","/ip4/127.0.0.1/tcp/63506"]}],"type":1}\n`, - `{"Extra":"","ID":"${closest2}","Responses":[],"Type":2}\n`, - `{"Extra":"","ID":"${closest1}","Responses":[],"Type":2}\n` - ]), - [ - 'Content-Type', 'application/json', - 'X-Chunked-Output', '1' - ]) - - const closestPeers = await all(node.peerRouting.getClosestPeers(peerId.id, { timeout: 1000 })) - - expect(closestPeers).to.have.length(2) - expect(closestPeers[0].id.toB58String()).to.equal(closest1) - expect(closestPeers[0].multiaddrs).to.have.lengthOf(2) - expect(closestPeers[1].id.toB58String()).to.equal(closest2) - expect(closestPeers[1].multiaddrs).to.have.lengthOf(2) - expect(mockApi.isDone()).to.equal(true) - }) - - it('should handle errors when getting the closest peers', async () => { - const peerId = await PeerId.create({ keyType: 'ed25519' }) - - const mockApi = nock('http://0.0.0.0:60197') - .post('/api/v0/dht/query') - .query(true) - .reply(502, 'Bad Gateway', [ - 'X-Chunked-Output', '1' - ]) - - try { - for await (const _ of node.peerRouting.getClosestPeers(peerId.id)) { } // eslint-disable-line - throw new Error('should handle errors when getting the closest peers') - } catch (/** @type {any} */ err) { - expect(err).to.exist() - } - - expect(mockApi.isDone()).to.equal(true) - }) - }) - - describe('via dht and delegate routers', () => { - let node - let delegate - - beforeEach(async () => { - delegate = new DelegatedPeerRouter(ipfsHttpClient.create({ - host: '0.0.0.0', - protocol: 'http', - port: 60197 - })) - - ;[node] = await peerUtils.createPeer({ - config: mergeOptions(routingOptions, { - modules: { - peerRouting: [delegate] - } - }) - }) - }) - - afterEach(() => { - sinon.restore() - }) - - afterEach(() => node.stop()) - - it('should use the delegate if the dht fails to find the peer', async () => { - const [remotePeerId] = await peerUtils.createPeerId({ fixture: false }) - const results = { - id: remotePeerId, - multiaddrs: [] - } - - sinon.stub(node._dht, 'findPeer').callsFake(async function * () {}) - sinon.stub(delegate, 'findPeer').callsFake(() => { - return results - }) - - const peer = await node.peerRouting.findPeer(remotePeerId) - expect(peer).to.eql(results) - }) - - it('should not wait for the dht to return if the 
delegate does first', async () => { - const [remotePeerId] = await peerUtils.createPeerId({ fixture: false }) - const results = { - id: remotePeerId, - multiaddrs: [] - } - - const defer = pDefer() - - sinon.stub(node._dht, 'findPeer').callsFake(async function * () { - yield - await defer.promise - }) - sinon.stub(delegate, 'findPeer').callsFake(() => { - return results - }) - - const peer = await node.peerRouting.findPeer(remotePeerId) - expect(peer).to.eql(results) - - defer.resolve() - }) - - it('should not wait for the delegate to return if the dht does first', async () => { - const [remotePeerId] = await peerUtils.createPeerId({ fixture: false }) - const result = { - id: remotePeerId, - multiaddrs: [] - } - - const defer = pDefer() - - sinon.stub(node._dht, 'findPeer').callsFake(async function * () { - yield { - name: 'FINAL_PEER', - peer: result - } - }) - sinon.stub(delegate, 'findPeer').callsFake(async () => { - await defer.promise - }) - - const peer = await node.peerRouting.findPeer(remotePeerId) - expect(peer).to.eql(result) - - defer.resolve() - }) - - it('should store the addresses of the found peer', async () => { - const [remotePeerId] = await peerUtils.createPeerId({ fixture: false }) - const result = { - id: remotePeerId, - multiaddrs: [ - new Multiaddr('/ip4/123.123.123.123/tcp/38982') - ] - } - - const spy = sinon.spy(node.peerStore.addressBook, 'add') - - sinon.stub(node._dht, 'findPeer').callsFake(async function * () { - yield { - name: 'FINAL_PEER', - peer: result - } - }) - sinon.stub(delegate, 'findPeer').callsFake(() => {}) - - await node.peerRouting.findPeer(remotePeerId) - - expect(spy.calledWith(result.id, result.multiaddrs)).to.be.true() - }) - - it('should use the delegate if the dht fails to get the closest peer', async () => { - const [remotePeerId] = await peerUtils.createPeerId({ fixture: false }) - const results = [{ - id: remotePeerId, - multiaddrs: [] - }] - - sinon.stub(node._dht, 'getClosestPeers').callsFake(function * () { }) - - sinon.stub(delegate, 'getClosestPeers').callsFake(function * () { - yield results[0] - }) - - const closest = await all(node.peerRouting.getClosestPeers('a cid')) - - expect(closest).to.have.length.above(0) - expect(closest).to.eql(results) - }) - - it('should store the addresses of the closest peer', async () => { - const [remotePeerId] = await peerUtils.createPeerId({ fixture: false }) - const result = { - id: remotePeerId, - multiaddrs: [ - new Multiaddr('/ip4/123.123.123.123/tcp/38982') - ] - } - - const spy = sinon.spy(node.peerStore.addressBook, 'add') - - sinon.stub(node._dht, 'getClosestPeers').callsFake(function * () { }) - - sinon.stub(delegate, 'getClosestPeers').callsFake(function * () { - yield result - }) - - await drain(node.peerRouting.getClosestPeers('a cid')) - - expect(spy.calledWith(result.id, result.multiaddrs)).to.be.true() - }) - - it('should dedupe closest peers', async () => { - const [remotePeerId] = await peerUtils.createPeerId({ fixture: false }) - const results = [{ - id: remotePeerId, - multiaddrs: [ - new Multiaddr('/ip4/123.123.123.123/tcp/38982') - ] - }] - - sinon.stub(node._dht, 'getClosestPeers').callsFake(function * () { - yield * results - }) - - sinon.stub(delegate, 'getClosestPeers').callsFake(function * () { - yield * results - }) - - const peers = await all(node.peerRouting.getClosestPeers('a cid')) - - expect(peers).to.be.an('array').with.a.lengthOf(1).that.deep.equals(results) - }) - }) - - describe('peer routing refresh manager service', () => { - let node - let peerIds - - 
before(async () => { - peerIds = await peerUtils.createPeerId({ number: 2 }) - }) - - afterEach(() => { - sinon.restore() - - return node && node.stop() - }) - - it('should be enabled and start by default', async () => { - const results = [ - { id: peerIds[0], multiaddrs: [new Multiaddr('/ip4/30.0.0.1/tcp/2000')] }, - { id: peerIds[1], multiaddrs: [new Multiaddr('/ip4/32.0.0.1/tcp/2000')] } - ] - - ;[node] = await peerUtils.createPeer({ - config: mergeOptions(routingOptions, { - peerRouting: { - refreshManager: { - bootDelay: 100 - } - } - }), - started: false - }) - - sinon.spy(node.peerStore.addressBook, 'add') - sinon.stub(node._dht, 'getClosestPeers').callsFake(function * () { - yield { - name: 'PEER_RESPONSE', - closer: [ - results[0] - ] - } - yield { - name: 'PEER_RESPONSE', - closer: [ - results[1] - ] - } - }) - - await node.start() - - await pWaitFor(() => node._dht.getClosestPeers.callCount === 1) - await pWaitFor(() => node.peerStore.addressBook.add.callCount === results.length) - - const call0 = node.peerStore.addressBook.add.getCall(0) - expect(call0.args[0].equals(results[0].id)) - call0.args[1].forEach((m, index) => { - expect(m.equals(results[0].multiaddrs[index])) - }) - - const call1 = node.peerStore.addressBook.add.getCall(1) - expect(call1.args[0].equals(results[1].id)) - call0.args[1].forEach((m, index) => { - expect(m.equals(results[1].multiaddrs[index])) - }) - }) - - it('should support being disabled', async () => { - [node] = await peerUtils.createPeer({ - config: mergeOptions(routingOptions, { - peerRouting: { - refreshManager: { - bootDelay: 100, - enabled: false - } - } - }), - started: false - }) - - sinon.stub(node._dht, 'getClosestPeers').callsFake(async function * () { - yield - throw new Error('should not be called') - }) - - await node.start() - await delay(100) - - expect(node._dht.getClosestPeers.callCount === 0) - }) - - it('should start and run recurrently on interval', async () => { - [node] = await peerUtils.createPeer({ - config: mergeOptions(routingOptions, { - peerRouting: { - refreshManager: { - interval: 500, - bootDelay: 200 - } - } - }), - started: false - }) - - sinon.stub(node._dht, 'getClosestPeers').callsFake(function * () { - yield { id: peerIds[0], multiaddrs: [new Multiaddr('/ip4/30.0.0.1/tcp/2000')] } - }) - - await node.start() - - // should run more than once - await pWaitFor(() => node._dht.getClosestPeers.callCount === 2) - }) - }) -}) diff --git a/test/peer-routing/peer-routing.node.ts b/test/peer-routing/peer-routing.node.ts new file mode 100644 index 00000000..db94d643 --- /dev/null +++ b/test/peer-routing/peer-routing.node.ts @@ -0,0 +1,788 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import nock from 'nock' +import sinon from 'sinon' +import intoStream from 'into-stream' +import delay from 'delay' +import pDefer from 'p-defer' +import pWaitFor from 'p-wait-for' +import drain from 'it-drain' +import all from 'it-all' +import { create as createIpfsHttpClient } from 'ipfs-http-client' +import { DelegatedPeerRouting } from '@libp2p/delegated-peer-routing' +import { Multiaddr } from '@multiformats/multiaddr' +import { createNode, createPeerId, populateAddressBooks } from '../utils/creators/peer.js' +import type { Libp2pNode } from '../../src/libp2p.js' +import { createBaseOptions } from '../utils/base-options.js' +import { createRoutingOptions } from './utils.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { createEd25519PeerId } from '@libp2p/peer-id-factory' +import { 
EventTypes, MessageType } from '@libp2p/interfaces/dht' +import { peerIdFromString } from '@libp2p/peer-id' +import type { PeerInfo } from '@libp2p/interfaces/peer-info' +import { KadDHT } from '@libp2p/kad-dht' + +describe('peer-routing', () => { + let peerId: PeerId + + beforeEach(async () => { + peerId = await createEd25519PeerId() + }) + + describe('no routers', () => { + let node: Libp2pNode + + before(async () => { + node = await createNode({ + config: createBaseOptions() + }) + }) + + after(async () => await node.stop()) + + it('.findPeer should return an error', async () => { + await expect(node.peerRouting.findPeer(peerId)) + .to.eventually.be.rejected() + .and.to.have.property('code', 'ERR_NO_ROUTERS_AVAILABLE') + }) + + it('.getClosestPeers should return an error', async () => { + try { + for await (const _ of node.peerRouting.getClosestPeers(peerId.toBytes())) { } // eslint-disable-line + throw new Error('.getClosestPeers should return an error') + } catch (err: any) { + expect(err).to.exist() + expect(err.code).to.equal('ERR_NO_ROUTERS_AVAILABLE') + } + }) + }) + + describe('via dht router', () => { + let nodes: Libp2pNode[] + + before(async () => { + nodes = await Promise.all([ + createNode({ config: createRoutingOptions() }), + createNode({ config: createRoutingOptions() }), + createNode({ config: createRoutingOptions() }), + createNode({ config: createRoutingOptions() }), + createNode({ config: createRoutingOptions() }) + ]) + await populateAddressBooks(nodes) + + // Ring dial + await Promise.all( + nodes.map(async (peer, i) => await peer.dial(nodes[(i + 1) % nodes.length].peerId)) + ) + }) + + after(() => { + sinon.restore() + }) + + after(async () => await Promise.all(nodes.map(async (n) => await n.stop()))) + + it('should use the nodes dht', async () => { + if (nodes[0].dht == null) { + throw new Error('DHT not configured') + } + + const dhtFindPeerStub = sinon.stub(nodes[0].dht, 'findPeer').callsFake(async function * () { + yield { + from: nodes[2].peerId, + type: EventTypes.FINAL_PEER, + name: 'FINAL_PEER', + peer: { + id: nodes[1].peerId, + multiaddrs: [], + protocols: [] + } + } + }) + + expect(dhtFindPeerStub.called).to.be.false() + await nodes[0].peerRouting.findPeer(nodes[1].peerId) + expect(dhtFindPeerStub.called).to.be.true() + dhtFindPeerStub.restore() + }) + + it('should use the nodes dht to get the closest peers', async () => { + if (nodes[0].dht == null) { + throw new Error('DHT not configured') + } + + const dhtGetClosestPeersStub = sinon.stub(nodes[0].dht, 'getClosestPeers').callsFake(async function * () { + yield { + from: nodes[2].peerId, + type: EventTypes.PEER_RESPONSE, + name: 'PEER_RESPONSE', + messageName: 'FIND_NODE', + messageType: MessageType.FIND_NODE, + closer: [{ + id: nodes[1].peerId, + multiaddrs: [], + protocols: [] + }], + providers: [] + } + }) + + expect(dhtGetClosestPeersStub.called).to.be.false() + await drain(nodes[0].peerRouting.getClosestPeers(nodes[1].peerId.toBytes())) + expect(dhtGetClosestPeersStub.called).to.be.true() + dhtGetClosestPeersStub.restore() + }) + + it('should error when peer tries to find itself', async () => { + await expect(nodes[0].peerRouting.findPeer(nodes[0].peerId)) + .to.eventually.be.rejected() + .and.to.have.property('code', 'ERR_FIND_SELF') + }) + + it('should handle error thrown synchronously during find peer', async () => { + const unknownPeer = await createPeerId() + + // @ts-expect-error private field + nodes[0].peerRouting.routers = [{ + findPeer () { + throw new Error('Thrown sync') + } + }] + + 
await expect(nodes[0].peerRouting.findPeer(unknownPeer)) + .to.eventually.be.rejected() + .and.to.have.property('code', 'ERR_NOT_FOUND') + }) + + it('should handle error thrown asynchronously during find peer', async () => { + const unknownPeer = await createPeerId() + + // @ts-expect-error private field + nodes[0].peerRouting.routers = [{ + async findPeer () { + throw new Error('Thrown async') + } + }] + + await expect(nodes[0].peerRouting.findPeer(unknownPeer)) + .to.eventually.be.rejected() + .and.to.have.property('code', 'ERR_NOT_FOUND') + }) + + it('should handle error thrown asynchronously after delay during find peer', async () => { + const unknownPeer = await createPeerId() + + // @ts-expect-error private field + nodes[0].peerRouting.routers = [{ + async findPeer () { + await delay(100) + throw new Error('Thrown async after delay') + } + }] + + await expect(nodes[0].peerRouting.findPeer(unknownPeer)) + .to.eventually.be.rejected() + .and.to.have.property('code', 'ERR_NOT_FOUND') + }) + + it('should return value when one router errors synchronously and another returns a value', async () => { + const peer = await createPeerId() + + // @ts-expect-error private field + nodes[0].peerRouting.routers = [{ + findPeer () { + throw new Error('Thrown sync') + } + }, { + async findPeer () { + return await Promise.resolve({ + id: peer, + multiaddrs: [] + }) + } + }] + + await expect(nodes[0].peerRouting.findPeer(peer)) + .to.eventually.deep.equal({ + id: peer, + multiaddrs: [] + }) + }) + + it('should return value when one router errors asynchronously and another returns a value', async () => { + const peer = await createPeerId() + + // @ts-expect-error private field + nodes[0].peerRouting.routers = [{ + async findPeer () { + throw new Error('Thrown sync') + } + }, { + async findPeer () { + return await Promise.resolve({ + id: peer, + multiaddrs: [] + }) + } + }] + + await expect(nodes[0].peerRouting.findPeer(peer)) + .to.eventually.deep.equal({ + id: peer, + multiaddrs: [] + }) + }) + }) + + describe('via delegate router', () => { + let node: Libp2pNode + let delegate: DelegatedPeerRouting + + beforeEach(async () => { + delegate = new DelegatedPeerRouting(createIpfsHttpClient({ + host: '0.0.0.0', + protocol: 'http', + port: 60197 + })) + + node = await createNode({ + config: createBaseOptions({ + peerRouters: [delegate] + }) + }) + }) + + afterEach(() => { + nock.cleanAll() + sinon.restore() + }) + + afterEach(async () => await node.stop()) + + it('should only have one router', () => { + // @ts-expect-error private field + expect(node.peerRouting.routers).to.have.lengthOf(1) + }) + + it('should use the delegate router to find peers', async () => { + const remotePeerId = await createPeerId() + + const delegateFindPeerStub = sinon.stub(delegate, 'findPeer').callsFake(async function () { + return { + id: remotePeerId, + multiaddrs: [], + protocols: [] + } + }) + + expect(delegateFindPeerStub.called).to.be.false() + await node.peerRouting.findPeer(remotePeerId) + expect(delegateFindPeerStub.called).to.be.true() + delegateFindPeerStub.restore() + }) + + it('should use the delegate router to get the closest peers', async () => { + const remotePeerId = await createPeerId() + + const delegateGetClosestPeersStub = sinon.stub(delegate, 'getClosestPeers').callsFake(async function * () { + yield { + id: remotePeerId, + multiaddrs: [], + protocols: [] + } + }) + + expect(delegateGetClosestPeersStub.called).to.be.false() + await drain(node.peerRouting.getClosestPeers(remotePeerId.toBytes())) + 
expect(delegateGetClosestPeersStub.called).to.be.true() + delegateGetClosestPeersStub.restore() + }) + + it('should be able to find a peer', async () => { + const peerKey = peerIdFromString('QmTp9VkYvnHyrqKQuFPiuZkiX9gPcqj6x5LJ1rmWuSySnL') + const mockApi = nock('http://0.0.0.0:60197') + .post('/api/v0/dht/findpeer') + .query(true) + .reply(200, `{"Extra":"","ID":"some other id","Responses":null,"Type":0}\n{"Extra":"","ID":"","Responses":[{"Addrs":["/ip4/127.0.0.1/tcp/4001"],"ID":"${peerKey.toString()}"}],"Type":2}\n`, [ + 'Content-Type', 'application/json', + 'X-Chunked-Output', '1' + ]) + + const peer = await node.peerRouting.findPeer(peerKey) + + expect(peer.id.toString()).to.equal(peerKey.toString()) + expect(mockApi.isDone()).to.equal(true) + }) + + it('should error when peer tries to find itself', async () => { + await expect(node.peerRouting.findPeer(node.peerId)) + .to.eventually.be.rejected() + .and.to.have.property('code', 'ERR_FIND_SELF') + }) + + it('should error when a peer cannot be found', async () => { + const peerId = await createEd25519PeerId() + const mockApi = nock('http://0.0.0.0:60197') + .post('/api/v0/dht/findpeer') + .query(true) + .reply(200, '{"Extra":"","ID":"some other id","Responses":null,"Type":6}\n{"Extra":"","ID":"yet another id","Responses":null,"Type":0}\n{"Extra":"routing:not found","ID":"","Responses":null,"Type":3}\n', [ + 'Content-Type', 'application/json', + 'X-Chunked-Output', '1' + ]) + + await expect(node.peerRouting.findPeer(peerId)) + .to.eventually.be.rejected() + + expect(mockApi.isDone()).to.equal(true) + }) + + it('should handle errors from the api', async () => { + const peerId = await createEd25519PeerId() + const mockApi = nock('http://0.0.0.0:60197') + .post('/api/v0/dht/findpeer') + .query(true) + .reply(502) + + await expect(node.peerRouting.findPeer(peerId)) + .to.eventually.be.rejected() + + expect(mockApi.isDone()).to.equal(true) + }) + + it('should be able to get the closest peers', async () => { + const peerId = await createEd25519PeerId() + const closest1 = '12D3KooWLewYMMdGWAtuX852n4rgCWkK7EBn4CWbwwBzhsVoKxk3' + const closest2 = '12D3KooWDtoQbpKhtnWddfj72QmpFvvLDTsBLTFkjvgQm6cde2AK' + + const mockApi = nock('http://0.0.0.0:60197') + .post('/api/v0/dht/query') + .query(true) + .reply(200, + () => intoStream([ + `{"Extra":"","id":"${closest1}","Responses":[{"ID":"${closest1}","Addrs":["/ip4/127.0.0.1/tcp/63930","/ip4/127.0.0.1/tcp/63930"]}],"Type":1}\n`, + `{"Extra":"","id":"${closest2}","Responses":[{"ID":"${closest2}","Addrs":["/ip4/127.0.0.1/tcp/63506","/ip4/127.0.0.1/tcp/63506"]}],"Type":1}\n`, + `{"Extra":"","ID":"${closest2}","Responses":[],"Type":2}\n`, + `{"Extra":"","ID":"${closest1}","Responses":[],"Type":2}\n` + ]), + [ + 'Content-Type', 'application/json', + 'X-Chunked-Output', '1' + ]) + + const closestPeers = await all(node.peerRouting.getClosestPeers(peerId.toBytes())) + + expect(closestPeers).to.have.length(2) + expect(closestPeers[0].id.toString()).to.equal(closest1) + expect(closestPeers[0].multiaddrs).to.have.lengthOf(2) + expect(closestPeers[1].id.toString()).to.equal(closest2) + expect(closestPeers[1].multiaddrs).to.have.lengthOf(2) + expect(mockApi.isDone()).to.equal(true) + }) + + it('should handle errors when getting the closest peers', async () => { + const peerId = await createEd25519PeerId() + + const mockApi = nock('http://0.0.0.0:60197') + .post('/api/v0/dht/query') + .query(true) + .reply(502, 'Bad Gateway', [ + 'X-Chunked-Output', '1' + ]) + + await 
expect(drain(node.peerRouting.getClosestPeers(peerId.toBytes()))).to.eventually.be.rejected() + + expect(mockApi.isDone()).to.equal(true) + }) + }) + + describe('via dht and delegate routers', () => { + let node: Libp2pNode + let delegate: DelegatedPeerRouting + + beforeEach(async () => { + delegate = new DelegatedPeerRouting(createIpfsHttpClient({ + host: '0.0.0.0', + protocol: 'http', + port: 60197 + })) + + node = await createNode({ + config: createRoutingOptions({ + peerRouters: [delegate], + dht: new KadDHT() + }) + }) + }) + + afterEach(() => { + sinon.restore() + }) + + afterEach(async () => await node.stop()) + + it('should use the delegate if the dht fails to find the peer', async () => { + const remotePeerId = await createPeerId() + const results = { + id: remotePeerId, + multiaddrs: [], + protocols: [] + } + + if (node.dht == null) { + throw new Error('DHT not configured') + } + + sinon.stub(node.dht, 'findPeer').callsFake(async function * () {}) + sinon.stub(delegate, 'findPeer').callsFake(async () => { + return results + }) + + const peer = await node.peerRouting.findPeer(remotePeerId) + expect(peer).to.eql(results) + }) + + it('should not wait for the dht to return if the delegate does first', async () => { + const remotePeerId = await createPeerId() + const results = { + id: remotePeerId, + multiaddrs: [], + protocols: [] + } + + if (node.dht == null) { + throw new Error('DHT not configured') + } + + const defer = pDefer() + + sinon.stub(node.dht, 'findPeer').callsFake(async function * () { + yield { + name: 'SENDING_QUERY', + type: EventTypes.SENDING_QUERY, + to: remotePeerId, + messageName: 'FIND_NODE', + messageType: MessageType.FIND_NODE + } + await defer.promise + }) + sinon.stub(delegate, 'findPeer').callsFake(async () => { + return results + }) + + const peer = await node.peerRouting.findPeer(remotePeerId) + expect(peer).to.eql(results) + + defer.resolve() + }) + + it('should not wait for the delegate to return if the dht does first', async () => { + const remotePeerId = await createPeerId() + const result = { + id: remotePeerId, + multiaddrs: [], + protocols: [] + } + + if (node.dht == null) { + throw new Error('DHT not configured') + } + + const defer = pDefer() + + sinon.stub(node.dht, 'findPeer').callsFake(async function * () { + yield { + from: remotePeerId, + name: 'FINAL_PEER', + type: EventTypes.FINAL_PEER, + peer: result + } + }) + sinon.stub(delegate, 'findPeer').callsFake(async () => { + return await defer.promise + }) + + const peer = await node.peerRouting.findPeer(remotePeerId) + expect(peer).to.eql(result) + + defer.resolve(result) + }) + + it('should store the addresses of the found peer', async () => { + const remotePeerId = await createPeerId() + const result = { + id: remotePeerId, + multiaddrs: [ + new Multiaddr('/ip4/123.123.123.123/tcp/38982') + ], + protocols: [] + } + + if (node.dht == null) { + throw new Error('DHT not configured') + } + + const spy = sinon.spy(node.peerStore.addressBook, 'add') + + sinon.stub(node.dht, 'findPeer').callsFake(async function * () { + yield { + from: remotePeerId, + name: 'FINAL_PEER', + type: EventTypes.FINAL_PEER, + peer: result + } + }) + sinon.stub(delegate, 'findPeer').callsFake(async () => { + const deferred = pDefer() + + return await deferred.promise + }) + + await node.peerRouting.findPeer(remotePeerId) + + expect(spy.calledWith(result.id, result.multiaddrs)).to.be.true() + }) + + it('should use the delegate if the dht fails to get the closest peer', async () => { + const remotePeerId = await 
createPeerId() + const results = [{ + id: remotePeerId, + multiaddrs: [], + protocols: [] + }] + + if (node.dht == null) { + throw new Error('DHT not configured') + } + + sinon.stub(node.dht, 'getClosestPeers').callsFake(async function * () { }) + + sinon.stub(delegate, 'getClosestPeers').callsFake(async function * () { + yield results[0] + }) + + const closest = await all(node.peerRouting.getClosestPeers(remotePeerId.toBytes())) + + expect(closest).to.have.length.above(0) + expect(closest).to.eql(results) + }) + + it('should store the addresses of the closest peer', async () => { + const remotePeerId = await createPeerId() + const result = { + id: remotePeerId, + multiaddrs: [ + new Multiaddr('/ip4/123.123.123.123/tcp/38982') + ], + protocols: [] + } + + if (node.dht == null) { + throw new Error('DHT not configured') + } + + const spy = sinon.spy(node.peerStore.addressBook, 'add') + + sinon.stub(node.dht, 'getClosestPeers').callsFake(async function * () { }) + + sinon.stub(delegate, 'getClosestPeers').callsFake(async function * () { + yield result + }) + + await drain(node.peerRouting.getClosestPeers(remotePeerId.toBytes())) + + expect(spy.calledWith(result.id, result.multiaddrs)).to.be.true() + }) + + it('should dedupe closest peers', async () => { + const remotePeerId = await createPeerId() + const results = [{ + id: remotePeerId, + multiaddrs: [ + new Multiaddr('/ip4/123.123.123.123/tcp/38982') + ], + protocols: [] + }] + + if (node.dht == null) { + throw new Error('DHT not configured') + } + + sinon.stub(node.dht, 'getClosestPeers').callsFake(async function * () { + for (const peer of results) { + yield { + from: remotePeerId, + name: 'FINAL_PEER', + type: EventTypes.FINAL_PEER, + peer + } + } + }) + + sinon.stub(delegate, 'getClosestPeers').callsFake(async function * () { + yield * results + }) + + const peers = await all(node.peerRouting.getClosestPeers(remotePeerId.toBytes())) + + expect(peers).to.be.an('array').with.a.lengthOf(1).that.deep.equals(results) + }) + }) + + describe('peer routing refresh manager service', () => { + let node: Libp2pNode + let peerIds: PeerId[] + + before(async () => { + peerIds = await Promise.all([ + createPeerId(), + createPeerId() + ]) + }) + + afterEach(async () => { + sinon.restore() + + if (node != null) { + await node.stop() + } + }) + + it('should be enabled and start by default', async () => { + const results: PeerInfo[] = [ + { id: peerIds[0], multiaddrs: [new Multiaddr('/ip4/30.0.0.1/tcp/2000')], protocols: [] }, + { id: peerIds[1], multiaddrs: [new Multiaddr('/ip4/32.0.0.1/tcp/2000')], protocols: [] } + ] + + node = await createNode({ + config: createRoutingOptions({ + peerRouting: { + refreshManager: { + enabled: true, + bootDelay: 100 + } + } + }), + started: false + }) + + if (node.dht == null) { + throw new Error('DHT not configured') + } + + const peerStoreAddressBookAddStub = sinon.spy(node.peerStore.addressBook, 'add') + const dhtGetClosestPeersStub = sinon.stub(node.dht, 'getClosestPeers').callsFake(async function * () { + yield { + name: 'PEER_RESPONSE', + type: EventTypes.PEER_RESPONSE, + messageName: 'FIND_NODE', + messageType: MessageType.FIND_NODE, + from: peerIds[0], + closer: [ + results[0] + ], + providers: [] + } + yield { + name: 'PEER_RESPONSE', + type: EventTypes.PEER_RESPONSE, + messageName: 'FIND_NODE', + messageType: MessageType.FIND_NODE, + from: peerIds[0], + closer: [ + results[1] + ], + providers: [] + } + }) + + await node.start() + + await pWaitFor(() => dhtGetClosestPeersStub.callCount === 1) + await pWaitFor(() 
=> peerStoreAddressBookAddStub.callCount === results.length) + + const call0 = peerStoreAddressBookAddStub.getCall(0) + expect(call0.args[0].equals(results[0].id)) + call0.args[1].forEach((m, index) => { + expect(m.equals(results[0].multiaddrs[index])) + }) + + const call1 = peerStoreAddressBookAddStub.getCall(1) + expect(call1.args[0].equals(results[1].id)) + call0.args[1].forEach((m, index) => { + expect(m.equals(results[1].multiaddrs[index])) + }) + }) + + it('should support being disabled', async () => { + node = await createNode({ + config: createRoutingOptions({ + peerRouting: { + refreshManager: { + bootDelay: 100, + enabled: false + } + } + }), + started: false + }) + + if (node.dht == null) { + throw new Error('DHT not configured') + } + + const dhtGetClosestPeersStub = sinon.stub(node.dht, 'getClosestPeers').callsFake(async function * () { + yield { + name: 'SENDING_QUERY', + type: EventTypes.SENDING_QUERY, + to: peerIds[0], + messageName: 'FIND_NODE', + messageType: MessageType.FIND_NODE + } + throw new Error('should not be called') + }) + + await node.start() + await delay(100) + + expect(dhtGetClosestPeersStub.callCount === 0) + }) + + it('should start and run on interval', async () => { + node = await createNode({ + config: createRoutingOptions({ + peerRouting: { + refreshManager: { + interval: 500, + bootDelay: 200 + } + } + }), + started: false + }) + + if (node.dht == null) { + throw new Error('DHT not configured') + } + + const dhtGetClosestPeersStub = sinon.stub(node.dht, 'getClosestPeers').callsFake(async function * () { + yield { + name: 'PEER_RESPONSE', + type: EventTypes.PEER_RESPONSE, + messageName: 'FIND_NODE', + messageType: MessageType.FIND_NODE, + from: peerIds[0], + closer: [ + { id: peerIds[0], multiaddrs: [new Multiaddr('/ip4/30.0.0.1/tcp/2000')], protocols: [] } + ], + providers: [] + } + }) + + await node.start() + + // should run more than once + await pWaitFor(() => dhtGetClosestPeersStub.callCount === 2) + }) + }) +}) diff --git a/test/peer-routing/utils.js b/test/peer-routing/utils.js deleted file mode 100644 index 7b43d050..00000000 --- a/test/peer-routing/utils.js +++ /dev/null @@ -1,21 +0,0 @@ -'use strict' - -const KadDht = require('libp2p-kad-dht') -const mergeOptions = require('merge-options') -const baseOptions = require('../utils/base-options') - -module.exports.baseOptions = baseOptions - -const routingOptions = mergeOptions(baseOptions, { - modules: { - dht: KadDht - }, - config: { - dht: { - kBucketSize: 20, - enabled: true - } - } -}) - -module.exports.routingOptions = routingOptions diff --git a/test/peer-routing/utils.ts b/test/peer-routing/utils.ts new file mode 100644 index 00000000..25ae8b23 --- /dev/null +++ b/test/peer-routing/utils.ts @@ -0,0 +1,11 @@ +import { KadDHT } from '@libp2p/kad-dht' +import type { Libp2pOptions } from '../../src/index.js' +import { createBaseOptions } from '../utils/base-options.js' + +export function createRoutingOptions (...overrides: Libp2pOptions[]): Libp2pOptions { + return createBaseOptions({ + dht: new KadDHT({ + kBucketSize: 20 + }) + }, ...overrides) +} diff --git a/test/peer-store/address-book.spec.js b/test/peer-store/address-book.spec.js deleted file mode 100644 index e67a899c..00000000 --- a/test/peer-store/address-book.spec.js +++ /dev/null @@ -1,745 +0,0 @@ -'use strict' -/* eslint-env mocha */ -/* eslint max-nested-callbacks: ["error", 6] */ - -const { expect } = require('aegir/utils/chai') -const { Buffer } = require('buffer') -const { Multiaddr } = require('multiaddr') -const arrayEquals = 
require('libp2p-utils/src/array-equals') -const addressSort = require('libp2p-utils/src/address-sort') -const PeerId = require('peer-id') -const pDefer = require('p-defer') -const { MemoryDatastore } = require('datastore-core/memory') -const PeerStore = require('../../src/peer-store') -const Envelope = require('../../src/record/envelope') -const PeerRecord = require('../../src/record/peer-record') -const { mockConnectionGater } = require('../utils/mock-connection-gater') -const peerUtils = require('../utils/creators/peer') -const { - codes: { ERR_INVALID_PARAMETERS } -} = require('../../src/errors') - -/** - * @typedef {import('../../src/peer-store/types').PeerStore} PeerStore - * @typedef {import('../../src/peer-store/types').AddressBook} AddressBook - */ - -const addr1 = new Multiaddr('/ip4/127.0.0.1/tcp/8000') -const addr2 = new Multiaddr('/ip4/20.0.0.1/tcp/8001') -const addr3 = new Multiaddr('/ip4/127.0.0.1/tcp/8002') - -describe('addressBook', () => { - const connectionGater = mockConnectionGater() - let peerId - - before(async () => { - [peerId] = await peerUtils.createPeerId() - }) - - describe('addressBook.set', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {AddressBook} */ - let ab - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - ab = peerStore.addressBook - }) - - afterEach(() => { - peerStore.removeAllListeners() - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - try { - await ab.set('invalid peerId') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid peerId should throw error') - }) - - it('throws invalid parameters error if no addresses provided', async () => { - try { - await ab.set(peerId) - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('no addresses should throw error') - }) - - it('throws invalid parameters error if invalid multiaddrs are provided', async () => { - try { - await ab.set(peerId, ['invalid multiaddr']) - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid multiaddrs should throw error') - }) - - it('replaces the stored content by default and emit change event', async () => { - const defer = pDefer() - const supportedMultiaddrs = [addr1, addr2] - - peerStore.once('change:multiaddrs', ({ peerId, multiaddrs }) => { - expect(peerId).to.exist() - expect(multiaddrs).to.eql(supportedMultiaddrs) - defer.resolve() - }) - - await ab.set(peerId, supportedMultiaddrs) - const addresses = await ab.get(peerId) - const multiaddrs = addresses.map((mi) => mi.multiaddr) - expect(multiaddrs).to.have.deep.members(supportedMultiaddrs) - - return defer.promise - }) - - it('emits on set if not storing the exact same content', async () => { - const defer = pDefer() - - const supportedMultiaddrsA = [addr1, addr2] - const supportedMultiaddrsB = [addr2] - - let changeCounter = 0 - peerStore.on('change:multiaddrs', () => { - changeCounter++ - if (changeCounter > 1) { - defer.resolve() - } - }) - - // set 1 - await ab.set(peerId, supportedMultiaddrsA) - - // set 2 (same content) - await ab.set(peerId, supportedMultiaddrsB) - const addresses = await ab.get(peerId) - const multiaddrs = addresses.map((mi) => mi.multiaddr) - expect(multiaddrs).to.have.deep.members(supportedMultiaddrsB) - - await 
defer.promise - }) - - it('does not emit on set if it is storing the exact same content', async () => { - const defer = pDefer() - - const supportedMultiaddrs = [addr1, addr2] - - let changeCounter = 0 - peerStore.on('change:multiaddrs', () => { - changeCounter++ - if (changeCounter > 1) { - defer.reject() - } - }) - - // set 1 - await ab.set(peerId, supportedMultiaddrs) - - // set 2 (same content) - await ab.set(peerId, supportedMultiaddrs) - - // Wait 50ms for incorrect second event - setTimeout(() => { - defer.resolve() - }, 50) - - await defer.promise - }) - }) - - describe('addressBook.add', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {AddressBook} */ - let ab - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - ab = peerStore.addressBook - }) - - afterEach(() => { - peerStore.removeAllListeners() - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - try { - await ab.add('invalid peerId') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid peerId should throw error') - }) - - it('throws invalid parameters error if no addresses provided', async () => { - try { - await ab.add(peerId) - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('no addresses provided should throw error') - }) - - it('throws invalid parameters error if invalid multiaddrs are provided', async () => { - try { - await ab.add(peerId, ['invalid multiaddr']) - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid multiaddr should throw error') - }) - - it('does not emit event if no addresses are added', async () => { - const defer = pDefer() - - peerStore.on('peer', () => { - defer.reject() - }) - - await ab.add(peerId, []) - - // Wait 50ms for incorrect second event - setTimeout(() => { - defer.resolve() - }, 50) - - await defer.promise - }) - - it('adds the new content and emits change event', async () => { - const defer = pDefer() - - const supportedMultiaddrsA = [addr1, addr2] - const supportedMultiaddrsB = [addr3] - const finalMultiaddrs = supportedMultiaddrsA.concat(supportedMultiaddrsB) - - let changeTrigger = 2 - peerStore.on('change:multiaddrs', ({ multiaddrs }) => { - changeTrigger-- - if (changeTrigger === 0 && arrayEquals(multiaddrs, finalMultiaddrs)) { - defer.resolve() - } - }) - - // Replace - await ab.set(peerId, supportedMultiaddrsA) - let addresses = await ab.get(peerId) - let multiaddrs = addresses.map((mi) => mi.multiaddr) - expect(multiaddrs).to.have.deep.members(supportedMultiaddrsA) - - // Add - await ab.add(peerId, supportedMultiaddrsB) - addresses = await ab.get(peerId) - multiaddrs = addresses.map((mi) => mi.multiaddr) - expect(multiaddrs).to.have.deep.members(finalMultiaddrs) - - return defer.promise - }) - - it('emits on add if the content to add not exists', async () => { - const defer = pDefer() - - const supportedMultiaddrsA = [addr1] - const supportedMultiaddrsB = [addr2] - const finalMultiaddrs = supportedMultiaddrsA.concat(supportedMultiaddrsB) - - let changeCounter = 0 - peerStore.on('change:multiaddrs', () => { - changeCounter++ - if (changeCounter > 1) { - defer.resolve() - } - }) - - // set 1 - await ab.set(peerId, supportedMultiaddrsA) - - // set 2 (content already existing) - await ab.add(peerId, 
supportedMultiaddrsB) - const addresses = await ab.get(peerId) - const multiaddrs = addresses.map((mi) => mi.multiaddr) - expect(multiaddrs).to.have.deep.members(finalMultiaddrs) - - await defer.promise - }) - - it('does not emit on add if the content to add already exists', async () => { - const defer = pDefer() - - const supportedMultiaddrsA = [addr1, addr2] - const supportedMultiaddrsB = [addr2] - - let changeCounter = 0 - peerStore.on('change:multiaddrs', () => { - changeCounter++ - if (changeCounter > 1) { - defer.reject() - } - }) - - // set 1 - await ab.set(peerId, supportedMultiaddrsA) - - // set 2 (content already existing) - await ab.add(peerId, supportedMultiaddrsB) - - // Wait 50ms for incorrect second event - setTimeout(() => { - defer.resolve() - }, 50) - - await defer.promise - }) - - it('does not add replicated content', async () => { - // set 1 - await ab.set(peerId, [addr1, addr1]) - - const addresses = await ab.get(peerId) - expect(addresses).to.have.lengthOf(1) - }) - }) - - describe('addressBook.get', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {AddressBook} */ - let ab - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - ab = peerStore.addressBook - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - try { - await ab.get('invalid peerId') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid peerId should throw error') - }) - - it('returns empty if no multiaddrs are known for the provided peer', async () => { - const addresses = await ab.get(peerId) - - expect(addresses).to.be.empty() - }) - - it('returns the multiaddrs stored', async () => { - const supportedMultiaddrs = [addr1, addr2] - - await ab.set(peerId, supportedMultiaddrs) - - const addresses = await ab.get(peerId) - const multiaddrs = addresses.map((mi) => mi.multiaddr) - expect(multiaddrs).to.have.deep.members(supportedMultiaddrs) - }) - }) - - describe('addressBook.getMultiaddrsForPeer', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {AddressBook} */ - let ab - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - ab = peerStore.addressBook - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - try { - await ab.getMultiaddrsForPeer('invalid peerId') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid peerId should throw error') - }) - - it('returns empty if no multiaddrs are known for the provided peer', async () => { - const addresses = await ab.getMultiaddrsForPeer(peerId) - - expect(addresses).to.be.empty() - }) - - it('returns the multiaddrs stored', async () => { - const supportedMultiaddrs = [addr1, addr2] - - await ab.set(peerId, supportedMultiaddrs) - - const multiaddrs = await ab.getMultiaddrsForPeer(peerId) - multiaddrs.forEach((m) => { - expect(m.getPeerId()).to.equal(peerId.toB58String()) - }) - }) - - it('can sort multiaddrs providing a sorter', async () => { - const supportedMultiaddrs = [addr1, addr2] - await ab.set(peerId, supportedMultiaddrs) - - const multiaddrs = await ab.getMultiaddrsForPeer(peerId, addressSort.publicAddressesFirst) - const sortedAddresses = 
addressSort.publicAddressesFirst(supportedMultiaddrs.map((m) => ({ multiaddr: m }))) - - multiaddrs.forEach((m, index) => { - expect(m.equals(sortedAddresses[index].multiaddr)) - }) - }) - }) - - describe('addressBook.delete', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {AddressBook} */ - let ab - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - ab = peerStore.addressBook - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - try { - await ab.delete('invalid peerId') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid peerId should throw error') - }) - - it('does not emit an event if no records exist for the peer', async () => { - const defer = pDefer() - - peerStore.on('change:multiaddrs', () => { - defer.reject() - }) - - await ab.delete(peerId) - - // Wait 50ms for incorrect invalid event - setTimeout(() => { - defer.resolve() - }, 50) - - return defer.promise - }) - - it('emits an event if the record exists', async () => { - const defer = pDefer() - - const supportedMultiaddrs = [addr1, addr2] - await ab.set(peerId, supportedMultiaddrs) - - // Listen after set - peerStore.on('change:multiaddrs', ({ multiaddrs }) => { - expect(multiaddrs.length).to.eql(0) - defer.resolve() - }) - - await ab.delete(peerId) - - return defer.promise - }) - }) - - describe('certified records', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {AddressBook} */ - let ab - - describe('consumes a valid peer record and stores its data', () => { - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - ab = peerStore.addressBook - }) - - it('no previous data in AddressBook', async () => { - const multiaddrs = [addr1, addr2] - const peerRecord = new PeerRecord({ - peerId, - multiaddrs - }) - const envelope = await Envelope.seal(peerRecord, peerId) - - // consume peer record - const consumed = await ab.consumePeerRecord(envelope) - expect(consumed).to.eql(true) - - // Validate AddressBook addresses - const addrs = await ab.get(peerId) - expect(addrs).to.exist() - expect(addrs).to.have.lengthOf(multiaddrs.length) - addrs.forEach((addr, index) => { - expect(addr.isCertified).to.eql(true) - expect(multiaddrs[index].equals(addr.multiaddr)).to.eql(true) - }) - }) - - it('emits change:multiaddrs event when adding multiaddrs', async () => { - const defer = pDefer() - const multiaddrs = [addr1, addr2] - const peerRecord = new PeerRecord({ - peerId, - multiaddrs - }) - const envelope = await Envelope.seal(peerRecord, peerId) - - peerStore.once('change:multiaddrs', ({ peerId, multiaddrs }) => { - expect(peerId).to.exist() - expect(multiaddrs).to.eql(multiaddrs) - defer.resolve() - }) - - // consume peer record - const consumed = await ab.consumePeerRecord(envelope) - expect(consumed).to.eql(true) - - return defer.promise - }) - - it('emits change:multiaddrs event with same data currently in AddressBook (not certified)', async () => { - const defer = pDefer() - const multiaddrs = [addr1, addr2] - - // Set addressBook data - await ab.set(peerId, multiaddrs) - - // Validate data exists, but not certified - let addrs = await ab.get(peerId) - expect(addrs).to.exist() - expect(addrs).to.have.lengthOf(multiaddrs.length) - - addrs.forEach((addr, index) => { - 
expect(addr.isCertified).to.eql(false) - expect(multiaddrs[index].equals(addr.multiaddr)).to.eql(true) - }) - - // Create peer record - const peerRecord = new PeerRecord({ - peerId, - multiaddrs - }) - const envelope = await Envelope.seal(peerRecord, peerId) - - peerStore.once('change:multiaddrs', ({ peerId, multiaddrs }) => { - expect(peerId).to.exist() - expect(multiaddrs).to.eql(multiaddrs) - defer.resolve() - }) - - // consume peer record - const consumed = await ab.consumePeerRecord(envelope) - expect(consumed).to.eql(true) - - // Wait event - await defer.promise - - // Validate data exists and certified - addrs = await ab.get(peerId) - expect(addrs).to.exist() - expect(addrs).to.have.lengthOf(multiaddrs.length) - addrs.forEach((addr, index) => { - expect(addr.isCertified).to.eql(true) - expect(multiaddrs[index].equals(addr.multiaddr)).to.eql(true) - }) - }) - - it('emits change:multiaddrs event with previous partial data in AddressBook (not certified)', async () => { - const defer = pDefer() - const multiaddrs = [addr1, addr2] - - // Set addressBook data - await ab.set(peerId, [addr1]) - - // Validate data exists, but not certified - let addrs = await ab.get(peerId) - expect(addrs).to.exist() - expect(addrs).to.have.lengthOf(1) - expect(addrs[0].isCertified).to.eql(false) - expect(addrs[0].multiaddr.equals(addr1)).to.eql(true) - - // Create peer record - const peerRecord = new PeerRecord({ - peerId, - multiaddrs - }) - const envelope = await Envelope.seal(peerRecord, peerId) - - peerStore.once('change:multiaddrs', ({ peerId, multiaddrs }) => { - expect(peerId).to.exist() - expect(multiaddrs).to.eql(multiaddrs) - defer.resolve() - }) - - // consume peer record - const consumed = await ab.consumePeerRecord(envelope) - expect(consumed).to.eql(true) - - // Wait event - await defer.promise - - // Validate data exists and certified - addrs = await ab.get(peerId) - expect(addrs).to.exist() - expect(addrs).to.have.lengthOf(multiaddrs.length) - addrs.forEach((addr, index) => { - expect(addr.isCertified).to.eql(true) - expect(multiaddrs[index].equals(addr.multiaddr)).to.eql(true) - }) - }) - - it('with previous different data in AddressBook (not certified)', async () => { - const defer = pDefer() - const multiaddrsUncertified = [addr3] - const multiaddrsCertified = [addr1, addr2] - - // Set addressBook data - await ab.set(peerId, multiaddrsUncertified) - - // Validate data exists, but not certified - let addrs = await ab.get(peerId) - expect(addrs).to.exist() - expect(addrs).to.have.lengthOf(multiaddrsUncertified.length) - addrs.forEach((addr, index) => { - expect(addr.isCertified).to.eql(false) - expect(multiaddrsUncertified[index].equals(addr.multiaddr)).to.eql(true) - }) - - // Create peer record - const peerRecord = new PeerRecord({ - peerId, - multiaddrs: multiaddrsCertified - }) - const envelope = await Envelope.seal(peerRecord, peerId) - - peerStore.once('change:multiaddrs', ({ peerId, multiaddrs }) => { - expect(peerId).to.exist() - expect(multiaddrs).to.eql(multiaddrs) - defer.resolve() - }) - - // consume peer record - const consumed = await ab.consumePeerRecord(envelope) - expect(consumed).to.eql(true) - - // Wait event - await defer.promise - - // Validate data exists and certified - addrs = await ab.get(peerId) - expect(addrs).to.exist() - expect(addrs).to.have.lengthOf(multiaddrsCertified.length) - addrs.forEach((addr, index) => { - expect(addr.isCertified).to.eql(true) - expect(multiaddrsCertified[index].equals(addr.multiaddr)).to.eql(true) - }) - }) - }) - - describe('fails to 
consume invalid peer records', () => { - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - ab = peerStore.addressBook - }) - - it('invalid peer record', async () => { - const invalidEnvelope = { - payload: Buffer.from('invalid-peerRecord') - } - - const consumed = await ab.consumePeerRecord(invalidEnvelope) - expect(consumed).to.eql(false) - }) - - it('peer that created the envelope is not the same as the peer record', async () => { - const multiaddrs = [addr1, addr2] - - // Create peer record - const peerId2 = await PeerId.create() - const peerRecord = new PeerRecord({ - peerId: peerId2, - multiaddrs - }) - const envelope = await Envelope.seal(peerRecord, peerId) - - const consumed = await ab.consumePeerRecord(envelope) - expect(consumed).to.eql(false) - }) - - it('does not store an outdated record', async () => { - const multiaddrs = [addr1, addr2] - const peerRecord1 = new PeerRecord({ - peerId, - multiaddrs, - seqNumber: Date.now() - }) - const peerRecord2 = new PeerRecord({ - peerId, - multiaddrs, - seqNumber: Date.now() - 1 - }) - const envelope1 = await Envelope.seal(peerRecord1, peerId) - const envelope2 = await Envelope.seal(peerRecord2, peerId) - - // Consume envelope1 (bigger seqNumber) - let consumed = await ab.consumePeerRecord(envelope1) - expect(consumed).to.eql(true) - - consumed = await ab.consumePeerRecord(envelope2) - expect(consumed).to.eql(false) - }) - - it('empty multiaddrs', async () => { - const peerRecord = new PeerRecord({ - peerId, - multiaddrs: [] - }) - const envelope = await Envelope.seal(peerRecord, peerId) - - const consumed = await ab.consumePeerRecord(envelope) - expect(consumed).to.eql(false) - }) - }) - }) -}) diff --git a/test/peer-store/key-book.spec.js b/test/peer-store/key-book.spec.js deleted file mode 100644 index c5c70db7..00000000 --- a/test/peer-store/key-book.spec.js +++ /dev/null @@ -1,114 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') -const { MemoryDatastore } = require('datastore-core/memory') -const PeerStore = require('../../src/peer-store') -const pDefer = require('p-defer') -const peerUtils = require('../utils/creators/peer') -const { - codes: { ERR_INVALID_PARAMETERS } -} = require('../../src/errors') - -/** - * @typedef {import('../../src/peer-store/types').PeerStore} PeerStore - * @typedef {import('../../src/peer-store/types').KeyBook} KeyBook - * @typedef {import('peer-id')} PeerId - */ - -describe('keyBook', () => { - /** @type {PeerId} */ - let peerId - /** @type {PeerStore} */ - let peerStore - /** @type {KeyBook} */ - let kb - /** @type {MemoryDatastore} */ - let datastore - - beforeEach(async () => { - [peerId] = await peerUtils.createPeerId() - datastore = new MemoryDatastore() - peerStore = new PeerStore({ - peerId, - datastore - }) - kb = peerStore.keyBook - }) - - it('throws invalid parameters error if invalid PeerId is provided in set', async () => { - try { - await kb.set('invalid peerId') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid peerId should throw error') - }) - - it('throws invalid parameters error if invalid PeerId is provided in get', async () => { - try { - await kb.get('invalid peerId') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid peerId should throw error') - 
}) - - it('stores the peerId in the book and returns the public key', async () => { - // Set PeerId - await kb.set(peerId, peerId.pubKey) - - // Get public key - const pubKey = await kb.get(peerId) - expect(peerId.pubKey.bytes).to.equalBytes(pubKey.bytes) - }) - - it('should not store if already stored', async () => { - const spy = sinon.spy(datastore, 'put') - - // Set PeerId - await kb.set(peerId, peerId.pubKey) - await kb.set(peerId, peerId.pubKey) - - expect(spy).to.have.property('callCount', 1) - }) - - it('should emit an event when setting a key', async () => { - const defer = pDefer() - - peerStore.on('change:pubkey', ({ peerId: id, pubKey }) => { - expect(id.toB58String()).to.equal(peerId.toB58String()) - expect(pubKey.bytes).to.equalBytes(peerId.pubKey.bytes) - defer.resolve() - }) - - // Set PeerId - await kb.set(peerId, peerId.pubKey) - await defer.promise - }) - - it('should not set when key does not match', async () => { - const [edKey] = await peerUtils.createPeerId({ fixture: false, opts: { keyType: 'Ed25519' } }) - - // Set PeerId - await expect(kb.set(edKey, peerId.pubKey)).to.eventually.be.rejectedWith(/bytes do not match/) - }) - - it('should emit an event when deleting a key', async () => { - const defer = pDefer() - - await kb.set(peerId, peerId.pubKey) - - peerStore.on('change:pubkey', ({ peerId: id, pubKey }) => { - expect(id.toB58String()).to.equal(peerId.toB58String()) - expect(pubKey).to.be.undefined() - defer.resolve() - }) - - await kb.delete(peerId) - await defer.promise - }) -}) diff --git a/test/peer-store/metadata-book.spec.js b/test/peer-store/metadata-book.spec.js deleted file mode 100644 index 214a23ca..00000000 --- a/test/peer-store/metadata-book.spec.js +++ /dev/null @@ -1,384 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const { MemoryDatastore } = require('datastore-core/memory') -const pDefer = require('p-defer') -const PeerStore = require('../../src/peer-store') - -const peerUtils = require('../utils/creators/peer') -const { - codes: { ERR_INVALID_PARAMETERS } -} = require('../../src/errors') - -/** - * @typedef {import('../../src/peer-store/types').PeerStore} PeerStore - * @typedef {import('../../src/peer-store/types').MetadataBook} MetadataBook - * @typedef {import('peer-id')} PeerId - */ - -describe('metadataBook', () => { - /** @type {PeerId} */ - let peerId - - before(async () => { - [peerId] = await peerUtils.createPeerId() - }) - - describe('metadataBook.set', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {MetadataBook} */ - let mb - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore() - }) - mb = peerStore.metadataBook - }) - - afterEach(() => { - peerStore.removeAllListeners() - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - try { - await mb.set('invalid peerId') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid peerId should throw error') - }) - - it('throws invalid parameters error if no metadata provided', async () => { - try { - await mb.set(peerId) - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('no key provided should throw error') - }) - - it('throws invalid parameters error if no value provided', async () => { - try { - await mb.setValue(peerId, 
'location') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('no value provided should throw error') - }) - - it('throws invalid parameters error if value is not a buffer', async () => { - try { - await mb.setValue(peerId, 'location', 'mars') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid value provided should throw error') - }) - - it('stores the content and emit change event', async () => { - const defer = pDefer() - const metadataKey = 'location' - const metadataValue = uint8ArrayFromString('mars') - - peerStore.once('change:metadata', ({ peerId, metadata }) => { - expect(peerId).to.exist() - expect(metadata.get(metadataKey)).to.equalBytes(metadataValue) - defer.resolve() - }) - - await mb.setValue(peerId, metadataKey, metadataValue) - - const value = await mb.getValue(peerId, metadataKey) - expect(value).to.equalBytes(metadataValue) - - const peerMetadata = await mb.get(peerId) - expect(peerMetadata).to.exist() - expect(peerMetadata.get(metadataKey)).to.equalBytes(metadataValue) - - return defer.promise - }) - - it('emits on set if not storing the exact same content', async () => { - const defer = pDefer() - const metadataKey = 'location' - const metadataValue1 = uint8ArrayFromString('mars') - const metadataValue2 = uint8ArrayFromString('saturn') - - let changeCounter = 0 - peerStore.on('change:metadata', () => { - changeCounter++ - if (changeCounter > 1) { - defer.resolve() - } - }) - - // set 1 - await mb.setValue(peerId, metadataKey, metadataValue1) - - // set 2 (same content) - await mb.setValue(peerId, metadataKey, metadataValue2) - - const value = await mb.getValue(peerId, metadataKey) - expect(value).to.equalBytes(metadataValue2) - - const peerMetadata = await mb.get(peerId) - expect(peerMetadata).to.exist() - expect(peerMetadata.get(metadataKey)).to.equalBytes(metadataValue2) - - return defer.promise - }) - - it('does not emit on set if it is storing the exact same content', async () => { - const defer = pDefer() - const metadataKey = 'location' - const metadataValue = uint8ArrayFromString('mars') - - let changeCounter = 0 - peerStore.on('change:metadata', () => { - changeCounter++ - if (changeCounter > 1) { - defer.reject() - } - }) - - // set 1 - await mb.setValue(peerId, metadataKey, metadataValue) - - // set 2 (same content) - await mb.setValue(peerId, metadataKey, metadataValue) - - // Wait 50ms for incorrect second event - setTimeout(() => { - defer.resolve() - }, 50) - - return defer.promise - }) - }) - - describe('metadataBook.get', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {MetadataBook} */ - let mb - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore() - }) - mb = peerStore.metadataBook - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - try { - await mb.get('invalid peerId') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid peerId should throw error') - }) - - it('returns empty if no metadata is known for the provided peer', async () => { - const metadata = await mb.get(peerId) - - expect(metadata).to.be.empty() - }) - - it('returns the metadata stored', async () => { - const metadataKey = 'location' - const metadataValue = uint8ArrayFromString('mars') - const metadata = new Map() - metadata.set(metadataKey, metadataValue) - - await 
mb.set(peerId, metadata) - - const peerMetadata = await mb.get(peerId) - expect(peerMetadata).to.exist() - expect(peerMetadata.get(metadataKey)).to.equalBytes(metadataValue) - }) - }) - - describe('metadataBook.getValue', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {MetadataBook} */ - let mb - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore() - }) - mb = peerStore.metadataBook - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - try { - await mb.getValue('invalid peerId') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid peerId should throw error') - }) - - it('returns undefined if no metadata is known for the provided peer', async () => { - const metadataKey = 'location' - const metadata = await mb.getValue(peerId, metadataKey) - - expect(metadata).to.not.exist() - }) - - it('returns the metadata value stored for the given key', async () => { - const metadataKey = 'location' - const metadataValue = uint8ArrayFromString('mars') - - await mb.setValue(peerId, metadataKey, metadataValue) - - const value = await mb.getValue(peerId, metadataKey) - expect(value).to.exist() - expect(value).to.equalBytes(metadataValue) - }) - - it('returns undefined if no metadata is known for the provided peer and key', async () => { - const metadataKey = 'location' - const metadataBadKey = 'nickname' - const metadataValue = uint8ArrayFromString('mars') - - await mb.setValue(peerId, metadataKey, metadataValue) - - const metadata = await mb.getValue(peerId, metadataBadKey) - expect(metadata).to.not.exist() - }) - }) - - describe('metadataBook.delete', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {MetadataBook} */ - let mb - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore() - }) - mb = peerStore.metadataBook - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - try { - await mb.delete('invalid peerId') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid peerId should throw error') - }) - - it('should not emit event if no records exist for the peer', async () => { - const defer = pDefer() - - peerStore.on('change:metadata', () => { - defer.reject() - }) - - await mb.delete(peerId) - - // Wait 50ms for incorrect invalid event - setTimeout(() => { - defer.resolve() - }, 50) - - return defer.promise - }) - - it('should emit an event if the record exists for the peer', async () => { - const defer = pDefer() - const metadataKey = 'location' - const metadataValue = uint8ArrayFromString('mars') - - await mb.setValue(peerId, metadataKey, metadataValue) - - // Listen after set - peerStore.on('change:metadata', () => { - defer.resolve() - }) - - await mb.delete(peerId) - - return defer.promise - }) - }) - - describe('metadataBook.deleteValue', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {MetadataBook} */ - let mb - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore() - }) - mb = peerStore.metadataBook - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - try { - await mb.deleteValue('invalid peerId') - } catch (/** @type {any} */ err) { - expect(err.code).to.equal(ERR_INVALID_PARAMETERS) - return - } - throw new Error('invalid peerId should throw error') - }) - - 
it('should not emit event if no records exist for the peer', async () => { - const defer = pDefer() - const metadataKey = 'location' - - peerStore.on('change:metadata', () => { - defer.reject() - }) - - await mb.deleteValue(peerId, metadataKey) - - // Wait 50ms for incorrect invalid event - setTimeout(() => { - defer.resolve() - }, 50) - - return defer.promise - }) - - it('should emit event if a record exists for the peer', async () => { - const defer = pDefer() - const metadataKey = 'location' - const metadataValue = uint8ArrayFromString('mars') - - await mb.setValue(peerId, metadataKey, metadataValue) - - // Listen after set - peerStore.on('change:metadata', () => { - defer.resolve() - }) - - await mb.deleteValue(peerId, metadataKey) - - return defer.promise - }) - }) -}) diff --git a/test/peer-store/peer-store.node.js b/test/peer-store/peer-store.node.js deleted file mode 100644 index c7d14f11..00000000 --- a/test/peer-store/peer-store.node.js +++ /dev/null @@ -1,50 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') - -const baseOptions = require('../utils/base-options') -const peerUtils = require('../utils/creators/peer') -const all = require('it-all') - -describe('libp2p.peerStore', () => { - let libp2p, remoteLibp2p - - beforeEach(async () => { - [libp2p, remoteLibp2p] = await peerUtils.createPeer({ - number: 2, - populateAddressBooks: false, - config: { - ...baseOptions - } - }) - }) - - afterEach(() => Promise.all([libp2p, remoteLibp2p].map(l => l.stop()))) - - it('adds peer address to AddressBook and keys to the keybook when establishing connection', async () => { - const remoteIdStr = remoteLibp2p.peerId.toB58String() - - const spyAddressBook = sinon.spy(libp2p.peerStore.addressBook, 'add') - const spyKeyBook = sinon.spy(libp2p.peerStore.keyBook, 'set') - - const remoteMultiaddr = `${remoteLibp2p.multiaddrs[0]}/p2p/${remoteIdStr}` - const conn = await libp2p.dial(remoteMultiaddr) - - expect(conn).to.exist() - expect(spyAddressBook).to.have.property('called', true) - expect(spyKeyBook).to.have.property('called', true) - - const localPeers = await all(libp2p.peerStore.getPeers()) - - expect(localPeers.length).to.equal(1) - - const publicKeyInLocalPeer = localPeers[0].id.pubKey - expect(publicKeyInLocalPeer.bytes).to.equalBytes(remoteLibp2p.peerId.pubKey.bytes) - - const publicKeyInRemotePeer = await remoteLibp2p.peerStore.keyBook.get(libp2p.peerId) - expect(publicKeyInRemotePeer).to.exist() - expect(publicKeyInRemotePeer.bytes).to.equalBytes(libp2p.peerId.pubKey.bytes) - }) -}) diff --git a/test/peer-store/peer-store.spec.js b/test/peer-store/peer-store.spec.js deleted file mode 100644 index d0380025..00000000 --- a/test/peer-store/peer-store.spec.js +++ /dev/null @@ -1,227 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const all = require('it-all') -const PeerStore = require('../../src/peer-store') -const { Multiaddr } = require('multiaddr') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const { MemoryDatastore } = require('datastore-core/memory') -const peerUtils = require('../utils/creators/peer') -const { mockConnectionGater } = require('../utils/mock-connection-gater') - -const addr1 = new Multiaddr('/ip4/127.0.0.1/tcp/8000') -const addr2 = new Multiaddr('/ip4/127.0.0.1/tcp/8001') -const addr3 = new Multiaddr('/ip4/127.0.0.1/tcp/8002') -const addr4 = new Multiaddr('/ip4/127.0.0.1/tcp/8003') - -const proto1 = 
'/protocol1' -const proto2 = '/protocol2' -const proto3 = '/protocol3' - -/** - * @typedef {import('../../src/peer-store/types').PeerStore} PeerStore - */ - -describe('peer-store', () => { - const connectionGater = mockConnectionGater() - let peerIds - before(async () => { - peerIds = await peerUtils.createPeerId({ - number: 5 - }) - }) - - describe('empty books', () => { - /** @type {PeerStore} */ - let peerStore - - beforeEach(() => { - peerStore = new PeerStore({ - peerId: peerIds[4], - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - }) - - it('has an empty map of peers', async () => { - const peers = await all(peerStore.getPeers()) - expect(peers.length).to.equal(0) - }) - - it('deletes a peerId', async () => { - await peerStore.addressBook.set(peerIds[0], [new Multiaddr('/ip4/127.0.0.1/tcp/4001')]) - await expect(peerStore.has(peerIds[0])).to.eventually.be.true() - await peerStore.delete(peerIds[0]) - await expect(peerStore.has(peerIds[0])).to.eventually.be.false() - }) - - it('sets the peer\'s public key to the KeyBook', async () => { - await peerStore.keyBook.set(peerIds[0], peerIds[0].pubKey) - await expect(peerStore.keyBook.get(peerIds[0])).to.eventually.deep.equal(peerIds[0].pubKey) - }) - }) - - describe('previously populated books', () => { - /** @type {PeerStore} */ - let peerStore - - beforeEach(async () => { - peerStore = new PeerStore({ - peerId: peerIds[4], - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - - // Add peer0 with { addr1, addr2 } and { proto1 } - await peerStore.addressBook.set(peerIds[0], [addr1, addr2]) - await peerStore.protoBook.set(peerIds[0], [proto1]) - - // Add peer1 with { addr3 } and { proto2, proto3 } - await peerStore.addressBook.set(peerIds[1], [addr3]) - await peerStore.protoBook.set(peerIds[1], [proto2, proto3]) - - // Add peer2 with { addr4 } - await peerStore.addressBook.set(peerIds[2], [addr4]) - - // Add peer3 with { addr4 } and { proto2 } - await peerStore.addressBook.set(peerIds[3], [addr4]) - await peerStore.protoBook.set(peerIds[3], [proto2]) - }) - - it('has peers', async () => { - const peers = await all(peerStore.getPeers()) - - expect(peers.length).to.equal(4) - expect(peers.map(peer => peer.id.toB58String())).to.have.members([ - peerIds[0].toB58String(), - peerIds[1].toB58String(), - peerIds[2].toB58String(), - peerIds[3].toB58String() - ]) - }) - - it('deletes a stored peer', async () => { - await peerStore.delete(peerIds[0]) - - const peers = await all(peerStore.getPeers()) - expect(peers.length).to.equal(3) - expect(Array.from(peers.keys())).to.not.have.members([peerIds[0].toB58String()]) - }) - - it('deletes a stored peer which is only on one book', async () => { - await peerStore.delete(peerIds[2]) - - const peers = await all(peerStore.getPeers()) - expect(peers.length).to.equal(3) - }) - - it('gets the stored information of a peer in all its books', async () => { - const peer = await peerStore.get(peerIds[0]) - expect(peer).to.exist() - expect(peer.protocols).to.have.members([proto1]) - - const peerMultiaddrs = peer.addresses.map((mi) => mi.multiaddr) - expect(peerMultiaddrs).to.have.deep.members([addr1, addr2]) - - expect(peer.id.toB58String()).to.equal(peerIds[0].toB58String()) - }) - - it('gets the stored information of a peer that is not present in all its books', async () => { - const peers = await peerStore.get(peerIds[2]) - expect(peers).to.exist() - expect(peers.protocols.length).to.eql(0) - - const peerMultiaddrs = 
peers.addresses.map((mi) => mi.multiaddr) - expect(peerMultiaddrs).to.have.deep.members([addr4]) - }) - - it('can find all the peers supporting a protocol', async () => { - const peerSupporting2 = [] - - for await (const peer of peerStore.getPeers()) { - if (peer.protocols.includes(proto2)) { - peerSupporting2.push(peer) - } - } - - expect(peerSupporting2.length).to.eql(2) - expect(peerSupporting2[0].id.toB58String()).to.eql(peerIds[1].toB58String()) - expect(peerSupporting2[1].id.toB58String()).to.eql(peerIds[3].toB58String()) - }) - - it('can find all the peers listening on a given address', async () => { - const peerListening4 = [] - - for await (const peer of peerStore.getPeers()) { - const multiaddrs = peer.addresses.map((mi) => mi.multiaddr.toString()) - - if (multiaddrs.includes(addr4.toString())) { - peerListening4.push(peer) - } - } - - expect(peerListening4.length).to.eql(2) - expect(peerListening4[0].id.toB58String()).to.eql(peerIds[2].toB58String()) - expect(peerListening4[1].id.toB58String()).to.eql(peerIds[3].toB58String()) - }) - }) - - describe('peerStore.getPeers', () => { - /** @type {PeerStore} */ - let peerStore - - beforeEach(() => { - peerStore = new PeerStore({ - peerId: peerIds[4], - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - }) - - it('returns peers if only addresses are known', async () => { - await peerStore.addressBook.set(peerIds[0], [addr1]) - - const peers = await all(peerStore.getPeers()) - expect(peers.length).to.equal(1) - - const peerData = peers[0] - expect(peerData).to.exist() - expect(peerData.id).to.exist() - expect(peerData.addresses).to.have.lengthOf(1) - expect(peerData.protocols).to.have.lengthOf(0) - expect(peerData.metadata).to.be.empty() - }) - - it('returns peers if only protocols are known', async () => { - await peerStore.protoBook.set(peerIds[0], [proto1]) - - const peers = await all(peerStore.getPeers()) - expect(peers.length).to.equal(1) - - const peerData = peers[0] - expect(peerData).to.exist() - expect(peerData.id).to.exist() - expect(peerData.addresses).to.have.lengthOf(0) - expect(peerData.protocols).to.have.lengthOf(1) - expect(peerData.metadata).to.be.empty() - }) - - it('returns peers if only metadata is known', async () => { - const metadataKey = 'location' - const metadataValue = uint8ArrayFromString('earth') - await peerStore.metadataBook.setValue(peerIds[0], metadataKey, metadataValue) - - const peers = await all(peerStore.getPeers()) - expect(peers.length).to.equal(1) - - const peerData = peers[0] - expect(peerData).to.exist() - expect(peerData.id).to.exist() - expect(peerData.addresses).to.have.lengthOf(0) - expect(peerData.protocols).to.have.lengthOf(0) - expect(peerData.metadata).to.exist() - expect(peerData.metadata.get(metadataKey)).to.equalBytes(metadataValue) - }) - }) -}) diff --git a/test/peer-store/proto-book.spec.js b/test/peer-store/proto-book.spec.js deleted file mode 100644 index 667ec4aa..00000000 --- a/test/peer-store/proto-book.spec.js +++ /dev/null @@ -1,416 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') -const { MemoryDatastore } = require('datastore-core/memory') -const pDefer = require('p-defer') -const pWaitFor = require('p-wait-for') - -const PeerStore = require('../../src/peer-store') - -const peerUtils = require('../utils/creators/peer') -const { - codes: { ERR_INVALID_PARAMETERS } -} = require('../../src/errors') - -/** - * @typedef 
{import('../../src/peer-store/types').PeerStore} PeerStore - * @typedef {import('../../src/peer-store/types').ProtoBook} ProtoBook - * @typedef {import('peer-id')} PeerId - */ - -const arraysAreEqual = (a, b) => a.length === b.length && a.sort().every((item, index) => b[index] === item) - -describe('protoBook', () => { - /** @type {PeerId} */ - let peerId - - before(async () => { - [peerId] = await peerUtils.createPeerId() - }) - - describe('protoBook.set', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {ProtoBook} */ - let pb - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore() - }) - pb = peerStore.protoBook - }) - - afterEach(() => { - peerStore.removeAllListeners() - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - await expect(pb.set('invalid peerId')).to.eventually.be.rejected().with.property('code', ERR_INVALID_PARAMETERS) - }) - - it('throws invalid parameters error if no protocols provided', async () => { - await expect(pb.set(peerId)).to.eventually.be.rejected().with.property('code', ERR_INVALID_PARAMETERS) - }) - - it('replaces the stored content by default and emit change event', async () => { - const defer = pDefer() - const supportedProtocols = ['protocol1', 'protocol2'] - - peerStore.once('change:protocols', ({ peerId, protocols }) => { - expect(peerId).to.exist() - expect(protocols).to.have.deep.members(supportedProtocols) - defer.resolve() - }) - - await pb.set(peerId, supportedProtocols) - const protocols = await pb.get(peerId) - expect(protocols).to.have.deep.members(supportedProtocols) - - await defer.promise - }) - - it('emits on set if not storing the exact same content', async () => { - const defer = pDefer() - - const supportedProtocolsA = ['protocol1', 'protocol2'] - const supportedProtocolsB = ['protocol2'] - - let changeCounter = 0 - peerStore.on('change:protocols', () => { - changeCounter++ - if (changeCounter > 1) { - defer.resolve() - } - }) - - // set 1 - await pb.set(peerId, supportedProtocolsA) - - // set 2 (same content) - await pb.set(peerId, supportedProtocolsB) - const protocols = await pb.get(peerId) - expect(protocols).to.have.deep.members(supportedProtocolsB) - - await defer.promise - }) - - it('does not emit on set if it is storing the exact same content', async () => { - const defer = pDefer() - - const supportedProtocols = ['protocol1', 'protocol2'] - - let changeCounter = 0 - peerStore.on('change:protocols', () => { - changeCounter++ - if (changeCounter > 1) { - defer.reject() - } - }) - - // set 1 - await pb.set(peerId, supportedProtocols) - - // set 2 (same content) - await pb.set(peerId, supportedProtocols) - - // Wait 50ms for incorrect second event - setTimeout(() => { - defer.resolve() - }, 50) - - return defer.promise - }) - }) - - describe('protoBook.add', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {ProtoBook} */ - let pb - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore() - }) - pb = peerStore.protoBook - }) - - afterEach(() => { - peerStore.removeAllListeners() - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - await expect(pb.add('invalid peerId')).to.eventually.be.rejected().with.property('code', ERR_INVALID_PARAMETERS) - }) - - it('throws invalid parameters error if no protocols provided', async () => { - await expect(pb.add(peerId)).to.eventually.be.rejected().with.property('code', ERR_INVALID_PARAMETERS) - }) - - it('adds 
the new content and emits change event', async () => { - const defer = pDefer() - - const supportedProtocolsA = ['protocol1', 'protocol2'] - const supportedProtocolsB = ['protocol3'] - const finalProtocols = supportedProtocolsA.concat(supportedProtocolsB) - - let changeTrigger = 2 - peerStore.on('change:protocols', ({ protocols }) => { - changeTrigger-- - if (changeTrigger === 0 && arraysAreEqual(protocols, finalProtocols)) { - defer.resolve() - } - }) - - // Replace - await pb.set(peerId, supportedProtocolsA) - let protocols = await pb.get(peerId) - expect(protocols).to.have.deep.members(supportedProtocolsA) - - // Add - await pb.add(peerId, supportedProtocolsB) - protocols = await pb.get(peerId) - expect(protocols).to.have.deep.members(finalProtocols) - - return defer.promise - }) - - it('emits on add if the content to add not exists', async () => { - const defer = pDefer() - - const supportedProtocolsA = ['protocol1'] - const supportedProtocolsB = ['protocol2'] - const finalProtocols = supportedProtocolsA.concat(supportedProtocolsB) - - let changeCounter = 0 - peerStore.on('change:protocols', () => { - changeCounter++ - if (changeCounter > 1) { - defer.resolve() - } - }) - - // set 1 - await pb.set(peerId, supportedProtocolsA) - - // set 2 (content already existing) - await pb.add(peerId, supportedProtocolsB) - const protocols = await pb.get(peerId) - expect(protocols).to.have.deep.members(finalProtocols) - - return defer.promise - }) - - it('does not emit on add if the content to add already exists', async () => { - const defer = pDefer() - - const supportedProtocolsA = ['protocol1', 'protocol2'] - const supportedProtocolsB = ['protocol2'] - - let changeCounter = 0 - peerStore.on('change:protocols', () => { - changeCounter++ - if (changeCounter > 1) { - defer.reject() - } - }) - - // set 1 - await pb.set(peerId, supportedProtocolsA) - - // set 2 (content already existing) - await pb.add(peerId, supportedProtocolsB) - - // Wait 50ms for incorrect second event - setTimeout(() => { - defer.resolve() - }, 50) - - return defer.promise - }) - }) - - describe('protoBook.remove', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {ProtoBook} */ - let pb - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore() - }) - pb = peerStore.protoBook - }) - - afterEach(() => { - peerStore.removeAllListeners() - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - await expect(pb.remove('invalid peerId')).to.eventually.be.rejected().with.property('code', ERR_INVALID_PARAMETERS) - }) - - it('throws invalid parameters error if no protocols provided', async () => { - await expect(pb.remove(peerId)).to.eventually.be.rejected().with.property('code', ERR_INVALID_PARAMETERS) - }) - - it('removes the given protocol and emits change event', async () => { - const spy = sinon.spy() - - const supportedProtocols = ['protocol1', 'protocol2'] - const removedProtocols = ['protocol1'] - const finalProtocols = supportedProtocols.filter(p => !removedProtocols.includes(p)) - - peerStore.on('change:protocols', spy) - - // Replace - await pb.set(peerId, supportedProtocols) - let protocols = await pb.get(peerId) - expect(protocols).to.have.deep.members(supportedProtocols) - - // Remove - await pb.remove(peerId, removedProtocols) - protocols = await pb.get(peerId) - expect(protocols).to.have.deep.members(finalProtocols) - - await pWaitFor(() => spy.callCount === 2) - - const [firstCallArgs] = spy.firstCall.args - const [secondCallArgs] = 
spy.secondCall.args - expect(arraysAreEqual(firstCallArgs.protocols, supportedProtocols)) - expect(arraysAreEqual(secondCallArgs.protocols, finalProtocols)) - }) - - it('emits on remove if the content changes', async () => { - const spy = sinon.spy() - - const supportedProtocols = ['protocol1', 'protocol2'] - const removedProtocols = ['protocol2'] - const finalProtocols = supportedProtocols.filter(p => !removedProtocols.includes(p)) - - peerStore.on('change:protocols', spy) - - // set - await pb.set(peerId, supportedProtocols) - - // remove (content already existing) - await pb.remove(peerId, removedProtocols) - const protocols = await pb.get(peerId) - expect(protocols).to.have.deep.members(finalProtocols) - - return pWaitFor(() => spy.callCount === 2) - }) - - it('does not emit on remove if the content does not change', async () => { - const spy = sinon.spy() - - const supportedProtocols = ['protocol1', 'protocol2'] - const removedProtocols = ['protocol3'] - - peerStore.on('change:protocols', spy) - - // set - await pb.set(peerId, supportedProtocols) - - // remove - await pb.remove(peerId, removedProtocols) - - // Only one event - expect(spy.callCount).to.eql(1) - }) - }) - - describe('protoBook.get', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {ProtoBook} */ - let pb - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore() - }) - pb = peerStore.protoBook - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - await expect(pb.get('invalid peerId')).to.eventually.be.rejected().with.property('code', ERR_INVALID_PARAMETERS) - }) - - it('returns empty if no protocols are known for the provided peer', async () => { - const protocols = await pb.get(peerId) - - expect(protocols).to.be.empty() - }) - - it('returns the protocols stored', async () => { - const supportedProtocols = ['protocol1', 'protocol2'] - - await pb.set(peerId, supportedProtocols) - - const protocols = await pb.get(peerId) - expect(protocols).to.have.deep.members(supportedProtocols) - }) - }) - - describe('protoBook.delete', () => { - /** @type {PeerStore} */ - let peerStore - /** @type {ProtoBook} */ - let pb - - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore() - }) - pb = peerStore.protoBook - }) - - it('throws invalid parameters error if invalid PeerId is provided', async () => { - await expect(pb.delete('invalid peerId')).to.eventually.be.rejected().with.property('code', ERR_INVALID_PARAMETERS) - }) - - it('should not emit event if no records exist for the peer', async () => { - const defer = pDefer() - - peerStore.on('change:protocols', () => { - defer.reject() - }) - - await pb.delete(peerId) - - // Wait 50ms for incorrect invalid event - setTimeout(() => { - defer.resolve() - }, 50) - - await defer.promise - }) - - it('should emit event if a record exists for the peer', async () => { - const defer = pDefer() - - const supportedProtocols = ['protocol1', 'protocol2'] - await pb.set(peerId, supportedProtocols) - - // Listen after set - peerStore.on('change:protocols', ({ protocols }) => { - expect(protocols.length).to.eql(0) - defer.resolve() - }) - - await pb.delete(peerId) - - await defer.promise - }) - }) -}) diff --git a/test/pnet/index.spec.js b/test/pnet/index.spec.js deleted file mode 100644 index 76278e38..00000000 --- a/test/pnet/index.spec.js +++ /dev/null @@ -1,92 +0,0 @@ -/* eslint-env mocha */ -'use strict' - -const { expect } = require('aegir/utils/chai') -const 
duplexPair = require('it-pair/duplex') -const pipe = require('it-pipe') -const { collect } = require('streaming-iterables') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') - -const Protector = require('../../src/pnet') -const Errors = Protector.errors -const generate = Protector.generate - -const swarmKeyBuffer = new Uint8Array(95) -const wrongSwarmKeyBuffer = new Uint8Array(95) - -// Write new psk files to the buffers -generate(swarmKeyBuffer) -generate(wrongSwarmKeyBuffer) - -describe('private network', () => { - it('should accept a valid psk buffer', () => { - const protector = new Protector(swarmKeyBuffer) - - expect(protector.tag).to.equal('/key/swarm/psk/1.0.0/') - expect(protector.psk.byteLength).to.equal(32) - }) - - it('should protect a simple connection', async () => { - const [inbound, outbound] = duplexPair() - const protector = new Protector(swarmKeyBuffer) - - const [aToB, bToA] = await Promise.all([ - protector.protect(inbound), - protector.protect(outbound) - ]) - - pipe( - [uint8ArrayFromString('hello world'), uint8ArrayFromString('doo dah')], - aToB - ) - - const output = await pipe( - bToA, - source => (async function * () { - for await (const chunk of source) { - yield chunk.slice() - } - })(), - collect - ) - - expect(output).to.eql([uint8ArrayFromString('hello world'), uint8ArrayFromString('doo dah')]) - }) - - it('should not be able to share correct data with different keys', async () => { - const [inbound, outbound] = duplexPair() - const protector = new Protector(swarmKeyBuffer) - const protectorB = new Protector(wrongSwarmKeyBuffer) - - const [aToB, bToA] = await Promise.all([ - protector.protect(inbound), - protectorB.protect(outbound) - ]) - - pipe( - [uint8ArrayFromString('hello world'), uint8ArrayFromString('doo dah')], - aToB - ) - - const output = await pipe( - bToA, - collect - ) - - expect(output).to.not.eql([uint8ArrayFromString('hello world'), uint8ArrayFromString('doo dah')]) - }) - - describe('invalid psks', () => { - it('should not accept a bad psk', () => { - expect(() => { - return new Protector(uint8ArrayFromString('not-a-key')) - }).to.throw(Errors.INVALID_PSK) - }) - - it('should not accept a psk of incorrect length', () => { - expect(() => { - return new Protector(uint8ArrayFromString('/key/swarm/psk/1.0.0/\n/base16/\ndffb7e')) - }).to.throw(Errors.INVALID_PSK) - }) - }) -}) diff --git a/test/pnet/index.spec.ts b/test/pnet/index.spec.ts new file mode 100644 index 00000000..1cf1f3c1 --- /dev/null +++ b/test/pnet/index.spec.ts @@ -0,0 +1,115 @@ +/* eslint-env mocha */ +import { expect } from 'aegir/utils/chai.js' +import { pipe } from 'it-pipe' +import all from 'it-all' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { PreSharedKeyConnectionProtector } from '../../src/pnet/index.js' +import { generate } from '../../src/pnet/key-generator.js' +import { INVALID_PSK } from '../../src/pnet/errors.js' +import { mockMultiaddrConnPair } from '@libp2p/interface-compliance-tests/mocks' +import { Multiaddr } from '@multiformats/multiaddr' +import { createEd25519PeerId } from '@libp2p/peer-id-factory' + +const swarmKeyBuffer = new Uint8Array(95) +const wrongSwarmKeyBuffer = new Uint8Array(95) + +// Write new psk files to the buffers +generate(swarmKeyBuffer) +generate(wrongSwarmKeyBuffer) + +describe('private network', () => { + it('should accept a valid psk buffer', () => { + const protector = new PreSharedKeyConnectionProtector({ + psk: swarmKeyBuffer + }) + + 
expect(protector.tag).to.equal('/key/swarm/psk/1.0.0/') + }) + + it('should protect a simple connection', async () => { + const { inbound, outbound } = mockMultiaddrConnPair({ + addrs: [ + new Multiaddr('/ip4/127.0.0.1/tcp/1234'), + new Multiaddr('/ip4/127.0.0.1/tcp/1235') + ], + remotePeer: await createEd25519PeerId() + }) + const protector = new PreSharedKeyConnectionProtector({ + psk: swarmKeyBuffer + }) + + const [aToB, bToA] = await Promise.all([ + protector.protect(inbound), + protector.protect(outbound) + ]) + + void pipe( + [uint8ArrayFromString('hello world'), uint8ArrayFromString('doo dah')], + aToB + ) + + const output = await pipe( + bToA, + async function * (source) { + for await (const chunk of source) { + yield chunk.slice() + } + }, + async (source) => await all(source) + ) + + expect(output).to.eql([uint8ArrayFromString('hello world'), uint8ArrayFromString('doo dah')]) + }) + + it('should not be able to share correct data with different keys', async () => { + const { inbound, outbound } = mockMultiaddrConnPair({ + addrs: [ + new Multiaddr('/ip4/127.0.0.1/tcp/1234'), + new Multiaddr('/ip4/127.0.0.1/tcp/1235') + ], + remotePeer: await createEd25519PeerId() + }) + const protector = new PreSharedKeyConnectionProtector({ + psk: swarmKeyBuffer + }) + const protectorB = new PreSharedKeyConnectionProtector({ + enabled: true, + psk: wrongSwarmKeyBuffer + }) + + const [aToB, bToA] = await Promise.all([ + protector.protect(inbound), + protectorB.protect(outbound) + ]) + + void pipe( + [uint8ArrayFromString('hello world'), uint8ArrayFromString('doo dah')], + aToB + ) + + const output = await pipe( + bToA, + async (source) => await all(source) + ) + + expect(output).to.not.eql([uint8ArrayFromString('hello world'), uint8ArrayFromString('doo dah')]) + }) + + describe('invalid psks', () => { + it('should not accept a bad psk', () => { + expect(() => { + return new PreSharedKeyConnectionProtector({ + psk: uint8ArrayFromString('not-a-key') + }) + }).to.throw(INVALID_PSK) + }) + + it('should not accept a psk of incorrect length', () => { + expect(() => { + return new PreSharedKeyConnectionProtector({ + psk: uint8ArrayFromString('/key/swarm/psk/1.0.0/\n/base16/\ndffb7e') + }) + }).to.throw(INVALID_PSK) + }) + }) +}) diff --git a/test/record/envelope.spec.js b/test/record/envelope.spec.js deleted file mode 100644 index c932528f..00000000 --- a/test/record/envelope.spec.js +++ /dev/null @@ -1,87 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const { fromString: uint8arrayFromString } = require('uint8arrays/from-string') -const { equals: uint8arrayEquals } = require('uint8arrays/equals') -const Envelope = require('../../src/record/envelope') -const { codes: ErrorCodes } = require('../../src/errors') - -const peerUtils = require('../utils/creators/peer') - -const domain = 'libp2p-testing' -const codec = uint8arrayFromString('/libp2p/testdata') - -class TestRecord { - constructor (data) { - this.domain = domain - this.codec = codec - this.data = data - } - - marshal () { - return uint8arrayFromString(this.data) - } - - equals (other) { - return uint8arrayEquals(this.data, other.data) - } -} - -describe('Envelope', () => { - const payloadType = codec - let peerId - let testRecord - - before(async () => { - [peerId] = await peerUtils.createPeerId() - testRecord = new TestRecord('test-data') - }) - - it('creates an envelope with a random key', () => { - const payload = testRecord.marshal() - const signature = 
uint8arrayFromString(Math.random().toString(36).substring(7)) - - const envelope = new Envelope({ - peerId, - payloadType, - payload, - signature - }) - - expect(envelope).to.exist() - expect(envelope.peerId.equals(peerId)).to.eql(true) - expect(envelope.payloadType).to.equalBytes(payloadType) - expect(envelope.payload).to.equalBytes(payload) - expect(envelope.signature).to.equalBytes(signature) - }) - - it('can seal a record', async () => { - const envelope = await Envelope.seal(testRecord, peerId) - expect(envelope).to.exist() - expect(envelope.peerId.equals(peerId)).to.eql(true) - expect(envelope.payloadType).to.eql(payloadType) - expect(envelope.payload).to.exist() - expect(envelope.signature).to.exist() - }) - - it('can open and verify a sealed record', async () => { - const envelope = await Envelope.seal(testRecord, peerId) - const rawEnvelope = envelope.marshal() - - const unmarshalledEnvelope = await Envelope.openAndCertify(rawEnvelope, testRecord.domain) - expect(unmarshalledEnvelope).to.exist() - - const equals = envelope.equals(unmarshalledEnvelope) - expect(equals).to.eql(true) - }) - - it('throw on open and verify when a different domain is used', async () => { - const envelope = await Envelope.seal(testRecord, peerId) - const rawEnvelope = envelope.marshal() - - await expect(Envelope.openAndCertify(rawEnvelope, '/bad-domain')) - .to.eventually.be.rejected() - .and.to.have.property('code', ErrorCodes.ERR_SIGNATURE_NOT_VALID) - }) -}) diff --git a/test/record/peer-record.spec.js b/test/record/peer-record.spec.js deleted file mode 100644 index 532c79a5..00000000 --- a/test/record/peer-record.spec.js +++ /dev/null @@ -1,157 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') - -const tests = require('libp2p-interfaces-compliance-tests/src/record') -const { Multiaddr } = require('multiaddr') -const PeerId = require('peer-id') - -const Envelope = require('../../src/record/envelope') -const PeerRecord = require('../../src/record/peer-record') - -const peerUtils = require('../utils/creators/peer') - -describe('interface-record compliance', () => { - tests({ - async setup () { - const [peerId] = await peerUtils.createPeerId() - return new PeerRecord({ peerId }) - }, - async teardown () { - // cleanup resources created by setup() - } - }) -}) - -describe('PeerRecord', () => { - let peerId - - before(async () => { - [peerId] = await peerUtils.createPeerId() - }) - - it('de/serializes the same as a go record', async () => { - const privKey = Uint8Array.from([8, 1, 18, 64, 133, 251, 231, 43, 96, 100, 40, 144, 4, 165, 49, 249, 103, 137, 141, 245, 49, 158, 224, 41, 146, 253, 216, 64, 33, 250, 80, 82, 67, 75, 246, 238, 17, 187, 163, 237, 23, 33, 148, 140, 239, 180, 229, 11, 10, 11, 181, 202, 216, 166, 181, 45, 199, 177, 164, 15, 79, 102, 82, 16, 92, 145, 226, 196]) - const rawEnvelope = Uint8Array.from([10, 36, 8, 1, 18, 32, 17, 187, 163, 237, 23, 33, 148, 140, 239, 180, 229, 11, 10, 11, 181, 202, 216, 166, 181, 45, 199, 177, 164, 15, 79, 102, 82, 16, 92, 145, 226, 196, 18, 2, 3, 1, 26, 170, 1, 10, 38, 0, 36, 8, 1, 18, 32, 17, 187, 163, 237, 23, 33, 148, 140, 239, 180, 229, 11, 10, 11, 181, 202, 216, 166, 181, 45, 199, 177, 164, 15, 79, 102, 82, 16, 92, 145, 226, 196, 16, 216, 184, 224, 191, 147, 145, 182, 151, 22, 26, 10, 10, 8, 4, 1, 2, 3, 4, 6, 0, 0, 26, 10, 10, 8, 4, 1, 2, 3, 4, 6, 0, 1, 26, 10, 10, 8, 4, 1, 2, 3, 4, 6, 0, 2, 26, 10, 10, 8, 4, 1, 2, 3, 4, 6, 0, 3, 26, 10, 10, 8, 4, 1, 2, 3, 4, 6, 0, 4, 26, 10, 10, 8, 4, 1, 2, 3, 4, 6, 0, 5, 26, 
10, 10, 8, 4, 1, 2, 3, 4, 6, 0, 6, 26, 10, 10, 8, 4, 1, 2, 3, 4, 6, 0, 7, 26, 10, 10, 8, 4, 1, 2, 3, 4, 6, 0, 8, 26, 10, 10, 8, 4, 1, 2, 3, 4, 6, 0, 9, 42, 64, 177, 151, 247, 107, 159, 40, 138, 242, 180, 103, 254, 102, 111, 119, 68, 118, 40, 112, 73, 180, 36, 183, 57, 117, 200, 134, 14, 251, 2, 55, 45, 2, 106, 121, 149, 132, 84, 26, 215, 47, 38, 84, 52, 100, 133, 188, 163, 236, 227, 100, 98, 183, 209, 177, 57, 28, 141, 39, 109, 196, 171, 139, 202, 11]) - const peerId = await PeerId.createFromPrivKey(privKey) - - const env = await Envelope.openAndCertify(rawEnvelope, PeerRecord.DOMAIN) - expect(peerId.equals(env.peerId)) - - const record = PeerRecord.createFromProtobuf(env.payload) - - // The payload isn't going to match because of how the protobuf encodes uint64 values - // They are marshalled correctly on both sides, but will be off by 1 value - // Signatures will still be validated - const jsEnv = await Envelope.seal(record, peerId) - expect(env.payloadType).to.eql(jsEnv.payloadType) - }) - - it('creates a peer record with peerId', () => { - const peerRecord = new PeerRecord({ peerId }) - - expect(peerRecord).to.exist() - expect(peerRecord.peerId).to.exist() - expect(peerRecord.multiaddrs).to.exist() - expect(peerRecord.multiaddrs).to.have.lengthOf(0) - expect(peerRecord.seqNumber).to.exist() - }) - - it('creates a peer record with provided data', () => { - const multiaddrs = [ - new Multiaddr('/ip4/127.0.0.1/tcp/2000') - ] - const seqNumber = Date.now() - const peerRecord = new PeerRecord({ peerId, multiaddrs, seqNumber }) - - expect(peerRecord).to.exist() - expect(peerRecord.peerId).to.exist() - expect(peerRecord.multiaddrs).to.exist() - expect(peerRecord.multiaddrs).to.eql(multiaddrs) - expect(peerRecord.seqNumber).to.exist() - expect(peerRecord.seqNumber).to.eql(seqNumber) - }) - - it('marshals and unmarshals a peer record', () => { - const multiaddrs = [ - new Multiaddr('/ip4/127.0.0.1/tcp/2000') - ] - const seqNumber = Date.now() - const peerRecord = new PeerRecord({ peerId, multiaddrs, seqNumber }) - - // Marshal - const rawData = peerRecord.marshal() - expect(rawData).to.exist() - - // Unmarshal - const unmarshalPeerRecord = PeerRecord.createFromProtobuf(rawData) - expect(unmarshalPeerRecord).to.exist() - - const equals = peerRecord.equals(unmarshalPeerRecord) - expect(equals).to.eql(true) - }) - - it('equals returns false if the peer record has a different peerId', async () => { - const peerRecord0 = new PeerRecord({ peerId }) - - const [peerId1] = await peerUtils.createPeerId({ fixture: false }) - const peerRecord1 = new PeerRecord({ peerId: peerId1 }) - - const equals = peerRecord0.equals(peerRecord1) - expect(equals).to.eql(false) - }) - - it('equals returns false if the peer record has a different seqNumber', () => { - const ts0 = Date.now() - const peerRecord0 = new PeerRecord({ peerId, seqNumber: ts0 }) - - const ts1 = ts0 + 20 - const peerRecord1 = new PeerRecord({ peerId, seqNumber: ts1 }) - - const equals = peerRecord0.equals(peerRecord1) - expect(equals).to.eql(false) - }) - - it('equals returns false if the peer record has a different multiaddrs', () => { - const multiaddrs = [ - new Multiaddr('/ip4/127.0.0.1/tcp/2000') - ] - const peerRecord0 = new PeerRecord({ peerId, multiaddrs }) - - const multiaddrs1 = [ - new Multiaddr('/ip4/127.0.0.1/tcp/2001') - ] - const peerRecord1 = new PeerRecord({ peerId, multiaddrs: multiaddrs1 }) - - const equals = peerRecord0.equals(peerRecord1) - expect(equals).to.eql(false) - }) -}) - -describe('PeerRecord inside Envelope', () => { - 
let peerId - let peerRecord - - before(async () => { - [peerId] = await peerUtils.createPeerId() - const multiaddrs = [ - new Multiaddr('/ip4/127.0.0.1/tcp/2000') - ] - const seqNumber = Date.now() - peerRecord = new PeerRecord({ peerId, multiaddrs, seqNumber }) - }) - - it('creates an envelope with the PeerRecord and can unmarshal it', async () => { - const e = await Envelope.seal(peerRecord, peerId) - const byteE = e.marshal() - - const decodedE = await Envelope.openAndCertify(byteE, peerRecord.domain) - expect(decodedE).to.exist() - - const decodedPeerRecord = PeerRecord.createFromProtobuf(decodedE.payload) - - const equals = peerRecord.equals(decodedPeerRecord) - expect(equals).to.eql(true) - }) -}) diff --git a/test/registrar/registrar.spec.js b/test/registrar/registrar.spec.js deleted file mode 100644 index fa9be0c8..00000000 --- a/test/registrar/registrar.spec.js +++ /dev/null @@ -1,198 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const pDefer = require('p-defer') - -const { EventEmitter } = require('events') -const { MemoryDatastore } = require('datastore-core/memory') -const Topology = require('libp2p-interfaces/src/topology/multicodec-topology') -const PeerStore = require('../../src/peer-store') -const Registrar = require('../../src/registrar') -const { mockConnectionGater } = require('../utils/mock-connection-gater') -const createMockConnection = require('../utils/mockConnection') -const peerUtils = require('../utils/creators/peer') -const baseOptions = require('../utils/base-options.browser') - -const multicodec = '/test/1.0.0' - -describe('registrar', () => { - const connectionGater = mockConnectionGater() - let peerStore - let registrar - let peerId - - before(async () => { - [peerId] = await peerUtils.createPeerId() - }) - - describe('errors', () => { - beforeEach(() => { - peerStore = new PeerStore({ - peerId, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - registrar = new Registrar({ peerStore, connectionManager: new EventEmitter() }) - }) - - it('should fail to register a protocol if no multicodec is provided', () => { - return expect(registrar.register()).to.eventually.be.rejected() - }) - - it('should fail to register a protocol if an invalid topology is provided', () => { - const fakeTopology = { - random: 1 - } - - return expect(registrar.register(fakeTopology)).to.eventually.be.rejected() - }) - }) - - describe('registration', () => { - let libp2p - - beforeEach(async () => { - [libp2p] = await peerUtils.createPeer({ - config: { - modules: baseOptions.modules - }, - started: false - }) - }) - - afterEach(() => libp2p.stop()) - - it('should be able to register a protocol', async () => { - const topologyProps = new Topology({ - multicodecs: multicodec, - handlers: { - onConnect: () => { }, - onDisconnect: () => { } - } - }) - - const identifier = await libp2p.registrar.register(topologyProps) - - expect(identifier).to.exist() - }) - - it('should be able to unregister a protocol', async () => { - const topologyProps = new Topology({ - multicodecs: multicodec, - handlers: { - onConnect: () => { }, - onDisconnect: () => { } - } - }) - - const identifier = await libp2p.registrar.register(topologyProps) - const success = libp2p.registrar.unregister(identifier) - - expect(success).to.eql(true) - }) - - it('should fail to unregister if no register was made', () => { - const success = libp2p.registrar.unregister('bad-identifier') - - expect(success).to.eql(false) - }) - - 
it('should call onConnect handler for connected peers after register', async () => { - const onConnectDefer = pDefer() - const onDisconnectDefer = pDefer() - - // Setup connections before registrar - const conn = await createMockConnection() - const remotePeerId = conn.remotePeer - - const topologyProps = new Topology({ - multicodecs: multicodec, - handlers: { - onConnect: (peerId, connection) => { - expect(peerId.toB58String()).to.eql(remotePeerId.toB58String()) - expect(connection.id).to.eql(conn.id) - - onConnectDefer.resolve() - }, - onDisconnect: (peerId) => { - expect(peerId.toB58String()).to.eql(remotePeerId.toB58String()) - - onDisconnectDefer.resolve() - } - } - }) - - await libp2p.start() - - // Register protocol - const identifier = await libp2p.registrar.register(topologyProps) - const topology = libp2p.registrar.topologies.get(identifier) - - // Topology created - expect(topology).to.exist() - - // Add connected peer with protocol to peerStore and registrar - await libp2p.peerStore.protoBook.add(remotePeerId, [multicodec]) - - await libp2p.connectionManager.onConnect(conn) - expect(libp2p.connectionManager.size).to.eql(1) - - await conn.close() - - libp2p.connectionManager.onDisconnect(conn) - expect(libp2p.connectionManager.size).to.eql(0) - - // Wait for handlers to be called - return Promise.all([ - onConnectDefer.promise, - onDisconnectDefer.promise - ]) - }) - - it('should call onConnect handler after register, once a peer is connected and protocols are updated', async () => { - const onConnectDefer = pDefer() - const onDisconnectDefer = pDefer() - - const topologyProps = new Topology({ - multicodecs: multicodec, - handlers: { - onConnect: () => { - onConnectDefer.resolve() - }, - onDisconnect: () => { - onDisconnectDefer.resolve() - } - } - }) - - await libp2p.start() - - // Register protocol - const identifier = await libp2p.registrar.register(topologyProps) - const topology = libp2p.registrar.topologies.get(identifier) - - // Topology created - expect(topology).to.exist() - expect(libp2p.connectionManager.size).to.eql(0) - - // Setup connections before registrar - const conn = await createMockConnection() - const remotePeerId = conn.remotePeer - - // Add connected peer to peerStore and registrar - await libp2p.peerStore.protoBook.set(remotePeerId, []) - - // Add protocol to peer and update it - await libp2p.peerStore.protoBook.add(remotePeerId, [multicodec]) - - await libp2p.connectionManager.onConnect(conn) - await onConnectDefer.promise - - // Remove protocol to peer and update it - await libp2p.peerStore.protoBook.set(remotePeerId, []) - - await onDisconnectDefer.promise - }) - }) -}) diff --git a/test/registrar/registrar.spec.ts b/test/registrar/registrar.spec.ts new file mode 100644 index 00000000..6ed291b3 --- /dev/null +++ b/test/registrar/registrar.spec.ts @@ -0,0 +1,228 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import pDefer from 'p-defer' +import { MemoryDatastore } from 'datastore-core/memory' +import { createTopology } from '@libp2p/topology' +import { PersistentPeerStore } from '@libp2p/peer-store' +import { DefaultRegistrar } from '../../src/registrar.js' +import { mockConnectionGater, mockDuplex, mockMultiaddrConnection, mockUpgrader, mockConnection } from '@libp2p/interface-compliance-tests/mocks' +import { createPeerId, createNode } from '../utils/creators/peer.js' +import { createBaseOptions } from '../utils/base-options.browser.js' +import type { Registrar } from '@libp2p/interfaces/registrar' +import type { PeerId } 
from '@libp2p/interfaces/peer-id' +import { Components } from '@libp2p/interfaces/components' +import { createLibp2pNode, Libp2pNode } from '../../src/libp2p.js' +import { createEd25519PeerId } from '@libp2p/peer-id-factory' +import { CustomEvent } from '@libp2p/interfaces' +import type { Connection } from '@libp2p/interfaces/connection' +import { DefaultConnectionManager } from '../../src/connection-manager/index.js' +import { Plaintext } from '../../src/insecure/index.js' +import { WebSockets } from '@libp2p/websockets' +import { Mplex } from '@libp2p/mplex' + +const protocol = '/test/1.0.0' + +describe('registrar', () => { + const connectionGater = mockConnectionGater() + let components: Components + let registrar: Registrar + let peerId: PeerId + + before(async () => { + peerId = await createPeerId() + }) + + describe('errors', () => { + beforeEach(() => { + components = new Components({ + peerId, + datastore: new MemoryDatastore(), + upgrader: mockUpgrader() + }) + components.setPeerStore(new PersistentPeerStore(components, { + addressFilter: connectionGater.filterMultiaddrForPeer + })) + components.setConnectionManager(new DefaultConnectionManager(components)) + registrar = new DefaultRegistrar(components) + }) + + it('should fail to register a protocol if no multicodec is provided', () => { + // @ts-expect-error invalid parameters + return expect(registrar.register()).to.eventually.be.rejected() + }) + + it('should fail to register a protocol if an invalid topology is provided', () => { + const fakeTopology = { + random: 1 + } + + // @ts-expect-error invalid parameters + return expect(registrar.register(fakeTopology)).to.eventually.be.rejected() + }) + }) + + describe('registration', () => { + let libp2p: Libp2pNode + + beforeEach(async () => { + libp2p = await createNode({ + config: createBaseOptions(), + started: false + }) + }) + + afterEach(async () => await libp2p.stop()) + + it('should be able to register a protocol', async () => { + const topology = createTopology({ + onConnect: () => { }, + onDisconnect: () => { } + }) + + expect(libp2p.components.getRegistrar().getTopologies(protocol)).to.have.lengthOf(0) + + const identifier = await libp2p.components.getRegistrar().register(protocol, topology) + + expect(identifier).to.exist() + expect(libp2p.components.getRegistrar().getTopologies(protocol)).to.have.lengthOf(1) + }) + + it('should be able to unregister a protocol', async () => { + const topology = createTopology({ + onConnect: () => { }, + onDisconnect: () => { } + }) + + expect(libp2p.components.getRegistrar().getTopologies(protocol)).to.have.lengthOf(0) + + const identifier = await libp2p.components.getRegistrar().register(protocol, topology) + + expect(libp2p.components.getRegistrar().getTopologies(protocol)).to.have.lengthOf(1) + + libp2p.components.getRegistrar().unregister(identifier) + + expect(libp2p.components.getRegistrar().getTopologies(protocol)).to.have.lengthOf(0) + }) + + it('should not error if unregistering unregistered topology handler', () => { + libp2p.components.getRegistrar().unregister('bad-identifier') + }) + + it('should call onConnect handler for connected peers after register', async () => { + const onConnectDefer = pDefer() + const onDisconnectDefer = pDefer() + + // Setup connections before registrar + const remotePeerId = await createEd25519PeerId() + const conn = mockConnection(mockMultiaddrConnection(mockDuplex(), remotePeerId)) + + const topology = createTopology({ + onConnect: (peerId, connection) => { + 
expect(peerId.equals(remotePeerId)).to.be.true() + expect(connection.id).to.eql(conn.id) + + onConnectDefer.resolve() + }, + onDisconnect: (peerId) => { + expect(peerId.equals(remotePeerId)).to.be.true() + + onDisconnectDefer.resolve() + } + }) + + await libp2p.start() + + // Register protocol + await libp2p.components.getRegistrar().register(protocol, topology) + + // Add connected peer with protocol to peerStore and registrar + await libp2p.peerStore.protoBook.add(remotePeerId, [protocol]) + + // remote peer connects + await libp2p.components.getUpgrader().dispatchEvent(new CustomEvent('connection', { + detail: conn + })) + + // remote peer disconnects + await conn.close() + await libp2p.components.getUpgrader().dispatchEvent(new CustomEvent('connectionEnd', { + detail: conn + })) + + // Wait for handlers to be called + return await Promise.all([ + onConnectDefer.promise, + onDisconnectDefer.promise + ]) + }) + + it('should call onConnect handler after register, once a peer is connected and protocols are updated', async () => { + const onConnectDefer = pDefer() + const onDisconnectDefer = pDefer() + + // Setup connections before registrar + const remotePeerId = await createEd25519PeerId() + const conn = mockConnection(mockMultiaddrConnection(mockDuplex(), remotePeerId)) + + const topology = createTopology({ + onConnect: () => { + onConnectDefer.resolve() + }, + onDisconnect: () => { + onDisconnectDefer.resolve() + } + }) + + await libp2p.start() + + // Register protocol + await libp2p.components.getRegistrar().register(protocol, topology) + + // Add connected peer to peerStore and registrar + await libp2p.peerStore.protoBook.set(remotePeerId, []) + + // Add protocol to peer and update it + await libp2p.peerStore.protoBook.add(remotePeerId, [protocol]) + + await libp2p.components.getUpgrader().dispatchEvent(new CustomEvent('connection', { + detail: conn + })) + + await onConnectDefer.promise + + // Peer no longer supports the protocol our topology is registered for + await libp2p.peerStore.protoBook.set(remotePeerId, []) + + await onDisconnectDefer.promise + }) + + it('should be able to register and unregister a handler', async () => { + libp2p = await createLibp2pNode({ + peerId: await createEd25519PeerId(), + transports: [ + new WebSockets() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Plaintext() + ] + }) + + const registrar = libp2p.components.getRegistrar() + + expect(registrar.getProtocols()).to.not.have.any.keys(['/echo/1.0.0', '/echo/1.0.1']) + + const echoHandler = () => {} + await libp2p.handle(['/echo/1.0.0', '/echo/1.0.1'], echoHandler) + expect(registrar.getHandler('/echo/1.0.0')).to.equal(echoHandler) + expect(registrar.getHandler('/echo/1.0.1')).to.equal(echoHandler) + + await libp2p.unhandle(['/echo/1.0.0']) + expect(registrar.getProtocols()).to.not.have.any.keys(['/echo/1.0.0']) + expect(registrar.getHandler('/echo/1.0.1')).to.equal(echoHandler) + }) + }) +}) diff --git a/test/relay/auto-relay.node.js b/test/relay/auto-relay.node.ts similarity index 56% rename from test/relay/auto-relay.node.js rename to test/relay/auto-relay.node.ts index 2316563d..53e8a1b2 100644 --- a/test/relay/auto-relay.node.js +++ b/test/relay/auto-relay.node.ts @@ -1,28 +1,25 @@ -'use strict' /* eslint-env mocha */ -const { expect } = require('aegir/utils/chai') -const defer = require('p-defer') -const pWaitFor = require('p-wait-for') -const sinon = require('sinon') -const nock = require('nock') +import { expect } from 'aegir/utils/chai.js' +import defer from 'p-defer' 
+import pWaitFor from 'p-wait-for' +import sinon from 'sinon' +import nock from 'nock' +import { create as createIpfsHttpClient } from 'ipfs-http-client' +import { DelegatedContentRouting } from '@libp2p/delegated-content-routing' +import { RELAY_CODEC } from '../../src/circuit/multicodec.js' +import { createNode } from '../utils/creators/peer.js' +import type { Libp2pNode } from '../../src/libp2p.js' +import type { Options as PWaitForOptions } from 'p-wait-for' +import type Sinon from 'sinon' +import { createRelayOptions, createNodeOptions } from './utils.js' +import { protocols } from '@multiformats/multiaddr' -const ipfsHttpClient = require('ipfs-http-client') -const DelegatedContentRouter = require('libp2p-delegated-content-routing') -const { Multiaddr } = require('multiaddr') -const Libp2p = require('../../src') -const { relay: relayMulticodec } = require('../../src/circuit/multicodec') - -const { createPeerId } = require('../utils/creators/peer') -const baseOptions = require('../utils/base-options') - -const listenAddr = '/ip4/0.0.0.0/tcp/0' - -async function usingAsRelay (node, relay, opts) { +async function usingAsRelay (node: Libp2pNode, relay: Libp2pNode, opts?: PWaitForOptions) { // Wait for peer to be used as a relay await pWaitFor(() => { - for (const addr of node.multiaddrs) { - if (addr.toString().includes(`${relay.peerId.toB58String()}/p2p-circuit`)) { + for (const addr of node.getMultiaddrs()) { + if (addr.toString().includes(`${relay.peerId.toString()}/p2p-circuit`)) { return true } } @@ -31,13 +28,11 @@ async function usingAsRelay (node, relay, opts) { }, opts) } -async function discoveredRelayConfig (node, relay) { +async function discoveredRelayConfig (node: Libp2pNode, relay: Libp2pNode) { await pWaitFor(async () => { - const protos = await node.peerStore.protoBook.get(relay.peerId) - const supportsRelay = protos.includes('/libp2p/circuit/relay/0.1.0') - - const metadata = await node.peerStore.metadataBook.get(relay.peerId) - const supportsHop = metadata.has('hop_relay') + const peerData = await node.peerStore.get(relay.peerId) + const supportsRelay = peerData.protocols.includes(RELAY_CODEC) + const supportsHop = peerData.metadata.has('hop_relay') return supportsRelay && supportsHop }) @@ -45,58 +40,32 @@ async function discoveredRelayConfig (node, relay) { describe('auto-relay', () => { describe('basics', () => { - let libp2p - let relayLibp2p + let libp2p: Libp2pNode + let relayLibp2p: Libp2pNode beforeEach(async () => { - const peerIds = await createPeerId({ number: 2 }) // Create 2 nodes, and turn HOP on for the relay - ;[libp2p, relayLibp2p] = peerIds.map((peerId, index) => { - const opts = { - ...baseOptions, - config: { - ...baseOptions.config, - relay: { - hop: { - enabled: index !== 0 - }, - autoRelay: { - enabled: true, - maxListeners: 1 - } - } - } - } - - return new Libp2p({ - ...opts, - addresses: { - listen: [listenAddr] - }, - connectionManager: { - autoDial: false - }, - peerDiscovery: { - autoDial: false - }, - peerId - }) + libp2p = await createNode({ + config: createNodeOptions() + }) + relayLibp2p = await createNode({ + config: createRelayOptions() }) }) - beforeEach(() => { + beforeEach(async () => { // Start each node - return Promise.all([libp2p, relayLibp2p].map(libp2p => libp2p.start())) + return await Promise.all([libp2p, relayLibp2p].map(async libp2p => await libp2p.start())) }) - afterEach(() => { + afterEach(async () => { // Stop each node - return Promise.all([libp2p, relayLibp2p].map(libp2p => libp2p.stop())) + return await 
Promise.all([libp2p, relayLibp2p].map(async libp2p => await libp2p.stop())) }) it('should ask if node supports hop on protocol change (relay protocol) and add to listen multiaddrs', async () => { // Discover relay - await libp2p.peerStore.addressBook.add(relayLibp2p.peerId, relayLibp2p.multiaddrs) + await libp2p.peerStore.addressBook.add(relayLibp2p.peerId, relayLibp2p.getMultiaddrs()) await libp2p.dial(relayLibp2p.peerId) // Wait for peer added as listen relay @@ -107,69 +76,37 @@ describe('auto-relay', () => { // Peer has relay multicodec const knownProtocols = await libp2p.peerStore.protoBook.get(relayLibp2p.peerId) - expect(knownProtocols).to.include(relayMulticodec) + expect(knownProtocols).to.include(RELAY_CODEC) }) }) describe('flows with 1 listener max', () => { - let libp2p - let relayLibp2p1 - let relayLibp2p2 - let relayLibp2p3 + let libp2p: Libp2pNode + let relayLibp2p1: Libp2pNode + let relayLibp2p2: Libp2pNode + let relayLibp2p3: Libp2pNode beforeEach(async () => { - const peerIds = await createPeerId({ number: 4 }) // Create 4 nodes, and turn HOP on for the relay - ;[libp2p, relayLibp2p1, relayLibp2p2, relayLibp2p3] = peerIds.map((peerId, index) => { - let opts = baseOptions + [libp2p, relayLibp2p1, relayLibp2p2, relayLibp2p3] = await Promise.all([ + createNode({ config: createNodeOptions() }), + createNode({ config: createRelayOptions() }), + createNode({ config: createRelayOptions() }), + createNode({ config: createRelayOptions() }) + ]) - if (index !== 0) { - opts = { - ...baseOptions, - config: { - ...baseOptions.config, - relay: { - hop: { - enabled: true - }, - autoRelay: { - enabled: true, - maxListeners: 1 - } - } - } - } - } - - return new Libp2p({ - ...opts, - addresses: { - listen: [listenAddr] - }, - connectionManager: { - autoDial: false - }, - peerDiscovery: { - autoDial: false - }, - peerId - }) - }) - }) - - beforeEach(() => { // Start each node - return Promise.all([libp2p, relayLibp2p1, relayLibp2p2, relayLibp2p3].map(libp2p => libp2p.start())) + await Promise.all([libp2p, relayLibp2p1, relayLibp2p2, relayLibp2p3].map(async libp2p => await libp2p.start())) }) - afterEach(() => { + afterEach(async () => { // Stop each node - return Promise.all([libp2p, relayLibp2p1, relayLibp2p2, relayLibp2p3].map(libp2p => libp2p.stop())) + return await Promise.all([libp2p, relayLibp2p1, relayLibp2p2, relayLibp2p3].map(async libp2p => await libp2p.stop())) }) it('should ask if node supports hop on protocol change (relay protocol) and add to listen multiaddrs', async () => { // Discover relay - await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.multiaddrs) + await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.getMultiaddrs()) await relayLibp2p1.dial(relayLibp2p2.peerId) await discoveredRelayConfig(relayLibp2p1, relayLibp2p2) @@ -178,12 +115,12 @@ describe('auto-relay', () => { // Peer has relay multicodec const knownProtocols = await relayLibp2p1.peerStore.protoBook.get(relayLibp2p2.peerId) - expect(knownProtocols).to.include(relayMulticodec) + expect(knownProtocols).to.include(RELAY_CODEC) }) it('should be able to dial a peer from its relayed address previously added', async () => { // Discover relay - await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.multiaddrs) + await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.getMultiaddrs()) await relayLibp2p1.dial(relayLibp2p2.peerId) await discoveredRelayConfig(relayLibp2p1, relayLibp2p2) @@ -191,14 +128,14 @@ 
describe('auto-relay', () => { await usingAsRelay(relayLibp2p1, relayLibp2p2) // Dial from the other through a relay - const relayedMultiaddr2 = new Multiaddr(`${relayLibp2p1.multiaddrs[0]}/p2p/${relayLibp2p1.peerId.toB58String()}/p2p-circuit`) + const relayedMultiaddr2 = relayLibp2p1.getMultiaddrs()[0].encapsulate('/p2p-circuit') await libp2p.peerStore.addressBook.add(relayLibp2p2.peerId, [relayedMultiaddr2]) await libp2p.dial(relayLibp2p2.peerId) }) it('should only add maxListeners relayed addresses', async () => { // Discover one relay and connect - await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.multiaddrs) + await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.getMultiaddrs()) await relayLibp2p1.dial(relayLibp2p2.peerId) await discoveredRelayConfig(relayLibp2p1, relayLibp2p2) @@ -207,10 +144,10 @@ describe('auto-relay', () => { // Relay2 has relay multicodec const knownProtocols2 = await relayLibp2p1.peerStore.protoBook.get(relayLibp2p2.peerId) - expect(knownProtocols2).to.include(relayMulticodec) + expect(knownProtocols2).to.include(RELAY_CODEC) // Discover an extra relay and connect - await relayLibp2p1.peerStore.addressBook.add(relayLibp2p3.peerId, relayLibp2p3.multiaddrs) + await relayLibp2p1.peerStore.addressBook.add(relayLibp2p3.peerId, relayLibp2p3.getMultiaddrs()) await relayLibp2p1.dial(relayLibp2p3.peerId) await discoveredRelayConfig(relayLibp2p1, relayLibp2p3) @@ -221,15 +158,19 @@ describe('auto-relay', () => { // Relay2 has relay multicodec const knownProtocols3 = await relayLibp2p1.peerStore.protoBook.get(relayLibp2p3.peerId) - expect(knownProtocols3).to.include(relayMulticodec) + expect(knownProtocols3).to.include(RELAY_CODEC) }) it('should not listen on a relayed address we disconnect from peer', async () => { + if (relayLibp2p1.identifyService == null) { + throw new Error('Identify service not configured') + } + // Spy if identify push is fired on adding/removing listen addr sinon.spy(relayLibp2p1.identifyService, 'pushToPeerStore') // Discover one relay and connect - await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.multiaddrs) + await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.getMultiaddrs()) await relayLibp2p1.dial(relayLibp2p2.peerId) await discoveredRelayConfig(relayLibp2p1, relayLibp2p2) @@ -247,13 +188,13 @@ describe('auto-relay', () => { it('should try to listen on other connected peers relayed address if one used relay disconnects', async () => { // Discover one relay and connect - await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.multiaddrs) + await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.getMultiaddrs()) await relayLibp2p1.dial(relayLibp2p2.peerId) await discoveredRelayConfig(relayLibp2p1, relayLibp2p2) await usingAsRelay(relayLibp2p1, relayLibp2p2) // Discover an extra relay and connect - await relayLibp2p1.peerStore.addressBook.add(relayLibp2p3.peerId, relayLibp2p3.multiaddrs) + await relayLibp2p1.peerStore.addressBook.add(relayLibp2p3.peerId, relayLibp2p3.getMultiaddrs()) await relayLibp2p1.dial(relayLibp2p3.peerId) await discoveredRelayConfig(relayLibp2p1, relayLibp2p3) @@ -276,14 +217,14 @@ describe('auto-relay', () => { it('should try to listen on stored peers relayed address if one used relay disconnects and there are not enough connected', async () => { // Discover one relay and connect - await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, 
relayLibp2p2.multiaddrs) + await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.getMultiaddrs()) await relayLibp2p1.dial(relayLibp2p2.peerId) // Wait for peer to be used as a relay await usingAsRelay(relayLibp2p1, relayLibp2p2) // Discover an extra relay and connect to gather its Hop support - await relayLibp2p1.peerStore.addressBook.add(relayLibp2p3.peerId, relayLibp2p3.multiaddrs) + await relayLibp2p1.peerStore.addressBook.add(relayLibp2p3.peerId, relayLibp2p3.getMultiaddrs()) await relayLibp2p1.dial(relayLibp2p3.peerId) // wait for identify for newly dialled peer @@ -295,7 +236,7 @@ describe('auto-relay', () => { // Remove peer used as relay from peerStore and disconnect it await relayLibp2p1.hangUp(relayLibp2p2.peerId) await relayLibp2p1.peerStore.delete(relayLibp2p2.peerId) - await pWaitFor(() => relayLibp2p1.connectionManager.size === 0) + await pWaitFor(() => relayLibp2p1.getConnections().length === 0) // Wait for other peer connected to be added as listen addr await usingAsRelay(relayLibp2p1, relayLibp2p3) @@ -305,11 +246,11 @@ describe('auto-relay', () => { const deferred = defer() // Discover one relay and connect - await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.multiaddrs) + await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, relayLibp2p2.getMultiaddrs()) await relayLibp2p1.dial(relayLibp2p2.peerId) // Discover an extra relay and connect to gather its Hop support - await relayLibp2p1.peerStore.addressBook.add(relayLibp2p3.peerId, relayLibp2p3.multiaddrs) + await relayLibp2p1.peerStore.addressBook.add(relayLibp2p3.peerId, relayLibp2p3.getMultiaddrs()) await relayLibp2p1.dial(relayLibp2p3.peerId) // Wait for peer to be used as a relay @@ -322,15 +263,15 @@ describe('auto-relay', () => { await relayLibp2p1.hangUp(relayLibp2p3.peerId) // Stub dial - sinon.stub(relayLibp2p1, 'dial').callsFake(() => { + sinon.stub(relayLibp2p1.components.getDialer(), 'dial').callsFake(async () => { deferred.resolve() - return Promise.reject(new Error('failed to dial')) + return await Promise.reject(new Error('failed to dial')) }) // Remove peer used as relay from peerStore and disconnect it await relayLibp2p1.hangUp(relayLibp2p2.peerId) await relayLibp2p1.peerStore.delete(relayLibp2p2.peerId) - expect(relayLibp2p1.connectionManager.size).to.equal(0) + expect(relayLibp2p1.getConnections()).to.be.empty() // Wait for failed dial await deferred.promise @@ -338,66 +279,40 @@ describe('auto-relay', () => { }) describe('flows with 2 max listeners', () => { - let relayLibp2p1 - let relayLibp2p2 - let relayLibp2p3 + let relayLibp2p1: Libp2pNode + let relayLibp2p2: Libp2pNode + let relayLibp2p3: Libp2pNode beforeEach(async () => { - const peerIds = await createPeerId({ number: 3 }) // Create 3 nodes, and turn HOP on for the relay - ;[relayLibp2p1, relayLibp2p2, relayLibp2p3] = peerIds.map((peerId) => { - return new Libp2p({ - ...baseOptions, - config: { - ...baseOptions.config, - relay: { - ...baseOptions.config.relay, - hop: { - enabled: true - }, - autoRelay: { - enabled: true, - maxListeners: 2 - } - } - }, - addresses: { - listen: [listenAddr] - }, - connectionManager: { - autoDial: false - }, - peerDiscovery: { - autoDial: false - }, - peerId - }) - }) - }) + [relayLibp2p1, relayLibp2p2, relayLibp2p3] = await Promise.all([ + createNode({ config: createRelayOptions() }), + createNode({ config: createRelayOptions() }), + createNode({ config: createRelayOptions() }) + ]) - beforeEach(() => { // Start each node - return 
Promise.all([relayLibp2p1, relayLibp2p2, relayLibp2p3].map(libp2p => libp2p.start())) + await Promise.all([relayLibp2p1, relayLibp2p2, relayLibp2p3].map(async libp2p => await libp2p.start())) }) - afterEach(() => { + afterEach(async () => { // Stop each node - return Promise.all([relayLibp2p1, relayLibp2p2, relayLibp2p3].map(libp2p => libp2p.stop())) + return await Promise.all([relayLibp2p1, relayLibp2p2, relayLibp2p3].map(async libp2p => await libp2p.stop())) }) it('should not add listener to a already relayed connection', async () => { // Relay 1 discovers Relay 3 and connect - await relayLibp2p1.peerStore.addressBook.add(relayLibp2p3.peerId, relayLibp2p3.multiaddrs) + await relayLibp2p1.peerStore.addressBook.add(relayLibp2p3.peerId, relayLibp2p3.getMultiaddrs()) await relayLibp2p1.dial(relayLibp2p3.peerId) await usingAsRelay(relayLibp2p1, relayLibp2p3) // Relay 2 discovers Relay 3 and connect - await relayLibp2p2.peerStore.addressBook.add(relayLibp2p3.peerId, relayLibp2p3.multiaddrs) + await relayLibp2p2.peerStore.addressBook.add(relayLibp2p3.peerId, relayLibp2p3.getMultiaddrs()) await relayLibp2p2.dial(relayLibp2p3.peerId) await usingAsRelay(relayLibp2p2, relayLibp2p3) // Relay 1 discovers Relay 2 relayed multiaddr via Relay 3 - const ma2RelayedBy3 = relayLibp2p2.multiaddrs[relayLibp2p2.multiaddrs.length - 1] + const ma2RelayedBy3 = relayLibp2p2.getMultiaddrs()[relayLibp2p2.getMultiaddrs().length - 1] await relayLibp2p1.peerStore.addressBook.add(relayLibp2p2.peerId, [ma2RelayedBy3]) await relayLibp2p1.dial(relayLibp2p2.peerId) @@ -409,64 +324,54 @@ describe('auto-relay', () => { }) describe('discovery', () => { - let local - let remote - let relayLibp2p + let local: Libp2pNode + let remote: Libp2pNode + let relayLibp2p: Libp2pNode + let contentRoutingProvideSpy: Sinon.SinonSpy beforeEach(async () => { - const peerIds = await createPeerId({ number: 3 }) + const delegate = new DelegatedContentRouting(createIpfsHttpClient({ + host: '0.0.0.0', + protocol: 'http', + port: 60197 + })) - // Create 2 nodes, and turn HOP on for the relay - ;[local, remote, relayLibp2p] = peerIds.map((peerId, index) => { - const delegate = new DelegatedContentRouter(peerId, ipfsHttpClient.create({ - host: '0.0.0.0', - protocol: 'http', - port: 60197 - }), [ - new Multiaddr('/ip4/0.0.0.0/tcp/60197') - ]) - - const opts = { - ...baseOptions, - config: { - ...baseOptions.config, + ;[local, remote, relayLibp2p] = await Promise.all([ + createNode({ + config: createNodeOptions({ + contentRouters: [ + delegate + ] + }) + }), + createNode({ + config: createNodeOptions({ + contentRouters: [ + delegate + ] + }) + }), + createNode({ + config: createRelayOptions({ relay: { advertise: { bootDelay: 1000, ttl: 1000, enabled: true }, - hop: { - enabled: index === 2 - }, autoRelay: { enabled: true, maxListeners: 1 } - } - } - } - - return new Libp2p({ - ...opts, - modules: { - ...opts.modules, - contentRouting: [delegate] - }, - addresses: { - listen: [listenAddr] - }, - connectionManager: { - autoDial: false - }, - peerDiscovery: { - autoDial: false - }, - peerId + }, + contentRouters: [ + delegate + ] + }) }) - }) + ]) - sinon.spy(relayLibp2p.contentRouting, 'provide') + contentRoutingProvideSpy = sinon.spy(relayLibp2p.contentRouting, 'provide') }) beforeEach(async () => { @@ -474,19 +379,19 @@ describe('auto-relay', () => { // mock the refs call .post('/api/v0/refs') .query(true) - .reply(200, null, [ + .reply(200, undefined, [ 'Content-Type', 'application/json', 'X-Chunked-Output', '1' ]) // Start each node - await 
Promise.all([local, remote, relayLibp2p].map(libp2p => libp2p.start())) + await Promise.all([local, remote, relayLibp2p].map(async libp2p => await libp2p.start())) // Should provide on start - await pWaitFor(() => relayLibp2p.contentRouting.provide.callCount === 1) + await pWaitFor(() => contentRoutingProvideSpy.callCount === 1) - const provider = relayLibp2p.peerId.toB58String() - const multiaddrs = relayLibp2p.multiaddrs.map((m) => m.toString()) + const provider = relayLibp2p.peerId.toString() + const multiaddrs = relayLibp2p.getMultiaddrs().map(ma => ma.decapsulateCode(protocols('p2p').code)) // Mock findProviders nock('http://0.0.0.0:60197') @@ -498,34 +403,44 @@ describe('auto-relay', () => { ]) }) - afterEach(() => { + afterEach(async () => { // Stop each node - return Promise.all([local, remote, relayLibp2p].map(libp2p => libp2p.stop())) + return await Promise.all([local, remote, relayLibp2p].map(async libp2p => await libp2p.stop())) }) it('should find providers for relay and add it as listen relay', async () => { - const originalMultiaddrsLength = local.multiaddrs.length + const originalMultiaddrsLength = local.getMultiaddrs().length - // Spy add listen relay - sinon.spy(local.relay._autoRelay, '_addListenRelay') // Spy Find Providers - sinon.spy(local.contentRouting, 'findProviders') + const contentRoutingFindProvidersSpy = sinon.spy(local.contentRouting, 'findProviders') - // Try to listen on Available hop relays - await local.relay._autoRelay._listenOnAvailableHopRelays() + const relayAddr = relayLibp2p.getMultiaddrs().pop() + + if (relayAddr == null) { + throw new Error('Relay had no addresses') + } + + // connect to relay + await local.dial(relayAddr) + + // should start using the relay + await usingAsRelay(local, relayLibp2p) + + // disconnect from relay, should start looking for new relays + await local.hangUp(relayAddr) // Should try to find relay service providers - await pWaitFor(() => local.contentRouting.findProviders.callCount === 1) - // Wait for peer added as listen relay - await pWaitFor(() => local.relay._autoRelay._addListenRelay.callCount === 1) - expect(local.relay._autoRelay._listenRelays.size).to.equal(1) - await pWaitFor(() => local.multiaddrs.length === originalMultiaddrsLength + 1) + await pWaitFor(() => contentRoutingFindProvidersSpy.callCount === 1) - const relayedAddr = local.multiaddrs[local.multiaddrs.length - 1] + // Wait for peer added as listen relay + await pWaitFor(() => local.getMultiaddrs().length === originalMultiaddrsLength + 1) + + const relayedAddr = local.getMultiaddrs()[local.getMultiaddrs().length - 1] await remote.peerStore.addressBook.set(local.peerId, [relayedAddr]) // Dial from remote through the relayed address const conn = await remote.dial(local.peerId) + expect(conn).to.exist() }) }) diff --git a/test/relay/relay.node.js b/test/relay/relay.node.js deleted file mode 100644 index e9c12078..00000000 --- a/test/relay/relay.node.js +++ /dev/null @@ -1,165 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') - -const { Multiaddr } = require('multiaddr') -const { collect } = require('streaming-iterables') -const pipe = require('it-pipe') -const AggregateError = require('aggregate-error') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') - -const { createPeerId } = require('../utils/creators/peer') -const baseOptions = require('../utils/base-options') -const Libp2p = require('../../src') -const { codes: Errors } = 
require('../../src/errors') - -const listenAddr = '/ip4/0.0.0.0/tcp/0' - -describe('Dialing (via relay, TCP)', () => { - let srcLibp2p - let relayLibp2p - let dstLibp2p - - beforeEach(async () => { - const peerIds = await createPeerId({ number: 3 }) - // Create 3 nodes, and turn HOP on for the relay - ;[srcLibp2p, relayLibp2p, dstLibp2p] = peerIds.map((peerId, index) => { - const opts = baseOptions - index === 1 && (opts.config.relay.hop.enabled = true) - return new Libp2p({ - ...opts, - addresses: { - listen: [listenAddr] - }, - peerId - }) - }) - - dstLibp2p.handle('/echo/1.0.0', ({ stream }) => pipe(stream, stream)) - }) - - beforeEach(() => { - // Start each node - return Promise.all([srcLibp2p, relayLibp2p, dstLibp2p].map(libp2p => libp2p.start())) - }) - - afterEach(async () => { - // Stop each node - return Promise.all([srcLibp2p, relayLibp2p, dstLibp2p].map(libp2p => libp2p.stop())) - }) - - it('should be able to connect to a peer over a relay with active connections', async () => { - const relayAddr = relayLibp2p.transportManager.getAddrs()[0] - const relayIdString = relayLibp2p.peerId.toB58String() - - const dialAddr = relayAddr - .encapsulate(`/p2p/${relayIdString}`) - .encapsulate(`/p2p-circuit/p2p/${dstLibp2p.peerId.toB58String()}`) - - const tcpAddrs = dstLibp2p.transportManager.getAddrs() - sinon.stub(dstLibp2p.addressManager, 'listen').value([new Multiaddr(`/p2p-circuit${relayAddr}/p2p/${relayIdString}`)]) - - await dstLibp2p.transportManager.listen(dstLibp2p.addressManager.getListenAddrs()) - expect(dstLibp2p.transportManager.getAddrs()).to.have.deep.members([...tcpAddrs, dialAddr.decapsulate('p2p')]) - - const connection = await srcLibp2p.dial(dialAddr) - expect(connection).to.exist() - expect(connection.remotePeer.toBytes()).to.eql(dstLibp2p.peerId.toBytes()) - expect(connection.localPeer.toBytes()).to.eql(srcLibp2p.peerId.toBytes()) - expect(connection.remoteAddr).to.eql(dialAddr) - expect(connection.localAddr).to.eql( - relayAddr // the relay address - .encapsulate(`/p2p/${relayIdString}`) // with its peer id - .encapsulate('/p2p-circuit') // the local peer is connected over the relay - .encapsulate(`/p2p/${srcLibp2p.peerId.toB58String()}`) // and the local peer id - ) - - const { stream: echoStream } = await connection.newStream('/echo/1.0.0') - const input = uint8ArrayFromString('hello') - const [output] = await pipe( - [input], - echoStream, - collect - ) - - expect(output.slice()).to.eql(input) - }) - - it('should fail to connect to a peer over a relay with inactive connections', async () => { - const relayAddr = relayLibp2p.transportManager.getAddrs()[0] - const relayIdString = relayLibp2p.peerId.toB58String() - - const dialAddr = relayAddr - .encapsulate(`/p2p/${relayIdString}`) - .encapsulate(`/p2p-circuit/p2p/${dstLibp2p.peerId.toB58String()}`) - - await expect(srcLibp2p.dial(dialAddr)) - .to.eventually.be.rejectedWith(AggregateError) - .and.to.have.nested.property('._errors[0].code', Errors.ERR_HOP_REQUEST_FAILED) - }) - - it('should not stay connected to a relay when not already connected and HOP fails', async () => { - const relayAddr = relayLibp2p.transportManager.getAddrs()[0] - const relayIdString = relayLibp2p.peerId.toB58String() - - const dialAddr = relayAddr - .encapsulate(`/p2p/${relayIdString}`) - .encapsulate(`/p2p-circuit/p2p/${dstLibp2p.peerId.toB58String()}`) - - await expect(srcLibp2p.dial(dialAddr)) - .to.eventually.be.rejectedWith(AggregateError) - .and.to.have.nested.property('._errors[0].code', Errors.ERR_HOP_REQUEST_FAILED) - - // We 
should not be connected to the relay, because we weren't before the dial - const srcToRelayConn = srcLibp2p.connectionManager.get(relayLibp2p.peerId) - expect(srcToRelayConn).to.not.exist() - }) - - it('dialer should stay connected to an already connected relay on hop failure', async () => { - const relayIdString = relayLibp2p.peerId.toB58String() - const relayAddr = relayLibp2p.transportManager.getAddrs()[0].encapsulate(`/p2p/${relayIdString}`) - - const dialAddr = relayAddr - .encapsulate(`/p2p-circuit/p2p/${dstLibp2p.peerId.toB58String()}`) - - await srcLibp2p.dial(relayAddr) - - await expect(srcLibp2p.dial(dialAddr)) - .to.eventually.be.rejectedWith(AggregateError) - .and.to.have.nested.property('._errors[0].code', Errors.ERR_HOP_REQUEST_FAILED) - - const srcToRelayConn = srcLibp2p.connectionManager.get(relayLibp2p.peerId) - expect(srcToRelayConn).to.exist() - expect(srcToRelayConn.stat.status).to.equal('open') - }) - - it('destination peer should stay connected to an already connected relay on hop failure', async () => { - const relayIdString = relayLibp2p.peerId.toB58String() - const relayAddr = relayLibp2p.transportManager.getAddrs()[0].encapsulate(`/p2p/${relayIdString}`) - - const dialAddr = relayAddr - .encapsulate(`/p2p-circuit/p2p/${dstLibp2p.peerId.toB58String()}`) - - // Connect the destination peer and the relay - const tcpAddrs = dstLibp2p.transportManager.getAddrs() - sinon.stub(dstLibp2p.addressManager, 'getListenAddrs').returns([new Multiaddr(`${relayAddr}/p2p-circuit`)]) - - await dstLibp2p.transportManager.listen(dstLibp2p.addressManager.getListenAddrs()) - expect(dstLibp2p.transportManager.getAddrs()).to.have.deep.members([...tcpAddrs, dialAddr.decapsulate('p2p')]) - - // Tamper with the our multiaddrs for the circuit message - sinon.stub(srcLibp2p, 'multiaddrs').value([{ - bytes: uint8ArrayFromString('an invalid multiaddr') - }]) - - await expect(srcLibp2p.dial(dialAddr)) - .to.eventually.be.rejectedWith(AggregateError) - .and.to.have.nested.property('._errors[0].code', Errors.ERR_HOP_REQUEST_FAILED) - - const dstToRelayConn = dstLibp2p.connectionManager.get(relayLibp2p.peerId) - expect(dstToRelayConn).to.exist() - expect(dstToRelayConn.stat.status).to.equal('open') - }) -}) diff --git a/test/relay/relay.node.ts b/test/relay/relay.node.ts new file mode 100644 index 00000000..7061f5db --- /dev/null +++ b/test/relay/relay.node.ts @@ -0,0 +1,173 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import { Multiaddr } from '@multiformats/multiaddr' +import { pipe } from 'it-pipe' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { createNode } from '../utils/creators/peer.js' +import { codes as Errors } from '../../src/errors.js' +import type { Libp2pNode } from '../../src/libp2p.js' +import all from 'it-all' +import { RELAY_CODEC } from '../../src/circuit/multicodec.js' +import { StreamHandler } from '../../src/circuit/circuit/stream-handler.js' +import { CircuitRelay } from '../../src/circuit/pb/index.js' +import { createNodeOptions, createRelayOptions } from './utils.js' + +describe('Dialing (via relay, TCP)', () => { + let srcLibp2p: Libp2pNode + let relayLibp2p: Libp2pNode + let dstLibp2p: Libp2pNode + + beforeEach(async () => { + // Create 3 nodes, and turn HOP on for the relay + [srcLibp2p, relayLibp2p, dstLibp2p] = await Promise.all([ + createNode({ + config: createNodeOptions({ + relay: { + autoRelay: { + enabled: false + } + } + }) + }), + createNode({ + config: 
createRelayOptions({ + relay: { + autoRelay: { + enabled: false + } + } + }) + }), + createNode({ + config: createNodeOptions({ + relay: { + autoRelay: { + enabled: false + } + } + }) + }) + ]) + + await dstLibp2p.handle('/echo/1.0.0', ({ stream }) => { + void pipe(stream, stream) + }) + + // Start each node + await Promise.all([srcLibp2p, relayLibp2p, dstLibp2p].map(async libp2p => await libp2p.start())) + }) + + afterEach(async () => { + // Stop each node + return await Promise.all([srcLibp2p, relayLibp2p, dstLibp2p].map(async libp2p => await libp2p.stop())) + }) + + it('should be able to connect to a peer over a relay with active connections', async () => { + const relayAddr = relayLibp2p.components.getTransportManager().getAddrs()[0] + const relayIdString = relayLibp2p.peerId.toString() + + const dialAddr = relayAddr + .encapsulate(`/p2p/${relayIdString}`) + .encapsulate(`/p2p-circuit/p2p/${dstLibp2p.peerId.toString()}`) + + await relayLibp2p.dial(dstLibp2p.getMultiaddrs()[0]) + + const connection = await srcLibp2p.dial(dialAddr) + + expect(connection).to.exist() + expect(connection.remotePeer.toBytes()).to.eql(dstLibp2p.peerId.toBytes()) + expect(connection.remoteAddr).to.eql(dialAddr) + + const { stream: echoStream } = await connection.newStream('/echo/1.0.0') + + const input = uint8ArrayFromString('hello') + const [output] = await pipe( + [input], + echoStream, + async (source) => await all(source) + ) + + expect(output.slice()).to.eql(input) + }) + + it('should fail to connect to a peer over a relay with inactive connections', async () => { + const relayAddr = relayLibp2p.components.getTransportManager().getAddrs()[0] + const relayIdString = relayLibp2p.peerId.toString() + + const dialAddr = relayAddr + .encapsulate(`/p2p/${relayIdString}`) + .encapsulate(`/p2p-circuit/p2p/${dstLibp2p.peerId.toString()}`) + + await expect(srcLibp2p.dial(dialAddr)) + .to.eventually.be.rejected() + .and.to.have.nested.property('.errors[0].code', Errors.ERR_HOP_REQUEST_FAILED) + }) + + it('should not stay connected to a relay when not already connected and HOP fails', async () => { + const relayAddr = relayLibp2p.components.getTransportManager().getAddrs()[0] + const relayIdString = relayLibp2p.peerId.toString() + + const dialAddr = relayAddr + .encapsulate(`/p2p/${relayIdString}`) + .encapsulate(`/p2p-circuit/p2p/${dstLibp2p.peerId.toString()}`) + + await expect(srcLibp2p.dial(dialAddr)) + .to.eventually.be.rejected() + .and.to.have.nested.property('.errors[0].code', Errors.ERR_HOP_REQUEST_FAILED) + + // We should not be connected to the relay, because we weren't before the dial + const srcToRelayConn = srcLibp2p.components.getConnectionManager().getConnection(relayLibp2p.peerId) + expect(srcToRelayConn).to.not.exist() + }) + + it('dialer should stay connected to an already connected relay on hop failure', async () => { + const relayIdString = relayLibp2p.peerId.toString() + const relayAddr = relayLibp2p.components.getTransportManager().getAddrs()[0].encapsulate(`/p2p/${relayIdString}`) + + const dialAddr = relayAddr + .encapsulate(`/p2p-circuit/p2p/${dstLibp2p.peerId.toString()}`) + + await srcLibp2p.dial(relayAddr) + + await expect(srcLibp2p.dial(dialAddr)) + .to.eventually.be.rejected() + .and.to.have.nested.property('.errors[0].code', Errors.ERR_HOP_REQUEST_FAILED) + + const srcToRelayConn = srcLibp2p.components.getConnectionManager().getConnection(relayLibp2p.peerId) + expect(srcToRelayConn).to.exist() + expect(srcToRelayConn?.stat.status).to.equal('OPEN') + }) + + it('destination peer should 
stay connected to an already connected relay on hop failure', async () => { + const relayIdString = relayLibp2p.peerId.toString() + const relayAddr = relayLibp2p.components.getTransportManager().getAddrs()[0].encapsulate(`/p2p/${relayIdString}`) + + const dialAddr = relayAddr + .encapsulate(`/p2p-circuit/p2p/${dstLibp2p.peerId.toString()}`) + + // Connect the destination peer and the relay + const tcpAddrs = dstLibp2p.components.getTransportManager().getAddrs() + sinon.stub(dstLibp2p.components.getAddressManager(), 'getListenAddrs').returns([new Multiaddr(`${relayAddr.toString()}/p2p-circuit`)]) + + await dstLibp2p.components.getTransportManager().listen(dstLibp2p.components.getAddressManager().getListenAddrs()) + expect(dstLibp2p.components.getTransportManager().getAddrs()).to.have.deep.members([...tcpAddrs, dialAddr.decapsulate('p2p')]) + + // send an invalid relay message from the relay to the destination peer + const connections = relayLibp2p.getConnections(dstLibp2p.peerId) + const { stream } = await connections[0].newStream(RELAY_CODEC) + const streamHandler = new StreamHandler({ stream }) + streamHandler.write({ + type: CircuitRelay.Type.STATUS + }) + const res = await streamHandler.read() + expect(res?.code).to.equal(CircuitRelay.Status.MALFORMED_MESSAGE) + streamHandler.close() + + // should still be connected + const dstToRelayConn = dstLibp2p.components.getConnectionManager().getConnection(relayLibp2p.peerId) + expect(dstToRelayConn).to.exist() + expect(dstToRelayConn?.stat.status).to.equal('OPEN') + }) +}) diff --git a/test/relay/utils.ts b/test/relay/utils.ts new file mode 100644 index 00000000..3e78f26c --- /dev/null +++ b/test/relay/utils.ts @@ -0,0 +1,34 @@ +import type { Libp2pOptions } from '../../src/index.js' +import { createBaseOptions } from '../utils/base-options.js' + +const listenAddr = '/ip4/0.0.0.0/tcp/0' + +export function createNodeOptions (...overrides: Libp2pOptions[]): Libp2pOptions { + return createBaseOptions({ + addresses: { + listen: [listenAddr] + }, + connectionManager: { + autoDial: false + }, + relay: { + hop: { + enabled: false + }, + autoRelay: { + enabled: true, + maxListeners: 1 + } + } + }, ...overrides) +} + +export function createRelayOptions (...overrides: Libp2pOptions[]): Libp2pOptions { + return createNodeOptions({ + relay: { + hop: { + enabled: true + } + } + }, ...overrides) +} diff --git a/test/transports/transport-manager.node.js b/test/transports/transport-manager.node.js deleted file mode 100644 index d22d091d..00000000 --- a/test/transports/transport-manager.node.js +++ /dev/null @@ -1,106 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const { MemoryDatastore } = require('datastore-core/memory') -const AddressManager = require('../../src/address-manager') -const TransportManager = require('../../src/transport-manager') -const PeerStore = require('../../src/peer-store') -const PeerRecord = require('../../src/record/peer-record') -const Transport = require('libp2p-tcp') -const PeerId = require('peer-id') -const { Multiaddr } = require('multiaddr') -const mockUpgrader = require('../utils/mockUpgrader') -const sinon = require('sinon') -const Peers = require('../fixtures/peers') -const pWaitFor = require('p-wait-for') -const { mockConnectionGater } = require('../utils/mock-connection-gater') -const addrs = [ - new Multiaddr('/ip4/127.0.0.1/tcp/0'), - new Multiaddr('/ip4/127.0.0.1/tcp/0') -] - -describe('Transport Manager (TCP)', () => { - const connectionGater = mockConnectionGater() - 
let tm - let localPeer - - before(async () => { - localPeer = await PeerId.createFromJSON(Peers[0]) - }) - - beforeEach(() => { - tm = new TransportManager({ - libp2p: { - peerId: localPeer, - multiaddrs: addrs, - addressManager: new AddressManager({ listen: addrs }), - peerStore: new PeerStore({ - peerId: localPeer, - datastore: new MemoryDatastore(), - addressFilter: connectionGater.filterMultiaddrForPeer - }) - }, - upgrader: mockUpgrader, - onConnection: () => {} - }) - }) - - afterEach(async () => { - await tm.removeAll() - expect(tm._transports.size).to.equal(0) - }) - - it('should be able to add and remove a transport', async () => { - tm.add(Transport.prototype[Symbol.toStringTag], Transport) - expect(tm._transports.size).to.equal(1) - await tm.remove(Transport.prototype[Symbol.toStringTag]) - }) - - it('should be able to listen', async () => { - tm.add(Transport.prototype[Symbol.toStringTag], Transport, { listenerOptions: { listen: 'carefully' } }) - const transport = tm._transports.get(Transport.prototype[Symbol.toStringTag]) - const spyListener = sinon.spy(transport, 'createListener') - await tm.listen(addrs) - expect(tm._listeners).to.have.key(Transport.prototype[Symbol.toStringTag]) - expect(tm._listeners.get(Transport.prototype[Symbol.toStringTag])).to.have.length(addrs.length) - - // Ephemeral ip addresses may result in multiple listeners - expect(tm.getAddrs().length).to.equal(addrs.length) - await tm.close() - expect(tm._listeners.get(Transport.prototype[Symbol.toStringTag])).to.have.length(0) - expect(spyListener.firstCall.firstArg).to.deep.equal({ listen: 'carefully' }) - }) - - it('should create self signed peer record on listen', async () => { - let signedPeerRecord = await tm.libp2p.peerStore.addressBook.getRawEnvelope(localPeer) - expect(signedPeerRecord).to.not.exist() - - tm.add(Transport.prototype[Symbol.toStringTag], Transport) - await tm.listen(addrs) - - // Should created Self Peer record on new listen address, but it is done async - // with no event so we have to wait a bit - await pWaitFor(async () => { - signedPeerRecord = await tm.libp2p.peerStore.addressBook.getPeerRecord(localPeer) - - return signedPeerRecord != null - }, { interval: 100, timeout: 2000 }) - - const record = PeerRecord.createFromProtobuf(signedPeerRecord.payload) - expect(record).to.exist() - expect(record.multiaddrs.length).to.equal(addrs.length) - addrs.forEach((a, i) => { - expect(record.multiaddrs[i].equals(a)).to.be.true() - }) - }) - - it('should be able to dial', async () => { - tm.add(Transport.prototype[Symbol.toStringTag], Transport) - await tm.listen(addrs) - const addr = tm.getAddrs().shift() - const connection = await tm.dial(addr) - expect(connection).to.exist() - await connection.close() - }) -}) diff --git a/test/transports/transport-manager.node.ts b/test/transports/transport-manager.node.ts new file mode 100644 index 00000000..0098382e --- /dev/null +++ b/test/transports/transport-manager.node.ts @@ -0,0 +1,123 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import { MemoryDatastore } from 'datastore-core/memory' +import { DefaultAddressManager } from '../../src/address-manager/index.js' +import { DefaultTransportManager } from '../../src/transport-manager.js' +import { PersistentPeerStore } from '@libp2p/peer-store' +import { PeerRecord } from '@libp2p/peer-record' +import { TCP } from '@libp2p/tcp' +import { Multiaddr } from '@multiformats/multiaddr' +import { mockUpgrader, mockConnectionGater } from '@libp2p/interface-compliance-tests/mocks' 
+import sinon from 'sinon' +import Peers from '../fixtures/peers.js' +import pWaitFor from 'p-wait-for' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { createFromJSON } from '@libp2p/peer-id-factory' +import { Components } from '@libp2p/interfaces/components' +import { PeerRecordUpdater } from '../../src/peer-record-updater.js' + +const addrs = [ + new Multiaddr('/ip4/127.0.0.1/tcp/0'), + new Multiaddr('/ip4/127.0.0.1/tcp/0') +] + +describe('Transport Manager (TCP)', () => { + const connectionGater = mockConnectionGater() + let tm: DefaultTransportManager + let localPeer: PeerId + let components: Components + + before(async () => { + localPeer = await createFromJSON(Peers[0]) + }) + + beforeEach(() => { + components = new Components({ + peerId: localPeer, + datastore: new MemoryDatastore(), + upgrader: mockUpgrader() + }) + components.setAddressManager(new DefaultAddressManager(components, { listen: addrs.map(addr => addr.toString()) })) + components.setPeerStore(new PersistentPeerStore(components, { + addressFilter: connectionGater.filterMultiaddrForPeer + })) + + tm = new DefaultTransportManager(components) + + components.setTransportManager(tm) + }) + + afterEach(async () => { + await tm.removeAll() + expect(tm.getTransports()).to.be.empty() + }) + + it('should be able to add and remove a transport', async () => { + expect(tm.getTransports()).to.have.lengthOf(0) + tm.add(new TCP()) + expect(tm.getTransports()).to.have.lengthOf(1) + await tm.remove(TCP.prototype[Symbol.toStringTag]) + expect(tm.getTransports()).to.have.lengthOf(0) + }) + + it('should be able to listen', async () => { + const transport = new TCP() + + expect(tm.getTransports()).to.be.empty() + + tm.add(transport) + + expect(tm.getTransports()).to.have.lengthOf(1) + + const spyListener = sinon.spy(transport, 'createListener') + await tm.listen(addrs) + + // Ephemeral ip addresses may result in multiple listeners + expect(tm.getAddrs().length).to.equal(addrs.length) + await tm.stop() + expect(spyListener.called).to.be.true() + }) + + it('should create self signed peer record on listen', async () => { + const peerRecordUpdater = new PeerRecordUpdater(components) + await peerRecordUpdater.start() + + let signedPeerRecord = await components.getPeerStore().addressBook.getPeerRecord(localPeer) + expect(signedPeerRecord).to.not.exist() + + tm.add(new TCP()) + await tm.listen(addrs) + + // Should created Self Peer record on new listen address, but it is done async + // with no event so we have to wait a bit + await pWaitFor(async () => { + signedPeerRecord = await components.getPeerStore().addressBook.getPeerRecord(localPeer) + + return signedPeerRecord != null + }, { interval: 100, timeout: 2000 }) + + if (signedPeerRecord == null) { + throw new Error('Could not get signed peer record') + } + + const record = PeerRecord.createFromProtobuf(signedPeerRecord.payload) + expect(record).to.exist() + expect(record.multiaddrs.length).to.equal(addrs.length) + await peerRecordUpdater.stop() + }) + + it('should be able to dial', async () => { + tm.add(new TCP()) + await tm.listen(addrs) + const addr = tm.getAddrs().shift() + + if (addr == null) { + throw new Error('Could not find addr') + } + + const connection = await tm.dial(addr) + expect(connection).to.exist() + await connection.close() + }) +}) diff --git a/test/transports/transport-manager.spec.js b/test/transports/transport-manager.spec.js deleted file mode 100644 index 23006cdd..00000000 --- a/test/transports/transport-manager.spec.js +++ /dev/null @@ -1,247 +0,0 
@@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') - -const { Multiaddr } = require('multiaddr') -const Transport = require('libp2p-websockets') -const filters = require('libp2p-websockets/src/filters') -const { NOISE: Crypto } = require('@chainsafe/libp2p-noise') -const AddressManager = require('../../src/address-manager') -const TransportManager = require('../../src/transport-manager') -const mockUpgrader = require('../utils/mockUpgrader') -const { MULTIADDRS_WEBSOCKETS } = require('../fixtures/browser') -const { codes: ErrorCodes } = require('../../src/errors') -const Libp2p = require('../../src') -const { FaultTolerance } = require('../../src/transport-manager') - -const Peers = require('../fixtures/peers') -const PeerId = require('peer-id') - -const listenAddr = new Multiaddr('/ip4/127.0.0.1/tcp/0') - -describe('Transport Manager (WebSockets)', () => { - let tm - - before(() => { - tm = new TransportManager({ - libp2p: { - addressManager: new AddressManager({ listen: [listenAddr] }) - }, - upgrader: mockUpgrader, - onConnection: () => {} - }) - }) - - afterEach(async () => { - await tm.removeAll() - expect(tm._transports.size).to.equal(0) - }) - - it('should be able to add and remove a transport', async () => { - tm.add(Transport.prototype[Symbol.toStringTag], Transport, { filter: filters.all }) - expect(tm._transports.size).to.equal(1) - await tm.remove(Transport.prototype[Symbol.toStringTag]) - }) - - it('should not be able to add a transport without a key', async () => { - // Chai as promised conflicts with normal `throws` validation, - // so wrap the call in an async function - await expect((async () => { // eslint-disable-line - tm.add(undefined, Transport) - })()) - .to.eventually.be.rejected() - .and.to.have.property('code', ErrorCodes.ERR_INVALID_KEY) - }) - - it('should not be able to add a transport twice', async () => { - tm.add(Transport.prototype[Symbol.toStringTag], Transport) - // Chai as promised conflicts with normal `throws` validation, - // so wrap the call in an async function - await expect((async () => { // eslint-disable-line - tm.add(Transport.prototype[Symbol.toStringTag], Transport) - })()) - .to.eventually.be.rejected() - .and.to.have.property('code', ErrorCodes.ERR_DUPLICATE_TRANSPORT) - }) - - it('should be able to dial', async () => { - tm.add(Transport.prototype[Symbol.toStringTag], Transport, { filter: filters.all }) - const addr = MULTIADDRS_WEBSOCKETS[0] - const connection = await tm.dial(addr) - expect(connection).to.exist() - await connection.close() - }) - - it('should fail to dial an unsupported address', async () => { - tm.add(Transport.prototype[Symbol.toStringTag], Transport, { filter: filters.all }) - const addr = new Multiaddr('/ip4/127.0.0.1/tcp/0') - await expect(tm.dial(addr)) - .to.eventually.be.rejected() - .and.to.have.property('code', ErrorCodes.ERR_TRANSPORT_UNAVAILABLE) - }) - - it('should fail to listen with no valid address', async () => { - tm.add(Transport.prototype[Symbol.toStringTag], Transport, { filter: filters.all }) - - await expect(tm.listen([listenAddr])) - .to.eventually.be.rejected() - .and.to.have.property('code', ErrorCodes.ERR_NO_VALID_ADDRESSES) - }) -}) - -describe('libp2p.transportManager', () => { - let peerId - let libp2p - - before(async () => { - peerId = await PeerId.createFromJSON(Peers[0]) - }) - - afterEach(async () => { - sinon.restore() - libp2p && await libp2p.stop() - libp2p = null - }) - - it('should create a TransportManager', () 
=> { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - connEncryption: [Crypto] - } - }) - - expect(libp2p.transportManager).to.exist() - // Our transport and circuit relay - expect(libp2p.transportManager._transports.size).to.equal(2) - }) - - it('should be able to customize a transport', () => { - const spy = sinon.spy() - const key = spy.prototype[Symbol.toStringTag] = 'TransportSpy' - const customOptions = { - another: 'value', - listenerOptions: { - listen: 'carefully' - } - } - libp2p = new Libp2p({ - peerId, - modules: { - transport: [spy], - connEncryption: [Crypto] - }, - config: { - transport: { - [key]: customOptions - } - } - }) - - expect(libp2p.transportManager).to.exist() - // Our transport and circuit relay - expect(libp2p.transportManager._transports.size).to.equal(2) - expect(libp2p.transportManager._listenerOptions.size).to.equal(2) - expect(spy).to.have.property('callCount', 1) - expect(spy.getCall(0)).to.have.deep.property('args', [{ - ...customOptions, - libp2p, - upgrader: libp2p.upgrader - }]) - }) - - it('starting and stopping libp2p should start and stop TransportManager', async () => { - libp2p = new Libp2p({ - peerId, - modules: { - transport: [Transport], - connEncryption: [Crypto] - } - }) - - // We don't need to listen, stub it - sinon.stub(libp2p.transportManager, 'listen').returns(true) - sinon.spy(libp2p.transportManager, 'close') - - await libp2p.start() - await libp2p.stop() - - expect(libp2p.transportManager.listen.callCount).to.equal(1) - expect(libp2p.transportManager.close.callCount).to.equal(1) - }) -}) - -describe('libp2p.transportManager (dial only)', () => { - let peerId - let libp2p - - before(async () => { - peerId = await PeerId.createFromJSON(Peers[0]) - }) - - afterEach(async () => { - sinon.restore() - libp2p && await libp2p.stop() - }) - - it('fails to start if multiaddr fails to listen', async () => { - libp2p = new Libp2p({ - peerId, - addresses: { - listen: [new Multiaddr('/ip4/127.0.0.1/tcp/0')] - }, - modules: { - transport: [Transport], - connEncryption: [Crypto] - } - }) - - try { - await libp2p.start() - } catch (/** @type {any} */ err) { - expect(err).to.exist() - expect(err.code).to.equal(ErrorCodes.ERR_NO_VALID_ADDRESSES) - return - } - throw new Error('it should fail to start if multiaddr fails to listen') - }) - - it('does not fail to start if provided listen multiaddr are not compatible to configured transports (when supporting dial only mode)', async () => { - libp2p = new Libp2p({ - peerId, - addresses: { - listen: [new Multiaddr('/ip4/127.0.0.1/tcp/0')] - }, - transportManager: { - faultTolerance: FaultTolerance.NO_FATAL - }, - modules: { - transport: [Transport], - connEncryption: [Crypto] - } - }) - - await libp2p.start() - }) - - it('does not fail to start if provided listen multiaddr fail to listen on configured transports (when supporting dial only mode)', async () => { - libp2p = new Libp2p({ - peerId, - addresses: { - listen: [new Multiaddr('/ip4/127.0.0.1/tcp/12345/p2p/QmWDn2LY8nannvSWJzruUYoLZ4vV83vfCBwd8DipvdgQc3/p2p-circuit')] - }, - transportManager: { - faultTolerance: FaultTolerance.NO_FATAL - }, - modules: { - transport: [Transport], - connEncryption: [Crypto] - } - }) - - await libp2p.start() - }) -}) diff --git a/test/transports/transport-manager.spec.ts b/test/transports/transport-manager.spec.ts new file mode 100644 index 00000000..93721fc5 --- /dev/null +++ b/test/transports/transport-manager.spec.ts @@ -0,0 +1,158 @@ +/* eslint-env mocha */ + +import { expect } from 
'aegir/utils/chai.js' +import sinon from 'sinon' +import { Multiaddr } from '@multiformats/multiaddr' +import { WebSockets } from '@libp2p/websockets' +import * as filters from '@libp2p/websockets/filters' +import { NOISE } from '@chainsafe/libp2p-noise' +import { DefaultAddressManager } from '../../src/address-manager/index.js' +import { DefaultTransportManager, FAULT_TOLERANCE } from '../../src/transport-manager.js' +import { mockUpgrader } from '@libp2p/interface-compliance-tests/mocks' +import { MULTIADDRS_WEBSOCKETS } from '../fixtures/browser.js' +import { codes as ErrorCodes } from '../../src/errors.js' +import Peers from '../fixtures/peers.js' +import { Components } from '@libp2p/interfaces/components' +import { createEd25519PeerId, createFromJSON } from '@libp2p/peer-id-factory' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { createLibp2pNode, Libp2pNode } from '../../src/libp2p.js' + +const listenAddr = new Multiaddr('/ip4/127.0.0.1/tcp/0') + +describe('Transport Manager (WebSockets)', () => { + let tm: DefaultTransportManager + let components: Components + + before(async () => { + components = new Components({ + peerId: await createEd25519PeerId(), + upgrader: mockUpgrader() + }) + components.setAddressManager(new DefaultAddressManager(components, { listen: [listenAddr.toString()] })) + + tm = new DefaultTransportManager(components) + }) + + afterEach(async () => { + await tm.removeAll() + expect(tm.getTransports()).to.be.empty() + }) + + it('should be able to add and remove a transport', async () => { + const transport = new WebSockets({ + filter: filters.all + }) + + expect(tm.getTransports()).to.have.lengthOf(0) + tm.add(transport) + + expect(tm.getTransports()).to.have.lengthOf(1) + await tm.remove(transport.constructor.name) + expect(tm.getTransports()).to.have.lengthOf(0) + }) + + it('should not be able to add a transport twice', async () => { + tm.add(new WebSockets()) + + expect(() => { + tm.add(new WebSockets()) + }) + .to.throw() + .and.to.have.property('code', ErrorCodes.ERR_DUPLICATE_TRANSPORT) + }) + + it('should be able to dial', async () => { + tm.add(new WebSockets({ filter: filters.all })) + const addr = MULTIADDRS_WEBSOCKETS[0] + const connection = await tm.dial(addr) + expect(connection).to.exist() + await connection.close() + }) + + it('should fail to dial an unsupported address', async () => { + tm.add(new WebSockets({ filter: filters.all })) + const addr = new Multiaddr('/ip4/127.0.0.1/tcp/0') + await expect(tm.dial(addr)) + .to.eventually.be.rejected() + .and.to.have.property('code', ErrorCodes.ERR_TRANSPORT_UNAVAILABLE) + }) + + it('should fail to listen with no valid address', async () => { + tm.add(new WebSockets({ filter: filters.all })) + + await expect(tm.listen([listenAddr])) + .to.eventually.be.rejected() + .and.to.have.property('code', ErrorCodes.ERR_NO_VALID_ADDRESSES) + }) +}) + +describe('libp2p.transportManager (dial only)', () => { + let peerId: PeerId + let libp2p: Libp2pNode + + before(async () => { + peerId = await createFromJSON(Peers[0]) + }) + + afterEach(async () => { + sinon.restore() + + if (libp2p != null) { + await libp2p.stop() + } + }) + + it('fails to start if multiaddr fails to listen', async () => { + libp2p = await createLibp2pNode({ + peerId, + addresses: { + listen: ['/ip4/127.0.0.1/tcp/0'] + }, + transports: [new WebSockets()], + connectionEncryption: [NOISE] + }) + + await expect(libp2p.start()).to.eventually.be.rejected + .with.property('code', ErrorCodes.ERR_NO_VALID_ADDRESSES) + }) + + it('does not 
fail to start if provided listen multiaddr are not compatible to configured transports (when supporting dial only mode)', async () => { + libp2p = await createLibp2pNode({ + peerId, + addresses: { + listen: ['/ip4/127.0.0.1/tcp/0'] + }, + transportManager: { + faultTolerance: FAULT_TOLERANCE.NO_FATAL + }, + transports: [ + new WebSockets() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + }) + + it('does not fail to start if provided listen multiaddr fail to listen on configured transports (when supporting dial only mode)', async () => { + libp2p = await createLibp2pNode({ + peerId, + addresses: { + listen: ['/ip4/127.0.0.1/tcp/12345/p2p/QmWDn2LY8nannvSWJzruUYoLZ4vV83vfCBwd8DipvdgQc3/p2p-circuit'] + }, + transportManager: { + faultTolerance: FAULT_TOLERANCE.NO_FATAL + }, + transports: [ + new WebSockets() + ], + connectionEncryption: [ + NOISE + ] + }) + + await libp2p.start() + }) +}) diff --git a/test/ts-use/package.json b/test/ts-use/package.json deleted file mode 100644 index 87ec9b5b..00000000 --- a/test/ts-use/package.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "name": "ts-use", - "private": true, - "dependencies": { - "@achingbrain/libp2p-gossipsub": "^0.12.2", - "@chainsafe/libp2p-noise": "^5.0.0", - "datastore-level": "^7.0.1", - "ipfs-http-client": "^55.0.0", - "libp2p": "file:../..", - "libp2p-bootstrap": "^0.14.0", - "libp2p-delegated-content-routing": "^0.11.0", - "libp2p-delegated-peer-routing": "^0.11.1", - "libp2p-interfaces": "^4.0.0", - "libp2p-kad-dht": "^0.28.6", - "libp2p-mplex": "^0.10.4", - "libp2p-record": "^0.10.4", - "libp2p-tcp": "^0.17.1", - "libp2p-websockets": "^0.16.1", - "peer-id": "^0.16.0" - }, - "scripts": { - "build": "npx tsc", - "test": "npm install && npx -p typescript tsc --noEmit" - } -} diff --git a/test/ts-use/src/main.ts b/test/ts-use/src/main.ts deleted file mode 100644 index 15b927b8..00000000 --- a/test/ts-use/src/main.ts +++ /dev/null @@ -1,195 +0,0 @@ -import Libp2p = require('libp2p') -import Libp2pRecord = require('libp2p-record') -import TCP = require('libp2p-tcp') - -const WEBSOCKETS = require('libp2p-websockets') -const NOISE = require('@chainsafe/libp2p-noise') -const MPLEX = require('libp2p-mplex') -const Gossipsub = require('libp2p-gossipsub') -const DHT = require('libp2p-kad-dht') - -const { dnsaddrResolver } = require('multiaddr/src/resolvers') -const { publicAddressesFirst } = require('libp2p-utils/src/address-sort') - -const { SignaturePolicy } = require('libp2p-interfaces/src/pubsub/signature-policy') -const { FaultTolerance } = require('libp2p/src/transport-manager') -const filters = require('libp2p-websockets/src/filters') - -const Bootstrap = require('libp2p-bootstrap') -const LevelStore = require('datastore-level') - -const ipfsHttpClient = require('ipfs-http-client') -const DelegatedPeerRouter = require('libp2p-delegated-peer-routing') -const DelegatedContentRouter = require('libp2p-delegated-content-routing') -const PeerId = require('peer-id') - - -// Known peers addresses -const bootstrapMultiaddrs = [ - '/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb', - '/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN' -] -const transportKey = WEBSOCKETS.prototype[Symbol.toStringTag] - -async function main() { - // create a peerId - const peerId = await PeerId.create() - - const delegatedPeerRouting = new DelegatedPeerRouter(ipfsHttpClient.create({ - host: 'node0.delegate.ipfs.io', // In production you should setup your own delegates - protocol: 
'https', - port: 443 - })) - - const delegatedContentRouting = new DelegatedContentRouter(peerId, ipfsHttpClient.create({ - host: 'node0.delegate.ipfs.io', // In production you should setup your own delegates - protocol: 'https', - port: 443 - })) - - const libp2p = await Libp2p.create({ - peerId, - addresses: { - listen: ['/ip4/127.0.0.1/tcp/8000', '/ip4/127.0.0.1/tcp/8001/ws'] - }, - modules: { - transport: [TCP, WEBSOCKETS], - streamMuxer: [MPLEX], - connEncryption: [NOISE], - peerDiscovery: [Bootstrap], - pubsub: Gossipsub, - dht: DHT, - contentRouting: [delegatedContentRouting], - peerRouting: [delegatedPeerRouting] - }, - peerRouting: { - refreshManager: { - enabled: true, - interval: 1000, - bootDelay: 11111 - } - }, - dialer: { - maxParallelDials: 100, - maxDialsPerPeer: 4, - dialTimeout: 30e3, - resolvers: { - dnsaddr: dnsaddrResolver - }, - addressSorter: publicAddressesFirst - }, - connectionManager: { - maxConnections: Infinity, - minConnections: 0, - pollInterval: 2000, - defaultPeerValue: 1, - maxData: Infinity, - maxSentData: Infinity, - maxReceivedData: Infinity, - maxEventLoopDelay: Infinity, - movingAverageInterval: 60000 - }, - transportManager: { - faultTolerance: FaultTolerance.NO_FATAL - }, - metrics: { - enabled: true, - computeThrottleMaxQueueSize: 1000, - computeThrottleTimeout: 2000, - movingAverageIntervals: [ - 60 * 1000, // 1 minute - 5 * 60 * 1000, // 5 minutes - 15 * 60 * 1000 // 15 minutes - ], - maxOldPeersRetention: 50 - }, - datastore: new LevelStore('path/to/store'), - peerStore: { - persistence: false - }, - keychain: { - pass: 'notsafepassword123456789', - datastore: new LevelStore('path/to/store-keys') - }, - config: { - peerDiscovery: { - autoDial: true, - [Bootstrap.tag]: { - enabled: true, - list: bootstrapMultiaddrs // provide array of multiaddrs - } - }, - dht: { - enabled: true, - kBucketSize: 20, - clientMode: true, - validators: { - pk: Libp2pRecord.validator.validators.pk - }, - selectors: { - pk: Libp2pRecord.selection.selectors.pk - } - }, - nat: { - description: 'my-node', // set as the port mapping description on the router, defaults the current libp2p version and your peer id - enabled: true, // defaults to true - gateway: '192.168.1.1', // leave unset to auto-discover - externalIp: '80.1.1.1', // leave unset to auto-discover - ttl: 7200, // TTL for port mappings (min 20 minutes) - keepAlive: true, // Refresh port mapping after TTL expires - pmp: { - enabled: false, // defaults to false - } - }, - relay: { - enabled: true, // Allows you to dial and accept relayed connections. Does not make you a relay. 
- hop: { - enabled: true, // Allows you to be a relay for other peers - active: true // You will attempt to dial destination peers if you are not connected to them - }, - advertise: { - bootDelay: 15 * 60 * 1000, // Delay before HOP relay service is advertised on the network - enabled: true, // Allows you to disable the advertise of the Hop service - ttl: 30 * 60 * 1000 // Delay Between HOP relay service advertisements on the network - }, - autoRelay: { - enabled: true, // Allows you to bind to relays with HOP enabled for improving node dialability - maxListeners: 2 // Configure maximum number of HOP relays to use - } - }, - transport: { - [transportKey]: { - filter: filters.all - } - }, - pubsub: { // The pubsub options (and defaults) can be found in the pubsub router documentation - enabled: true, - emitSelf: false, // whether the node should emit to self on publish - globalSignaturePolicy: SignaturePolicy.StrictSign // message signing policy - } - } - }) - - libp2p.connectionManager.on('peer:connect', (connection) => { - console.log(`Connected to ${connection.remotePeer.toB58String()}`) - }) - - - - // Listen for new connections to peers - libp2p.connectionManager.on('peer:connect', (connection) => { - console.log(`Connected to ${connection.remotePeer.toB58String()}`) - }) - - // Listen for peers disconnecting - libp2p.connectionManager.on('peer:disconnect', (connection) => { - console.log(`Disconnected from ${connection.remotePeer.toB58String()}`) - }) - - - await libp2p.start() - console.log('started') - await libp2p.stop() -} - -main() diff --git a/test/ts-use/tsconfig.json b/test/ts-use/tsconfig.json deleted file mode 100644 index 8cd47e1e..00000000 --- a/test/ts-use/tsconfig.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "compilerOptions": { - "strict": true, - "noImplicitAny": true, - "skipLibCheck": true - } -} \ No newline at end of file diff --git a/test/upgrading/upgrader.spec.js b/test/upgrading/upgrader.spec.js deleted file mode 100644 index 5caf63f2..00000000 --- a/test/upgrading/upgrader.spec.js +++ /dev/null @@ -1,477 +0,0 @@ -'use strict' -/* eslint-env mocha */ - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') -const Muxer = require('libp2p-mplex') -const { Multiaddr } = require('multiaddr') -const PeerId = require('peer-id') -const pipe = require('it-pipe') -const { collect } = require('streaming-iterables') -const pSettle = require('p-settle') -const Transport = require('libp2p-websockets') -const { NOISE: Crypto } = require('@chainsafe/libp2p-noise') -const Protector = require('../../src/pnet') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const swarmKeyBuffer = uint8ArrayFromString(require('../fixtures/swarm.key')) - -const Libp2p = require('../../src') -const Upgrader = require('../../src/upgrader') -const { codes } = require('../../src/errors') -const { mockConnectionGater } = require('../utils/mock-connection-gater') -const mockMultiaddrConnPair = require('../utils/mockMultiaddrConn') -const Peers = require('../fixtures/peers') -const addrs = [ - new Multiaddr('/ip4/127.0.0.1/tcp/0'), - new Multiaddr('/ip4/127.0.0.1/tcp/0') -] - -describe('Upgrader', () => { - let localUpgrader - let remoteUpgrader - let localPeer - let remotePeer - const connectionGater = mockConnectionGater() - - const mockConnectionManager = { - gater: { - allowDialPeer: async () => true, - allowDialMultiaddr: async () => true, - acceptConnection: async () => true, - acceptEncryptedConnection: async () => true, - 
acceptUpgradedConnection: async () => true - } - } - - before(async () => { - ([ - localPeer, - remotePeer - ] = await Promise.all([ - PeerId.createFromJSON(Peers[0]), - PeerId.createFromJSON(Peers[1]) - ])) - - localUpgrader = new Upgrader({ - connectionManager: mockConnectionManager, - localPeer, - connectionGater - }) - remoteUpgrader = new Upgrader({ - connectionManager: mockConnectionManager, - localPeer: remotePeer, - connectionGater - }) - - localUpgrader.protocols.set('/echo/1.0.0', ({ stream }) => pipe(stream, stream)) - remoteUpgrader.protocols.set('/echo/1.0.0', ({ stream }) => pipe(stream, stream)) - }) - - afterEach(() => { - sinon.restore() - }) - - it('should upgrade with valid muxers and crypto', async () => { - const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) - - const muxers = new Map([[Muxer.multicodec, Muxer]]) - sinon.stub(localUpgrader, 'muxers').value(muxers) - sinon.stub(remoteUpgrader, 'muxers').value(muxers) - - const cryptos = new Map([[Crypto.protocol, Crypto]]) - sinon.stub(localUpgrader, 'cryptos').value(cryptos) - sinon.stub(remoteUpgrader, 'cryptos').value(cryptos) - - const connections = await Promise.all([ - localUpgrader.upgradeOutbound(outbound), - remoteUpgrader.upgradeInbound(inbound) - ]) - - expect(connections).to.have.length(2) - - const { stream, protocol } = await connections[0].newStream('/echo/1.0.0') - expect(protocol).to.equal('/echo/1.0.0') - - const hello = uint8ArrayFromString('hello there!') - const result = await pipe( - [hello], - stream, - function toBuffer (source) { - return (async function * () { - for await (const val of source) yield val.slice() - })() - }, - collect - ) - - expect(result).to.eql([hello]) - }) - - it('should upgrade with only crypto', async () => { - const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) - - // No available muxers - const muxers = new Map() - sinon.stub(localUpgrader, 'muxers').value(muxers) - sinon.stub(remoteUpgrader, 'muxers').value(muxers) - - const cryptos = new Map([[Crypto.protocol, Crypto]]) - sinon.stub(localUpgrader, 'cryptos').value(cryptos) - sinon.stub(remoteUpgrader, 'cryptos').value(cryptos) - - const connections = await Promise.all([ - localUpgrader.upgradeOutbound(outbound), - remoteUpgrader.upgradeInbound(inbound) - ]) - - expect(connections).to.have.length(2) - - await expect(connections[0].newStream('/echo/1.0.0')).to.be.rejected() - - // Verify the MultiaddrConnection close method is called - sinon.spy(inbound, 'close') - sinon.spy(outbound, 'close') - await Promise.all(connections.map(conn => conn.close())) - expect(inbound.close.callCount).to.equal(1) - expect(outbound.close.callCount).to.equal(1) - }) - - it('should use a private connection protector when provided', async () => { - const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) - - const muxers = new Map([[Muxer.multicodec, Muxer]]) - sinon.stub(localUpgrader, 'muxers').value(muxers) - sinon.stub(remoteUpgrader, 'muxers').value(muxers) - - const cryptos = new Map([[Crypto.protocol, Crypto]]) - sinon.stub(localUpgrader, 'cryptos').value(cryptos) - sinon.stub(remoteUpgrader, 'cryptos').value(cryptos) - - const protector = new Protector(swarmKeyBuffer) - sinon.stub(localUpgrader, 'protector').value(protector) - sinon.stub(remoteUpgrader, 'protector').value(protector) - sinon.spy(protector, 'protect') - - const connections = await Promise.all([ - localUpgrader.upgradeOutbound(outbound), - remoteUpgrader.upgradeInbound(inbound) - ]) - - 
expect(connections).to.have.length(2) - - const { stream, protocol } = await connections[0].newStream('/echo/1.0.0') - expect(protocol).to.equal('/echo/1.0.0') - - const hello = uint8ArrayFromString('hello there!') - const result = await pipe( - [hello], - stream, - function toBuffer (source) { - return (async function * () { - for await (const val of source) yield val.slice() - })() - }, - collect - ) - - expect(result).to.eql([hello]) - expect(protector.protect.callCount).to.eql(2) - }) - - it('should fail if crypto fails', async () => { - const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) - - const muxers = new Map([[Muxer.multicodec, Muxer]]) - sinon.stub(localUpgrader, 'muxers').value(muxers) - sinon.stub(remoteUpgrader, 'muxers').value(muxers) - - const crypto = { - tag: '/insecure', - secureInbound: () => { throw new Error('Boom') }, - secureOutbound: () => { throw new Error('Boom') } - } - - const cryptos = new Map([[crypto.tag, crypto]]) - sinon.stub(localUpgrader, 'cryptos').value(cryptos) - sinon.stub(remoteUpgrader, 'cryptos').value(cryptos) - - // Wait for the results of each side of the connection - const results = await pSettle([ - localUpgrader.upgradeOutbound(outbound), - remoteUpgrader.upgradeInbound(inbound) - ]) - - // Ensure both sides fail - expect(results).to.have.length(2) - results.forEach(result => { - expect(result.isRejected).to.equal(true) - expect(result.reason.code).to.equal(codes.ERR_ENCRYPTION_FAILED) - }) - }) - - it('should fail if muxers do not match', async () => { - const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) - - const muxersLocal = new Map([['/muxer-local', Muxer]]) - const muxersRemote = new Map([['/muxer-remote', Muxer]]) - sinon.stub(localUpgrader, 'muxers').value(muxersLocal) - sinon.stub(remoteUpgrader, 'muxers').value(muxersRemote) - - const cryptos = new Map([[Crypto.protocol, Crypto]]) - sinon.stub(localUpgrader, 'cryptos').value(cryptos) - sinon.stub(remoteUpgrader, 'cryptos').value(cryptos) - - // Wait for the results of each side of the connection - const results = await pSettle([ - localUpgrader.upgradeOutbound(outbound), - remoteUpgrader.upgradeInbound(inbound) - ]) - - // Ensure both sides fail - expect(results).to.have.length(2) - results.forEach(result => { - expect(result.isRejected).to.equal(true) - expect(result.reason.code).to.equal(codes.ERR_MUXER_UNAVAILABLE) - }) - }) - - it('should map getStreams and close methods', async () => { - const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) - - const muxers = new Map([[Muxer.multicodec, Muxer]]) - sinon.stub(localUpgrader, 'muxers').value(muxers) - sinon.stub(remoteUpgrader, 'muxers').value(muxers) - - const cryptos = new Map([[Crypto.protocol, Crypto]]) - sinon.stub(localUpgrader, 'cryptos').value(cryptos) - sinon.stub(remoteUpgrader, 'cryptos').value(cryptos) - - const connections = await Promise.all([ - localUpgrader.upgradeOutbound(outbound), - remoteUpgrader.upgradeInbound(inbound) - ]) - - expect(connections).to.have.length(2) - - // Create a few streams, at least 1 in each direction - await connections[0].newStream('/echo/1.0.0') - await connections[1].newStream('/echo/1.0.0') - await connections[0].newStream('/echo/1.0.0') - connections.forEach(conn => { - expect(conn.streams).to.have.length(3) - }) - - // Verify the MultiaddrConnection close method is called - sinon.spy(inbound, 'close') - sinon.spy(outbound, 'close') - await Promise.all(connections.map(conn => conn.close())) - 
expect(inbound.close.callCount).to.equal(1) - expect(outbound.close.callCount).to.equal(1) - }) - - it('should call connection handlers', async () => { - const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) - - const muxers = new Map([[Muxer.multicodec, Muxer]]) - sinon.stub(localUpgrader, 'muxers').value(muxers) - sinon.stub(remoteUpgrader, 'muxers').value(muxers) - - const cryptos = new Map([[Crypto.protocol, Crypto]]) - sinon.stub(localUpgrader, 'cryptos').value(cryptos) - sinon.stub(remoteUpgrader, 'cryptos').value(cryptos) - - // Verify onConnection is called with the connection - sinon.spy(localUpgrader, 'onConnection') - sinon.spy(remoteUpgrader, 'onConnection') - const connections = await Promise.all([ - localUpgrader.upgradeOutbound(outbound), - remoteUpgrader.upgradeInbound(inbound) - ]) - expect(connections).to.have.length(2) - expect(localUpgrader.onConnection.callCount).to.equal(1) - expect(localUpgrader.onConnection.getCall(0).args).to.eql([connections[0]]) - expect(remoteUpgrader.onConnection.callCount).to.equal(1) - expect(remoteUpgrader.onConnection.getCall(0).args).to.eql([connections[1]]) - - // Verify onConnectionEnd is called with the connection - sinon.spy(localUpgrader, 'onConnectionEnd') - sinon.spy(remoteUpgrader, 'onConnectionEnd') - await Promise.all(connections.map(conn => conn.close())) - expect(localUpgrader.onConnectionEnd.callCount).to.equal(1) - expect(localUpgrader.onConnectionEnd.getCall(0).args).to.eql([connections[0]]) - expect(remoteUpgrader.onConnectionEnd.callCount).to.equal(1) - expect(remoteUpgrader.onConnectionEnd.getCall(0).args).to.eql([connections[1]]) - }) - - it('should fail to create a stream for an unsupported protocol', async () => { - const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) - - const muxers = new Map([[Muxer.multicodec, Muxer]]) - sinon.stub(localUpgrader, 'muxers').value(muxers) - sinon.stub(remoteUpgrader, 'muxers').value(muxers) - - const cryptos = new Map([[Crypto.protocol, Crypto]]) - sinon.stub(localUpgrader, 'cryptos').value(cryptos) - sinon.stub(remoteUpgrader, 'cryptos').value(cryptos) - - const connections = await Promise.all([ - localUpgrader.upgradeOutbound(outbound), - remoteUpgrader.upgradeInbound(inbound) - ]) - - expect(connections).to.have.length(2) - - const results = await pSettle([ - connections[0].newStream('/unsupported/1.0.0'), - connections[1].newStream('/unsupported/1.0.0') - ]) - expect(results).to.have.length(2) - results.forEach(result => { - expect(result.isRejected).to.equal(true) - expect(result.reason.code).to.equal(codes.ERR_UNSUPPORTED_PROTOCOL) - }) - }) -}) - -describe('libp2p.upgrader', () => { - let peers - let libp2p - const connectionGater = mockConnectionGater() - - before(async () => { - peers = await Promise.all([ - PeerId.createFromJSON(Peers[0]), - PeerId.createFromJSON(Peers[1]) - ]) - }) - - afterEach(async () => { - sinon.restore() - libp2p && await libp2p.stop() - libp2p = null - }) - - it('should create an Upgrader', () => { - const protector = new Protector(swarmKeyBuffer) - libp2p = new Libp2p({ - peerId: peers[0], - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto], - connProtector: protector - } - }) - - expect(libp2p.upgrader).to.exist() - expect(libp2p.upgrader.muxers).to.eql(new Map([[Muxer.multicodec, Muxer]])) - expect(libp2p.upgrader.cryptos).to.eql(new Map([[Crypto.protocol, Crypto]])) - expect(libp2p.upgrader.protector).to.equal(protector) - // Ensure the transport manager also has 
the upgrader - expect(libp2p.upgrader).to.equal(libp2p.transportManager.upgrader) - }) - - it('should be able to register and unregister a handler', async () => { - libp2p = new Libp2p({ - peerId: peers[0], - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - - expect(libp2p.upgrader.protocols).to.not.have.any.keys(['/echo/1.0.0', '/echo/1.0.1']) - - const echoHandler = () => {} - await libp2p.handle(['/echo/1.0.0', '/echo/1.0.1'], echoHandler) - expect(libp2p.upgrader.protocols.get('/echo/1.0.0')).to.equal(echoHandler) - expect(libp2p.upgrader.protocols.get('/echo/1.0.1')).to.equal(echoHandler) - - await libp2p.unhandle(['/echo/1.0.0']) - expect(libp2p.upgrader.protocols.get('/echo/1.0.0')).to.equal(undefined) - expect(libp2p.upgrader.protocols.get('/echo/1.0.1')).to.equal(echoHandler) - }) - - it('should return muxed streams', async () => { - const remotePeer = peers[1] - libp2p = new Libp2p({ - peerId: peers[0], - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - await libp2p.start() - const echoHandler = () => {} - libp2p.handle(['/echo/1.0.0'], echoHandler) - - const remoteUpgrader = new Upgrader({ - localPeer: remotePeer, - connectionManager: libp2p.connectionManager, - muxers: new Map([[Muxer.multicodec, Muxer]]), - cryptos: new Map([[Crypto.protocol, Crypto]]), - connectionGater - }) - remoteUpgrader.protocols.set('/echo/1.0.0', echoHandler) - - const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) - const [localConnection] = await Promise.all([ - libp2p.upgrader.upgradeOutbound(outbound), - remoteUpgrader.upgradeInbound(inbound) - ]) - sinon.spy(remoteUpgrader, '_onStream') - - const { stream } = await localConnection.newStream(['/echo/1.0.0']) - expect(stream).to.include.keys(['id', 'close', 'reset', 'timeline']) - - const [arg0] = remoteUpgrader._onStream.getCall(0).args - expect(arg0.stream).to.include.keys(['id', 'close', 'reset', 'timeline']) - }) - - it('should emit connect and disconnect events', async () => { - const remotePeer = peers[1] - libp2p = new Libp2p({ - peerId: peers[0], - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - } - }) - - const remoteUpgrader = new Upgrader({ - localPeer: remotePeer, - connectionManager: libp2p.connectionManager, - muxers: new Map([[Muxer.multicodec, Muxer]]), - cryptos: new Map([[Crypto.protocol, Crypto]]), - connectionGater - }) - - await libp2p.start() - - const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) - - // Spy on emit for easy verification - sinon.spy(libp2p.connectionManager, 'emit') - - // Upgrade and check the connect event - const connections = await Promise.all([ - libp2p.upgrader.upgradeOutbound(outbound), - remoteUpgrader.upgradeInbound(inbound) - ]) - expect(libp2p.connectionManager.emit.callCount).to.equal(1) - - let [event, connection] = libp2p.connectionManager.emit.getCall(0).args - expect(event).to.equal('peer:connect') - expect(connection.remotePeer.equals(remotePeer)).to.equal(true) - - // Close and check the disconnect event - await Promise.all(connections.map(conn => conn.close())) - expect(libp2p.connectionManager.emit.callCount).to.equal(2) - ;([event, connection] = libp2p.connectionManager.emit.getCall(1).args) - expect(event).to.equal('peer:disconnect') - expect(connection.remotePeer.equals(remotePeer)).to.equal(true) - }) -}) diff --git a/test/upgrading/upgrader.spec.ts b/test/upgrading/upgrader.spec.ts new file mode 100644 
index 00000000..6afb6cfc --- /dev/null +++ b/test/upgrading/upgrader.spec.ts @@ -0,0 +1,517 @@ +/* eslint-env mocha */ + +import { expect } from 'aegir/utils/chai.js' +import sinon from 'sinon' +import { Mplex } from '@libp2p/mplex' +import { Multiaddr } from '@multiformats/multiaddr' +import { pipe } from 'it-pipe' +import all from 'it-all' +import pSettle from 'p-settle' +import { WebSockets } from '@libp2p/websockets' +import { NOISE } from '@chainsafe/libp2p-noise' +import { PreSharedKeyConnectionProtector } from '../../src/pnet/index.js' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import swarmKey from '../fixtures/swarm.key.js' +import { DefaultUpgrader } from '../../src/upgrader.js' +import { codes } from '../../src/errors.js' +import { mockConnectionGater, mockMultiaddrConnPair, mockRegistrar } from '@libp2p/interface-compliance-tests/mocks' +import Peers from '../fixtures/peers.js' +import type { Upgrader } from '@libp2p/interfaces/transport' +import type { PeerId } from '@libp2p/interfaces/peer-id' +import { createFromJSON } from '@libp2p/peer-id-factory' +import { Components } from '@libp2p/interfaces/components' +import { Plaintext } from '../../src/insecure/index.js' +import type { ConnectionEncrypter, SecuredConnection } from '@libp2p/interfaces/connection-encrypter' +import type { StreamMuxer, StreamMuxerFactory, StreamMuxerInit } from '@libp2p/interfaces/stream-muxer' +import type { Stream } from '@libp2p/interfaces/connection' +import pDefer from 'p-defer' +import { createLibp2pNode, Libp2pNode } from '../../src/libp2p.js' + +const addrs = [ + new Multiaddr('/ip4/127.0.0.1/tcp/0'), + new Multiaddr('/ip4/127.0.0.1/tcp/0') +] + +describe('Upgrader', () => { + let localUpgrader: Upgrader + let remoteUpgrader: Upgrader + let localPeer: PeerId + let remotePeer: PeerId + let localComponents: Components + let remoteComponents: Components + + beforeEach(async () => { + ([ + localPeer, + remotePeer + ] = await Promise.all([ + createFromJSON(Peers[0]), + createFromJSON(Peers[1]) + ])) + + localComponents = new Components({ + peerId: localPeer, + connectionGater: mockConnectionGater(), + registrar: mockRegistrar() + }) + localUpgrader = new DefaultUpgrader(localComponents, { + connectionEncryption: [ + new Plaintext() + ], + muxers: [ + new Mplex() + ] + }) + + remoteComponents = new Components({ + peerId: remotePeer, + connectionGater: mockConnectionGater(), + registrar: mockRegistrar() + }) + remoteUpgrader = new DefaultUpgrader(remoteComponents, { + connectionEncryption: [ + new Plaintext() + ], + muxers: [ + new Mplex() + ] + }) + + await localComponents.getRegistrar().handle('/echo/1.0.0', ({ stream }) => { + void pipe(stream, stream) + }) + await remoteComponents.getRegistrar().handle('/echo/1.0.0', ({ stream }) => { + void pipe(stream, stream) + }) + }) + + afterEach(() => { + sinon.restore() + }) + + it('should upgrade with valid muxers and crypto', async () => { + const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) + + const connections = await Promise.all([ + localUpgrader.upgradeOutbound(outbound), + remoteUpgrader.upgradeInbound(inbound) + ]) + + expect(connections).to.have.length(2) + + const { stream, protocol } = await connections[0].newStream('/echo/1.0.0') + expect(protocol).to.equal('/echo/1.0.0') + + const hello = uint8ArrayFromString('hello there!') + const result = await pipe( + [hello], + stream, + function toBuffer (source) { + return (async function * () { + for await (const val of source) yield 
val.slice() + })() + }, + async (source) => await all(source) + ) + + expect(result).to.eql([hello]) + }) + + it('should upgrade with only crypto', async () => { + const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) + + // No available muxers + localUpgrader = new DefaultUpgrader(localComponents, { + connectionEncryption: [ + new Plaintext() + ], + muxers: [] + }) + remoteUpgrader = new DefaultUpgrader(remoteComponents, { + connectionEncryption: [ + new Plaintext() + ], + muxers: [] + }) + + const connections = await Promise.all([ + localUpgrader.upgradeOutbound(outbound), + remoteUpgrader.upgradeInbound(inbound) + ]) + + expect(connections).to.have.length(2) + + await expect(connections[0].newStream('/echo/1.0.0')).to.be.rejected() + + // Verify the MultiaddrConnection close method is called + const inboundCloseSpy = sinon.spy(inbound, 'close') + const outboundCloseSpy = sinon.spy(outbound, 'close') + await Promise.all(connections.map(async conn => await conn.close())) + expect(inboundCloseSpy.callCount).to.equal(1) + expect(outboundCloseSpy.callCount).to.equal(1) + }) + + it('should use a private connection protector when provided', async () => { + const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) + + const protector = new PreSharedKeyConnectionProtector({ + psk: uint8ArrayFromString(swarmKey) + }) + const protectorProtectSpy = sinon.spy(protector, 'protect') + + localComponents.setConnectionProtector(protector) + remoteComponents.setConnectionProtector(protector) + + const connections = await Promise.all([ + localUpgrader.upgradeOutbound(outbound), + remoteUpgrader.upgradeInbound(inbound) + ]) + + expect(connections).to.have.length(2) + + const { stream, protocol } = await connections[0].newStream('/echo/1.0.0') + expect(protocol).to.equal('/echo/1.0.0') + + const hello = uint8ArrayFromString('hello there!') + const result = await pipe( + [hello], + stream, + function toBuffer (source) { + return (async function * () { + for await (const val of source) yield val.slice() + })() + }, + async (source) => await all(source) + ) + + expect(result).to.eql([hello]) + expect(protectorProtectSpy.callCount).to.eql(2) + }) + + it('should fail if crypto fails', async () => { + const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) + + class BoomCrypto implements ConnectionEncrypter { + static protocol = '/insecure' + public protocol = '/insecure' + async secureInbound (): Promise { throw new Error('Boom') } + async secureOutbound (): Promise { throw new Error('Boom') } + } + + localUpgrader = new DefaultUpgrader(localComponents, { + connectionEncryption: [ + new BoomCrypto() + ], + muxers: [] + }) + remoteUpgrader = new DefaultUpgrader(remoteComponents, { + connectionEncryption: [ + new BoomCrypto() + ], + muxers: [] + }) + + // Wait for the results of each side of the connection + const results = await pSettle([ + localUpgrader.upgradeOutbound(outbound), + remoteUpgrader.upgradeInbound(inbound) + ]) + + // Ensure both sides fail + expect(results).to.have.length(2) + results.forEach(result => { + expect(result).to.have.property('isRejected', true) + expect(result).to.have.nested.property('reason.code', codes.ERR_ENCRYPTION_FAILED) + }) + }) + + it('should fail if muxers do not match', async () => { + const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) + + class OtherMuxer implements StreamMuxer { + protocol = '/muxer-local' + streams = [] + newStream (name?: string): Stream { + throw new Error('Not 
implemented') + } + + source = [] + async sink () {} + } + + class OtherMuxerFactory implements StreamMuxerFactory { + protocol = '/muxer-local' + + createStreamMuxer (components: Components, init?: StreamMuxerInit): StreamMuxer { + return new OtherMuxer() + } + } + + localUpgrader = new DefaultUpgrader(localComponents, { + connectionEncryption: [ + new Plaintext() + ], + muxers: [ + new OtherMuxerFactory() + ] + }) + remoteUpgrader = new DefaultUpgrader(remoteComponents, { + connectionEncryption: [ + new Plaintext() + ], + muxers: [ + new Mplex() + ] + }) + + // Wait for the results of each side of the connection + const results = await pSettle([ + localUpgrader.upgradeOutbound(outbound), + remoteUpgrader.upgradeInbound(inbound) + ]) + + // Ensure both sides fail + expect(results).to.have.length(2) + results.forEach(result => { + expect(result).to.have.property('isRejected', true) + expect(result).to.have.nested.property('reason.code', codes.ERR_MUXER_UNAVAILABLE) + }) + }) + + it('should map getStreams and close methods', async () => { + const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) + + const connections = await Promise.all([ + localUpgrader.upgradeOutbound(outbound), + remoteUpgrader.upgradeInbound(inbound) + ]) + + expect(connections).to.have.length(2) + + // Create a few streams, at least 1 in each direction + await connections[0].newStream('/echo/1.0.0') + await connections[1].newStream('/echo/1.0.0') + await connections[0].newStream('/echo/1.0.0') + connections.forEach(conn => { + expect(conn.streams).to.have.length(3) + }) + + // Verify the MultiaddrConnection close method is called + const inboundCloseSpy = sinon.spy(inbound, 'close') + const outboundCloseSpy = sinon.spy(outbound, 'close') + await Promise.all(connections.map(async conn => await conn.close())) + expect(inboundCloseSpy.callCount).to.equal(1) + expect(outboundCloseSpy.callCount).to.equal(1) + }) + + it('should call connection handlers', async () => { + const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) + const localConnectionEventReceived = pDefer() + const localConnectionEndEventReceived = pDefer() + const remoteConnectionEventReceived = pDefer() + const remoteConnectionEndEventReceived = pDefer() + + localUpgrader.addEventListener('connection', () => { + localConnectionEventReceived.resolve() + }) + localUpgrader.addEventListener('connectionEnd', () => { + localConnectionEndEventReceived.resolve() + }) + remoteUpgrader.addEventListener('connection', () => { + remoteConnectionEventReceived.resolve() + }) + remoteUpgrader.addEventListener('connectionEnd', () => { + remoteConnectionEndEventReceived.resolve() + }) + + // Verify onConnection is called with the connection + const connections = await Promise.all([ + localUpgrader.upgradeOutbound(outbound), + remoteUpgrader.upgradeInbound(inbound) + ]) + expect(connections).to.have.length(2) + + await Promise.all([ + localConnectionEventReceived.promise, + remoteConnectionEventReceived.promise + ]) + + // Verify onConnectionEnd is called with the connection + await Promise.all(connections.map(async conn => await conn.close())) + + await Promise.all([ + localConnectionEndEventReceived.promise, + remoteConnectionEndEventReceived.promise + ]) + }) + + it('should fail to create a stream for an unsupported protocol', async () => { + const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) + + const connections = await Promise.all([ + localUpgrader.upgradeOutbound(outbound), + 
remoteUpgrader.upgradeInbound(inbound) + ]) + + expect(connections).to.have.length(2) + + const results = await pSettle([ + connections[0].newStream('/unsupported/1.0.0'), + connections[1].newStream('/unsupported/1.0.0') + ]) + expect(results).to.have.length(2) + results.forEach(result => { + expect(result).to.have.property('isRejected', true) + expect(result).to.have.nested.property('reason.code', codes.ERR_UNSUPPORTED_PROTOCOL) + }) + }) +}) + +describe('libp2p.upgrader', () => { + let peers: PeerId[] + let libp2p: Libp2pNode + let remoteLibp2p: Libp2pNode + + before(async () => { + peers = await Promise.all([ + createFromJSON(Peers[0]), + createFromJSON(Peers[1]) + ]) + }) + + afterEach(async () => { + sinon.restore() + + if (libp2p != null) { + await libp2p.stop() + } + + if (remoteLibp2p != null) { + await remoteLibp2p.stop() + } + }) + + it('should create an Upgrader', async () => { + libp2p = await createLibp2pNode({ + peerId: peers[0], + transports: [ + new WebSockets() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ], + connectionProtector: new PreSharedKeyConnectionProtector({ + psk: uint8ArrayFromString(swarmKey) + }) + }) + + expect(libp2p.components.getUpgrader()).to.exist() + expect(libp2p.components.getConnectionProtector()).to.exist() + }) + + it('should return muxed streams', async () => { + const remotePeer = peers[1] + libp2p = await createLibp2pNode({ + peerId: peers[0], + transports: [ + new WebSockets() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + await libp2p.start() + const echoHandler = () => {} + await libp2p.handle(['/echo/1.0.0'], echoHandler) + + remoteLibp2p = await createLibp2pNode({ + peerId: remotePeer, + transports: [ + new WebSockets() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + await remoteLibp2p.start() + await remoteLibp2p.handle('/echo/1.0.0', echoHandler) + + const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) + const [localConnection] = await Promise.all([ + libp2p.components.getUpgrader().upgradeOutbound(outbound), + remoteLibp2p.components.getUpgrader().upgradeInbound(inbound) + ]) + const remoteLibp2pUpgraderOnStreamSpy = sinon.spy(remoteLibp2p.components.getUpgrader() as DefaultUpgrader, '_onStream') + + const { stream } = await localConnection.newStream(['/echo/1.0.0']) + expect(stream).to.include.keys(['id', 'close', 'reset', 'timeline']) + + const [arg0] = remoteLibp2pUpgraderOnStreamSpy.getCall(0).args + expect(arg0.stream).to.include.keys(['id', 'close', 'reset', 'timeline']) + }) + + it('should emit connect and disconnect events', async () => { + const remotePeer = peers[1] + libp2p = await createLibp2pNode({ + peerId: peers[0], + transports: [ + new WebSockets() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + await libp2p.start() + + remoteLibp2p = await createLibp2pNode({ + peerId: remotePeer, + transports: [ + new WebSockets() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + NOISE + ] + }) + await remoteLibp2p.start() + + const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer }) + + // Spy on emit for easy verification + const connectionManagerDispatchEventSpy = sinon.spy(libp2p.components.getConnectionManager(), 'dispatchEvent') + + // Upgrade and check the connect event + const connections = await Promise.all([ + libp2p.components.getUpgrader().upgradeOutbound(outbound), + 
remoteLibp2p.components.getUpgrader().upgradeInbound(inbound) + ]) + expect(connectionManagerDispatchEventSpy.callCount).to.equal(1) + + let [event] = connectionManagerDispatchEventSpy.getCall(0).args + expect(event).to.have.property('type', 'peer:connect') + // @ts-expect-error detail is only on CustomEvent type + expect(remotePeer.equals(event.detail.remotePeer)).to.equal(true) + + // Close and check the disconnect event + await Promise.all(connections.map(async conn => await conn.close())) + expect(connectionManagerDispatchEventSpy.callCount).to.equal(2) + ;([event] = connectionManagerDispatchEventSpy.getCall(1).args) + expect(event).to.have.property('type', 'peer:disconnect') + // @ts-expect-error detail is only on CustomEvent type + expect(remotePeer.equals(event.detail.remotePeer)).to.equal(true) + }) +}) diff --git a/test/utils/base-options.browser.js b/test/utils/base-options.browser.js deleted file mode 100644 index 1d8b5941..00000000 --- a/test/utils/base-options.browser.js +++ /dev/null @@ -1,29 +0,0 @@ -'use strict' - -const Transport = require('libp2p-websockets') -const filters = require('libp2p-websockets/src/filters') -const Muxer = require('libp2p-mplex') -const { NOISE: Crypto } = require('@chainsafe/libp2p-noise') - -const transportKey = Transport.prototype[Symbol.toStringTag] - -module.exports = { - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - }, - config: { - relay: { - enabled: true, - hop: { - enabled: false - } - }, - transport: { - [transportKey]: { - filter: filters.all - } - } - } -} diff --git a/test/utils/base-options.browser.ts b/test/utils/base-options.browser.ts new file mode 100644 index 00000000..0fb4bc9a --- /dev/null +++ b/test/utils/base-options.browser.ts @@ -0,0 +1,31 @@ + +import { WebSockets } from '@libp2p/websockets' +import * as filters from '@libp2p/websockets/filters' +import { Mplex } from '@libp2p/mplex' +import { Plaintext } from '../../src/insecure/index.js' +import type { Libp2pOptions } from '../../src' +import mergeOptions from 'merge-options' + +export function createBaseOptions (overrides?: Libp2pOptions): Libp2pOptions { + const options: Libp2pOptions = { + transports: [ + new WebSockets({ + filter: filters.all + }) + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Plaintext() + ], + relay: { + enabled: false, + hop: { + enabled: false + } + } + } + + return mergeOptions(options, overrides) +} diff --git a/test/utils/base-options.js b/test/utils/base-options.js deleted file mode 100644 index e403ff0f..00000000 --- a/test/utils/base-options.js +++ /dev/null @@ -1,24 +0,0 @@ -'use strict' - -const Transport = require('libp2p-tcp') -const Muxer = require('libp2p-mplex') -const { NOISE: Crypto } = require('@chainsafe/libp2p-noise') - -module.exports = { - modules: { - transport: [Transport], - streamMuxer: [Muxer], - connEncryption: [Crypto] - }, - config: { - relay: { - enabled: true, - hop: { - enabled: false - } - }, - nat: { - enabled: false - } - } -} diff --git a/test/utils/base-options.ts b/test/utils/base-options.ts new file mode 100644 index 00000000..610bdb22 --- /dev/null +++ b/test/utils/base-options.ts @@ -0,0 +1,30 @@ +import { TCP } from '@libp2p/tcp' +import { Mplex } from '@libp2p/mplex' +import { Plaintext } from '../../src/insecure/index.js' +import type { Libp2pOptions } from '../../src' +import mergeOptions from 'merge-options' + +export function createBaseOptions (...overrides: Libp2pOptions[]): Libp2pOptions { + const options: Libp2pOptions = { + 
transports: [ + new TCP() + ], + streamMuxers: [ + new Mplex() + ], + connectionEncryption: [ + new Plaintext() + ], + relay: { + enabled: true, + hop: { + enabled: false + } + }, + nat: { + enabled: false + } + } + + return mergeOptions(options, ...overrides) +} diff --git a/test/utils/creators/peer.js b/test/utils/creators/peer.js deleted file mode 100644 index 63b943f7..00000000 --- a/test/utils/creators/peer.js +++ /dev/null @@ -1,72 +0,0 @@ -'use strict' - -const pTimes = require('p-times') - -const { Multiaddr } = require('multiaddr') -const PeerId = require('peer-id') - -const Libp2p = require('../../../src') -const Peers = require('../../fixtures/peers') -const defaultOptions = require('../base-options.browser') - -const listenAddr = new Multiaddr('/ip4/127.0.0.1/tcp/0') - -/** - * Create libp2p nodes. - * - * @param {Object} [properties] - * @param {Object} [properties.config] - * @param {number} [properties.number] - number of peers (default: 1). - * @param {boolean} [properties.fixture] - use fixture for peer-id generation (default: true) - * @param {boolean} [properties.started] - nodes should start (default: true) - * @param {boolean} [properties.populateAddressBooks] - nodes addressBooks should be populated with other peers (default: true) - * @returns {Promise>} - */ -async function createPeer ({ number = 1, fixture = true, started = true, populateAddressBooks = true, config = {} } = {}) { - const peerIds = await createPeerId({ number, fixture }) - - const addresses = started ? { listen: [listenAddr] } : {} - const peers = await pTimes(number, (i) => Libp2p.create({ - peerId: peerIds[i], - addresses, - ...defaultOptions, - ...config - })) - - if (started) { - await Promise.all(peers.map((p) => p.start())) - - populateAddressBooks && await _populateAddressBooks(peers) - } - - return peers -} - -async function _populateAddressBooks (peers) { - for (let i = 0; i < peers.length; i++) { - for (let j = 0; j < peers.length; j++) { - if (i !== j) { - await peers[i].peerStore.addressBook.set(peers[j].peerId, peers[j].multiaddrs) - } - } - } -} - -/** - * Create Peer-ids. - * - * @param {Object} [properties] - * @param {number} [properties.number] - number of peers (default: 1). - * @param {boolean} [properties.fixture] - use fixture for peer-id generation (default: true) - * @param {PeerId.CreateOptions} [properties.opts] - * @returns {Promise>} - */ -function createPeerId ({ number = 1, fixture = true, opts = {} } = {}) { - return pTimes(number, (i) => fixture - ? 
PeerId.createFromJSON(Peers[i]) - : PeerId.create(opts) - ) -} - -module.exports.createPeer = createPeer -module.exports.createPeerId = createPeerId diff --git a/test/utils/creators/peer.ts b/test/utils/creators/peer.ts new file mode 100644 index 00000000..e010c0e6 --- /dev/null +++ b/test/utils/creators/peer.ts @@ -0,0 +1,104 @@ +import { Multiaddr } from '@multiformats/multiaddr' +import Peers from '../../fixtures/peers.js' +import { createBaseOptions } from '../base-options.browser.js' +import { createEd25519PeerId, createFromJSON, createRSAPeerId } from '@libp2p/peer-id-factory' +import { createLibp2pNode, Libp2pNode } from '../../../src/libp2p.js' +import type { AddressesConfig, Libp2pOptions } from '../../../src/index.js' +import type { PeerId } from '@libp2p/interfaces/peer-id' + +const listenAddr = new Multiaddr('/ip4/127.0.0.1/tcp/0') + +export interface CreatePeerOptions { + /** + * number of peers (default: 1) + */ + number?: number + + /** + * fixture index for peer-id generation + */ + fixture?: number + + /** + * nodes should start (default: true) + */ + started?: boolean + + config?: Libp2pOptions +} + +/** + * Create libp2p nodes. + */ +export async function createNode (options: CreatePeerOptions = {}): Promise { + const started = options.started ?? true + const config = options.config ?? {} + const peerId = await createPeerId({ fixture: options.fixture }) + const addresses: AddressesConfig = started + ? { + listen: [listenAddr.toString()], + announce: [], + noAnnounce: [], + announceFilter: (addrs) => addrs + } + : { + listen: [], + announce: [], + noAnnounce: [], + announceFilter: (addrs) => addrs + } + const peer = await createLibp2pNode(createBaseOptions({ + peerId, + addresses, + ...config + })) + + if (started) { + await peer.start() + } + + return peer +} + +export async function populateAddressBooks (peers: Libp2pNode[]) { + for (let i = 0; i < peers.length; i++) { + for (let j = 0; j < peers.length; j++) { + if (i !== j) { + await peers[i].components.getPeerStore().addressBook.set(peers[j].peerId, peers[j].components.getAddressManager().getAddresses()) + } + } + } +} + +export interface CreatePeerIdOptions { + /** + * number of peers (default: 1) + */ + number?: number + + /** + * fixture index for peer-id generation (default: 0) + */ + fixture?: number + + /** + * Options to pass to the PeerId constructor + */ + opts?: { + type?: 'rsa' | 'ed25519' + bits?: number + } +} + +/** + * Create Peer-ids + */ +export async function createPeerId (options: CreatePeerIdOptions = {}): Promise { + const opts = options.opts ?? {} + + if (options.fixture == null) { + return opts.type === 'rsa' ? await createRSAPeerId({ bits: opts.bits ?? 
512 }) : await createEd25519PeerId() + } + + return await createFromJSON(Peers[options.fixture]) +} diff --git a/test/utils/mock-connection-gater.js b/test/utils/mock-connection-gater.js deleted file mode 100644 index b1081f49..00000000 --- a/test/utils/mock-connection-gater.js +++ /dev/null @@ -1,19 +0,0 @@ -'use strict' - -function mockConnectionGater () { - return { - denyDialPeer: async () => Promise.resolve(false), - denyDialMultiaddr: async () => Promise.resolve(false), - denyInboundConnection: async () => Promise.resolve(false), - denyOutboundConnection: async () => Promise.resolve(false), - denyInboundEncryptedConnection: async () => Promise.resolve(false), - denyOutboundEncryptedConnection: async () => Promise.resolve(false), - denyInboundUpgradedConnection: async () => Promise.resolve(false), - denyOutboundUpgradedConnection: async () => Promise.resolve(false), - filterMultiaddrForPeer: async () => Promise.resolve(true) - } -} - -module.exports = { - mockConnectionGater -} diff --git a/test/utils/mockConnection.js b/test/utils/mockConnection.js deleted file mode 100644 index 55c8d606..00000000 --- a/test/utils/mockConnection.js +++ /dev/null @@ -1,155 +0,0 @@ -'use strict' - -const pipe = require('it-pipe') -const { Connection } = require('libp2p-interfaces/src/connection') -const { Multiaddr } = require('multiaddr') -const Muxer = require('libp2p-mplex') -const Multistream = require('multistream-select') -const pair = require('it-pair') -const errCode = require('err-code') -const { codes } = require('../../src/errors') - -const mockMultiaddrConnPair = require('./mockMultiaddrConn') -const peerUtils = require('./creators/peer') - -module.exports = async (properties = {}) => { - const localAddr = new Multiaddr('/ip4/127.0.0.1/tcp/8080') - const remoteAddr = new Multiaddr('/ip4/127.0.0.1/tcp/8081') - - const [localPeer, remotePeer] = await peerUtils.createPeerId({ number: 2 }) - const openStreams = [] - let streamId = 0 - - return new Connection({ - localPeer: localPeer, - remotePeer: remotePeer, - localAddr, - remoteAddr, - stat: { - timeline: { - open: Date.now() - 10, - upgraded: Date.now() - }, - direction: 'outbound', - encryption: '/noise', - multiplexer: '/mplex/6.7.0' - }, - newStream: (protocols) => { - const id = streamId++ - const stream = pair() - - stream.close = () => stream.sink([]) - stream.id = id - - openStreams.push(stream) - - return { - stream, - protocol: protocols[0] - } - }, - close: async () => { }, - getStreams: () => openStreams, - ...properties - }) -} - -/** - * Creates a full connection pair, without the transport or encryption - * - * @param {object} options - * @param {Multiaddr[]} options.addrs - Should contain two addresses for the local and remote peer respectively - * @param {Array} options.peers - Array containing local and remote peer ids - * @param {Map} options.protocols - The protocols the connections should support - * @returns {{inbound:Connection, outbound:Connection}} - */ -module.exports.pair = function connectionPair ({ addrs, peers, protocols }) { - const [localPeer, remotePeer] = peers - - const { - inbound: inboundMaConn, - outbound: outboundMaConn - } = mockMultiaddrConnPair({ addrs, remotePeer }) - - const inbound = createConnection({ - direction: 'inbound', - maConn: inboundMaConn, - protocols, - // Inbound connection peers are reversed - localPeer: remotePeer, - remotePeer: localPeer - }) - const outbound = createConnection({ - direction: 'outbound', - maConn: outboundMaConn, - protocols, - localPeer, - remotePeer - }) - - 
return { inbound, outbound } -} - -function createConnection ({ - direction, - maConn, - localPeer, - remotePeer, - protocols -}) { - // Create the muxer - const muxer = new Muxer({ - // Run anytime a remote stream is created - onStream: async muxedStream => { - const mss = new Multistream.Listener(muxedStream) - try { - const { stream, protocol } = await mss.handle(Array.from(protocols.keys())) - connection.addStream(stream, protocol) - // Need to be able to notify a peer of this this._onStream({ connection, stream, protocol }) - const handler = protocols.get(protocol) - handler({ connection, stream, protocol }) - } catch (/** @type {any} */ err) { - // Do nothing - } - }, - // Run anytime a stream closes - onStreamEnd: muxedStream => { - connection.removeStream(muxedStream.id) - } - }) - - const newStream = async protocols => { - const muxedStream = muxer.newStream() - const mss = new Multistream.Dialer(muxedStream) - try { - const { stream, protocol } = await mss.select(protocols) - return { stream: { ...muxedStream, ...stream }, protocol } - } catch (/** @type {any} */ err) { - throw errCode(err, codes.ERR_UNSUPPORTED_PROTOCOL) - } - } - - // Pipe all data through the muxer - pipe(maConn, muxer, maConn) - - maConn.timeline.upgraded = Date.now() - - // Create the connection - const connection = new Connection({ - localAddr: maConn.localAddr, - remoteAddr: maConn.remoteAddr, - localPeer: localPeer, - remotePeer: remotePeer, - stat: { - direction, - timeline: maConn.timeline, - multiplexer: Muxer.multicodec, - encryption: 'N/A' - }, - newStream, - getStreams: () => muxer.streams, - close: err => maConn.close(err) - }) - - return connection -} diff --git a/test/utils/mockCrypto.js b/test/utils/mockCrypto.js deleted file mode 100644 index 6a5055c6..00000000 --- a/test/utils/mockCrypto.js +++ /dev/null @@ -1,24 +0,0 @@ -'use strict' - -const PeerId = require('peer-id') -const Peers = require('../fixtures/peers') - -module.exports = { - protocol: '/insecure', - secureInbound: (localPeer, stream) => { - return { - conn: stream, - remotePeer: localPeer - } - }, - secureOutbound: async (localPeer, stream, remotePeer) => { - // Crypto should always return a remotePeer - if (!remotePeer) { - remotePeer = await PeerId.createFromJSON(Peers[0]) - } - return { - conn: stream, - remotePeer: remotePeer - } - } -} diff --git a/test/utils/mockMultiaddrConn.js b/test/utils/mockMultiaddrConn.js deleted file mode 100644 index 26482dd3..00000000 --- a/test/utils/mockMultiaddrConn.js +++ /dev/null @@ -1,44 +0,0 @@ -'use strict' - -const duplexPair = require('it-pair/duplex') -const abortable = require('abortable-iterator') - -/** - * Returns both sides of a mocked MultiaddrConnection - * - * @param {object} options - * @param {Multiaddr[]} options.addrs - Should contain two addresses for the local and remote peer - * @param {PeerId} options.remotePeer - The peer that is being "dialed" - * @returns {{inbound:MultiaddrConnection, outbound:MultiaddrConnection}} - */ -module.exports = function mockMultiaddrConnPair ({ addrs, remotePeer }) { - const controller = new AbortController() - const [localAddr, remoteAddr] = addrs - - const [inbound, outbound] = duplexPair() - outbound.localAddr = localAddr - outbound.remoteAddr = remoteAddr.encapsulate(`/p2p/${remotePeer.toB58String()}`) - outbound.timeline = { - open: Date.now() - } - outbound.close = () => { - outbound.timeline.close = Date.now() - controller.abort() - } - - inbound.localAddr = remoteAddr - inbound.remoteAddr = localAddr - inbound.timeline = { - open: 
Date.now() - } - inbound.close = () => { - inbound.timeline.close = Date.now() - controller.abort() - } - - // Make the sources abortable so we can close them easily - inbound.source = abortable(inbound.source, controller.signal) - outbound.source = abortable(outbound.source, controller.signal) - - return { inbound, outbound } -} diff --git a/test/utils/mockUpgrader.js b/test/utils/mockUpgrader.js deleted file mode 100644 index 5e72e2de..00000000 --- a/test/utils/mockUpgrader.js +++ /dev/null @@ -1,6 +0,0 @@ -'use strict' - -module.exports = { - upgradeInbound: (maConn) => maConn, - upgradeOutbound: (maConn) => maConn -} diff --git a/tsconfig.json b/tsconfig.json index eafbf8ca..63499404 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -1,18 +1,19 @@ { "extends": "aegir/src/config/tsconfig.aegir.json", "compilerOptions": { - "outDir": "dist" + "outDir": "dist", + "emitDeclarationOnly": false, + "module": "ES2020", + "lib": ["ES2021", "ES2021.Promise", "ES2021.String", "ES2020.BigInt", "DOM", "DOM.Iterable"] }, "include": [ - "src" + "src", + "test" ], "exclude": [ - "src/circuit/protocol/index.js", // exclude generated file - "src/fetch/proto.js", // exclude generated file - "src/identify/message.js", // exclude generated file - "src/insecure/proto.js", // exclude generated file - "src/peer-store/pb/peer.js", // exclude generated file - "src/record/peer-record/peer-record.js", // exclude generated file - "src/record/envelope/envelope.js" // exclude generated file + "src/circuit/pb/index.js", + "src/fetch/pb/proto.js", + "src/identify/pb/message.js", + "src/insecure/pb/proto.js" ] -} \ No newline at end of file +} From 8cca8e4bfc6a339e58b5a5efa8a84fd891aa08ee Mon Sep 17 00:00:00 2001 From: Alex Potsides Date: Mon, 28 Mar 2022 16:09:43 +0100 Subject: [PATCH 3/5] fix: update deps (#1181) Update to released versions of interop suite and webrtc-direct --- examples/webrtc-direct/package.json | 2 +- package.json | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/webrtc-direct/package.json b/examples/webrtc-direct/package.json index 950112b1..03a33b62 100644 --- a/examples/webrtc-direct/package.json +++ b/examples/webrtc-direct/package.json @@ -12,7 +12,7 @@ }, "license": "ISC", "dependencies": { - "@achingbrain/webrtc-direct": "^0.7.2", + "@libp2p/webrtc-direct": "^1.0.0", "@chainsafe/libp2p-noise": "^6.0.1", "@libp2p/bootstrap": "^1.0.1", "@libp2p/mplex": "^1.0.2", diff --git a/package.json b/package.json index 166aa402..1a220706 100644 --- a/package.json +++ b/package.json @@ -156,13 +156,13 @@ "@achingbrain/libp2p-gossipsub": "^0.13.5", "@chainsafe/libp2p-noise": "^6.0.1", "@libp2p/bootstrap": "^1.0.2", - "@libp2p/daemon-client": "^0.0.2", - "@libp2p/daemon-server": "^0.0.2", + "@libp2p/daemon-client": "^1.0.0", + "@libp2p/daemon-server": "^1.0.0", "@libp2p/delegated-content-routing": "^1.0.2", "@libp2p/delegated-peer-routing": "^1.0.2", "@libp2p/floodsub": "^1.0.2", "@libp2p/interface-compliance-tests": "^1.1.20", - "@libp2p/interop": "^0.0.3", + "@libp2p/interop": "^1.0.0", "@libp2p/kad-dht": "^1.0.3", "@libp2p/mdns": "^1.0.3", "@libp2p/mplex": "^1.0.1", From cc60cfde1a0907ca68f658f6de5362a708189222 Mon Sep 17 00:00:00 2001 From: Alex Potsides Date: Tue, 29 Mar 2022 15:39:50 +0100 Subject: [PATCH 4/5] fix: add transport manager to exports map and fix docs (#1182) Addresses PR comments from #1172 - fixes syntax of examples in docs, adds the transport manager to the exports map and renames fault tolerance enum for consistency. 
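For reviewers, a minimal usage sketch of the renamed enum and the new export map entry, adapted from the updated docs and tests further down in this diff. The WebSockets/NOISE combination is illustrative only; any configured transports/encrypters apply equally:

```js
// Sketch: dial-only node that tolerates unserviceable listen addresses.
import { createLibp2p } from 'libp2p'
import { FaultTolerance } from 'libp2p/transport-manager' // new export map entry
import { WebSockets } from '@libp2p/websockets'
import { NOISE } from '@chainsafe/libp2p-noise'

const node = await createLibp2p({
  addresses: {
    listen: ['/ip4/127.0.0.1/tcp/0'] // not serviceable by the WebSockets transport
  },
  transports: [new WebSockets()],
  connectionEncryption: [NOISE],
  transportManager: {
    // previously FAULT_TOLERANCE.NO_FATAL: do not abort start-up when none of
    // the listen addresses can be serviced by the configured transports
    faultTolerance: FaultTolerance.NO_FATAL
  }
})

await node.start()
```

With the default `FaultTolerance.FATAL_ALL`, the same configuration rejects `start()` with `ERR_NO_VALID_ADDRESSES`, as exercised by the transport-manager tests touched below.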
--- doc/CONFIGURATION.md | 7 +++---- doc/STREAMING_ITERABLES.md | 2 +- doc/migrations/v0.26-v0.27.md | 8 ++++---- package.json | 3 +++ src/config.ts | 4 ++-- src/index.ts | 4 ++-- src/transport-manager.ts | 12 ++++++------ test/nat-manager/nat-manager.node.ts | 4 ++-- test/transports/transport-manager.spec.ts | 6 +++--- 9 files changed, 26 insertions(+), 24 deletions(-) diff --git a/doc/CONFIGURATION.md b/doc/CONFIGURATION.md index 41cd0e7a..df5e689a 100644 --- a/doc/CONFIGURATION.md +++ b/doc/CONFIGURATION.md @@ -246,8 +246,8 @@ import { GossipSub } from 'libp2p-gossipsub' const node = await createLibp2p({ transports: [ - TCP, - new WS() // It can take instances too! + new TCP(), + new WS() ], streamMuxers: [new Mplex()], connectionEncryption: [new Noise()], @@ -697,8 +697,7 @@ import { createLibp2p } from 'libp2p' import { TCP } from '@libp2p/tcp' import { Mplex } from '@libp2p/mplex' import { Noise } from '@chainsafe/libp2p-noise' - -const { FaultTolerance } from 'libp2p/src/transport-manager') +import { FaultTolerance } from 'libp2p/transport-manager' const node = await createLibp2p({ transports: [new TCP()], diff --git a/doc/STREAMING_ITERABLES.md b/doc/STREAMING_ITERABLES.md index fe6a349d..2a374c3b 100644 --- a/doc/STREAMING_ITERABLES.md +++ b/doc/STREAMING_ITERABLES.md @@ -22,7 +22,7 @@ Sometimes you may need to wrap an existing duplex stream in order to perform incoming and outgoing [transforms](#transform) on data. This type of wrapping is commonly used in stream encryption/decryption. Using [it-pair][it-pair] and [it-pipe][it-pipe], we can do this rather easily, given an existing [duplex iterable](#duplex). ```js -const duplexPair from 'it-pair/duplex') +import { duplexPair } from 'it-pair/duplex' import { pipe } from 'it-pipe' // Wrapper is what we will write and read from diff --git a/doc/migrations/v0.26-v0.27.md b/doc/migrations/v0.26-v0.27.md index 67dd44c6..db37e6ea 100644 --- a/doc/migrations/v0.26-v0.27.md +++ b/doc/migrations/v0.26-v0.27.md @@ -49,13 +49,13 @@ Protocol registration is very similar to how it previously was, however, the han **Before** ```js -const pull from 'pull-stream') +const pull = require('pull-stream') libp2p.handle('/echo/1.0.0', (protocol, conn) => pull(conn, conn)) ``` **After** ```js -const pipe from 'it-pipe') +const pipe = require('it-pipe') libp2p.handle(['/echo/1.0.0'], ({ protocol, stream }) => pipe(stream, stream)) ``` @@ -65,7 +65,7 @@ libp2p.handle(['/echo/1.0.0'], ({ protocol, stream }) => pipe(stream, stream)) **Before** ```js -const pull from 'pull-stream') +const pull = require('pull-stream') libp2p.dialProtocol(peerInfo, '/echo/1.0.0', (err, conn) => { if (err) { throw err } pull( @@ -82,7 +82,7 @@ libp2p.dialProtocol(peerInfo, '/echo/1.0.0', (err, conn) => { **After** ```js -const pipe from 'it-pipe') +const pipe = require('it-pipe') const { protocol, stream } = await libp2p.dialProtocol(peerInfo, '/echo/1.0.0') await pipe( ['hey'], diff --git a/package.json b/package.json index 1a220706..e1d74575 100644 --- a/package.json +++ b/package.json @@ -59,6 +59,9 @@ }, "./pnet/generate": { "import": "./dist/src/pnet/key-generator.js" + }, + "./transport-manager": { + "import": "./dist/src/transport-manager.js" } }, "eslintConfig": { diff --git a/src/config.ts b/src/config.ts index 2720c8a9..7f77235c 100644 --- a/src/config.ts +++ b/src/config.ts @@ -4,7 +4,7 @@ import * as Constants from './constants.js' import { AGENT_VERSION } from './identify/consts.js' import * as RelayConstants from './circuit/constants.js' import { 
publicAddressesFirst } from '@libp2p/utils/address-sort' -import { FAULT_TOLERANCE } from './transport-manager.js' +import { FaultTolerance } from './transport-manager.js' import type { Multiaddr } from '@multiformats/multiaddr' import type { Libp2pInit } from './index.js' import { codes, messages } from './errors.js' @@ -26,7 +26,7 @@ const DefaultConfig: Partial = { }, connectionGater: {}, transportManager: { - faultTolerance: FAULT_TOLERANCE.FATAL_ALL + faultTolerance: FaultTolerance.FATAL_ALL }, dialer: { maxParallelDials: Constants.MAX_PARALLEL_DIALS, diff --git a/src/index.ts b/src/index.ts index 0f063df9..1cec1521 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,7 +1,7 @@ import { createLibp2pNode } from './libp2p.js' import type { AbortOptions, EventEmitter, RecursivePartial, Startable } from '@libp2p/interfaces' import type { Multiaddr } from '@multiformats/multiaddr' -import type { FAULT_TOLERANCE } from './transport-manager.js' +import type { FaultTolerance } from './transport-manager.js' import type { HostProperties } from './identify/index.js' import type { DualDHT } from '@libp2p/interfaces/dht' import type { Datastore } from 'interface-datastore' @@ -95,7 +95,7 @@ export interface ConnectionManagerConfig { } export interface TransportManagerConfig { - faultTolerance?: FAULT_TOLERANCE + faultTolerance?: FaultTolerance } export interface PeerStoreConfig { diff --git a/src/transport-manager.ts b/src/transport-manager.ts index d6e43642..2eab5cb7 100644 --- a/src/transport-manager.ts +++ b/src/transport-manager.ts @@ -12,14 +12,14 @@ import { trackedMap } from '@libp2p/tracked-map' const log = logger('libp2p:transports') export interface TransportManagerInit { - faultTolerance?: FAULT_TOLERANCE + faultTolerance?: FaultTolerance } export class DefaultTransportManager extends EventEmitter implements TransportManager, Startable { private readonly components: Components private readonly transports: Map private readonly listeners: Map - private readonly faultTolerance: FAULT_TOLERANCE + private readonly faultTolerance: FaultTolerance private started: boolean constructor (components: Components, init: TransportManagerInit = {}) { @@ -33,7 +33,7 @@ export class DefaultTransportManager extends EventEmitter r.isFulfilled) - if ((isListening == null) && this.faultTolerance !== FAULT_TOLERANCE.NO_FATAL) { + if ((isListening == null) && this.faultTolerance !== FaultTolerance.NO_FATAL) { throw errCode(new Error(`Transport (${key}) could not listen on any available address`), codes.ERR_NO_VALID_ADDRESSES) } } @@ -224,7 +224,7 @@ export class DefaultTransportManager extends EventEmitter { }) components.setAddressManager(new DefaultAddressManager(components, { listen: addrs })) components.setTransportManager(new DefaultTransportManager(components, { - faultTolerance: FAULT_TOLERANCE.NO_FATAL + faultTolerance: FaultTolerance.NO_FATAL })) const natManager = new NatManager(components, { diff --git a/test/transports/transport-manager.spec.ts b/test/transports/transport-manager.spec.ts index 93721fc5..0edb29d3 100644 --- a/test/transports/transport-manager.spec.ts +++ b/test/transports/transport-manager.spec.ts @@ -7,7 +7,7 @@ import { WebSockets } from '@libp2p/websockets' import * as filters from '@libp2p/websockets/filters' import { NOISE } from '@chainsafe/libp2p-noise' import { DefaultAddressManager } from '../../src/address-manager/index.js' -import { DefaultTransportManager, FAULT_TOLERANCE } from '../../src/transport-manager.js' +import { DefaultTransportManager, FaultTolerance } from 
'../../src/transport-manager.js' import { mockUpgrader } from '@libp2p/interface-compliance-tests/mocks' import { MULTIADDRS_WEBSOCKETS } from '../fixtures/browser.js' import { codes as ErrorCodes } from '../../src/errors.js' @@ -123,7 +123,7 @@ describe('libp2p.transportManager (dial only)', () => { listen: ['/ip4/127.0.0.1/tcp/0'] }, transportManager: { - faultTolerance: FAULT_TOLERANCE.NO_FATAL + faultTolerance: FaultTolerance.NO_FATAL }, transports: [ new WebSockets() @@ -143,7 +143,7 @@ describe('libp2p.transportManager (dial only)', () => { listen: ['/ip4/127.0.0.1/tcp/12345/p2p/QmWDn2LY8nannvSWJzruUYoLZ4vV83vfCBwd8DipvdgQc3/p2p-circuit'] }, transportManager: { - faultTolerance: FAULT_TOLERANCE.NO_FATAL + faultTolerance: FaultTolerance.NO_FATAL }, transports: [ new WebSockets() From 64bfcee5093b368df0b381f78afc2ddff3d339a9 Mon Sep 17 00:00:00 2001 From: Alex Potsides Date: Sat, 2 Apr 2022 10:11:01 +0100 Subject: [PATCH 5/5] fix: expose metrics and registrar, use dht for peer discovery (#1183) Exposes fields used by bitswap and also uses dht for peer discovery which was missed. --- src/index.ts | 6 ++++-- src/libp2p.ts | 14 +++++++++++--- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/index.ts b/src/index.ts index 1cec1521..d64bb224 100644 --- a/src/index.ts +++ b/src/index.ts @@ -16,8 +16,8 @@ import type { ConnectionEncrypter } from '@libp2p/interfaces/connection-encrypte import type { PeerRouting } from '@libp2p/interfaces/peer-routing' import type { ContentRouting } from '@libp2p/interfaces/content-routing' import type { PubSub } from '@libp2p/interfaces/pubsub' -import type { ConnectionManager, StreamHandler } from '@libp2p/interfaces/registrar' -import type { MetricsInit } from '@libp2p/interfaces/metrics' +import type { ConnectionManager, Registrar, StreamHandler } from '@libp2p/interfaces/registrar' +import type { Metrics, MetricsInit } from '@libp2p/interfaces/metrics' import type { PeerInfo } from '@libp2p/interfaces/peer-info' import type { DialerInit } from '@libp2p/interfaces/dialer' import type { KeyChain } from './keychain/index.js' @@ -152,6 +152,8 @@ export interface Libp2p extends Startable, EventEmitter { contentRouting: ContentRouting keychain: KeyChain connectionManager: ConnectionManager + registrar: Registrar + metrics?: Metrics pubsub?: PubSub dht?: DualDHT diff --git a/src/libp2p.ts b/src/libp2p.ts index 142031e1..62f412bf 100644 --- a/src/libp2p.ts +++ b/src/libp2p.ts @@ -32,7 +32,7 @@ import type { Connection } from '@libp2p/interfaces/connection' import type { PeerRouting } from '@libp2p/interfaces/peer-routing' import type { ContentRouting } from '@libp2p/interfaces/content-routing' import type { PubSub } from '@libp2p/interfaces/pubsub' -import type { ConnectionManager, StreamHandler } from '@libp2p/interfaces/registrar' +import type { ConnectionManager, Registrar, StreamHandler } from '@libp2p/interfaces/registrar' import type { PeerInfo } from '@libp2p/interfaces/peer-info' import type { Libp2p, Libp2pEvents, Libp2pInit, Libp2pOptions } from './index.js' import { validateConfig } from './config.js' @@ -43,6 +43,7 @@ import { concat as uint8ArrayConcat } from 'uint8arrays/concat' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' import errCode from 'err-code' import { unmarshalPublicKey } from '@libp2p/crypto/keys' +import type { Metrics } from '@libp2p/interfaces/metrics' const log = logger('libp2p') @@ -59,6 +60,8 @@ export class Libp2pNode extends EventEmitter implements Libp2p { public peerRouting: 
PeerRouting public keychain: KeyChain public connectionManager: ConnectionManager + public registrar: Registrar + public metrics?: Metrics private started: boolean private readonly services: Startable[] @@ -78,7 +81,7 @@ export class Libp2pNode extends EventEmitter implements Libp2p { // Create Metrics if (init.metrics.enabled) { - this.components.setMetrics(this.configureComponent(new DefaultMetrics(init.metrics))) + this.metrics = this.components.setMetrics(this.configureComponent(new DefaultMetrics(init.metrics))) } this.components.setConnectionGater(this.configureComponent({ @@ -117,7 +120,7 @@ export class Libp2pNode extends EventEmitter implements Libp2p { this.connectionManager = this.components.setConnectionManager(this.configureComponent(new DefaultConnectionManager(this.components, init.connectionManager))) // Create the Registrar - this.components.setRegistrar(this.configureComponent(new DefaultRegistrar(this.components))) + this.registrar = this.components.setRegistrar(this.configureComponent(new DefaultRegistrar(this.components))) // Setup the transport manager this.components.setTransportManager(this.configureComponent(new DefaultTransportManager(this.components, init.transportManager))) @@ -180,6 +183,11 @@ export class Libp2pNode extends EventEmitter implements Libp2p { if (this.dht != null) { // add dht to routers peerRouters.push(this.configureComponent(new DHTPeerRouting(this.dht))) + + // use dht for peer discovery + this.dht.addEventListener('peer', (evt) => { + this.onDiscoveryPeer(evt) + }) } this.peerRouting = this.components.setPeerRouting(this.configureComponent(new DefaultPeerRouting(this.components, {