Compare commits

...

18 Commits

Author SHA1 Message Date
ddca720b2a chore: travis cache 2019-11-15 16:03:29 +01:00
1ca2d87287 chore: address review 2019-11-15 15:48:14 +01:00
340edf53e3 chore: address review 2019-11-15 15:15:12 +01:00
4cc9736485 chore: use topology interface 2019-11-14 19:13:45 +01:00
cdfd02306f chore: address review 2019-11-14 15:34:35 +01:00
dcd127b7a5 refactor: pubsub subsystem 2019-11-14 15:34:35 +01:00
751bc00f1b chore: address review 2019-11-14 15:33:12 +01:00
ec5319a50e chore: apply suggestions from code review
Co-Authored-By: Jacob Heun <jacobheun@gmail.com>
2019-11-14 15:33:12 +01:00
dca4727112 feat: peer-store v0 2019-11-14 15:33:12 +01:00
695d71e44f chore: update it-length-prefixed (#476)
fix: decode.fromReader usage
2019-11-13 16:38:07 +01:00
0fc6668321 refactor: async identify and identify push (#473)
* chore: add missing dep

* feat: import from identify push branch

https://github.com/libp2p/js-libp2p-identify/tree/feat/identify-push

* feat: add the connection to stream handlers

* refactor: identify to async/await

* chore: fix lint

* test: add identify tests

* refactor: add identify to the dialer flow

* feat: connect identify to the registrar

* fix: resolve review feedback

* fix: perform identify push when our protocols change
2019-11-07 12:11:50 +01:00
d7c38d3fec feat: registrar (#471)
* feat: peer-store v0

* feat: registrar

* chore: apply suggestions from code review

Co-Authored-By: Jacob Heun <jacobheun@gmail.com>

* chore: address review

* chore: support multiple conns

* chore: address review

* fix: no remote peer from topology on disconnect
2019-11-06 15:47:11 +01:00
53bcd2d745 feat: peer store (#470)
* feat: peer-store v0

* chore: apply suggestions from code review

Co-Authored-By: Jacob Heun <jacobheun@gmail.com>
2019-11-06 15:11:13 +01:00
fba058370a refactor: crypto and pnet (#469)
* feat: add initial plaintext 2 module

* refactor: initial refactor of pnet

* chore: fix lint

* fix: update plaintext api usage

* test: use plaintext for test crypto

* chore: update deps

test: update dialer suite scope

* feat: add connection protection to the upgrader

* refactor: cleanup and lint fix

* chore: remove unnecessary transforms

* chore: temporarily disable bundlesize

* chore: add missing dep

* fix: use it-handshake to prevent overreading

* chore(fix): PR feedback updates

* chore: apply suggestions from code review

Co-Authored-By: Vasco Santos <vasco.santos@moxy.studio>
2019-11-04 14:05:58 +01:00
a23d4d23cb refactor(async): add dialer and upgrader (#462)
* chore(deps): update connection and multistream

* feat: add basic dial support for addresses and peers

* test: automatically require all node test files

* fix: dont catch and log in the wrong place

* test: add direct spec test

fix: improve dial error consistency

* feat: add dial timeouts and concurrency

Queue timeouts will result in aborts of the dials

* chore: fix linting

* test: verify dialer defaults

* feat: add initial upgrader

* fix: add more test coverage and fix bugs

* feat: libp2p creates the upgrader

* feat: hook up handle to the upgrader

* feat: hook up the dialer to libp2p

test: add node dialer libp2p tests

* feat: add connection listeners to upgrader

* feat: emit connect and disconnect events

* chore: use libp2p-interfaces

* fix: address review feedback

* fix: correct import

* refactor: dedupe connection creation code
2019-10-21 16:53:58 +02:00
6ecc9b80c3 docs: add stream wrapping example (#466)
* docs: add duplex wrapping example

docs: add iterable types from @alanshaw's gist

* docs(fix): add feedback fix

Co-Authored-By: Vasco Santos <vasco.santos@moxy.studio>

* docs: clean up based on feedback
2019-10-21 13:36:05 +02:00
cd97abfcc3 refactor(async): update transports subsystem (#461)
* test: remove all tests for a clean slate

The refactor will require a large number of updates to the tests. In order
to ensure we have done a decent deduplication, and have a cleaner suite of tests
we've removed all tests. This will also allow us to more easily see tests
for the refactored systems.

We have a record of the latest test suites in master, so we are not losing any history.

* chore: update tcp and websockets
* chore: remove other transports until they are converted
* chore: use mafmt and multiaddr async versions
* chore: add and fix dependencies
* chore: clean up travis file
* feat: add new transport manager
* docs: add constructor jsdocs
* refactor(config): check that transports exist
This also removes the other logic, it can be added when those subsystems are refactored

* chore(deps): use async peer-id and peer-info
* feat: wire up the transport manager with libp2p
* chore: remove superstruct dep
2019-10-02 13:31:28 +02:00
c1da30bc74 Add streaming iterables guide (#459)
* docs: add streaming iterables guide placeholder

* chore: move peer discovery readme to doc folder

* docs: add link to async refactor issue
2019-09-30 18:20:25 +02:00
146 changed files with 4625 additions and 12751 deletions

.aegir.js

@ -1,130 +1,38 @@
'use strict'
const pull = require('pull-stream')
const WebSocketStarRendezvous = require('libp2p-websocket-star-rendezvous')
const sigServer = require('libp2p-webrtc-star/src/sig-server')
const promisify = require('promisify-es6')
const mplex = require('pull-mplex')
const spdy = require('libp2p-spdy')
const PeerBook = require('peer-book')
const Libp2p = require('./src')
const { MULTIADDRS_WEBSOCKETS } = require('./test/fixtures/browser')
const Peers = require('./test/fixtures/peers')
const PeerId = require('peer-id')
const PeerInfo = require('peer-info')
const path = require('path')
const Switch = require('./src/switch')
const WebSockets = require('libp2p-websockets')
const Node = require('./test/utils/bundle-nodejs.js')
const {
getPeerRelay,
WRTC_RENDEZVOUS_MULTIADDR,
WS_RENDEZVOUS_MULTIADDR
} = require('./test/utils/constants')
let wrtcRendezvous
let wsRendezvous
let node
let peerInfo
let switchA
let switchB
function echo (protocol, conn) { pull(conn, conn) }
function idJSON (id) {
const p = path.join(__dirname, `./test/switch/test-data/id-${id}.json`)
return require(p)
}
function createSwitchA () {
return new Promise((resolve, reject) => {
PeerId.createFromJSON(idJSON(1), (err, id) => {
if (err) { return reject(err) }
const peerA = new PeerInfo(id)
const maA = '/ip4/127.0.0.1/tcp/15337/ws'
peerA.multiaddrs.add(maA)
const sw = new Switch(peerA, new PeerBook())
sw.transport.add('ws', new WebSockets())
sw.start((err) => {
if (err) { return reject(err) }
resolve(sw)
})
})
})
}
function createSwitchB () {
return new Promise((resolve, reject) => {
PeerId.createFromJSON(idJSON(2), (err, id) => {
if (err) { return reject(err) }
const peerB = new PeerInfo(id)
const maB = '/ip4/127.0.0.1/tcp/15347/ws'
peerB.multiaddrs.add(maB)
const sw = new Switch(peerB, new PeerBook())
sw.transport.add('ws', new WebSockets())
sw.connection.addStreamMuxer(mplex)
sw.connection.addStreamMuxer(spdy)
sw.connection.reuse()
sw.handle('/echo/1.0.0', echo)
sw.start((err) => {
if (err) { return reject(err) }
resolve(sw)
})
})
})
}
const Muxer = require('libp2p-mplex')
const Crypto = require('./src/insecure/plaintext')
const pipe = require('it-pipe')
let libp2p
const before = async () => {
[
wrtcRendezvous,
wsRendezvous,
peerInfo,
switchA,
switchB
] = await Promise.all([
sigServer.start({
port: WRTC_RENDEZVOUS_MULTIADDR.nodeAddress().port
// cryptoChallenge: true TODO: needs https://github.com/libp2p/js-libp2p-webrtc-star/issues/128
}),
WebSocketStarRendezvous.start({
port: WS_RENDEZVOUS_MULTIADDR.nodeAddress().port,
refreshPeerListIntervalMS: 1000,
strictMultiaddr: false,
cryptoChallenge: true
}),
getPeerRelay(),
createSwitchA(),
createSwitchB()
])
// Use the last peer
const peerId = await PeerId.createFromJSON(Peers[Peers.length - 1])
const peerInfo = new PeerInfo(peerId)
peerInfo.multiaddrs.add(MULTIADDRS_WEBSOCKETS[0])
node = new Node({
libp2p = new Libp2p({
peerInfo,
config: {
relay: {
enabled: true,
hop: {
enabled: true,
active: true
}
}
modules: {
transport: [WebSockets],
streamMuxer: [Muxer],
connEncryption: [Crypto]
}
})
// Add the echo protocol
libp2p.handle('/echo/1.0.0', ({ stream }) => pipe(stream, stream))
node.handle('/echo/1.0.0', (protocol, conn) => pull(conn, conn))
await node.start()
await libp2p.start()
}
const after = () => {
return Promise.all([
wrtcRendezvous.stop(),
wsRendezvous.stop(),
node.stop(),
promisify(switchA.stop, { context: switchA })(),
promisify(switchB.stop, { context: switchB })()
])
const after = async () => {
await libp2p.stop()
}
module.exports = {

.travis.yml

@ -20,7 +20,7 @@ jobs:
include:
- stage: check
script:
- npx aegir build --bundlesize
# - npx aegir build --bundlesize
- npx aegir dep-check -- -i wrtc -i electron-webrtc
- npm run lint
@ -29,16 +29,14 @@ jobs:
addons:
chrome: stable
script:
- npx aegir test -t browser
- npx aegir test -t webworker
- npx aegir test -t browser -t webworker
- stage: test
name: firefox
addons:
firefox: latest
script:
- npx aegir test -t browser -- --browsers FirefoxHeadless
- npx aegir test -t webworker -- --browsers FirefoxHeadless
- npx aegir test -t browser -t webworker -- --browsers FirefoxHeadless
notifications:
email: false

README.md

@ -211,22 +211,18 @@ class Node extends Libp2p {
**IMPORTANT NOTE**: All the methods listed in the API section that take a callback are also now Promisified. Libp2p is migrating away from callbacks to async/await, and in a future release (that will be announced in advance), callback support will be removed entirely. You can follow progress of the async/await endeavor at https://github.com/ipfs/js-ipfs/issues/1670.
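During the transition both calling styles work for these methods; for example, with `hangUp` (a sketch, assuming an already connected `peerInfo`):

```js
// callback style (will eventually be removed)
libp2p.hangUp(peerInfo, (err) => {
  if (err) throw err
})

// promisified style
await libp2p.hangUp(peerInfo)
```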
#### Create a Node - `Libp2p.createLibp2p(options, callback)`
#### Create a Node - `Libp2p.create(options)`
> Behaves exactly like `new Libp2p(options)`, but doesn't require a PeerInfo. One will be generated instead
```js
const { createLibp2p } = require('libp2p')
createLibp2p(options, (err, libp2p) => {
if (err) throw err
libp2p.start((err) => {
if (err) throw err
})
})
const { create } = require('libp2p')
const libp2p = await create(options)
await libp2p.start()
```
- `options`: Object of libp2p configuration options
- `callback`: Function with signature `function (Error, Libp2p) {}`
#### Create a Node alternative - `new Libp2p(options)`
@ -336,7 +332,7 @@ Required keys in the `options` object:
> Peer has been discovered.
If `autoDial` is `true`, applications should **not** attempt to connect to the peer
unless they are performing a specific action. See [peer discovery and auto dial](./PEER_DISCOVERY.md) for more information.
unless they are performing a specific action. See [peer discovery and auto dial](./doc/PEER_DISCOVERY.md) for more information.
- `peer`: instance of [PeerInfo][]
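A minimal sketch of listening for the event (the handler body is illustrative):

```js
libp2p.on('peer:discovery', (peerInfo) => {
  // peerInfo is an instance of PeerInfo
  console.log('discovered', peerInfo.id.toB58String())
})
```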

doc/STREAMING_ITERABLES.md (new file)

@ -0,0 +1,164 @@
# Iterable Streams
> This document is a guide on how to use Iterable Streams in Libp2p. As a part of the [refactor away from callbacks](https://github.com/ipfs/js-ipfs/issues/1670), we have also moved to using Iterable Streams instead of [pull-streams](https://pull-stream.github.io/). If there are missing usage guides you feel should be added, please submit a PR!
## Table of Contents
- [Iterable Streams](#iterable-streams)
  - [Table of Contents](#table-of-contents)
  - [Usage Guide](#usage-guide)
    - [Transforming Bidirectional Data](#transforming-bidirectional-data)
  - [Iterable Stream Types](#iterable-stream-types)
    - [Source](#source)
    - [Sink](#sink)
    - [Transform](#transform)
    - [Duplex](#duplex)
  - [Iterable Modules](#iterable-modules)
## Usage Guide
### Transforming Bidirectional Data
Sometimes you may need to wrap an existing duplex stream in order to perform incoming and outgoing [transforms](#transform) on data. This type of wrapping is commonly used in stream encryption/decryption. Using [it-pair][it-pair] and [it-pipe][it-pipe], we can do this rather easily, given an existing [duplex iterable](#duplex).
```js
const duplexPair = require('it-pair/duplex')
const pipe = require('it-pipe')

// Wrapper is what we will write and read from
// This gives us two duplex iterables that are internally connected
const [internal, external] = duplexPair()

// Now we can pipe our wrapper to the existing duplex iterable
pipe(
  external, // The external half of the pair interacts with the existing duplex
  outgoingTransform, // A transform iterable to send data through (ie: encrypting)
  existingDuplex, // The original duplex iterable we are wrapping
  incomingTransform, // A transform iterable to read data through (ie: decrypting)
  external
)

// We can now read and write from the other half of our pair
pipe(
  ['some data'],
  internal, // The internal half of the pair is what we will interact with to read/write data
  async (source) => {
    for await (const chunk of source) {
      console.log('Data: %s', chunk.toString())
      // > Data: some data
    }
  }
)
```
## Iterable Stream Types
These types are pulled from [@alanshaw's gist](https://gist.github.com/alanshaw/591dc7dd54e4f99338a347ef568d6ee9) on streaming iterables.
### Source
A "source" is something that can be consumed. It is an iterable object.
```js
const ints = {
  [Symbol.asyncIterator] () {
    let i = 0
    return {
      async next () {
        return { done: false, value: i++ }
      }
    }
  }
}

// or, more succinctly using a generator and for/await:
const ints = (async function * () {
  let i = 0
  while (true) yield i++
})()
```
### Sink
A "sink" is something that consumes (or drains) a source. It is a function that takes a source and iterates over it. It optionally returns a value.
```js
const logger = async source => {
  const it = source[Symbol.asyncIterator]()
  while (true) {
    const { done, value } = await it.next()
    if (done) break
    console.log(value) // prints 0, 1, 2, 3...
  }
}

// or, more succinctly using a generator and for/await:
const logger = async source => {
  for await (const chunk of source) {
    console.log(chunk) // prints 0, 1, 2, 3...
  }
}
```
### Transform
A "transform" is both a sink _and_ a source where the values it consumes and the values that can be consumed from it are connected in some way. It is a function that takes a source and returns a source.
```js
const doubler = source => {
  return {
    [Symbol.asyncIterator] () {
      const it = source[Symbol.asyncIterator]()
      return {
        async next () {
          const { done, value } = await it.next()
          if (done) return { done }
          return { done, value: value * 2 }
        },
        return () {
          return it.return && it.return()
        }
      }
    }
  }
}

// or, more succinctly using a generator and for/await:
const doubler = source => (async function * () {
  for await (const chunk of source) {
    yield chunk * 2
  }
})()
```
### Duplex
A "duplex" is similar to a transform but the values it consumes are not necessarily connected to the values that can be consumed from it. It is an object with two properties, `sink` and `source`.
```js
const duplex = {
  sink: async source => {/* ... */},
  source: { [Symbol.asyncIterator] () {/* ... */} }
}
```
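For illustration only, a loopback duplex can be sketched with [it-pushable][it-pushable] (listed under [Iterable Modules](#iterable-modules) below): whatever is written to its `sink` becomes readable from its `source`.

```js
const pushable = require('it-pushable')

function loopback () {
  const queue = pushable()
  return {
    // drain whatever is written into the internal queue
    sink: async source => {
      for await (const chunk of source) {
        queue.push(chunk)
      }
      queue.end()
    },
    // the queue itself is the async iterable we read from
    source: queue
  }
}
```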
## Iterable Modules
- [it-handshake][it-handshake] Handshakes for binary protocols with iterable streams.
- [it-length-prefixed][it-length-prefixed] Streaming length prefixed buffers with async iterables.
- [it-pair][it-pair] Paired streams that are internally connected.
- [it-pipe][it-pipe] Create a pipeline of iterables. Works with duplex streams.
- [it-pushable][it-pushable] An iterable that you can push values into.
- [it-reader][it-reader] Read an exact number of bytes from a binary, async iterable.
- [streaming-iterables][streaming-iterables] A Swiss army knife for async iterables.
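As a quick sketch of how these modules compose, the snippet below length-prefixes a couple of buffers with it-length-prefixed and decodes them again inside an it-pipe pipeline (module names come from the list above; the rest is illustrative):

```js
const pipe = require('it-pipe')
const lp = require('it-length-prefixed')

async function roundTrip () {
  await pipe(
    [Buffer.from('hey'), Buffer.from('there')], // a source of buffers
    lp.encode(), // prefix each chunk with its length
    lp.decode(), // strip the prefixes again
    async source => {
      for await (const chunk of source) {
        console.log(chunk.toString()) // hey, there
      }
    }
  )
}
```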
[it-handshake]: https://github.com/jacobheun/it-handshake
[it-length-prefixed]: https://github.com/alanshaw/it-length-prefixed
[it-pair]: https://github.com/alanshaw/it-pair
[it-pipe]: https://github.com/alanshaw/it-pipe
[it-pushable]: https://github.com/alanshaw/it-pushable
[it-reader]: https://github.com/alanshaw/it-reader
[streaming-iterables]: https://github.com/bustle/streaming-iterables

package.json

@ -36,14 +36,12 @@
},
"homepage": "https://libp2p.io",
"license": "MIT",
"browser": {
"./test/utils/bundle-nodejs": "./test/utils/bundle-browser"
},
"engines": {
"node": ">=10.0.0",
"npm": ">=6.0.0"
},
"dependencies": {
"abort-controller": "^3.0.0",
"async": "^2.6.2",
"bignumber.js": "^9.0.0",
"class-is": "^1.1.0",
@ -51,32 +49,35 @@
"err-code": "^1.1.2",
"fsm-event": "^2.1.0",
"hashlru": "^2.3.0",
"interface-connection": "~0.3.3",
"it-handshake": "^1.0.1",
"it-length-prefixed": "^3.0.0",
"it-pipe": "^1.1.0",
"it-protocol-buffers": "^0.2.0",
"latency-monitor": "~0.2.1",
"libp2p-crypto": "^0.16.2",
"libp2p-websockets": "^0.12.2",
"mafmt": "^6.0.7",
"libp2p-crypto": "^0.17.1",
"libp2p-interfaces": "^0.1.5",
"mafmt": "^7.0.0",
"merge-options": "^1.0.1",
"moving-average": "^1.0.0",
"multiaddr": "^6.1.0",
"multistream-select": "~0.14.6",
"multiaddr": "^7.1.0",
"multistream-select": "^0.15.0",
"once": "^1.4.0",
"peer-book": "^0.9.1",
"peer-id": "^0.12.2",
"peer-info": "~0.15.1",
"pull-cat": "^1.1.11",
"pull-defer": "~0.2.3",
"pull-handshake": "^1.1.4",
"pull-reader": "^1.3.1",
"pull-stream": "^3.6.9",
"p-map": "^3.0.0",
"p-queue": "^6.1.1",
"p-settle": "^3.1.0",
"peer-id": "^0.13.3",
"peer-info": "^0.17.0",
"promisify-es6": "^1.0.3",
"protons": "^1.0.1",
"pull-cat": "^1.1.11",
"pull-handshake": "^1.1.4",
"pull-stream": "^3.6.9",
"retimer": "^2.0.0",
"superstruct": "^0.6.0",
"xsalsa20": "^1.0.2"
},
"devDependencies": {
"@nodeutils/defaults-deep": "^1.1.0",
"abortable-iterator": "^2.1.0",
"aegir": "^20.0.0",
"chai": "^4.2.0",
"chai-checkmark": "^1.0.1",
@ -84,24 +85,26 @@
"delay": "^4.3.0",
"dirty-chai": "^2.0.1",
"electron-webrtc": "^0.3.0",
"glob": "^7.1.4",
"interface-datastore": "^0.6.0",
"it-pair": "^1.0.0",
"libp2p-bootstrap": "^0.9.7",
"libp2p-delegated-content-routing": "^0.2.2",
"libp2p-delegated-peer-routing": "^0.2.2",
"libp2p-floodsub": "~0.17.0",
"libp2p-gossipsub": "~0.0.4",
"libp2p-floodsub": "^0.19.0",
"libp2p-gossipsub": "ChainSafe/gossipsub-js#beta/async",
"libp2p-kad-dht": "^0.15.3",
"libp2p-mdns": "^0.12.3",
"libp2p-mplex": "^0.8.4",
"libp2p-mplex": "^0.9.1",
"libp2p-pnet": "~0.1.0",
"libp2p-secio": "^0.11.1",
"libp2p-spdy": "^0.13.2",
"libp2p-tcp": "^0.13.0",
"libp2p-webrtc-star": "^0.16.1",
"libp2p-websocket-star": "~0.10.2",
"libp2p-websocket-star-rendezvous": "~0.4.1",
"libp2p-tcp": "^0.14.1",
"libp2p-websockets": "^0.13.1",
"lodash.times": "^4.3.2",
"nock": "^10.0.6",
"p-defer": "^3.0.0",
"p-wait-for": "^3.1.0",
"portfinder": "^1.0.20",
"pull-goodbye": "0.0.2",
"pull-length-prefixed": "^1.3.3",
@ -110,6 +113,7 @@
"pull-protocol-buffers": "~0.1.2",
"pull-serializer": "^0.3.2",
"sinon": "^7.2.7",
"streaming-iterables": "^4.1.0",
"wrtc": "^0.4.1"
},
"contributors": [


@ -1,6 +1,6 @@
# js-libp2p-circuit
> Node.js implementation of the Circuit module that libp2p uses, which implements the [interface-connection](https://github.com/libp2p/interface-connection) interface for dial/listen.
> Node.js implementation of the Circuit module that libp2p uses, which implements the [interface-connection](https://github.com/libp2p/js-interfaces/tree/master/src/connection) interface for dial/listen.
**Note**: git history prior to merging into js-libp2p can be found in the original repository, https://github.com/libp2p/js-libp2p-circuit.
@ -24,15 +24,18 @@ Prior to `libp2p-circuit` there was a rift in the IPFS network, where IPFS nodes
## Table of Contents
- [Install](#install)
- [npm](#npm)
- [Usage](#usage)
- [js-libp2p-circuit](#js-libp2p-circuit)
- [Why?](#why)
- [libp2p-circuit and IPFS](#libp2p-circuit-and-ipfs)
- [Table of Contents](#table-of-contents)
- [Usage](#usage)
- [Example](#example)
- [Create dialer/listener](#create-dialerlistener)
- [Create `relay`](#create-relay)
- [This module uses `pull-streams`](#this-module-uses-pull-streams)
- [Converting `pull-streams` to Node.js Streams](#converting-pull-streams-to-nodejs-streams)
- [API](#api)
- [Contribute](#contribute)
- [License](#license)
- [API](#api)
- [Implementation rational](#implementation-rational)
## Usage


@ -6,7 +6,7 @@ const waterfall = require('async/waterfall')
const setImmediate = require('async/setImmediate')
const multiaddr = require('multiaddr')
const Connection = require('interface-connection').Connection
const { Connection } = require('libp2p-interfaces/src/connection')
const utilsFactory = require('./utils')
const StreamHandler = require('./stream-handler')


@ -3,7 +3,7 @@
const setImmediate = require('async/setImmediate')
const EE = require('events').EventEmitter
const Connection = require('interface-connection').Connection
const { Connection } = require('libp2p-interfaces/src/connection')
const utilsFactory = require('./utils')
const PeerInfo = require('peer-info')
const proto = require('../protocol').CircuitRelay

src/config.js

@ -1,8 +1,6 @@
'use strict'
const mergeOptions = require('merge-options')
const { struct, superstruct } = require('superstruct')
const { optional, list } = struct
const DefaultConfig = {
connectionManager: {
@ -38,67 +36,10 @@ const DefaultConfig = {
}
}
// Define custom types
const s = superstruct({
types: {
transport: value => {
if (value.length === 0) return 'ERROR_EMPTY'
value.forEach(i => {
if (!i.dial) return 'ERR_NOT_A_TRANSPORT'
})
return true
},
protector: value => {
if (!value.protect) return 'ERR_NOT_A_PROTECTOR'
return true
}
}
})
const modulesSchema = s({
connEncryption: optional(list([s('object|function')])),
// this is hacky to simulate optional because interface doesn't work correctly with it
// change to optional when fixed upstream
connProtector: s('undefined|protector'),
contentRouting: optional(list(['object'])),
dht: optional(s('null|function|object')),
pubsub: optional(s('null|function|object')),
peerDiscovery: optional(list([s('object|function')])),
peerRouting: optional(list(['object'])),
streamMuxer: optional(list([s('object|function')])),
transport: 'transport'
})
const configSchema = s({
peerDiscovery: 'object?',
relay: 'object?',
dht: 'object?',
pubsub: 'object?'
})
const optionsSchema = s({
switch: 'object?',
connectionManager: 'object?',
datastore: 'object?',
peerInfo: 'object',
peerBook: 'object?',
modules: modulesSchema,
config: configSchema
})
module.exports.validate = (opts) => {
opts = mergeOptions(DefaultConfig, opts)
const [error, options] = optionsSchema.validate(opts)
// Improve errors thrown; reduce stack by throwing here and add reason to the message
if (error) {
throw new Error(`${error.message}${error.reason ? ' - ' + error.reason : ''}`)
} else {
// Throw when dht is enabled but no dht module provided
if (options.config.dht.enabled) {
s('function|object')(options.modules.dht)
}
}
if (opts.modules.transport.length < 1) throw new Error("'options.modules.transport' must contain at least 1 transport")
return options
return opts
}

src/constants.js (new file)

@ -0,0 +1,12 @@
'use strict'
module.exports = {
DENY_TTL: 5 * 60 * 1e3, // How long before an errored peer can be dialed again
DENY_ATTEMPTS: 5, // Num of unsuccessful dials before a peer is permanently denied
DIAL_TIMEOUT: 30e3, // How long in ms a dial attempt is allowed to take
MAX_COLD_CALLS: 50, // How many dials without a protocol can be queued
MAX_PARALLEL_DIALS: 100, // Maximum allowed concurrent dials
QUARTER_HOUR: 15 * 60e3,
PRIORITY_HIGH: 10,
PRIORITY_LOW: 20
}

src/dialer.js (new file)

@ -0,0 +1,127 @@
'use strict'
const nextTick = require('async/nextTick')
const multiaddr = require('multiaddr')
const errCode = require('err-code')
const { default: PQueue } = require('p-queue')
const AbortController = require('abort-controller')
const debug = require('debug')
const log = debug('libp2p:dialer')
log.error = debug('libp2p:dialer:error')
const { codes } = require('./errors')
const {
MAX_PARALLEL_DIALS,
DIAL_TIMEOUT
} = require('./constants')
class Dialer {
/**
* @constructor
* @param {object} options
* @param {TransportManager} options.transportManager
* @param {number} options.concurrency Number of max concurrent dials. Defaults to `MAX_PARALLEL_DIALS`
* @param {number} options.timeout How long a dial attempt is allowed to take. Defaults to `DIAL_TIMEOUT`
*/
constructor ({
transportManager,
concurrency = MAX_PARALLEL_DIALS,
timeout = DIAL_TIMEOUT
}) {
this.transportManager = transportManager
this.concurrency = concurrency
this.timeout = timeout
this.queue = new PQueue({ concurrency, timeout, throwOnTimeout: true })
/**
* @property {IdentifyService}
*/
this._identifyService = null
}
set identifyService (service) {
this._identifyService = service
}
/**
* @type {IdentifyService}
*/
get identifyService () {
return this._identifyService
}
/**
* Connects to a given `Multiaddr`. `addr` should include the id of the peer being
* dialed; it will be used for encryption verification.
*
* @async
* @param {Multiaddr} addr The address to dial
* @param {object} [options]
* @param {AbortSignal} [options.signal] An AbortController signal
* @returns {Promise<Connection>}
*/
async connectToMultiaddr (addr, options = {}) {
addr = multiaddr(addr)
let conn
let controller
if (!options.signal) {
controller = new AbortController()
options.signal = controller.signal
}
try {
conn = await this.queue.add(() => this.transportManager.dial(addr, options))
} catch (err) {
if (err.name === 'TimeoutError') {
controller.abort()
err.code = codes.ERR_TIMEOUT
}
log.error('Error dialing address %s,', addr, err)
throw err
}
// Perform a delayed Identify handshake
if (this.identifyService) {
nextTick(async () => {
try {
await this.identifyService.identify(conn, conn.remotePeer)
// TODO: Update the PeerStore with the information from identify
} catch (err) {
log.error(err)
}
})
}
return conn
}
/**
* Connects to a given `PeerInfo` by dialing all of its known addresses.
* The dial to the first address that is successfully able to upgrade a connection
* will be used.
*
* @async
* @param {PeerInfo} peerInfo The remote peer to dial
* @param {object} [options]
* @param {AbortSignal} [options.signal] An AbortController signal
* @returns {Promise<Connection>}
*/
async connectToPeer (peerInfo, options = {}) {
const addrs = peerInfo.multiaddrs.toArray()
for (const addr of addrs) {
try {
return await this.connectToMultiaddr(addr, options)
} catch (_) {
// The error is already logged, just move to the next addr
continue
}
}
const err = errCode(new Error('Could not dial peer, all addresses failed'), codes.ERR_CONNECTION_FAILED)
log.error(err)
throw err
}
}
module.exports = Dialer
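As a usage sketch (not part of the diff), a dial through the new `Dialer` with a caller-supplied abort signal might look like this; `transportManager` is assumed to exist and the multiaddr is a placeholder:

```js
const AbortController = require('abort-controller')
const Dialer = require('./src/dialer')

const dialer = new Dialer({ transportManager })
const controller = new AbortController()

// abort the dial ourselves if it takes longer than we are willing to wait
setTimeout(() => controller.abort(), 5000)

const connection = await dialer.connectToMultiaddr(
  '/ip4/127.0.0.1/tcp/8000/ws',
  { signal: controller.signal }
)
```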

src/errors.js

@ -8,6 +8,19 @@ exports.messages = {
exports.codes = {
DHT_DISABLED: 'ERR_DHT_DISABLED',
PUBSUB_NOT_STARTED: 'ERR_PUBSUB_NOT_STARTED',
ERR_CONNECTION_ENDED: 'ERR_CONNECTION_ENDED',
ERR_CONNECTION_FAILED: 'ERR_CONNECTION_FAILED',
ERR_NODE_NOT_STARTED: 'ERR_NODE_NOT_STARTED',
ERR_DISCOVERED_SELF: 'ERR_DISCOVERED_SELF'
ERR_NO_VALID_ADDRESSES: 'ERR_NO_VALID_ADDRESSES',
ERR_DISCOVERED_SELF: 'ERR_DISCOVERED_SELF',
ERR_DUPLICATE_TRANSPORT: 'ERR_DUPLICATE_TRANSPORT',
ERR_ENCRYPTION_FAILED: 'ERR_ENCRYPTION_FAILED',
ERR_INVALID_KEY: 'ERR_INVALID_KEY',
ERR_INVALID_MESSAGE: 'ERR_INVALID_MESSAGE',
ERR_INVALID_PEER: 'ERR_INVALID_PEER',
ERR_MUXER_UNAVAILABLE: 'ERR_MUXER_UNAVAILABLE',
ERR_TIMEOUT: 'ERR_TIMEOUT',
ERR_TRANSPORT_UNAVAILABLE: 'ERR_TRANSPORT_UNAVAILABLE',
ERR_TRANSPORT_DIAL_FAILED: 'ERR_TRANSPORT_DIAL_FAILED',
ERR_UNSUPPORTED_PROTOCOL: 'ERR_UNSUPPORTED_PROTOCOL'
}
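These codes are attached to thrown errors with `err-code`, as the dialer above already does; a minimal sketch (the helper function is hypothetical):

```js
const errCode = require('err-code')
const { codes } = require('./src/errors')

function assertStarted (isStarted) {
  if (!isStarted) {
    // callers can switch on err.code instead of parsing the message
    throw errCode(new Error('the node is not started'), codes.ERR_NODE_NOT_STARTED)
  }
}
```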

src/get-peer-info.js

@ -7,14 +7,14 @@ const errCode = require('err-code')
/**
* Converts the given `peer` to a `PeerInfo` instance.
* The `PeerBook` will be checked for the resulting peer, and
* the peer will be updated in the `PeerBook`.
* The `PeerStore` will be checked for the resulting peer, and
* the peer will be updated in the `PeerStore`.
*
* @param {PeerInfo|PeerId|Multiaddr|string} peer
* @param {PeerBook} peerBook
* @param {PeerStore} peerStore
* @returns {PeerInfo}
*/
function getPeerInfo (peer, peerBook) {
function getPeerInfo (peer, peerStore) {
if (typeof peer === 'string') {
peer = multiaddr(peer)
}
@ -38,7 +38,7 @@ function getPeerInfo (peer, peerBook) {
addr && peer.multiaddrs.add(addr)
return peerBook ? peerBook.put(peer) : peer
return peerStore ? peerStore.put(peer) : peer
}
/**
@ -54,7 +54,7 @@ function getPeerInfoRemote (peer, libp2p) {
let peerInfo
try {
peerInfo = getPeerInfo(peer, libp2p.peerBook)
peerInfo = getPeerInfo(peer, libp2p.peerStore)
} catch (err) {
return Promise.reject(errCode(
new Error(`${peer} is not a valid peer type`),


@ -6,32 +6,8 @@
## Description
Identify is a STUN protocol, used by libp2p-swarm in order to broadcast and learn about the `ip:port` pairs a specific peer is available through and to know when a new stream muxer is established, so a conn can be reused.
Identify is a STUN protocol, used by libp2p in order to broadcast and learn about the `ip:port` pairs a specific peer is available through and to know when a new stream muxer is established, so a conn can be reused.
## How does it work
Best way to understand the current design is through this issue: https://github.com/libp2p/js-libp2p-swarm/issues/78
### This module uses `pull-streams`
We expose a streaming interface based on `pull-streams`, rather than on the Node.js core streams implementation (aka Node.js streams). `pull-streams` offers us a better mechanism for error handling and flow control guarantees. If you would like to know more about why we did this, see the discussion at this [issue](https://github.com/ipfs/js-ipfs/issues/362).
You can learn more about pull-streams at:
- [The history of Node.js streams, nodebp April 2014](https://www.youtube.com/watch?v=g5ewQEuXjsQ)
- [The history of streams, 2016](http://dominictarr.com/post/145135293917/history-of-streams)
- [pull-streams, the simple streaming primitive](http://dominictarr.com/post/149248845122/pull-streams-pull-streams-are-a-very-simple)
- [pull-streams documentation](https://pull-stream.github.io/)
#### Converting `pull-streams` to Node.js Streams
If you are a Node.js streams user, you can convert a pull-stream to a Node.js stream using the module [`pull-stream-to-stream`](https://github.com/pull-stream/pull-stream-to-stream), giving you an instance of a Node.js stream that is linked to the pull-stream. For example:
```js
const pullToStream = require('pull-stream-to-stream')
const nodeStreamInstance = pullToStream(pullStreamInstance)
// nodeStreamInstance is an instance of a Node.js Stream
```
To learn more about this utility, visit https://pull-stream.github.io/#pull-stream-to-stream.
The spec for Identify and Identify Push is at [libp2p/specs](https://github.com/libp2p/specs/tree/master/identify).

src/identify/consts.js (new file)

@ -0,0 +1,6 @@
'use strict'
module.exports.PROTOCOL_VERSION = 'ipfs/0.1.0'
module.exports.AGENT_VERSION = 'js-libp2p/0.1.0'
module.exports.MULTICODEC_IDENTIFY = '/ipfs/id/1.0.0'
module.exports.MULTICODEC_IDENTIFY_PUSH = '/ipfs/id/push/1.0.0'


@ -1,87 +0,0 @@
'use strict'
const PeerInfo = require('peer-info')
const PeerId = require('peer-id')
const multiaddr = require('multiaddr')
const pull = require('pull-stream/pull')
const take = require('pull-stream/throughs/take')
const collect = require('pull-stream/sinks/collect')
const lp = require('pull-length-prefixed')
const msg = require('./message')
module.exports = (conn, expectedPeerInfo, callback) => {
if (typeof expectedPeerInfo === 'function') {
callback = expectedPeerInfo
expectedPeerInfo = null
// eslint-disable-next-line no-console
console.warn('WARNING: no expected peer info was given, identify will not be able to verify peer integrity')
}
pull(
conn,
lp.decode(),
take(1),
collect((err, data) => {
if (err) {
return callback(err)
}
// connection got closed gracefully
if (data.length === 0) {
return callback(new Error('conn was closed, did not receive data'))
}
const input = msg.decode(data[0])
PeerId.createFromPubKey(input.publicKey, (err, id) => {
if (err) {
return callback(err)
}
const peerInfo = new PeerInfo(id)
if (expectedPeerInfo && expectedPeerInfo.id.toB58String() !== id.toB58String()) {
return callback(new Error('invalid peer'))
}
try {
input.listenAddrs
.map(multiaddr)
.forEach((ma) => peerInfo.multiaddrs.add(ma))
} catch (err) {
return callback(err)
}
let observedAddr
try {
observedAddr = getObservedAddrs(input)
} catch (err) {
return callback(err)
}
// Copy the protocols
peerInfo.protocols = new Set(input.protocols)
callback(null, peerInfo, observedAddr)
})
})
)
}
function getObservedAddrs (input) {
if (!hasObservedAddr(input)) {
return []
}
let addrs = input.observedAddr
if (!Array.isArray(addrs)) {
addrs = [addrs]
}
return addrs.map((oa) => multiaddr(oa))
}
function hasObservedAddr (input) {
return input.observedAddr && input.observedAddr.length > 0
}

src/identify/index.js

@ -1,7 +1,299 @@
'use strict'
exports = module.exports
exports.multicodec = '/ipfs/id/1.0.0'
exports.listener = require('./listener')
exports.dialer = require('./dialer')
exports.message = require('./message')
const debug = require('debug')
const pb = require('it-protocol-buffers')
const lp = require('it-length-prefixed')
const pipe = require('it-pipe')
const { collect, take } = require('streaming-iterables')
const PeerInfo = require('peer-info')
const PeerId = require('peer-id')
const multiaddr = require('multiaddr')
const { toBuffer } = require('../util')
const Message = require('./message')
const log = debug('libp2p:identify')
log.error = debug('libp2p:identify:error')
const {
MULTICODEC_IDENTIFY,
MULTICODEC_IDENTIFY_PUSH,
AGENT_VERSION,
PROTOCOL_VERSION
} = require('./consts')
const errCode = require('err-code')
const { codes } = require('../errors')
class IdentifyService {
/**
* Replaces the multiaddrs on the given `peerInfo`,
* with the provided `multiaddrs`
* @param {PeerInfo} peerInfo
* @param {Array<Multiaddr>|Array<Buffer>} multiaddrs
*/
static updatePeerAddresses (peerInfo, multiaddrs) {
if (multiaddrs && multiaddrs.length > 0) {
peerInfo.multiaddrs.clear()
multiaddrs.forEach(ma => {
try {
peerInfo.multiaddrs.add(ma)
} catch (err) {
log.error('could not add multiaddr', err)
}
})
}
}
/**
* Replaces the protocols on the given `peerInfo`,
* with the provided `protocols`
* @static
* @param {PeerInfo} peerInfo
* @param {Array<string>} protocols
*/
static updatePeerProtocols (peerInfo, protocols) {
if (protocols && protocols.length > 0) {
peerInfo.protocols.clear()
protocols.forEach(proto => peerInfo.protocols.add(proto))
}
}
/**
* Takes the `addr` and converts it to a Multiaddr if possible
* @param {Buffer|String} addr
* @returns {Multiaddr|null}
*/
static getCleanMultiaddr (addr) {
if (addr && addr.length > 0) {
try {
return multiaddr(addr)
} catch (_) {
return null
}
}
return null
}
/**
* @constructor
* @param {object} options
* @param {Registrar} options.registrar
* @param {Map<string, handler>} options.protocols A reference to the protocols we support
* @param {PeerInfo} options.peerInfo The peer running the identify service
*/
constructor (options) {
/**
* @property {Registrar}
*/
this.registrar = options.registrar
/**
* @property {PeerInfo}
*/
this.peerInfo = options.peerInfo
this._protocols = options.protocols
this.handleMessage = this.handleMessage.bind(this)
}
/**
* Send an Identify Push update to the list of connections
* @param {Array<Connection>} connections
* @returns {Promise<void>}
*/
push (connections) {
const pushes = connections.map(async connection => {
try {
const { stream } = await connection.newStream(MULTICODEC_IDENTIFY_PUSH)
await pipe(
[{
listenAddrs: this.peerInfo.multiaddrs.toArray().map((ma) => ma.buffer),
protocols: Array.from(this._protocols.keys())
}],
pb.encode(Message),
stream
)
} catch (err) {
// Just log errors
log.error('could not push identify update to peer', err)
}
})
return Promise.all(pushes)
}
/**
* Calls `push` for all peers in the `peerStore` that are connected
* @param {PeerStore} peerStore
*/
pushToPeerStore (peerStore) {
const connections = []
let connection
for (const peer of peerStore.peers.values()) {
if (peer.protocols.has(MULTICODEC_IDENTIFY_PUSH) && (connection = this.registrar.getConnection(peer))) {
connections.push(connection)
}
}
this.push(connections)
}
/**
* Requests the `Identify` message from peer associated with the given `connection`.
* If the identified peer does not match the `PeerId` associated with the connection,
* an error will be thrown.
*
* @async
* @param {Connection} connection
* @param {PeerID} expectedPeer The PeerId the identify response should match
* @returns {Promise<void>}
*/
async identify (connection, expectedPeer) {
const { stream } = await connection.newStream(MULTICODEC_IDENTIFY)
const [data] = await pipe(
stream,
lp.decode(),
take(1),
toBuffer,
collect
)
if (!data) {
throw errCode(new Error('No data could be retrieved'), codes.ERR_CONNECTION_ENDED)
}
let message
try {
message = Message.decode(data)
} catch (err) {
throw errCode(err, codes.ERR_INVALID_MESSAGE)
}
let {
publicKey,
listenAddrs,
protocols,
observedAddr
} = message
const id = await PeerId.createFromPubKey(publicKey)
const peerInfo = new PeerInfo(id)
if (expectedPeer && expectedPeer.toB58String() !== id.toB58String()) {
throw errCode(new Error('identified peer does not match the expected peer'), codes.ERR_INVALID_PEER)
}
// Get the observedAddr if there is one
observedAddr = IdentifyService.getCleanMultiaddr(observedAddr)
// Copy the listenAddrs and protocols
IdentifyService.updatePeerAddresses(peerInfo, listenAddrs)
IdentifyService.updatePeerProtocols(peerInfo, protocols)
this.registrar.peerStore.update(peerInfo)
// TODO: Track our observed address so that we can score it
log('received observed address of %s', observedAddr)
}
/**
* A handler to register with Libp2p to process identify messages.
*
* @param {object} options
* @param {String} options.protocol
* @param {*} options.stream
* @param {Connection} options.connection
* @returns {Promise<void>}
*/
handleMessage ({ connection, stream, protocol }) {
switch (protocol) {
case MULTICODEC_IDENTIFY:
return this._handleIdentify({ connection, stream })
case MULTICODEC_IDENTIFY_PUSH:
return this._handlePush({ connection, stream })
default:
log.error('cannot handle unknown protocol %s', protocol)
}
}
/**
* Sends the `Identify` response to the requesting peer over the
* given `connection`
* @private
* @param {object} options
* @param {*} options.stream
* @param {Connection} options.connection
*/
_handleIdentify ({ connection, stream }) {
let publicKey = Buffer.alloc(0)
if (this.peerInfo.id.pubKey) {
publicKey = this.peerInfo.id.pubKey.bytes
}
const message = Message.encode({
protocolVersion: PROTOCOL_VERSION,
agentVersion: AGENT_VERSION,
publicKey,
listenAddrs: this.peerInfo.multiaddrs.toArray().map((ma) => ma.buffer),
observedAddr: connection.remoteAddr.buffer,
protocols: Array.from(this._protocols.keys())
})
pipe(
[message],
lp.encode(),
stream
)
}
/**
* Reads the Identify Push message from the given `connection`
* @private
* @param {object} options
* @param {*} options.stream
* @param {Connection} options.connection
*/
async _handlePush ({ connection, stream }) {
const [data] = await pipe(
stream,
lp.decode(),
take(1),
toBuffer,
collect
)
let message
try {
message = Message.decode(data)
} catch (err) {
return log.error('received invalid message', err)
}
// Update the listen addresses
const peerInfo = new PeerInfo(connection.remotePeer)
try {
IdentifyService.updatePeerAddresses(peerInfo, message.listenAddrs)
} catch (err) {
return log.error('received invalid listen addrs', err)
}
// Update the protocols
IdentifyService.updatePeerProtocols(peerInfo, message.protocols)
// Update the peer in the PeerStore
this.registrar.peerStore.update(peerInfo)
}
}
module.exports.IdentifyService = IdentifyService
/**
* The protocols the IdentifyService supports
* @property multicodecs
*/
module.exports.multicodecs = {
IDENTIFY: MULTICODEC_IDENTIFY,
IDENTIFY_PUSH: MULTICODEC_IDENTIFY_PUSH
}
module.exports.Message = Message
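For reference, `src/index.js` later in this diff wires the service up roughly as follows (a condensed sketch; `registrar`, `peerInfo` and `upgrader` come from the libp2p instance):

```js
const { IdentifyService, multicodecs } = require('./src/identify')

const identifyService = new IdentifyService({
  registrar,
  peerInfo,
  protocols: upgrader.protocols
})

// register the identify and identify-push protocol handlers
libp2p.handle(Object.values(multicodecs), identifyService.handleMessage)
```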


@ -1,35 +0,0 @@
'use strict'
const pull = require('pull-stream/pull')
const values = require('pull-stream/sources/values')
const lp = require('pull-length-prefixed')
const msg = require('./message')
module.exports = (conn, pInfoSelf) => {
// send what I see from the other + my Info
conn.getObservedAddrs((err, observedAddrs) => {
if (err) { return }
observedAddrs = observedAddrs[0]
let publicKey = Buffer.alloc(0)
if (pInfoSelf.id.pubKey) {
publicKey = pInfoSelf.id.pubKey.bytes
}
const msgSend = msg.encode({
protocolVersion: 'ipfs/0.1.0',
agentVersion: 'na',
publicKey: publicKey,
listenAddrs: pInfoSelf.multiaddrs.toArray().map((ma) => ma.buffer),
observedAddr: observedAddrs ? observedAddrs.buffer : Buffer.from(''),
protocols: Array.from(pInfoSelf.protocols)
})
pull(
values([msgSend]),
lp.encode(),
conn
)
})
}

src/index.js

@ -1,7 +1,7 @@
'use strict'
const FSM = require('fsm-event')
const EventEmitter = require('events').EventEmitter
const { EventEmitter } = require('events')
const debug = require('debug')
const log = debug('libp2p')
log.error = debug('libp2p:error')
@ -9,26 +9,31 @@ const errCode = require('err-code')
const promisify = require('promisify-es6')
const each = require('async/each')
const series = require('async/series')
const parallel = require('async/parallel')
const nextTick = require('async/nextTick')
const PeerBook = require('peer-book')
const PeerInfo = require('peer-info')
const multiaddr = require('multiaddr')
const Switch = require('./switch')
const Ping = require('./ping')
const WebSockets = require('libp2p-websockets')
const ConnectionManager = require('./connection-manager')
const { emitFirst } = require('./util')
const peerRouting = require('./peer-routing')
const contentRouting = require('./content-routing')
const dht = require('./dht')
const pubsub = require('./pubsub')
const { getPeerInfoRemote } = require('./get-peer-info')
const validateConfig = require('./config').validate
const { getPeerInfo, getPeerInfoRemote } = require('./get-peer-info')
const { validate: validateConfig } = require('./config')
const { codes } = require('./errors')
const Dialer = require('./dialer')
const TransportManager = require('./transport-manager')
const Upgrader = require('./upgrader')
const PeerStore = require('./peer-store')
const Registrar = require('./registrar')
const {
IdentifyService,
multicodecs: IDENTIFY_PROTOCOLS
} = require('./identify')
const notStarted = (action, state) => {
return errCode(
new Error(`libp2p cannot ${action} when not started; state is ${state}`),
@ -53,61 +58,81 @@ class Libp2p extends EventEmitter {
this.datastore = this._options.datastore
this.peerInfo = this._options.peerInfo
this.peerBook = this._options.peerBook || new PeerBook()
this.peerStore = new PeerStore()
this._modules = this._options.modules
this._config = this._options.config
this._transport = [] // Transport instances/references
this._discovery = [] // Discovery service instances/references
this.peerStore = new PeerStore()
// create the switch, and listen for errors
this._switch = new Switch(this.peerInfo, this.peerBook, this._options.switch)
this._switch.on('error', (...args) => this.emit('error', ...args))
this._switch = new Switch(this.peerInfo, this.peerStore, this._options.switch)
this.stats = this._switch.stats
this.connectionManager = new ConnectionManager(this, this._options.connectionManager)
// Setup the Upgrader
this.upgrader = new Upgrader({
localPeer: this.peerInfo.id,
onConnection: (connection) => {
const peerInfo = getPeerInfo(connection.remotePeer)
// Attach stream multiplexers
if (this._modules.streamMuxer) {
const muxers = this._modules.streamMuxer
muxers.forEach((muxer) => this._switch.connection.addStreamMuxer(muxer))
// If muxer exists
// we can use Identify
this._switch.connection.reuse()
// we can use Relay for listening/dialing
this._switch.connection.enableCircuitRelay(this._config.relay)
// Received incoming dial and muxer upgrade happened,
// reuse this muxed connection
this._switch.on('peer-mux-established', (peerInfo) => {
this.peerStore.put(peerInfo)
this.registrar.onConnect(peerInfo, connection)
this.emit('peer:connect', peerInfo)
})
},
onConnectionEnd: (connection) => {
const peerInfo = getPeerInfo(connection.remotePeer)
this._switch.on('peer-mux-closed', (peerInfo) => {
this.registrar.onDisconnect(peerInfo, connection)
this.emit('peer:disconnect', peerInfo)
})
}
// Events for anytime connections are created/removed
this._switch.on('connection:start', (peerInfo) => {
this.emit('connection:start', peerInfo)
})
this._switch.on('connection:end', (peerInfo) => {
this.emit('connection:end', peerInfo)
// Create the Registrar
this.registrar = new Registrar({ peerStore: this.peerStore })
this.handle = this.handle.bind(this)
this.registrar.handle = this.handle
// Setup the transport manager
this.transportManager = new TransportManager({
libp2p: this,
upgrader: this.upgrader
})
this._modules.transport.forEach((Transport) => {
this.transportManager.add(Transport.prototype[Symbol.toStringTag], Transport)
})
// Attach crypto channels
if (this._modules.connEncryption) {
const cryptos = this._modules.connEncryption
cryptos.forEach((crypto) => {
this._switch.connection.crypto(crypto.tag, crypto.encrypt)
this.upgrader.cryptos.set(crypto.protocol, crypto)
})
}
this.dialer = new Dialer({
transportManager: this.transportManager
})
// Attach stream multiplexers
if (this._modules.streamMuxer) {
const muxers = this._modules.streamMuxer
muxers.forEach((muxer) => {
this.upgrader.muxers.set(muxer.multicodec, muxer)
})
// Add the identify service since we can multiplex
this.dialer.identifyService = new IdentifyService({
registrar: this.registrar,
peerInfo: this.peerInfo,
protocols: this.upgrader.protocols
})
this.handle(Object.values(IDENTIFY_PROTOCOLS), this.dialer.identifyService.handleMessage)
}
// Attach private network protector
if (this._modules.connProtector) {
this._switch.protector = this._modules.connProtector
this.upgrader.protector = this._modules.connProtector
} else if (process.env.LIBP2P_FORCE_PNET) {
throw new Error('Private network is enforced, but no protector was provided')
}
@ -123,7 +148,7 @@ class Libp2p extends EventEmitter {
}
// start pubsub
if (this._modules.pubsub && this._config.pubsub.enabled !== false) {
if (this._modules.pubsub) {
this.pubsub = pubsub(this, this._modules.pubsub, this._config.pubsub)
}
@ -139,7 +164,8 @@ class Libp2p extends EventEmitter {
this.state = new FSM('STOPPED', {
STOPPED: {
start: 'STARTING',
stop: 'STOPPED'
stop: 'STOPPED',
done: 'STOPPED'
},
STARTING: {
done: 'STARTED',
@ -161,7 +187,6 @@ class Libp2p extends EventEmitter {
})
this.state.on('STOPPING', () => {
log('libp2p is stopping')
this._onStopping()
})
this.state.on('STARTED', () => {
log('libp2p has started')
@ -178,16 +203,16 @@ class Libp2p extends EventEmitter {
// Once we start, emit and dial any peers we may have already discovered
this.state.on('STARTED', () => {
this.peerBook.getAllArray().forEach((peerInfo) => {
for (const peerInfo of this.peerStore.peers) {
this.emit('peer:discovery', peerInfo)
this._maybeConnect(peerInfo)
})
}
})
this._peerDiscovered = this._peerDiscovered.bind(this)
// promisify all instance methods
;['start', 'stop', 'dial', 'dialProtocol', 'dialFSM', 'hangUp', 'ping'].forEach(method => {
;['start', 'hangUp', 'ping'].forEach(method => {
this[method] = promisify(this[method], { context: this })
})
}
@ -220,13 +245,23 @@ class Libp2p extends EventEmitter {
/**
* Stop the libp2p node by closing its listeners and open connections
*
* @param {function(Error)} callback
* @async
* @returns {void}
*/
stop (callback = () => {}) {
emitFirst(this, ['error', 'stop'], callback)
async stop () {
this.state('stop')
try {
this.pubsub && await this.pubsub.stop()
await this.transportManager.close()
await this._switch.stop()
} catch (err) {
if (err) {
log.error(err)
this.emit('error', err)
}
}
this.state('done')
}
isStarted () {
@ -235,65 +270,52 @@ class Libp2p extends EventEmitter {
/**
* Dials to the provided peer. If successful, the `PeerInfo` of the
* peer will be added to the nodes `PeerBook`
* peer will be added to the nodes `peerStore`
*
* @param {PeerInfo|PeerId|Multiaddr|string} peer The peer to dial
* @param {function(Error)} callback
* @returns {void}
* @param {object} options
* @param {AbortSignal} [options.signal]
* @returns {Promise<Connection>}
*/
dial (peer, callback) {
this.dialProtocol(peer, null, callback)
dial (peer, options) {
return this.dialProtocol(peer, null, options)
}
/**
* Dials to the provided peer and handshakes with the given protocol.
* If successful, the `PeerInfo` of the peer will be added to the nodes `PeerBook`,
* If successful, the `PeerInfo` of the peer will be added to the nodes `peerStore`,
* and the `Connection` will be sent in the callback
*
* @async
* @param {PeerInfo|PeerId|Multiaddr|string} peer The peer to dial
* @param {string} protocol
* @param {function(Error, Connection)} callback
* @returns {void}
* @param {string[]|string} protocols
* @param {object} options
* @param {AbortSignal} [options.signal]
* @returns {Promise<Connection|*>}
*/
dialProtocol (peer, protocol, callback) {
if (!this.isStarted()) {
return callback(notStarted('dial', this.state._state))
async dialProtocol (peer, protocols, options) {
let connection
if (multiaddr.isMultiaddr(peer)) {
connection = await this.dialer.connectToMultiaddr(peer, options)
} else {
peer = await getPeerInfoRemote(peer, this)
connection = await this.dialer.connectToPeer(peer, options)
}
if (typeof protocol === 'function') {
callback = protocol
protocol = undefined
const peerInfo = getPeerInfo(connection.remotePeer)
// If a protocol was provided, create a new stream
if (protocols) {
const stream = await connection.newStream(protocols)
peerInfo.protocols.add(stream.protocol)
this.peerStore.put(peerInfo)
return stream
}
getPeerInfoRemote(peer, this)
.then(peerInfo => {
this._switch.dial(peerInfo, protocol, callback)
}, callback)
}
/**
* Similar to `dial` and `dialProtocol`, but the callback will contain a
* Connection State Machine.
*
* @param {PeerInfo|PeerId|Multiaddr|string} peer The peer to dial
* @param {string} protocol
* @param {function(Error, ConnectionFSM)} callback
* @returns {void}
*/
dialFSM (peer, protocol, callback) {
if (!this.isStarted()) {
return callback(notStarted('dial', this.state._state))
}
if (typeof protocol === 'function') {
callback = protocol
protocol = undefined
}
getPeerInfoRemote(peer, this)
.then(peerInfo => {
this._switch.dialFSM(peerInfo, protocol, callback)
}, callback)
this.peerStore.put(peerInfo)
return connection
}
/**
@ -328,154 +350,61 @@ class Libp2p extends EventEmitter {
}, callback)
}
handle (protocol, handlerFunc, matchFunc) {
this._switch.handle(protocol, handlerFunc, matchFunc)
/**
* Registers the `handler` for each protocol
* @param {string[]|string} protocols
* @param {function({ connection:*, stream:*, protocol:string })} handler
*/
handle (protocols, handler) {
protocols = Array.isArray(protocols) ? protocols : [protocols]
protocols.forEach(protocol => {
this.upgrader.protocols.set(protocol, handler)
})
this.dialer.identifyService.pushToPeerStore(this.peerStore)
}
unhandle (protocol) {
this._switch.unhandle(protocol)
/**
* Removes the handler for each protocol. The protocol
* will no longer be supported on streams.
* @param {string[]|string} protocols
*/
unhandle (protocols) {
protocols = Array.isArray(protocols) ? protocols : [protocols]
protocols.forEach(protocol => {
this.upgrader.protocols.delete(protocol)
})
this.dialer.identifyService.pushToPeerStore(this.peerStore)
}
_onStarting () {
async _onStarting () {
if (!this._modules.transport) {
this.emit('error', new Error('no transports were present'))
return this.state('abort')
}
let ws
// so that we can have webrtc-star addrs without manually adding the id
const maOld = []
const maNew = []
this.peerInfo.multiaddrs.toArray().forEach((ma) => {
if (!ma.getPeerId()) {
maOld.push(ma)
maNew.push(ma.encapsulate('/p2p/' + this.peerInfo.id.toB58String()))
}
})
this.peerInfo.multiaddrs.replace(maOld, maNew)
const multiaddrs = this.peerInfo.multiaddrs.toArray()
this._modules.transport.forEach((Transport) => {
let t
// Start parallel tasks
const tasks = [
this.transportManager.listen(multiaddrs)
]
if (typeof Transport === 'function') {
t = new Transport({ libp2p: this })
} else {
t = Transport
if (this._config.pubsub.enabled) {
this.pubsub && this.pubsub.start()
}
if (t.filter(multiaddrs).length > 0) {
this._switch.transport.add(t.tag || t[Symbol.toStringTag], t)
} else if (WebSockets.isWebSockets(t)) {
// TODO find a cleaner way to signal that a transport is always used
// for dialing, even if no listener
ws = t
}
this._transport.push(t)
})
series([
(cb) => {
this.connectionManager.start()
this._switch.start(cb)
},
(cb) => {
if (ws) {
// always add dialing on websockets
this._switch.transport.add(ws.tag || ws.constructor.name, ws)
}
// detect which multiaddrs we don't have a transport for and remove them
const multiaddrs = this.peerInfo.multiaddrs.toArray()
multiaddrs.forEach((multiaddr) => {
if (!multiaddr.toString().match(/\/p2p-circuit($|\/)/) &&
!this._transport.find((transport) => transport.filter(multiaddr).length > 0)) {
this.peerInfo.multiaddrs.delete(multiaddr)
}
})
cb()
},
(cb) => {
if (this._dht) {
this._dht.start(() => {
this._dht.on('peer', this._peerDiscovered)
cb()
})
} else {
cb()
}
},
(cb) => {
if (this.pubsub) {
return this.pubsub.start(cb)
}
cb()
},
// Peer Discovery
(cb) => {
if (this._modules.peerDiscovery) {
this._setupPeerDiscovery(cb)
} else {
cb()
}
}
], (err) => {
if (err) {
try {
await Promise.all(tasks)
} catch (err) {
log.error(err)
this.emit('error', err)
return this.state('stop')
}
this.state('done')
})
}
_onStopping () {
series([
(cb) => {
// stop all discoveries before continuing with shutdown
parallel(
this._discovery.map((d) => {
d.removeListener('peer', this._peerDiscovered)
return (_cb) => d.stop((err) => {
log.error('an error occurred stopping the discovery service', err)
_cb()
})
}),
cb
)
},
(cb) => {
if (this.pubsub) {
return this.pubsub.stop(cb)
}
cb()
},
(cb) => {
if (this._dht) {
this._dht.removeListener('peer', this._peerDiscovered)
return this._dht.stop(cb)
}
cb()
},
(cb) => {
this.connectionManager.stop()
this._switch.stop(cb)
},
(cb) => {
// Ensures idempotent restarts, ignore any errors
// from removeAll, they're not useful at this point
this._switch.transport.removeAll(() => cb())
}
], (err) => {
if (err) {
log.error(err)
this.emit('error', err)
}
// libp2p has started
this.state('done')
})
}
/**
@ -483,12 +412,6 @@ class Libp2p extends EventEmitter {
* the `peer:discovery` event. If auto dial is enabled for libp2p
* and the current connection count is under the low watermark, the
* peer will be dialed.
*
* TODO: If `peerBook.put` becomes centralized, https://github.com/libp2p/js-libp2p/issues/345,
* it would be ideal if only new peers were emitted. Currently, with
* other modules adding peers to the `PeerBook` we have no way of knowing
* if a peer is new or not, so it has to be emitted.
*
* @private
* @param {PeerInfo} peerInfo
*/
@ -497,7 +420,7 @@ class Libp2p extends EventEmitter {
log.error(new Error(codes.ERR_DISCOVERED_SELF))
return
}
peerInfo = this.peerBook.put(peerInfo)
peerInfo = this.peerStore.put(peerInfo)
if (!this.isStarted()) return
@ -568,16 +491,15 @@ module.exports = Libp2p
* Like `new Libp2p(options)` except it will create a `PeerInfo`
* instance if one is not provided in options.
* @param {object} options Libp2p configuration options
* @param {function(Error, Libp2p)} callback
* @returns {void}
* @returns {Libp2p}
*/
module.exports.createLibp2p = promisify((options, callback) => {
module.exports.create = async (options = {}) => {
if (options.peerInfo) {
return nextTick(callback, null, new Libp2p(options))
return new Libp2p(options)
}
PeerInfo.create((err, peerInfo) => {
if (err) return callback(err)
const peerInfo = await PeerInfo.create()
options.peerInfo = peerInfo
callback(null, new Libp2p(options))
})
})
return new Libp2p(options)
}
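Taken together, the new dial flow can be sketched like this (the `/echo/1.0.0` protocol comes from the `.aegir.js` test setup above; everything else is illustrative):

```js
const pipe = require('it-pipe')

const connection = await libp2p.dial(peerInfo)
const { stream } = await connection.newStream('/echo/1.0.0')

await pipe(
  ['hey'],
  stream,
  async source => {
    for await (const chunk of source) {
      console.log(chunk.toString())
    }
  }
)
```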

src/insecure/plaintext.js (new file)

@ -0,0 +1,67 @@
'use strict'
const handshake = require('it-handshake')
const lp = require('it-length-prefixed')
const PeerId = require('peer-id')
const debug = require('debug')
const log = debug('libp2p:plaintext')
log.error = debug('libp2p:plaintext:error')
const { UnexpectedPeerError, InvalidCryptoExchangeError } = require('libp2p-interfaces/src/crypto/errors')
const { Exchange, KeyType } = require('./proto')
const protocol = '/plaintext/2.0.0'
function lpEncodeExchange (exchange) {
const pb = Exchange.encode(exchange)
return lp.encode.single(pb)
}
async function encrypt (localId, conn, remoteId) {
const shake = handshake(conn)
// Encode the public key and write it to the remote peer
shake.write(lpEncodeExchange({
id: localId.toBytes(),
pubkey: {
Type: KeyType.RSA, // TODO: dont hard code
Data: localId.marshalPubKey()
}
}))
log('write pubkey exchange to peer %j', remoteId)
// Get the Exchange message
const response = (await lp.decode.fromReader(shake.reader).next()).value
const id = Exchange.decode(response.slice())
log('read pubkey exchange from peer %j', remoteId)
let peerId
try {
peerId = await PeerId.createFromPubKey(id.pubkey.Data)
} catch (err) {
log.error(err)
throw new InvalidCryptoExchangeError('Remote did not provide its public key')
}
if (remoteId && !peerId.isEqual(remoteId)) {
throw new UnexpectedPeerError()
}
log('plaintext key exchange completed successfully with peer %j', peerId)
shake.rest()
return {
conn: shake.stream,
remotePeer: peerId
}
}
module.exports = {
protocol,
secureInbound: (localId, conn, remoteId) => {
return encrypt(localId, conn, remoteId)
},
secureOutbound: (localId, conn, remoteId) => {
return encrypt(localId, conn, remoteId)
}
}
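A rough sketch of how a caller (e.g. the upgrader) is expected to drive this module; `localPeerId`, `remotePeerId` and `duplexStream` are placeholders:
// Illustrative only: secure an outbound duplex stream with /plaintext/2.0.0
const plaintext = require('./plaintext')
async function secure (localPeerId, duplexStream, remotePeerId) {
  const { conn, remotePeer } = await plaintext.secureOutbound(localPeerId, duplexStream, remotePeerId)
  // `conn` is the post-handshake duplex iterable; `remotePeer` is the PeerId decoded
  // from the remote Exchange message (it must match `remotePeerId` when one is given)
  return { conn, remotePeer }
}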

22
src/insecure/proto.js Normal file
View File

@ -0,0 +1,22 @@
'use strict'
const protobuf = require('protons')
module.exports = protobuf(`
message Exchange {
optional bytes id = 1;
optional PublicKey pubkey = 2;
}
enum KeyType {
RSA = 0;
Ed25519 = 1;
Secp256k1 = 2;
ECDSA = 3;
}
message PublicKey {
required KeyType Type = 1;
required bytes Data = 2;
}
`)
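The generated codec is what plaintext.js above consumes; a small sketch of encoding and decoding an Exchange message (`peerId` is a placeholder PeerId instance):
// Sketch: round-trip an Exchange message through the protons codec
const { Exchange, KeyType } = require('./proto')
const encoded = Exchange.encode({
  id: peerId.toBytes(),
  pubkey: { Type: KeyType.RSA, Data: peerId.marshalPubKey() }
})
const decoded = Exchange.decode(encoded)
// decoded.pubkey.Data can then be passed to PeerId.createFromPubKey()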

3
src/peer-store/README.md Normal file
View File

@ -0,0 +1,3 @@
# Peerstore
WIP

190
src/peer-store/index.js Normal file
View File

@ -0,0 +1,190 @@
'use strict'
const assert = require('assert')
const debug = require('debug')
const log = debug('libp2p:peer-store')
log.error = debug('libp2p:peer-store:error')
const { EventEmitter } = require('events')
const PeerInfo = require('peer-info')
/**
* Responsible for managing known peers, as well as their addresses and metadata
* @fires PeerStore#peer Emitted when a peer is connected to this node
* @fires PeerStore#change:protocols
* @fires PeerStore#change:multiaddrs
*/
class PeerStore extends EventEmitter {
constructor () {
super()
/**
* Map of peers
*
* @type {Map<string, PeerInfo>}
*/
this.peers = new Map()
// TODO: Track ourselves. We should split `peerInfo` up into its pieces so we get better
// control and observability. This will be the initial step for removing PeerInfo
// https://github.com/libp2p/go-libp2p-core/blob/master/peerstore/peerstore.go
// this.addressBook = new Map()
// this.protoBook = new Map()
}
/**
* Stores the peerInfo of a new peer.
* If it already exists, its info is updated.
* @param {PeerInfo} peerInfo
*/
put (peerInfo) {
assert(PeerInfo.isPeerInfo(peerInfo), 'peerInfo must be an instance of peer-info')
// Already know the peer?
if (this.peers.has(peerInfo.id.toB58String())) {
this.update(peerInfo)
} else {
this.add(peerInfo)
// Emit the new peer found
this.emit('peer', peerInfo)
}
}
/**
* Add a new peer to the store.
* @param {PeerInfo} peerInfo
*/
add (peerInfo) {
assert(PeerInfo.isPeerInfo(peerInfo), 'peerInfo must be an instance of peer-info')
// Create new instance and add values to it
const newPeerInfo = new PeerInfo(peerInfo.id)
peerInfo.multiaddrs.forEach((ma) => newPeerInfo.multiaddrs.add(ma))
peerInfo.protocols.forEach((p) => newPeerInfo.protocols.add(p))
const connectedMa = peerInfo.isConnected()
connectedMa && newPeerInfo.connect(connectedMa)
const peerProxy = new Proxy(newPeerInfo, {
set: (obj, prop, value) => {
if (prop === 'multiaddrs') {
this.emit('change:multiaddrs', {
peerInfo: obj,
multiaddrs: value.toArray()
})
} else if (prop === 'protocols') {
this.emit('change:protocols', {
peerInfo: obj,
protocols: Array.from(value)
})
}
return Reflect.set(...arguments)
}
})
this.peers.set(peerInfo.id.toB58String(), peerProxy)
}
/**
* Updates an already known peer.
* @param {PeerInfo} peerInfo
*/
update (peerInfo) {
assert(PeerInfo.isPeerInfo(peerInfo), 'peerInfo must be an instance of peer-info')
const id = peerInfo.id.toB58String()
const recorded = this.peers.get(id)
// pass active connection state
const ma = peerInfo.isConnected()
if (ma) {
recorded.connect(ma)
}
// Verify new multiaddrs
// TODO: better track added and removed multiaddrs
const multiaddrsIntersection = [
...recorded.multiaddrs.toArray()
].filter((m) => peerInfo.multiaddrs.has(m))
if (multiaddrsIntersection.length !== peerInfo.multiaddrs.size ||
multiaddrsIntersection.length !== recorded.multiaddrs.size) {
// recorded.multiaddrs = peerInfo.multiaddrs
recorded.multiaddrs.clear()
for (const ma of peerInfo.multiaddrs.toArray()) {
recorded.multiaddrs.add(ma)
}
this.emit('change:multiaddrs', {
peerInfo: peerInfo,
multiaddrs: recorded.multiaddrs.toArray()
})
}
// Update protocols
// TODO: better track added and removed protocols
const protocolsIntersection = new Set(
[...recorded.protocols].filter((p) => peerInfo.protocols.has(p))
)
if (protocolsIntersection.size !== peerInfo.protocols.size ||
protocolsIntersection.size !== recorded.protocols.size) {
recorded.protocols.clear()
for (const protocol of peerInfo.protocols) {
recorded.protocols.add(protocol)
}
this.emit('change:protocols', {
peerInfo: peerInfo,
protocols: Array.from(recorded.protocols)
})
}
// Add the public key if missing
if (!recorded.id.pubKey && peerInfo.id.pubKey) {
recorded.id.pubKey = peerInfo.id.pubKey
}
}
/**
* Get the info for the given id.
* @param {string} peerId b58str id
* @returns {PeerInfo}
*/
get (peerId) {
const peerInfo = this.peers.get(peerId)
if (peerInfo) {
return peerInfo
}
return undefined
}
/**
* Removes the Peer with the matching `peerId` from the PeerStore
* @param {string} peerId b58str id
* @returns {boolean} true if found and removed
*/
remove (peerId) {
return this.peers.delete(peerId)
}
/**
* Completely replaces the existing peer's metadata with the given `peerInfo`
* @param {PeerInfo} peerInfo
* @returns {void}
*/
replace (peerInfo) {
assert(PeerInfo.isPeerInfo(peerInfo), 'peerInfo must be an instance of peer-info')
this.remove(peerInfo.id.toB58String())
this.add(peerInfo)
}
}
module.exports = PeerStore
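A short usage sketch of the store and its change events; `peerInfo` stands for any peer-info instance:
// Sketch: basic PeerStore usage
const PeerStore = require('./peer-store')
const peerStore = new PeerStore()
peerStore.on('peer', (peerInfo) => {
  // emitted only when a previously unknown peer is added
})
peerStore.on('change:multiaddrs', ({ peerInfo, multiaddrs }) => {
  // emitted when a known peer's multiaddr set changes
})
peerStore.put(peerInfo) // adds a new peer or updates a known one
const known = peerStore.get(peerInfo.id.toB58String())
peerStore.remove(peerInfo.id.toB58String())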

View File

@ -1,6 +1,5 @@
'use strict'
const pull = require('pull-stream')
const debug = require('debug')
const Errors = require('./errors')
const xsalsa20 = require('xsalsa20')
@ -8,45 +7,40 @@ const KEY_LENGTH = require('./key-generator').KEY_LENGTH
const log = debug('libp2p:pnet')
log.trace = debug('libp2p:pnet:trace')
log.err = debug('libp2p:pnet:err')
log.error = debug('libp2p:pnet:err')
/**
* Creates a pull stream to encrypt messages in a private network
* Creates a stream iterable to encrypt messages in a private network
*
* @param {Buffer} nonce The nonce to use in encryption
* @param {Buffer} psk The private shared key to use in encryption
* @returns {PullStream} a through stream
* @returns {*} a through iterable
*/
module.exports.createBoxStream = (nonce, psk) => {
const xor = xsalsa20(nonce, psk)
return pull(
ensureBuffer(),
pull.map((chunk) => {
return xor.update(chunk, chunk)
})
)
return (source) => (async function * () {
for await (const chunk of source) {
yield Buffer.from(xor.update(chunk.slice()))
}
})()
}
/**
* Creates a pull stream to decrypt messages in a private network
* Creates a stream iterable to decrypt messages in a private network
*
* @param {Object} remote Holds the nonce of the peer
* @param {Buffer} nonce The nonce of the remote peer
* @param {Buffer} psk The private shared key to use in decryption
* @returns {PullStream} a through stream
* @returns {*} a through iterable
*/
module.exports.createUnboxStream = (remote, psk) => {
let xor
return pull(
ensureBuffer(),
pull.map((chunk) => {
if (!xor) {
xor = xsalsa20(remote.nonce, psk)
module.exports.createUnboxStream = (nonce, psk) => {
return (source) => (async function * () {
const xor = xsalsa20(nonce, psk)
log.trace('Decryption enabled')
}
return xor.update(chunk, chunk)
})
)
for await (const chunk of source) {
yield Buffer.from(xor.update(chunk.slice()))
}
})()
}
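A self-contained sketch of the new iterable transforms; the nonce and PSK below are random placeholders with the lengths xsalsa20 expects (24 and 32 bytes, matching NONCE_LENGTH and KEY_LENGTH):
// Sketch: box then unbox a chunk with the same nonce/psk pair
const crypto = require('crypto')
const pipe = require('it-pipe')
const { createBoxStream, createUnboxStream } = require('./crypto')
async function demo () {
  const nonce = crypto.randomBytes(24)
  const psk = crypto.randomBytes(32)
  await pipe(
    [Buffer.from('hello')],
    createBoxStream(nonce, psk),   // encrypt outbound chunks
    createUnboxStream(nonce, psk), // the remote side decrypts with the sender's nonce
    async (source) => {
      for await (const chunk of source) {
        // chunk is the decrypted Buffer ('hello')
      }
    }
  )
}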
/**
@ -61,7 +55,7 @@ module.exports.decodeV1PSK = (pskBuffer) => {
// This should pull from multibase/multicodec to allow for
// more encoding flexibility. Ideally we'd consume the codecs
// from the buffer line by line to evaluate the next line
// programatically instead of making assumptions about the
// programmatically instead of making assumptions about the
// encodings of each line.
const metadata = pskBuffer.toString().split(/(?:\r\n|\r|\n)/g)
const pskTag = metadata.shift()
@ -78,21 +72,7 @@ module.exports.decodeV1PSK = (pskBuffer) => {
psk: psk
}
} catch (err) {
log.error(err)
throw new Error(Errors.INVALID_PSK)
}
}
/**
* Returns a through pull-stream that ensures the passed chunks
* are buffers instead of strings
* @returns {PullStream} a through stream
*/
function ensureBuffer () {
return pull.map((chunk) => {
if (typeof chunk === 'string') {
return Buffer.from(chunk, 'utf-8')
}
return chunk
})
}

View File

@ -1,12 +1,17 @@
'use strict'
const pull = require('pull-stream')
const Connection = require('interface-connection').Connection
const pipe = require('it-pipe')
const assert = require('assert')
const duplexPair = require('it-pair/duplex')
const crypto = require('libp2p-crypto')
const Errors = require('./errors')
const State = require('./state')
const decodeV1PSK = require('./crypto').decodeV1PSK
const {
createBoxStream,
createUnboxStream,
decodeV1PSK
} = require('./crypto')
const handshake = require('it-handshake')
const { NONCE_LENGTH } = require('./key-generator')
const debug = require('debug')
const log = debug('libp2p:pnet')
log.err = debug('libp2p:pnet:err')
@ -27,41 +32,41 @@ class Protector {
}
/**
* Takes a given Connection and creates a privaste encryption stream
* Takes a given Connection and creates a private encryption stream
* between its two peers from the PSK the Protector instance was
* created with.
*
* @param {Connection} connection The connection to protect
* @param {function(Error)} callback
* @returns {Connection} The protected connection
* @returns {*} A protected duplex iterable
*/
protect (connection, callback) {
async protect (connection) {
assert(connection, Errors.NO_HANDSHAKE_CONNECTION)
const protectedConnection = new Connection(undefined, connection)
const state = new State(this.psk)
// Exchange nonces
log('protecting the connection')
const localNonce = crypto.randomBytes(NONCE_LENGTH)
// Run the connection through an encryptor
pull(
connection,
state.encrypt((err, encryptedOuterStream) => {
if (err) {
log.err('There was an error attempting to protect the connection', err)
return callback(err)
}
const shake = handshake(connection)
shake.write(localNonce)
connection.getPeerInfo(() => {
protectedConnection.setInnerConn(new Connection(encryptedOuterStream, connection))
log('the connection has been successfully wrapped by the protector')
callback()
})
}),
connection
const result = await shake.reader.next(NONCE_LENGTH)
const remoteNonce = result.value.slice()
shake.rest()
// Create the boxing/unboxing pipe
log('exchanged nonces')
const [internal, external] = duplexPair()
pipe(
external,
// Encrypt all outbound traffic
createBoxStream(localNonce, this.psk),
shake.stream,
// Decrypt all inbound traffic
createUnboxStream(remoteNonce, this.psk),
external
)
return protectedConnection
return internal
}
}
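A sketch of protecting a raw transport connection, assuming the Protector constructor continues to accept the PSK buffer as in the existing module; `swarmKeyBuffer` stands for the contents of a swarm key file and `maConn` for a transport's MultiaddrConnection:
// Illustrative only: wrap a connection before crypto and muxing
const Protector = require('./pnet')
async function protect (swarmKeyBuffer, maConn) {
  const protector = new Protector(swarmKeyBuffer)
  const protectedConn = await protector.protect(maConn)
  // `protectedConn` is a duplex iterable that transparently boxes/unboxes traffic
  return protectedConn
}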

View File

@ -1,110 +0,0 @@
'use strict'
const crypto = require('crypto')
const debug = require('debug')
const pair = require('pull-pair')
const Reader = require('pull-reader')
const cat = require('pull-cat')
const pull = require('pull-stream')
const deferred = require('pull-defer')
const cryptoStreams = require('./crypto')
const NONCE_LENGTH = require('./key-generator').NONCE_LENGTH
const log = debug('libp2p:pnet')
log.err = debug('libp2p:pnet:err')
log.trace = debug('libp2p:pnet:trace')
/**
* Keeps track of the state of a given connection, such as the local psk
* and local and remote nonces for encryption/decryption
*/
class State {
/**
* @param {Buffer} psk The key buffer used for encryption
* @constructor
*/
constructor (psk) {
this.local = {
nonce: Buffer.from(
crypto.randomBytes(NONCE_LENGTH)
),
psk: psk
}
this.remote = { nonce: null }
this.rawReader = Reader(60e3)
this.encryptedReader = Reader(60e3)
this.rawPairStream = pair()
this.encryptedPairStream = pair()
// The raw, pair stream
this.innerRawStream = null
this.outerRawStream = {
sink: this.rawReader,
source: cat([
pull.values([
this.local.nonce
]),
this.rawPairStream.source
])
}
// The encrypted, pair stream
this.innerEncryptedStream = {
sink: this.encryptedReader,
source: this.encryptedPairStream.source
}
this.outerEncryptedStream = null
}
/**
* Creates encryption streams for the given state
*
* @param {function(Error, Connection)} callback
* @returns {void}
*/
encrypt (callback) {
// The outer stream needs to be returned before we setup the
// rest of the streams, so we're delaying the execution
setTimeout(() => {
// Read the nonce first, once we have it resolve the
// deferred source, so we keep reading
const deferredSource = deferred.source()
this.rawReader.read(NONCE_LENGTH, (err, data) => {
if (err) {
log.err('There was an error attempting to read the nonce', err)
}
log.trace('remote nonce received')
this.remote.nonce = data
deferredSource.resolve(this.rawReader.read())
})
this.innerRawStream = {
sink: this.rawPairStream.sink,
source: deferredSource
}
// Create the pull exchange between the two inner streams
pull(
this.innerRawStream,
cryptoStreams.createUnboxStream(this.remote, this.local.psk),
this.innerEncryptedStream,
cryptoStreams.createBoxStream(this.local.nonce, this.local.psk),
this.innerRawStream
)
this.outerEncryptedStream = {
sink: this.encryptedPairStream.sink,
source: this.encryptedReader.read()
}
callback(null, this.outerEncryptedStream)
}, 0)
return this.outerRawStream
}
}
module.exports = State

View File

@ -1,52 +1,21 @@
'use strict'
const nextTick = require('async/nextTick')
const { messages, codes } = require('./errors')
const promisify = require('promisify-es6')
const errCode = require('err-code')
const { messages, codes } = require('./errors')
module.exports = (node, Pubsub, config) => {
const pubsub = new Pubsub(node, config)
const pubsub = new Pubsub(node.peerInfo, node.registrar, config)
return {
/**
* Subscribe the given handler to a pubsub topic
*
* @param {string} topic
* @param {function} handler The handler to subscribe
* @param {object|null} [options]
* @param {function} [callback] An optional callback
*
* @returns {Promise|void} A promise is returned if no callback is provided
*
* @example <caption>Subscribe a handler to a topic</caption>
*
* // `null` must be passed for options until subscribe is no longer using promisify
* const handler = (message) => { }
* await libp2p.subscribe(topic, handler, null)
*
* @example <caption>Use a callback instead of the Promise api</caption>
*
* // `options` may be passed or omitted when supplying a callback
* const handler = (message) => { }
* libp2p.subscribe(topic, handler, callback)
* @returns {void}
*/
subscribe: (topic, handler, options, callback) => {
// can't use promisify because it thinks the handler is a callback
if (typeof options === 'function') {
callback = options
options = {}
}
subscribe: (topic, handler) => {
if (!node.isStarted() && !pubsub.started) {
const err = errCode(new Error(messages.NOT_STARTED_YET), codes.PUBSUB_NOT_STARTED)
if (callback) {
return nextTick(() => callback(err))
}
return Promise.reject(err)
throw errCode(new Error(messages.NOT_STARTED_YET), codes.PUBSUB_NOT_STARTED)
}
if (pubsub.listenerCount(topic) === 0) {
@ -54,46 +23,16 @@ module.exports = (node, Pubsub, config) => {
}
pubsub.on(topic, handler)
if (callback) {
return nextTick(() => callback())
}
return Promise.resolve()
},
/**
* Unsubscribes from a pubsub topic
*
* @param {string} topic
* @param {function|null} handler The handler to unsubscribe from
* @param {function} [callback] An optional callback
*
* @returns {Promise|void} A promise is returned if no callback is provided
*
* @example <caption>Unsubscribe a topic for all handlers</caption>
*
* // `null` must be passed until unsubscribe is no longer using promisify
* await libp2p.unsubscribe(topic, null)
*
* @example <caption>Unsubscribe a topic for 1 handler</caption>
*
* await libp2p.unsubscribe(topic, handler)
*
* @example <caption>Use a callback instead of the Promise api</caption>
*
* libp2p.unsubscribe(topic, handler, callback)
* @param {function} [handler] The handler to unsubscribe from
*/
unsubscribe: (topic, handler, callback) => {
// can't use promisify because it thinks the handler is a callback
unsubscribe: (topic, handler) => {
if (!node.isStarted() && !pubsub.started) {
const err = errCode(new Error(messages.NOT_STARTED_YET), codes.PUBSUB_NOT_STARTED)
if (callback) {
return nextTick(() => callback(err))
}
return Promise.reject(err)
throw errCode(new Error(messages.NOT_STARTED_YET), codes.PUBSUB_NOT_STARTED)
}
if (!handler) {
@ -105,61 +44,61 @@ module.exports = (node, Pubsub, config) => {
if (pubsub.listenerCount(topic) === 0) {
pubsub.unsubscribe(topic)
}
if (callback) {
return nextTick(() => callback())
}
return Promise.resolve()
},
publish: promisify((topic, data, callback) => {
/**
* Publish messages to the given topics.
* @param {Array<string>|string} topic
* @param {Buffer} data
* @returns {Promise<void>}
*/
publish: (topic, data) => {
if (!node.isStarted() && !pubsub.started) {
return nextTick(callback, errCode(new Error(messages.NOT_STARTED_YET), codes.PUBSUB_NOT_STARTED))
throw errCode(new Error(messages.NOT_STARTED_YET), codes.PUBSUB_NOT_STARTED)
}
try {
data = Buffer.from(data)
} catch (err) {
return nextTick(callback, errCode(new Error('data must be convertible to a Buffer'), 'ERR_DATA_IS_NOT_VALID'))
throw errCode(new Error('data must be convertible to a Buffer'), 'ERR_DATA_IS_NOT_VALID')
}
pubsub.publish(topic, data, callback)
}),
return pubsub.publish(topic, data)
},
ls: promisify((callback) => {
/**
* Get a list of topics the node is subscribed to.
* @returns {Array<String>} topics
*/
getTopics: () => {
if (!node.isStarted() && !pubsub.started) {
return nextTick(callback, errCode(new Error(messages.NOT_STARTED_YET), codes.PUBSUB_NOT_STARTED))
throw errCode(new Error(messages.NOT_STARTED_YET), codes.PUBSUB_NOT_STARTED)
}
const subscriptions = Array.from(pubsub.subscriptions)
return pubsub.getTopics()
},
nextTick(() => callback(null, subscriptions))
}),
peers: promisify((topic, callback) => {
/**
* Get a list of the peer-ids that are subscribed to one topic.
* @param {string} topic
* @returns {Array<string>}
*/
getPeersSubscribed: (topic) => {
if (!node.isStarted() && !pubsub.started) {
return nextTick(callback, errCode(new Error(messages.NOT_STARTED_YET), codes.PUBSUB_NOT_STARTED))
throw errCode(new Error(messages.NOT_STARTED_YET), codes.PUBSUB_NOT_STARTED)
}
if (typeof topic === 'function') {
callback = topic
topic = null
}
const peers = Array.from(pubsub.peers.values())
.filter((peer) => topic ? peer.topics.has(topic) : true)
.map((peer) => peer.info.id.toB58String())
nextTick(() => callback(null, peers))
}),
return pubsub.getPeersSubscribed(topic)
},
setMaxListeners (n) {
return pubsub.setMaxListeners(n)
},
start: promisify((cb) => pubsub.start(cb)),
_pubsub: pubsub,
stop: promisify((cb) => pubsub.stop(cb))
start: () => pubsub.start(),
stop: () => pubsub.stop()
}
}
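The resulting API surface on a started node with pubsub enabled, sketched below; the topic, handler and `libp2p` node are placeholders:
// Sketch: pubsub usage after this refactor (subscribe/unsubscribe are now synchronous)
const handler = (msg) => console.log(msg.data.toString())
libp2p.pubsub.subscribe('a-topic', handler)
libp2p.pubsub.publish('a-topic', Buffer.from('hi')).then(() => { /* published */ })
const topics = libp2p.pubsub.getTopics()
const peers = libp2p.pubsub.getPeersSubscribed('a-topic')
libp2p.pubsub.unsubscribe('a-topic', handler)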

138
src/registrar.js Normal file
View File

@ -0,0 +1,138 @@
'use strict'
const assert = require('assert')
const debug = require('debug')
const log = debug('libp2p:registrar')
log.error = debug('libp2p:registrar:error')
const Topology = require('libp2p-interfaces/src/topology')
const { Connection } = require('libp2p-interfaces/src/connection')
const PeerInfo = require('peer-info')
/**
* Responsible for notifying registered protocols of events in the network.
*/
class Registrar {
/**
* @param {Object} props
* @param {PeerStore} props.peerStore
* @constructor
*/
constructor ({ peerStore }) {
this.peerStore = peerStore
/**
* Map of connections per peer
* TODO: this should be handled by connectionManager
* @type {Map<string, Array<conn>>}
*/
this.connections = new Map()
/**
* Map of topologies
*
* @type {Map<string, object>}
*/
this.topologies = new Map()
this._handle = undefined
}
get handle () {
return this._handle
}
set handle (handle) {
this._handle = handle
}
/**
* Add a new connected peer to the record
* TODO: this should live in the ConnectionManager
* @param {PeerInfo} peerInfo
* @param {Connection} conn
* @returns {void}
*/
onConnect (peerInfo, conn) {
assert(PeerInfo.isPeerInfo(peerInfo), 'peerInfo must be an instance of peer-info')
assert(Connection.isConnection(conn), 'conn must be an instance of interface-connection')
const id = peerInfo.id.toB58String()
const storedConn = this.connections.get(id)
if (storedConn) {
storedConn.push(conn)
} else {
this.connections.set(id, [conn])
}
}
/**
* Remove a disconnected peer from the record
* TODO: this should live in the ConnectionManager
* @param {PeerInfo} peerInfo
* @param {Connection} connection
* @param {Error} [error]
* @returns {void}
*/
onDisconnect (peerInfo, connection, error) {
assert(PeerInfo.isPeerInfo(peerInfo), 'peerInfo must be an instance of peer-info')
const id = peerInfo.id.toB58String()
let storedConn = this.connections.get(id)
if (storedConn && storedConn.length > 1) {
storedConn = storedConn.filter((conn) => conn.id === connection.id)
} else if (storedConn) {
for (const [, topology] of this.topologies) {
topology.disconnect(peerInfo, error)
}
this.connections.delete(peerInfo.id.toB58String())
}
}
/**
* Get a connection with a peer.
* @param {PeerInfo} peerInfo
* @returns {Connection}
*/
getConnection (peerInfo) {
assert(PeerInfo.isPeerInfo(peerInfo), 'peerInfo must be an instance of peer-info')
// TODO: what should we return
return this.connections.get(peerInfo.id.toB58String())[0]
}
/**
* Register handlers for a given set of multicodecs
* @param {Topology} topology protocol topology
* @return {string} registrar identifier
*/
register (topology) {
assert(
Topology.isTopology(topology),
'topology must be an instance of interfaces/topology')
// Create topology
const id = (parseInt(Math.random() * 1e9)).toString(36) + Date.now()
this.topologies.set(id, topology)
// Set registrar
topology.registrar = this
return id
}
/**
* Unregister topology.
* @param {string} id registrar identifier
* @return {boolean} unregistered successfully
*/
unregister (id) {
return this.topologies.delete(id)
}
}
module.exports = Registrar
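A sketch of how a protocol subsystem is expected to hook in; the Topology constructor options follow the libp2p-interfaces topology at the time of this change and are indicative only:
// Illustrative only: register interest in peers and get connect/disconnect notifications
const Topology = require('libp2p-interfaces/src/topology')
const topology = new Topology({
  handlers: {
    onConnect: (peerInfo, connection) => { /* a tracked peer connected */ },
    onDisconnect: (peerInfo, error) => { /* the last connection to the peer closed */ }
  }
})
const id = registrar.register(topology) // sets topology.registrar and returns an identifier
// ...later
registrar.unregister(id)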

View File

@ -81,7 +81,7 @@ tests]([./test/pnet.node.js]).
##### `switch.connection.addUpgrade()`
A connection upgrade must be able to receive and return something that implements the [interface-connection](https://github.com/libp2p/interface-connection) specification.
A connection upgrade must be able to receive and return something that implements the [interface-connection](https://github.com/libp2p/js-interfaces/tree/master/src/connection) specification.
> **WIP**
@ -151,7 +151,7 @@ a low priority dial to the provided peer. Calls to `dial` and `dialFSM` will tak
- `error`: emitted whenever a fatal error occurs with the connection; the error will be emitted.
- `error:upgrade_failed`: emitted whenever the connection fails to upgrade with a muxer, this is not fatal.
- `error:connection_attempt_failed`: emitted whenever a dial attempt fails for a given transport. An array of errors is emitted.
- `connection`: emitted whenever a useable connection has been established; the underlying [Connection](https://github.com/libp2p/interface-connection) will be emitted.
- `connection`: emitted whenever a useable connection has been established; the underlying [Connection](https://github.com/libp2p/js-interfaces/tree/master/src/connection) will be emitted.
- `close`: emitted when the connection has closed.
### `switch.handle(protocol, handlerFunc, matchFunc)`
@ -365,7 +365,7 @@ In order for a transport to be supported, it has to follow the [interface-transp
### Connection upgrades
Each connection in libp2p follows the [interface-connection](https://github.com/libp2p/interface-connection) spec. This design decision enables libp2p to have upgradable transports.
Each connection in libp2p follows the [interface-connection](https://github.com/libp2p/js-interfaces/tree/master/src/connection) spec. This design decision enables libp2p to have upgradable transports.
We think of `upgrade` as a very important notion when we are talking about connections; mechanisms like stream multiplexing, congestion control, encrypted channels, multipath and simulcast can all be seen as `upgrades` to a connection. A connection can be as simple as sending a packet towards a destination with no guarantees, while a transport, on the other hand, can be a connection plus a set of upgrades mounted on top of each other, each adding extra functionality and therefore `upgrading` the connection.

View File

@ -1,6 +1,6 @@
'use strict'
const Connection = require('interface-connection').Connection
const { Connection } = require('libp2p-interfaces/src/connection')
const pull = require('pull-stream/pull')
const empty = require('pull-stream/sources/empty')
const timeout = require('async/timeout')

View File

@ -1,6 +1,6 @@
'use strict'
const Connection = require('interface-connection').Connection
const { Connection } = require('libp2p-interfaces/src/connection')
const pull = require('pull-stream/pull')
/**

183
src/transport-manager.js Normal file
View File

@ -0,0 +1,183 @@
'use strict'
const pSettle = require('p-settle')
const { codes } = require('./errors')
const errCode = require('err-code')
const debug = require('debug')
const log = debug('libp2p:transports')
log.error = debug('libp2p:transports:error')
class TransportManager {
/**
* @constructor
* @param {object} options
* @param {Libp2p} options.libp2p The Libp2p instance. It will be passed to the transports.
* @param {Upgrader} options.upgrader The upgrader to provide to the transports
*/
constructor ({ libp2p, upgrader }) {
this.libp2p = libp2p
this.upgrader = upgrader
this._transports = new Map()
this._listeners = new Map()
}
/**
* Adds a `Transport` to the manager
*
* @param {String} key
* @param {Transport} Transport
* @returns {void}
*/
add (key, Transport) {
log('adding %s', key)
if (!key) {
throw errCode(new Error(`Transport must have a valid key, was given '${key}'`), codes.ERR_INVALID_KEY)
}
if (this._transports.has(key)) {
throw errCode(new Error('There is already a transport with this key'), codes.ERR_DUPLICATE_TRANSPORT)
}
const transport = new Transport({
libp2p: this.libp2p,
upgrader: this.upgrader
})
this._transports.set(key, transport)
if (!this._listeners.has(key)) {
this._listeners.set(key, [])
}
}
/**
* Stops all listeners
* @async
*/
async close () {
const tasks = []
for (const [key, listeners] of this._listeners) {
log('closing listeners for %s', key)
while (listeners.length) {
const listener = listeners.pop()
tasks.push(listener.close())
}
}
await Promise.all(tasks)
log('all listeners closed')
this._listeners.clear()
}
/**
* Dials the given Multiaddr over its supported transport
* @param {Multiaddr} ma
* @param {*} options
* @returns {Promise<Connection>}
*/
async dial (ma, options) {
const transport = this.transportForMultiaddr(ma)
if (!transport) {
throw errCode(new Error(`No transport available for address ${String(ma)}`), codes.ERR_TRANSPORT_UNAVAILABLE)
}
try {
return await transport.dial(ma, options)
} catch (err) {
throw errCode(new Error('Transport dial failed'), codes.ERR_TRANSPORT_DIAL_FAILED, err)
}
}
/**
* Returns all the Multiaddrs the listeners are using
* @returns {Multiaddr[]}
*/
getAddrs () {
let addrs = []
for (const listeners of this._listeners.values()) {
for (const listener of listeners) {
addrs = [...addrs, ...listener.getAddrs()]
}
}
return addrs
}
/**
* Finds a transport that matches the given Multiaddr
* @param {Multiaddr} ma
* @returns {Transport|null}
*/
transportForMultiaddr (ma) {
for (const transport of this._transports.values()) {
const addrs = transport.filter([ma])
if (addrs.length) return transport
}
return null
}
/**
* Starts listeners for each given Multiaddr.
* @async
* @param {Multiaddr[]} addrs
*/
async listen (addrs) {
for (const [key, transport] of this._transports.entries()) {
const supportedAddrs = transport.filter(addrs)
const tasks = []
// For each supported multiaddr, create a listener
for (const addr of supportedAddrs) {
log('creating listener for %s on %s', key, addr)
const listener = transport.createListener({}, this.onConnection)
this._listeners.get(key).push(listener)
// We need to attempt to listen on everything
tasks.push(listener.listen(addr))
}
const results = await pSettle(tasks)
// If we are listening on at least 1 address, succeed.
// TODO: we should look at adding a retry (`p-retry`) here to better support
// listening on remote addresses as they may be offline. We could then potentially
// just wait for any (`p-any`) listener to succeed on each transport before returning
const isListening = results.find(r => r.isFulfilled === true)
if (!isListening) {
throw errCode(new Error(`Transport (${key}) could not listen on any available address`), codes.ERR_NO_VALID_ADDRESSES)
}
}
}
/**
* Removes the given transport from the manager.
* If a transport has any running listeners, they will be closed.
*
* @async
* @param {string} key
*/
async remove (key) {
log('removing %s', key)
if (this._listeners.has(key)) {
// Close any running listeners
for (const listener of this._listeners.get(key)) {
await listener.close()
}
}
this._transports.delete(key)
this._listeners.delete(key)
}
/**
* Removes all transports from the manager.
* If any listeners are running, they will be closed.
* @async
*/
async removeAll () {
const tasks = []
for (const key of this._transports.keys()) {
tasks.push(this.remove(key))
}
await Promise.all(tasks)
}
}
module.exports = TransportManager
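A usage sketch; `libp2p-tcp` and the multiaddrs are illustrative, and the `libp2p` and `upgrader` instances are assumed to exist:
// Sketch: add a transport, listen, dial and shut down
const multiaddr = require('multiaddr')
const TCP = require('libp2p-tcp') // illustrative async transport
const TransportManager = require('./transport-manager')
async function demo (libp2p, upgrader) {
  const tm = new TransportManager({ libp2p, upgrader })
  tm.add('tcp', TCP)
  await tm.listen([multiaddr('/ip4/0.0.0.0/tcp/0')])
  console.log(tm.getAddrs()) // the addresses the listeners actually bound to
  const connection = await tm.dial(multiaddr('/ip4/127.0.0.1/tcp/8000'))
  await tm.close()
  return connection
}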

366
src/upgrader.js Normal file
View File

@ -0,0 +1,366 @@
'use strict'
const debug = require('debug')
const log = debug('libp2p:upgrader')
log.error = debug('libp2p:upgrader:error')
const Multistream = require('multistream-select')
const { Connection } = require('libp2p-interfaces/src/connection')
const PeerId = require('peer-id')
const pipe = require('it-pipe')
const errCode = require('err-code')
const { codes } = require('./errors')
/**
* @typedef MultiaddrConnection
* @property {function} sink
* @property {AsyncIterator} source
* @property {*} conn
* @property {Multiaddr} remoteAddr
*/
/**
* @typedef CryptoResult
* @property {*} conn A duplex iterable
* @property {PeerId} remotePeer
* @property {string} protocol
*/
class Upgrader {
/**
* @param {object} options
* @param {PeerId} options.localPeer
* @param {Map<string, Crypto>} options.cryptos
* @param {Map<string, Muxer>} options.muxers
* @param {function(Connection)} options.onConnection Called when a connection is upgraded
* @param {function(Connection)} options.onConnectionEnd Called when an upgraded connection closes
*/
constructor ({
localPeer,
cryptos,
muxers,
onConnectionEnd = () => {},
onConnection = () => {}
}) {
this.localPeer = localPeer
this.cryptos = cryptos || new Map()
this.muxers = muxers || new Map()
this.protector = null
this.protocols = new Map()
this.onConnection = onConnection
this.onConnectionEnd = onConnectionEnd
}
/**
* Upgrades an inbound connection
* @async
* @param {MultiaddrConnection} maConn
* @returns {Promise<Connection>}
*/
async upgradeInbound (maConn) {
let encryptedConn
let remotePeer
let muxedConnection
let Muxer
let cryptoProtocol
log('Starting the inbound connection upgrade')
// Protect
let protectedConn = maConn
if (this.protector) {
protectedConn = await this.protector.protect(maConn)
}
try {
// Encrypt the connection
({
conn: encryptedConn,
remotePeer,
protocol: cryptoProtocol
} = await this._encryptInbound(this.localPeer, protectedConn, this.cryptos))
// Multiplex the connection
;({ stream: muxedConnection, Muxer } = await this._multiplexInbound(encryptedConn, this.muxers))
} catch (err) {
log.error('Failed to upgrade inbound connection', err)
await maConn.close(err)
// TODO: We shouldn't throw here, as there isn't anything to catch the failure
throw err
}
log('Successfully upgraded inbound connection')
return this._createConnection({
cryptoProtocol,
direction: 'inbound',
maConn,
muxedConnection,
Muxer,
remotePeer
})
}
/**
* Upgrades an outbound connection
* @async
* @param {MultiaddrConnection} maConn
* @returns {Promise<Connection>}
*/
async upgradeOutbound (maConn) {
let remotePeerId
try {
remotePeerId = PeerId.createFromB58String(maConn.remoteAddr.getPeerId())
} catch (err) {
log.error('multiaddr did not contain a valid peer id', err)
}
let encryptedConn
let remotePeer
let muxedConnection
let cryptoProtocol
let Muxer
log('Starting the outbound connection upgrade')
// Protect
let protectedConn = maConn
if (this.protector) {
protectedConn = await this.protector.protect(maConn)
}
try {
// Encrypt the connection
({
conn: encryptedConn,
remotePeer,
protocol: cryptoProtocol
} = await this._encryptOutbound(this.localPeer, protectedConn, remotePeerId, this.cryptos))
// Multiplex the connection
;({ stream: muxedConnection, Muxer } = await this._multiplexOutbound(encryptedConn, this.muxers))
} catch (err) {
log.error('Failed to upgrade outbound connection', err)
await maConn.close(err)
throw err
}
log('Successfully upgraded outbound connection')
return this._createConnection({
cryptoProtocol,
direction: 'outbound',
maConn,
muxedConnection,
Muxer,
remotePeer
})
}
/**
* A convenience method for generating a new `Connection`
* @private
* @param {object} options
* @param {string} cryptoProtocol The crypto protocol that was negotiated
* @param {string} direction One of ['inbound', 'outbound']
* @param {MultiaddrConnection} maConn The transport layer connection
* @param {*} muxedConnection A duplex connection returned from multiplexer selection
* @param {Muxer} Muxer The muxer to be used for muxing
* @param {PeerId} remotePeer The peer the connection is with
* @returns {Connection}
*/
_createConnection ({
cryptoProtocol,
direction,
maConn,
muxedConnection,
Muxer,
remotePeer
}) {
// Create the muxer
const muxer = new Muxer({
// Run anytime a remote stream is created
onStream: async muxedStream => {
const mss = new Multistream.Listener(muxedStream)
try {
const { stream, protocol } = await mss.handle(Array.from(this.protocols.keys()))
log('%s: incoming stream opened on %s', direction, protocol)
connection.addStream(stream, protocol)
this._onStream({ connection, stream, protocol })
} catch (err) {
log.error(err)
}
},
// Run anytime a stream closes
onStreamEnd: muxedStream => {
connection.removeStream(muxedStream.id)
}
})
const newStream = async protocols => {
log('%s: starting new stream on %s', direction, protocols)
const muxedStream = muxer.newStream()
const mss = new Multistream.Dialer(muxedStream)
try {
const { stream, protocol } = await mss.select(protocols)
return { stream: { ...muxedStream, ...stream }, protocol }
} catch (err) {
log.error('could not create new stream', err)
throw errCode(err, codes.ERR_UNSUPPORTED_PROTOCOL)
}
}
// Pipe all data through the muxer
pipe(muxedConnection, muxer, muxedConnection)
maConn.timeline.upgraded = Date.now()
const timelineProxy = new Proxy(maConn.timeline, {
set: (...args) => {
if (args[1] === 'close' && args[2]) {
this.onConnectionEnd(connection)
}
return Reflect.set(...args)
}
})
// Create the connection
const connection = new Connection({
localAddr: maConn.localAddr,
remoteAddr: maConn.remoteAddr,
localPeer: this.localPeer,
remotePeer: remotePeer,
stat: {
direction,
timeline: timelineProxy,
multiplexer: Muxer.multicodec,
encryption: cryptoProtocol
},
newStream,
getStreams: () => muxer.streams,
close: err => maConn.close(err)
})
this.onConnection(connection)
return connection
}
/**
* Routes incoming streams to the correct handler
* @private
* @param {object} options
* @param {Connection} options.connection The connection the stream belongs to
* @param {Stream} options.stream
* @param {string} options.protocol
*/
_onStream ({ connection, stream, protocol }) {
const handler = this.protocols.get(protocol)
handler({ connection, stream, protocol })
}
/**
* Attempts to encrypt the incoming `connection` with the provided `cryptos`.
* @private
* @async
* @param {PeerId} localPeer The initiator's PeerId
* @param {*} connection
* @param {Map<string, Crypto>} cryptos
* @returns {CryptoResult} An encrypted connection, remote peer `PeerId` and the protocol of the `Crypto` used
*/
async _encryptInbound (localPeer, connection, cryptos) {
const mss = new Multistream.Listener(connection)
const protocols = Array.from(cryptos.keys())
log('handling inbound crypto protocol selection', protocols)
try {
const { stream, protocol } = await mss.handle(protocols)
const crypto = cryptos.get(protocol)
log('encrypting inbound connection...')
return {
...await crypto.secureInbound(localPeer, stream),
protocol
}
} catch (err) {
throw errCode(err, codes.ERR_ENCRYPTION_FAILED)
}
}
/**
* Attempts to encrypt the given `connection` with the provided `cryptos`.
* The first `Crypto` module to succeed will be used
* @private
* @async
* @param {PeerId} localPeer The initiator's PeerId
* @param {*} connection
* @param {PeerId} remotePeerId
* @param {Map<string, Crypto>} cryptos
* @returns {CryptoResult} An encrypted connection, remote peer `PeerId` and the protocol of the `Crypto` used
*/
async _encryptOutbound (localPeer, connection, remotePeerId, cryptos) {
const mss = new Multistream.Dialer(connection)
const protocols = Array.from(cryptos.keys())
log('selecting outbound crypto protocol', protocols)
try {
const { stream, protocol } = await mss.select(protocols)
const crypto = cryptos.get(protocol)
log('encrypting outbound connection to %j', remotePeerId)
return {
...await crypto.secureOutbound(localPeer, stream, remotePeerId),
protocol
}
} catch (err) {
throw errCode(err, codes.ERR_ENCRYPTION_FAILED)
}
}
/**
* Selects one of the given muxers via multistream-select. That
* muxer will be used for all future streams on the connection.
* @private
* @async
* @param {*} connection A basic duplex connection to multiplex
* @param {Map<string, Muxer>} muxers The muxers to attempt multiplexing with
* @returns {*} A muxed connection
*/
async _multiplexOutbound (connection, muxers) {
const dialer = new Multistream.Dialer(connection)
const protocols = Array.from(muxers.keys())
log('outbound selecting muxer %s', protocols)
try {
const { stream, protocol } = await dialer.select(protocols)
log('%s selected as muxer protocol', protocol)
const Muxer = muxers.get(protocol)
return { stream, Muxer }
} catch (err) {
throw errCode(err, codes.ERR_MUXER_UNAVAILABLE)
}
}
/**
* Registers support for one of the given muxers via multistream-select. The
* selected muxer will be used for all future streams on the connection.
* @private
* @async
* @param {*} connection A basic duplex connection to multiplex
* @param {Map<string, Muxer>} muxers The muxers to attempt multiplexing with
* @returns {*} A muxed connection
*/
async _multiplexInbound (connection, muxers) {
const listener = new Multistream.Listener(connection)
const protocols = Array.from(muxers.keys())
log('inbound handling muxers %s', protocols)
try {
const { stream, protocol } = await listener.handle(protocols)
const Muxer = muxers.get(protocol)
return { stream, Muxer }
} catch (err) {
throw errCode(err, codes.ERR_MUXER_UNAVAILABLE)
}
}
}
module.exports = Upgrader
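A construction sketch; the muxer entry, `peerId` and the handler bodies are illustrative, and `plaintext` refers to the insecure module added earlier in this changeset:
// Illustrative only: build an upgrader and upgrade an inbound MultiaddrConnection
const Upgrader = require('./upgrader')
const plaintext = require('./insecure/plaintext')
const upgrader = new Upgrader({
  localPeer: peerId, // our PeerId (assumed)
  cryptos: new Map([[plaintext.protocol, plaintext]]),
  muxers: new Map([['/mplex/6.7.0', Mplex]]), // multicodec -> muxer class (assumed)
  onConnection: (connection) => { /* fully upgraded connection */ },
  onConnectionEnd: (connection) => { /* upgraded connection closed */ }
})
// `maConn` would come from a transport listener or dial:
// const connection = await upgrader.upgradeInbound(maConn)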

View File

@ -30,4 +30,18 @@ function emitFirst (emitter, events, handler) {
})
}
/**
* Converts BufferList messages to Buffers
* @param {*} source
* @returns {AsyncGenerator}
*/
function toBuffer (source) {
return (async function * () {
for await (const chunk of source) {
yield Buffer.isBuffer(chunk) ? chunk : chunk.slice()
}
})()
}
module.exports.emitFirst = emitFirst
module.exports.toBuffer = toBuffer
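A sketch of where this helper fits; the source and the util path are placeholders:
// Sketch: normalise BufferList chunks (e.g. from it-length-prefixed) into Buffers
const pipe = require('it-pipe')
const { toBuffer } = require('./util')
async function collect (source) {
  const bufs = []
  await pipe(
    source,   // may yield BufferList instances
    toBuffer, // every chunk downstream is now a Buffer
    async (normalised) => {
      for await (const buf of normalised) bufs.push(buf)
    }
  )
  return bufs
}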

View File

@ -1,6 +0,0 @@
'use strict'
require('./circuit-relay.browser')
require('./transports.browser')
require('./switch/browser')

View File

@ -1,93 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const createNode = require('./utils/create-node')
const tryEcho = require('./utils/try-echo')
const echo = require('./utils/echo')
const {
getPeerRelay
} = require('./utils/constants')
function setupNodeWithRelay (addrs, options = {}) {
options = {
config: {
relay: {
enabled: true
},
...options.config
},
...options
}
return new Promise((resolve) => {
createNode(addrs, options, (err, node) => {
expect(err).to.not.exist()
node.handle(echo.multicodec, echo)
node.start((err) => {
expect(err).to.not.exist()
resolve(node)
})
})
})
}
describe('circuit relay', () => {
let browserNode1
let browserNode2
let peerRelay
before('get peer relay', async () => {
peerRelay = await getPeerRelay()
})
before('create the browser nodes', async () => {
[browserNode1, browserNode2] = await Promise.all([
setupNodeWithRelay([]),
setupNodeWithRelay([])
])
})
before('connect to the relay node', async () => {
await Promise.all(
[browserNode1, browserNode2].map((node) => {
return new Promise(resolve => {
node.dialProtocol(peerRelay, (err) => {
expect(err).to.not.exist()
resolve()
})
})
})
)
})
before('give time for HOP support to be determined', async () => {
await new Promise(resolve => {
setTimeout(resolve, 1e3)
})
})
after(async () => {
await Promise.all(
[browserNode1, browserNode2].map((node) => {
return new Promise((resolve) => {
node.stop(resolve)
})
})
)
})
it('should be able to echo over relay', (done) => {
browserNode1.dialProtocol(browserNode2.peerInfo, echo.multicodec, (err, conn) => {
expect(err).to.not.exist()
expect(conn).to.exist()
tryEcho(conn, done)
})
})
})

View File

@ -1,215 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const sinon = require('sinon')
const waterfall = require('async/waterfall')
const series = require('async/series')
const parallel = require('async/parallel')
const Circuit = require('../src/circuit')
const multiaddr = require('multiaddr')
const createNode = require('./utils/create-node')
const tryEcho = require('./utils/try-echo')
const echo = require('./utils/echo')
describe('circuit relay', () => {
const handlerSpies = []
let relayNode1
let relayNode2
let nodeWS1
let nodeWS2
let nodeTCP1
let nodeTCP2
function setupNode (addrs, options, callback) {
if (typeof options === 'function') {
callback = options
options = {}
}
options = options || {}
return createNode(addrs, options, (err, node) => {
expect(err).to.not.exist()
node.handle('/echo/1.0.0', echo)
node.start((err) => {
expect(err).to.not.exist()
handlerSpies.push(sinon.spy(
node._switch.transports[Circuit.tag].listeners[0].hopHandler, 'handle'
))
callback(node)
})
})
}
before(function (done) {
this.timeout(20 * 1000)
waterfall([
// set up passive relay
(cb) => setupNode([
'/ip4/0.0.0.0/tcp/0/ws',
'/ip4/0.0.0.0/tcp/0'
], {
config: {
relay: {
enabled: true,
hop: {
enabled: true,
active: false // passive relay
}
}
}
}, (node) => {
relayNode1 = node
cb()
}),
// setup active relay
(cb) => setupNode([
'/ip4/0.0.0.0/tcp/0/ws',
'/ip4/0.0.0.0/tcp/0'
], {
config: {
relay: {
enabled: true,
hop: {
enabled: true,
active: false // passive relay
}
}
}
}, (node) => {
relayNode2 = node
cb()
}),
// setup node with WS
(cb) => setupNode([
'/ip4/0.0.0.0/tcp/0/ws'
], {
config: {
relay: {
enabled: true
}
}
}, (node) => {
nodeWS1 = node
cb()
}),
// setup node with WS
(cb) => setupNode([
'/ip4/0.0.0.0/tcp/0/ws'
], {
config: {
relay: {
enabled: true
}
}
}, (node) => {
nodeWS2 = node
cb()
}),
// set up node with TCP
(cb) => setupNode([
'/ip4/0.0.0.0/tcp/0'
], {
config: {
relay: {
enabled: true
}
}
}, (node) => {
nodeTCP1 = node
cb()
}),
// set up node with TCP
(cb) => setupNode([
'/ip4/0.0.0.0/tcp/0'
], {
config: {
relay: {
enabled: true
}
}
}, (node) => {
nodeTCP2 = node
cb()
})
], (err) => {
expect(err).to.not.exist()
series([
(cb) => nodeWS1.dial(relayNode1.peerInfo, cb),
(cb) => nodeWS1.dial(relayNode2.peerInfo, cb),
(cb) => nodeTCP1.dial(relayNode1.peerInfo, cb),
(cb) => nodeTCP2.dial(relayNode2.peerInfo, cb)
], done)
})
})
after((done) => {
parallel([
(cb) => relayNode1.stop(cb),
(cb) => relayNode2.stop(cb),
(cb) => nodeWS1.stop(cb),
(cb) => nodeWS2.stop(cb),
(cb) => nodeTCP1.stop(cb),
(cb) => nodeTCP2.stop(cb)
], done)
})
describe('any relay', function () {
this.timeout(20 * 1000)
it('dial from WS1 to TCP1 over any R', (done) => {
nodeWS1.dialProtocol(nodeTCP1.peerInfo, '/echo/1.0.0', (err, conn) => {
expect(err).to.not.exist()
expect(conn).to.exist()
tryEcho(conn, done)
})
})
it('fail to dial - no R from WS2 to TCP1', (done) => {
nodeWS2.dialProtocol(nodeTCP2.peerInfo, '/echo/1.0.0', (err, conn) => {
expect(err).to.exist()
expect(conn).to.not.exist()
done()
})
})
})
describe('explicit relay', function () {
this.timeout(20 * 1000)
it('dial from WS1 to TCP1 over R1', (done) => {
nodeWS1.dialProtocol(nodeTCP1.peerInfo, '/echo/1.0.0', (err, conn) => {
expect(err).to.not.exist()
expect(conn).to.exist()
tryEcho(conn, () => {
const addr = multiaddr(handlerSpies[0].args[2][0].dstPeer.addrs[0]).toString()
expect(addr).to.equal(`/ipfs/${nodeTCP1.peerInfo.id.toB58String()}`)
done()
})
})
})
it('dial from WS1 to TCP2 over R2', (done) => {
nodeWS1.dialProtocol(nodeTCP2.peerInfo, '/echo/1.0.0', (err, conn) => {
expect(err).to.not.exist()
expect(conn).to.exist()
tryEcho(conn, () => {
const addr = multiaddr(handlerSpies[1].args[2][0].dstPeer.addrs[0]).toString()
expect(addr).to.equal(`/ipfs/${nodeTCP2.peerInfo.id.toB58String()}`)
done()
})
})
})
})
})

View File

@ -1,303 +0,0 @@
/* eslint-env mocha */
/* eslint max-nested-callbacks: ["error", 5] */
'use strict'
const Dialer = require('../../src/circuit/circuit/dialer')
const nodes = require('./fixtures/nodes')
const Connection = require('interface-connection').Connection
const multiaddr = require('multiaddr')
const PeerInfo = require('peer-info')
const PeerId = require('peer-id')
const pull = require('pull-stream/pull')
const values = require('pull-stream/sources/values')
const asyncMap = require('pull-stream/throughs/async-map')
const pair = require('pull-pair/duplex')
const pb = require('pull-protocol-buffers')
const proto = require('../../src/circuit/protocol')
const utilsFactory = require('../../src/circuit/circuit/utils')
const sinon = require('sinon')
const chai = require('chai')
const dirtyChai = require('dirty-chai')
const expect = chai.expect
chai.use(dirtyChai)
describe('dialer tests', function () {
let dialer
beforeEach(() => {
dialer = sinon.createStubInstance(Dialer)
})
afterEach(() => {
sinon.restore()
})
describe('.dial', function () {
beforeEach(function () {
dialer.relayPeers = new Map()
dialer.relayPeers.set(nodes.node2.id, new Connection())
dialer.relayPeers.set(nodes.node3.id, new Connection())
dialer.dial.callThrough()
})
it('fail on non circuit addr', function () {
const dstMa = multiaddr(`/ipfs/${nodes.node4.id}`)
expect(() => dialer.dial(dstMa, (err) => {
err.to.match(/invalid circuit address/)
}))
})
it('dial a peer', function (done) {
const dstMa = multiaddr(`/p2p-circuit/ipfs/${nodes.node3.id}`)
dialer._dialPeer.callsFake(function (dstMa, relay, callback) {
return callback(null, dialer.relayPeers.get(nodes.node3.id))
})
dialer.dial(dstMa, (err, conn) => {
expect(err).to.not.exist()
expect(conn).to.be.an.instanceOf(Connection)
done()
})
})
it('dial a peer over the specified relay', function (done) {
const dstMa = multiaddr(`/ipfs/${nodes.node3.id}/p2p-circuit/ipfs/${nodes.node4.id}`)
dialer._dialPeer.callsFake(function (dstMa, relay, callback) {
expect(relay.toString()).to.equal(`/ipfs/${nodes.node3.id}`)
return callback(null, new Connection())
})
dialer.dial(dstMa, (err, conn) => {
expect(err).to.not.exist()
expect(conn).to.be.an.instanceOf(Connection)
done()
})
})
})
describe('.canHop', function () {
let fromConn = null
const peer = new PeerInfo(PeerId.createFromB58String('QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA'))
let p = null
beforeEach(function () {
p = pair()
fromConn = new Connection(p[0])
dialer.relayPeers = new Map()
dialer.relayConns = new Map()
dialer.utils = utilsFactory({})
dialer.canHop.callThrough()
dialer._dialRelayHelper.callThrough()
})
it('should handle successful CAN_HOP', (done) => {
dialer._dialRelay.callsFake((_, cb) => {
pull(
values([{
type: proto.CircuitRelay.type.HOP,
code: proto.CircuitRelay.Status.SUCCESS
}]),
pb.encode(proto.CircuitRelay),
p[1]
)
cb(null, fromConn)
})
dialer.canHop(peer, (err) => {
expect(err).to.not.exist()
expect(dialer.relayPeers.has(peer.id.toB58String())).to.be.ok()
done()
})
})
it('should handle failed CAN_HOP', function (done) {
dialer._dialRelay.callsFake((_, cb) => {
pull(
values([{
type: proto.CircuitRelay.type.HOP,
code: proto.CircuitRelay.Status.HOP_CANT_SPEAK_RELAY
}]),
pb.encode(proto.CircuitRelay),
p[1]
)
cb(null, fromConn)
})
dialer.canHop(peer, (err) => {
expect(err).to.exist()
expect(dialer.relayPeers.has(peer.id.toB58String())).not.to.be.ok()
done()
})
})
})
describe('._dialPeer', function () {
beforeEach(function () {
dialer.relayPeers = new Map()
dialer.relayPeers.set(nodes.node1.id, new Connection())
dialer.relayPeers.set(nodes.node2.id, new Connection())
dialer.relayPeers.set(nodes.node3.id, new Connection())
dialer._dialPeer.callThrough()
})
it('should dial a peer over any relay', function (done) {
const dstMa = multiaddr(`/ipfs/${nodes.node4.id}`)
dialer._negotiateRelay.callsFake(function (conn, dstMa, callback) {
if (conn === dialer.relayPeers.get(nodes.node3.id)) {
return callback(null, dialer.relayPeers.get(nodes.node3.id))
}
callback(new Error('error'))
})
dialer._dialPeer(dstMa, (err, conn) => {
expect(err).to.not.exist()
expect(conn).to.be.an.instanceOf(Connection)
expect(conn).to.deep.equal(dialer.relayPeers.get(nodes.node3.id))
done()
})
})
it('should fail dialing a peer over any relay', function (done) {
const dstMa = multiaddr(`/ipfs/${nodes.node4.id}`)
dialer._negotiateRelay.callsFake(function (conn, dstMa, callback) {
callback(new Error('error'))
})
dialer._dialPeer(dstMa, (err, conn) => {
expect(conn).to.be.undefined()
expect(err).to.not.be.null()
expect(err).to.equal('no relay peers were found or all relays failed to dial')
done()
})
})
})
describe('._negotiateRelay', function () {
const dstMa = multiaddr(`/ipfs/${nodes.node4.id}`)
let conn = null
let peer = null
let p = null
before((done) => {
PeerId.createFromJSON(nodes.node4, (_, peerId) => {
PeerInfo.create(peerId, (err, peerInfo) => {
peer = peerInfo
peer.multiaddrs.add('/p2p-circuit/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE')
done(err)
})
})
})
beforeEach(() => {
dialer.swarm = {
_peerInfo: peer
}
dialer.utils = utilsFactory({})
dialer.relayConns = new Map()
dialer._negotiateRelay.callThrough()
dialer._dialRelayHelper.callThrough()
peer = new PeerInfo(PeerId.createFromB58String('QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE'))
p = pair()
conn = new Connection(p[1])
})
it('should write the correct dst addr', function (done) {
dialer._dialRelay.callsFake((_, cb) => {
pull(
p[0],
pb.decode(proto.CircuitRelay),
asyncMap((msg, cb) => {
expect(msg.dstPeer.addrs[0]).to.deep.equal(dstMa.buffer)
cb(null, {
type: proto.CircuitRelay.Type.STATUS,
code: proto.CircuitRelay.Status.SUCCESS
})
}),
pb.encode(proto.CircuitRelay),
p[0]
)
cb(null, conn)
})
dialer._negotiateRelay(peer, dstMa, done)
})
it('should negotiate relay', function (done) {
dialer._dialRelay.callsFake((_, cb) => {
pull(
p[0],
pb.decode(proto.CircuitRelay),
asyncMap((msg, cb) => {
expect(msg.dstPeer.addrs[0]).to.deep.equal(dstMa.buffer)
cb(null, {
type: proto.CircuitRelay.Type.STATUS,
code: proto.CircuitRelay.Status.SUCCESS
})
}),
pb.encode(proto.CircuitRelay),
p[0]
)
cb(null, conn)
})
dialer._negotiateRelay(peer, dstMa, (err, conn) => {
expect(err).to.not.exist()
expect(conn).to.be.instanceOf(Connection)
done()
})
})
it('should fail with an invalid peer id', function (done) {
const dstMa = multiaddr('/ip4/127.0.0.1/tcp/4001')
dialer._dialRelay.callsFake((_, cb) => {
pull(
p[0],
pb.decode(proto.CircuitRelay),
asyncMap((msg, cb) => {
expect(msg.dstPeer.addrs[0]).to.deep.equal(dstMa.buffer)
cb(null, {
type: proto.CircuitRelay.Type.STATUS,
code: proto.CircuitRelay.Status.SUCCESS
})
}),
pb.encode(proto.CircuitRelay),
p[0]
)
cb(null, conn)
})
dialer._negotiateRelay(peer, dstMa, (err, conn) => {
expect(err).to.exist()
expect(conn).to.not.exist()
done()
})
})
it('should handle failed relay negotiation', function (done) {
dialer._dialRelay.callsFake((_, cb) => {
cb(null, conn)
pull(
values([{
type: proto.CircuitRelay.Type.STATUS,
code: proto.CircuitRelay.Status.MALFORMED_MESSAGE
}]),
pb.encode(proto.CircuitRelay),
p[0]
)
})
dialer._negotiateRelay(peer, dstMa, (err, conn) => {
expect(err).to.not.be.null()
expect(err).to.be.an.instanceOf(Error)
expect(err.message).to.be.equal('Got 400 error code trying to dial over relay')
done()
})
})
})
})

View File

@ -1,25 +0,0 @@
'use strict'
exports.node1 = {
id: 'QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE',
privKey: 'CAASpwkwggSjAgEAAoIBAQDJwzJPar4nylKY71Mm5q2BOED8uPf1ILvIi15VwVZWqter6flnlii/RKEcBypPbFqJHHa56MvybgQgrFmHKwDjnJvq4jyOZfR+o/D/99Ft1p2FAEBjImSXAgNpK4YsbyV5r0Q1+Avcj++aWWlLu6enUrL9WGzeUkf0U5L6XwXEPRUQdEojAIQi241P1hyqXX5gKAZVGqcPtKb6p1db3fcXodkS1G6JR90TopJHCqTCECp3SB9c6LlG7KXU92sIHJBlhOEEzGkEI1pM1SWnNnW5VLEypU7P56ifzzp4QxPNiJeC+cmE5SrgR3cXP44iKOuNVRJwBpCh5oNYqECzgqJ9AgMBAAECggEBAJpCdqXHrAmKJCqv2HiGqCODGhTfax1s4IYNIJwaTOPIjUrwgfKUGSVb2H4wcEX3RyVLsO6lMcFyIg/FFlJFK9HavE8SmFAbXZqxx6I9HE+JZjf5IEFrW1Mlg+wWDejNNe7adSF6O79wATaWo+32VNGWZilTQTGd4UvJ1jc9DZCh8zZeNhm4C6exXD45gMB0HI1t2ZNl47scsBEE4rV+s7F7y8Yk/tIsf0wSI/H8KSXS5I9aFxr3Z9c3HOfbVwhnIfNUDqcFTeU5BnhByYNLJ4v9xGj7puidcabVXkt2zLmm/LHbKVeGzec9LW5D+KkuB/pKaslsCXN6bVlu+SbVr9UCgYEA7MXfzZw36vDyfn4LPCN0wgzz11uh3cm31QzOPlWpA7hIsL/eInpvc8wa9yBRC1sRk41CedPHn913MR6EJi0Ne6/B1QOmRYBUjr60VPRNdTXCAiLykjXg6+TZ+AKnxlUGK1hjTo8krhpWq7iD/JchVlLoqDAXGFHvSxN0H3WEUm8CgYEA2iWC9w1v+YHfT2PXcLxYde9EuLVkIS4TM7Kb0N3wr/4+K4xWjVXuaJJLJoAbihNAZw0Y+2s1PswDUEpSG0jXeNXLs6XcQxYSEAu/pFdvHFeg2BfwVQoeEFlWyTJR29uti9/APaXMo8FSVAPPR5lKZLStJDM9hEfAPfUaHyic39MCgYAKQbwjNQw7Ejr+/cjQzxxkt5jskFyftfhPs2FP0/ghYB9OANHHnpQraQEWCYFZQ5WsVac2jdUM+NQL/a1t1e/Klt+HscPHKPsAwAQh1f9w/2YrH4ZwjQL0VRKYKs1HyzEcOZT7tzm4jQ2KHNEi5Q0dpzPK7WJivFHoZ6xVHIsh4wKBgAQq20mk9BKsLHvzyFXbA0WdgI6WyIbpvmwqaVegJcz26nEiiTTCA3/z64OcxunoXD6bvXJwJeBBPX73LIJg7dzdGLsh3AdcEJRF5S9ajEDaW7RFIM4/FzvwuPu2/mFY3QPjDmUfGb23H7+DIx6XCxjJatVaNT6lsEJ+wDUALZ8JAoGAO0YJSEziA7y0dXPK5azkJUMJ5yaN+zRDmoBnEggza34rQW0s16NnIR0EBzKGwbpNyePlProv4dQEaLF1kboKsSYvV2rW2ftLVdNqBHEUYFRC9ofPctCxwM1YU21TI2/k1squ+swApg2EHMev2+WKd+jpVPIbCIvJ3AjiAKZtiGQ=',
pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDJwzJPar4nylKY71Mm5q2BOED8uPf1ILvIi15VwVZWqter6flnlii/RKEcBypPbFqJHHa56MvybgQgrFmHKwDjnJvq4jyOZfR+o/D/99Ft1p2FAEBjImSXAgNpK4YsbyV5r0Q1+Avcj++aWWlLu6enUrL9WGzeUkf0U5L6XwXEPRUQdEojAIQi241P1hyqXX5gKAZVGqcPtKb6p1db3fcXodkS1G6JR90TopJHCqTCECp3SB9c6LlG7KXU92sIHJBlhOEEzGkEI1pM1SWnNnW5VLEypU7P56ifzzp4QxPNiJeC+cmE5SrgR3cXP44iKOuNVRJwBpCh5oNYqECzgqJ9AgMBAAE='
}
exports.node2 = {
id: 'QmYJjAri5soV8RbeQcHaYYcTAYTET17QTvcoFMyKvRDTXe',
privKey: 'CAASpgkwggSiAgEAAoIBAQDt7YgUeBQsoN/lrgo690mB7yEh8G9iXhZiDecgZCLRRSl3v2cH9w4WjhoW9erfnVbdoTqkCK+se8uK01ySi/ubQQDPcrjacXTa6wAuRTbCG/0bUR9RxKtxZZBS1HaY7L923ulgGDTiVaRQ3JQqhzmQkaU0ikNcluSGaw0kmhXP6JmcL+wndKgW5VD9etcp2Qlk8uUFC/GAO90cOAuER3wnI3ocHGm9on9zyb97g4TDzIfjSaTW4Wanmx2yVbURQxmCba16X3LT9IMPqQaGOzq3+EewMLeCESbUm/uJaJLdqWrWRK4oNzxcMgmUkzav+s476HdA9CRo72am+g3Vdq+lAgMBAAECggEAcByKD6MZVoIjnlVo6qoVUA1+3kAuK/rLrz5/1wp4QYXGaW+eO+mVENm6v3D3UJESGnLbb+nL5Ymbunmn2EHvuBNkL1wOcJgfiPxM5ICmscaAeHu8N0plwpQp8m28yIheG8Qj0az2VmQmfhfCFVwMquuGHgC8hwdu/Uu6MLIObx1xjtaGbY9kk7nzAeXHeJ4RDeuNN0QrYuQVKwrIz1NtPNDR/cli298ZXJcm+HEhBCIHVIYpAq6BHSuiXVqPGEOYWYXo+yVhEtDJ8BmNqlN1Y1s6bnfu/tFkKUN6iQQ46vYnQEGTGR9lg7J/c6tqfRs9FcywWb9J1SX6HxPO8184zQKBgQD6vDYl20UT4ZtrzhFfMyV/1QUqFM/TdwNuiOsIewHBol9o7aOjrxrrbYVa1HOxETyBjmFsW+iIfOVl61SG2HcU4CG+O2s9WBo4JdRlOm4YQ8/83xO3YfbXzuTx8BMCyP/i1uPIZTKQFFAN0HiL96r4L60xHoWB7tQsbZiEbIO/2wKBgQDy7HnkgVeTld6o0+sT84FYRUotjDB00oUWiSeGtj0pFC4yIxhMhD8QjKiWoJyJItcoCsQ/EncuuwwRtuXi83793lJQR1DBYd+TSPg0M8J1pw97fUIPi/FU+jHtrsx7Vn/7Bk9voictsYVLAfbi68tYdsZpAaYOWYMY9NUfVuAmfwKBgCYZDwk1hgt9TkZVK2KRvPLthTldrC5veQAEoeHJ/vxTFbg105V9d9Op8odYnLOc8NqmrbrvRCfpAlo4JcHPhliPrdDf6m2Jw4IgjWNMO4pIU4QSyUYmBoHIGBWC6wCTVf47tKSwa7xkub0/nfF2km3foKtD/fk+NtMBXBlS+7ndAoGAJo6GIlCtN82X07AfJcGGjB4jUetoXYJ0gUkvruAKARUk5+xOFQcAg33v3EiNz+5pu/9JesFRjWc+2Sjwf/8p7t10ry1Ckg8Yz2XLj22PteDYQj91VsZdfaFgf1s5NXJbSdqMjSltkoEUqP0c1JOcaOQhRdVvJ+PpPPLPSPQfC70CgYBvJE1I06s7BEM1DOli3VyfNaJDI4k9W2dCJOU6Bh2MNmbdRjM3xnpOKH5SqRlCz/oI9pn4dxgbX6WPg331MD9CNYy2tt5KBQRrSuDj8p4jlzMIpX36hsyTTrzYU6WWSIPz6jXW8IexXKvXEmr8TVb78ZPiQfbG012cdUhAJniNgg==',
pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDt7YgUeBQsoN/lrgo690mB7yEh8G9iXhZiDecgZCLRRSl3v2cH9w4WjhoW9erfnVbdoTqkCK+se8uK01ySi/ubQQDPcrjacXTa6wAuRTbCG/0bUR9RxKtxZZBS1HaY7L923ulgGDTiVaRQ3JQqhzmQkaU0ikNcluSGaw0kmhXP6JmcL+wndKgW5VD9etcp2Qlk8uUFC/GAO90cOAuER3wnI3ocHGm9on9zyb97g4TDzIfjSaTW4Wanmx2yVbURQxmCba16X3LT9IMPqQaGOzq3+EewMLeCESbUm/uJaJLdqWrWRK4oNzxcMgmUkzav+s476HdA9CRo72am+g3Vdq+lAgMBAAE='
}
exports.node3 = {
id: 'QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA',
privKey: 'CAASpwkwggSjAgEAAoIBAQDdnGp0X7Pix5dIawfyuffVryRDRS5JXdyjayKUkgikJLYoiijB5TakrFKhx1SDKpmVLxxqAGz8m5iA2cHwetIQXTZvdYx7XXxv332En3ji8TiGRUiEFM8KQ5WCJ5G7yw8R2pv/pYdnMrPd04QbtSCn0cFVCiiA2Zkl5KnwBo/lf+sVI/TEeiwmVD9nxi13qWgBTmCysqH8Ppyu8fq+bQgqRZSlalVDswyIhgWlepPkD0uYakJJhhOxY+2RlbNhGY0qjRyMTYou2uR/hfd6j8uR++WdB0v3+DYWG2Kc3sWa4BLYb5r4trvQGO1Iagnwuk3AVoi7PldsaInekzWEVljDAgMBAAECggEAXx0jE49/xXWkmJBXePYYSL5C8hxfIV4HtJvm251R2CFpjTy/AXk/Wq4bSRQkUaeXA1CVAWntXP3rFmJfurb8McnP80agZNJa9ikV1jYbzEt71yUlWosT0XPwV0xkYBVnAmKxUafZ1ZENYcfGi53RxjVgpP8XIzZBZOIfjcVDPVw9NAOzQmq4i3DJEz5xZAkaeSM8mn5ZFl1JMBUOgyOHB7d4BWd3zuLyvnn0/08HlsaSUl0mZa3f2Lm2NlsjOiNfMCJTOIT+xDEP9THm5n2cqieSjvtpAZzV4kcoD0rB8OsyHQlFAEXzkgELDr5dVXji0rrIdVz8stYAKGfi996OAQKBgQDuviV1sc+ClJQA59vqbBiKxWqcuCKMzvmL4Yk1e/AkQeRt+JX9kALWzBx65fFmHTj4Lus8AIQoiruPxa0thtqh/m3SlucWnrdaW410xbz3KqQWS7bx+0sFWZIEi4N+PESrIYhtVbFuRiabYgliqdSU9shxtXXnvfhjl+9quZltiwKBgQDtoUCKqrZbm0bmzLvpnKdNodg1lUHaKGgEvWgza2N1t3b/GE07iha2KO3hBDta3bdfIEEOagY8o13217D0VIGsYNKpiEGLEeNIjfcXBEqAKiTfa/sXUfTprpWBZQ/7ZS+eZIYtQjq14EHa7ifAby1v3yDrMIuxphz5JfKdXFgYqQKBgHr47FikPwu2tkmFJCyqgzWvnEufOQSoc7eOc1tePIKggiX2/mM+M4gqWJ0hJeeAM+D6YeZlKa2sUBItMxeZN7JrWGw5mEx5cl4TfFhipgP2LdDiLRiVZL4bte+rYQ67wm8XdatDkYIIlkhBBi6Q5dPZDcQsQNAedPvvvb2OXi4jAoGBAKp06FpP+L2fle2LYSRDlhNvDCvrpDA8mdkEkRGJb/AKKdb09LnH5WDH3VNy+KzGrHoVJfWUAmNPAOFHeYzabaZcUeEAd5utui7afytIjbSABrEpwRTKWneiH2aROzSnMdBZ5ZHjlz/N3Q+RlHxKg/piwTdUPHCzasch/HX6vsr5AoGAGvhCNPKyCwpu8Gg5GQdx0yN6ZPar9wieD345cLanDZWKkLRQbo4SfkfoS+PDfOLzDbWFdPRnWQ0qhdPm3D/N1YD/nudHqbeDlx0dj/6lEHmmPKFFO2kiNFEhn8DycNGbvWyVBKksacuRXav21+LvW+TatUkRMhi8fgRoypnbJjg=',
pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDdnGp0X7Pix5dIawfyuffVryRDRS5JXdyjayKUkgikJLYoiijB5TakrFKhx1SDKpmVLxxqAGz8m5iA2cHwetIQXTZvdYx7XXxv332En3ji8TiGRUiEFM8KQ5WCJ5G7yw8R2pv/pYdnMrPd04QbtSCn0cFVCiiA2Zkl5KnwBo/lf+sVI/TEeiwmVD9nxi13qWgBTmCysqH8Ppyu8fq+bQgqRZSlalVDswyIhgWlepPkD0uYakJJhhOxY+2RlbNhGY0qjRyMTYou2uR/hfd6j8uR++WdB0v3+DYWG2Kc3sWa4BLYb5r4trvQGO1Iagnwuk3AVoi7PldsaInekzWEVljDAgMBAAE='
}
exports.node4 = {
id: 'QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy',
privKey: 'CAASqAkwggSkAgEAAoIBAQC6pg6LYWbY+49SOYdYap6RPqKqZxg80IXeo3hiTUbiGtTruxVYZpnz3UbernL9J9mwlXJGRUQJUKmXmi1yePTQiyclpH0KyPefaWLbpxJQdCBI1TPZpDWo2hutWSPqhKBU1QyH2FLKQPWLdxuIX1cNFPtIlSl5gCxN6oiDIwh7++kxNM1G+d5XgJX6iHLlLoNv3Wn6XYX+VtYdyZRFk8gYyT2BrISbxmsrSjSOodwUUzF8TYTjsqW6ksL2x0mrRm2cMM9evULktqwU+I8l9ulASDbFWBXUToXaZSL9M+Oq5JvZO0WIjPeYVpAgWladtayhdxg5dBv8aTbDaM5DZvyRAgMBAAECggEAR65YbZz1k6Vg0HI5kXI4/YzxicHYJBrtHqjnJdGJxHILjZCmzPFydJ5phkG29ZRlXRS381bMn0s0Jn3WsFzVoHWgjitSvl6aAsXFapgKR42hjHcc15vh47wH3xYZ3gobTRkZG96vRO+XnX0bvM7orqR9MM3gRMI9wZqt3LcKnhpiqSlyEZ3Zehu7ZZ8B+XcUw42H6ZTXgmg5mCFEjS/1rVt+EsdZl7Ll7jHigahPA6qMjyRiZB6T20qQ0FFYfmaNuRuuC6cWUXf8DOgnEjMB/Mi/Feoip9bTqNBrVYn2XeDxdMv5pDznNKXpalsMkZwx5FpNOMKnIMdQFyAGtkeQ9QKBgQD3rjTiulitpbbQBzF8VXeymtMJAbR1TAqNv2yXoowhL3JZaWICM7nXHjjsJa3UzJygbi8bO0KWrw7tY0nUbPy5SmHtNYhmUsEjiTjqEnNRrYN68tEKr0HlgX+9rArsjOcwucl2svFSfk+rTYDHU5neZkDDhu1QmnZm/pQI92Lo4wKBgQDA6wpMd53fmX9DhWegs3xelRStcqBTw1ucWVRyPgY1hO1cJ0oReYIXKEw9CHNLW0RHvnVM26kRnqCl+dTcg7dhLuqrckuyQyY1KcRYG1ryJnz3euucaSF2UCsZCHvFNV7Vz8dszUMUVCogWmroVP6HE/BoazUCNh25s/dNwE+i+wKBgEfa1WL1luaBzgCaJaQhk4FQY2sYgIcLEYDACTwQn0C9aBpCdXmYEhEzpmX0JHM5DTOJ48atsYrPrK/3/yJOoB8NUk2kGzc8SOYLWGSoB6aphRx1N2o3IBH6ONoJAH5R/nxnWehCz7oUBP74lCS/v0MDPUS8bzrUJQeKUd4sDxjrAoGBAIRO7rJA+1qF+J1DWi4ByxNHJXZLfh/UhPj23w628SU1dGDWZVsUvZ7KOXdGW2RcRLj7q5E5uXtnEoCillViVJtnRPSun7Gzkfm2Gn3ezQH0WZKVkA+mnpd5JgW2JsS69L6pEPnS0OWZT4b+3AFZgXL8vs2ucR2CJeLdxYdilHuPAoGBAPLCzBkAboXZZtvEWqzqtVNqdMrjLHihFrpg4TXSsk8+ZQZCVN+sRyTGTvBX8+Jvx4at6ClaSgT3eJ/412fEH6CHvrFXjUE9W9y6X0axxaT63y1OXmFiB/hU3vjLWZKZWSDGNS7St02fYri4tWmGtJDjYG1maLRhMSzcoj4fP1xz',
pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6pg6LYWbY+49SOYdYap6RPqKqZxg80IXeo3hiTUbiGtTruxVYZpnz3UbernL9J9mwlXJGRUQJUKmXmi1yePTQiyclpH0KyPefaWLbpxJQdCBI1TPZpDWo2hutWSPqhKBU1QyH2FLKQPWLdxuIX1cNFPtIlSl5gCxN6oiDIwh7++kxNM1G+d5XgJX6iHLlLoNv3Wn6XYX+VtYdyZRFk8gYyT2BrISbxmsrSjSOodwUUzF8TYTjsqW6ksL2x0mrRm2cMM9evULktqwU+I8l9ulASDbFWBXUToXaZSL9M+Oq5JvZO0WIjPeYVpAgWladtayhdxg5dBv8aTbDaM5DZvyRAgMBAAE='
}
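For reference, a minimal sketch (illustrative, not part of this diff) of how these fixtures become usable peer identities with the callback-style peer-id and peer-info APIs the tests below rely on:

'use strict'
const PeerId = require('peer-id')
const PeerInfo = require('peer-info')
const nodes = require('./fixtures/nodes')

// Recreate a deterministic peer from its stored JSON keys, then wrap it in a
// PeerInfo so that multiaddrs can be attached before handing it to libp2p.
PeerId.createFromJSON(nodes.node4, (err, peerId) => {
  if (err) throw err
  PeerInfo.create(peerId, (err, peerInfo) => {
    if (err) throw err
    peerInfo.multiaddrs.add('/ip4/127.0.0.1/tcp/0')
    // peerInfo.id.toB58String() === nodes.node4.id
  })
})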


@ -1,22 +0,0 @@
'use strict'
const Libp2p = require('../../../src')
const secio = require('libp2p-secio')
class TestNode extends Libp2p {
constructor (peerInfo, transports, muxer, options) {
options = options || {}
const modules = {
transport: transports,
connection: {
muxer: [muxer],
crypto: options.isCrypto ? [secio] : null
},
discovery: []
}
super(modules, peerInfo, null, options)
}
}
module.exports = TestNode


@ -1,78 +0,0 @@
'use strict'
const TestNode = require('./test-node')
const PeerInfo = require('peer-info')
const PeerId = require('peer-id')
const eachAsync = require('async/each')
exports.createNodes = function createNodes (configNodes, callback) {
const nodes = {}
eachAsync(Object.keys(configNodes), (key, cb1) => {
const config = configNodes[key]
const setup = (err, peer) => {
if (err) {
return callback(err)
}
eachAsync(config.addrs, (addr, cb2) => {
peer.multiaddrs.add(addr)
cb2()
}, (err) => {
if (err) {
return callback(err)
}
nodes[key] = new TestNode(peer, config.transports, config.muxer, config.config)
cb1()
})
}
if (config.id) {
PeerId.createFromJSON(config.id, (err, peerId) => {
if (err) return callback(err)
PeerInfo.create(peerId, setup)
})
} else {
PeerInfo.create(setup)
}
}, (err) => {
if (err) {
return callback(err)
}
startNodes(nodes, (err) => {
if (err) {
return callback(err)
}
callback(null, nodes)
})
})
}
function startNodes (nodes, callback) {
eachAsync(Object.keys(nodes),
(key, cb) => {
nodes[key].start(cb)
},
(err) => {
if (err) {
return callback(err)
}
callback(null)
})
}
exports.stopNodes = function stopNodes (nodes, callback) {
eachAsync(Object.keys(nodes),
(key, cb) => {
nodes[key].stop(cb)
},
(err) => {
if (err) {
return callback(err)
}
callback()
})
}
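For reference, a usage sketch (illustrative, not part of this diff; the require path for this helper module is hypothetical) of the per-node config shape that createNodes reads above (id, addrs, transports, muxer and the options forwarded to TestNode):

const TCP = require('libp2p-tcp')
const Multiplex = require('libp2p-mplex')
const fixtures = require('./fixtures/nodes')
const { createNodes, stopNodes } = require('./utils/create-nodes') // hypothetical path

createNodes({
  relay: {
    id: fixtures.node3,              // optional: omit to generate a fresh PeerId
    addrs: ['/ip4/127.0.0.1/tcp/0'],
    transports: [TCP],
    muxer: Multiplex,
    config: { isCrypto: true }       // becomes TestNode's options (enables secio)
  }
}, (err, nodes) => {
  if (err) throw err
  // nodes.relay is a started TestNode; tear everything down when finished
  stopNodes(nodes, () => {})
})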


@ -1,433 +0,0 @@
/* eslint-env mocha */
/* eslint max-nested-callbacks: ["error", 5] */
'use strict'
const Hop = require('../../src/circuit/circuit/hop')
const nodes = require('./fixtures/nodes')
const Connection = require('interface-connection').Connection
const handshake = require('pull-handshake')
const waterfall = require('async/waterfall')
const PeerInfo = require('peer-info')
const PeerId = require('peer-id')
const multiaddr = require('multiaddr')
const pull = require('pull-stream/pull')
const values = require('pull-stream/sources/values')
const collect = require('pull-stream/sinks/collect')
const lp = require('pull-length-prefixed')
const proto = require('../../src/circuit/protocol')
const StreamHandler = require('../../src/circuit/circuit/stream-handler')
const sinon = require('sinon')
const chai = require('chai')
const dirtyChai = require('dirty-chai')
const expect = chai.expect
chai.use(dirtyChai)
describe('relay', () => {
describe('.handle', () => {
let relay
let swarm
let fromConn
let stream
let shake
beforeEach((done) => {
stream = handshake({ timeout: 1000 * 60 })
shake = stream.handshake
fromConn = new Connection(stream)
const peerInfo = new PeerInfo(PeerId.createFromB58String('QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA'))
fromConn.setPeerInfo(peerInfo)
const peers = {
QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE:
new PeerInfo(PeerId.createFromB58String('QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE')),
QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA:
new PeerInfo(PeerId.createFromB58String('QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA')),
QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy:
new PeerInfo(PeerId.createFromB58String('QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy'))
}
Object.keys(peers).forEach((key) => { peers[key]._connectedMultiaddr = true }) // make it truthy
waterfall([
(cb) => PeerId.createFromJSON(nodes.node4, cb),
(peerId, cb) => PeerInfo.create(peerId, cb),
(peer, cb) => {
peer.multiaddrs.add('/p2p-circuit/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE')
swarm = {
_peerInfo: peer,
conns: {
QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE: new Connection(),
QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA: new Connection(),
QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy: new Connection()
},
_peerBook: {
get: (peer) => {
if (!peers[peer]) {
throw new Error()
}
return peers[peer]
}
}
}
cb()
}
], () => {
relay = new Hop(swarm, { enabled: true })
relay._circuit = sinon.stub()
relay._circuit.callsArgWith(2, null, new Connection())
done()
})
})
afterEach(() => {
relay._circuit.reset()
})
it('should handle a valid circuit request', (done) => {
const relayMsg = {
type: proto.CircuitRelay.Type.HOP,
srcPeer: {
id: PeerId.createFromB58String('QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE').id,
addrs: [multiaddr('/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE').buffer]
},
dstPeer: {
id: PeerId.createFromB58String('QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA').id,
addrs: [multiaddr('/ipfs/QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA').buffer]
}
}
relay.on('circuit:success', () => {
expect(relay._circuit.calledWith(sinon.match.any, relayMsg)).to.be.ok()
done()
})
relay.handle(relayMsg, new StreamHandler(fromConn))
})
it('should handle a request to a passive circuit', (done) => {
const relayMsg = {
type: proto.CircuitRelay.Type.HOP,
srcPeer: {
id: PeerId.createFromB58String('QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA').id,
addrs: [multiaddr('/ipfs/QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA').buffer]
},
dstPeer: {
id: PeerId.createFromB58String('QmYJjAri5soV8RbeQcHaYYcTAYTET17QTvcoFMyKvRDTXe').id,
addrs: [multiaddr('/ipfs/QmYJjAri5soV8RbeQcHaYYcTAYTET17QTvcoFMyKvRDTXe').buffer]
}
}
relay.active = false
lp.decodeFromReader(
shake,
(err, msg) => {
expect(err).to.not.exist()
const response = proto.CircuitRelay.decode(msg)
expect(response.code).to.equal(proto.CircuitRelay.Status.HOP_NO_CONN_TO_DST)
expect(response.type).to.equal(proto.CircuitRelay.Type.STATUS)
done()
})
relay.handle(relayMsg, new StreamHandler(fromConn))
})
it('should handle a request to an active circuit', (done) => {
const relayMsg = {
type: proto.CircuitRelay.Type.HOP,
srcPeer: {
id: PeerId.createFromB58String('QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA').id,
addrs: [multiaddr('/ipfs/QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA').buffer]
},
dstPeer: {
id: PeerId.createFromB58String('QmYJjAri5soV8RbeQcHaYYcTAYTET17QTvcoFMyKvRDTXe').id,
addrs: [multiaddr('/ipfs/QmYJjAri5soV8RbeQcHaYYcTAYTET17QTvcoFMyKvRDTXe').buffer]
}
}
relay.active = true
relay.on('circuit:success', () => {
expect(relay._circuit.calledWith(sinon.match.any, relayMsg)).to.be.ok()
done()
})
relay.on('circuit:error', (err) => {
done(err)
})
relay.handle(relayMsg, new StreamHandler(fromConn))
})
it('should not dial to self', (done) => {
const relayMsg = {
type: proto.CircuitRelay.Type.HOP,
srcPeer: {
id: PeerId.createFromB58String('QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE').id,
addrs: [multiaddr('/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE').buffer]
},
dstPeer: {
id: PeerId.createFromB58String('QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE').id,
addrs: [multiaddr('/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE').buffer]
}
}
lp.decodeFromReader(
shake,
(err, msg) => {
expect(err).to.not.exist()
const response = proto.CircuitRelay.decode(msg)
expect(response.code).to.equal(proto.CircuitRelay.Status.HOP_CANT_RELAY_TO_SELF)
expect(response.type).to.equal(proto.CircuitRelay.Type.STATUS)
done()
})
relay.handle(relayMsg, new StreamHandler(fromConn))
})
it('fail on invalid src address', (done) => {
const relayMsg = {
type: proto.CircuitRelay.Type.HOP,
srcPeer: {
id: 'sdfkjsdnfkjdsb',
addrs: ['sdfkjsdnfkjdsb']
},
dstPeer: {
id: PeerId.createFromB58String('QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA').id,
addrs: [multiaddr('/ipfs/QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA').buffer]
}
}
lp.decodeFromReader(
shake,
(err, msg) => {
expect(err).to.not.exist()
const response = proto.CircuitRelay.decode(msg)
expect(response.code).to.equal(proto.CircuitRelay.Status.HOP_SRC_MULTIADDR_INVALID)
expect(response.type).to.equal(proto.CircuitRelay.Type.STATUS)
done()
})
relay.handle(relayMsg, new StreamHandler(fromConn))
})
it('fail on invalid dst address', (done) => {
const relayMsg = {
type: proto.CircuitRelay.Type.HOP,
srcPeer: {
id: PeerId.createFromB58String('QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA').id,
addrs: [multiaddr('/ipfs/QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA').buffer]
},
dstPeer: {
id: PeerId.createFromB58String('QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE').id,
addrs: ['sdfkjsdnfkjdsb']
}
}
lp.decodeFromReader(
shake,
(err, msg) => {
expect(err).to.not.exist()
const response = proto.CircuitRelay.decode(msg)
expect(response.code).to.equal(proto.CircuitRelay.Status.HOP_DST_MULTIADDR_INVALID)
expect(response.type).to.equal(proto.CircuitRelay.Type.STATUS)
done()
})
relay.handle(relayMsg, new StreamHandler(fromConn))
})
})
describe('._circuit', () => {
let relay
let swarm
let srcConn
let dstConn
let srcStream
let dstStream
let srcShake
let dstShake
before((done) => {
srcStream = handshake({ timeout: 1000 * 60 })
srcShake = srcStream.handshake
srcConn = new Connection(srcStream)
dstStream = handshake({ timeout: 1000 * 60 })
dstShake = dstStream.handshake
dstConn = new Connection(dstStream)
const peerInfo = new PeerInfo(PeerId.createFromB58String('QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA'))
srcConn.setPeerInfo(peerInfo)
const peers = {
QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE:
new PeerInfo(PeerId.createFromB58String('QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE')),
QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA:
new PeerInfo(PeerId.createFromB58String('QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA')),
QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy:
new PeerInfo(PeerId.createFromB58String('QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy'))
}
Object.keys(peers).forEach((key) => { peers[key]._connectedMultiaddr = true }) // make it truthy
waterfall([
(cb) => PeerId.createFromJSON(nodes.node4, cb),
(peerId, cb) => PeerInfo.create(peerId, cb),
(peer, cb) => {
peer.multiaddrs.add('/p2p-circuit/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE')
swarm = {
_peerInfo: peer,
conns: {
QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE: new Connection(),
QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA: new Connection(),
QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy: new Connection()
},
_peerBook: {
get: (peer) => {
if (!peers[peer]) {
throw new Error()
}
return peers[peer]
}
}
}
cb()
}
], () => {
relay = new Hop(swarm, { enabled: true })
relay._dialPeer = sinon.stub()
relay._dialPeer.callsArgWith(1, null, dstConn)
done()
})
})
after(() => relay._dialPeer.reset())
describe('should correctly dial destination node', () => {
const msg = {
type: proto.CircuitRelay.Type.STOP,
srcPeer: {
id: Buffer.from('QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA'),
addrs: [Buffer.from('dsfsdfsdf')]
},
dstPeer: {
id: Buffer.from('QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE'),
addrs: [Buffer.from('sdflksdfndsklfnlkdf')]
}
}
before(() => {
relay._circuit(
new StreamHandler(srcConn),
msg,
(err) => {
expect(err).to.not.exist()
})
})
it('should respond with SUCCESS to source node', (done) => {
lp.decodeFromReader(
srcShake,
(err, msg) => {
expect(err).to.not.exist()
const response = proto.CircuitRelay.decode(msg)
expect(response.type).to.equal(proto.CircuitRelay.Type.STATUS)
expect(response.code).to.equal(proto.CircuitRelay.Status.SUCCESS)
done()
})
})
it('should send STOP message to destination node', (done) => {
lp.decodeFromReader(
dstShake,
(err, _msg) => {
expect(err).to.not.exist()
const response = proto.CircuitRelay.decode(_msg)
expect(response.type).to.deep.equal(msg.type)
expect(response.srcPeer).to.deep.equal(msg.srcPeer)
expect(response.dstPeer).to.deep.equal(msg.dstPeer)
done()
})
})
it('should create circuit', (done) => {
pull(
values([proto.CircuitRelay.encode({
type: proto.CircuitRelay.Type.STATUS,
code: proto.CircuitRelay.Status.SUCCESS
})]),
lp.encode(),
collect((err, encoded) => {
expect(err).to.not.exist()
encoded.forEach((e) => dstShake.write(e))
pull(
values([Buffer.from('hello')]),
lp.encode(),
collect((err, encoded) => {
expect(err).to.not.exist()
encoded.forEach((e) => srcShake.write(e))
lp.decodeFromReader(
dstShake,
(err, _msg) => {
expect(err).to.not.exist()
expect(_msg.toString()).to.equal('hello')
done()
})
})
)
})
)
})
})
describe('should fail creating circuit', () => {
const msg = {
type: proto.CircuitRelay.Type.STOP,
srcPeer: {
id: Buffer.from('QmQWqGdndSpAkxfk8iyiJyz3XXGkrDNujvc8vEst3baubA'),
addrs: [Buffer.from('dsfsdfsdf')]
},
dstPeer: {
id: Buffer.from('QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE'),
addrs: [Buffer.from('sdflksdfndsklfnlkdf')]
}
}
it('should not create circuit', (done) => {
relay._circuit(
new StreamHandler(srcConn),
msg,
(err) => {
expect(err).to.exist()
expect(err).to.match(/Unable to create circuit!/)
done()
})
pull(
values([proto.CircuitRelay.encode({
type: proto.CircuitRelay.Type.STATUS,
code: proto.CircuitRelay.Status.STOP_RELAY_REFUSED
})]),
lp.encode(),
collect((err, encoded) => {
expect(err).to.not.exist()
encoded.forEach((e) => dstShake.write(e))
})
)
})
})
})
})


@ -1,292 +0,0 @@
/* eslint-env mocha */
'use strict'
const Listener = require('../../src/circuit/listener')
const nodes = require('./fixtures/nodes')
const waterfall = require('async/waterfall')
const PeerInfo = require('peer-info')
const PeerId = require('peer-id')
const multiaddr = require('multiaddr')
const handshake = require('pull-handshake')
const Connection = require('interface-connection').Connection
const proto = require('../../src/circuit/protocol')
const lp = require('pull-length-prefixed')
const pull = require('pull-stream/pull')
const values = require('pull-stream/sources/values')
const collect = require('pull-stream/sinks/collect')
const multicodec = require('../../src/circuit/multicodec')
const chai = require('chai')
const dirtyChai = require('dirty-chai')
const expect = chai.expect
chai.use(dirtyChai)
const sinon = require('sinon')
describe('listener', function () {
describe('listen', function () {
let swarm = null
let handlerSpy = null
let listener = null
let stream = null
let shake = null
let conn = null
beforeEach(function (done) {
stream = handshake({ timeout: 1000 * 60 })
shake = stream.handshake
conn = new Connection(stream)
conn.setPeerInfo(new PeerInfo(PeerId
.createFromB58String('QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE')))
waterfall([
(cb) => PeerId.createFromJSON(nodes.node4, cb),
(peerId, cb) => PeerInfo.create(peerId, cb),
(peer, cb) => {
swarm = {
_peerInfo: peer,
handle: sinon.spy((proto, h) => {
handlerSpy = sinon.spy(h)
}),
conns: {
QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE: new Connection()
}
}
listener = Listener(swarm, {}, () => {})
listener.listen()
cb()
}
], done)
})
afterEach(() => {
listener = null
})
it('should handle HOP', function (done) {
handlerSpy(multicodec.relay, conn)
const relayMsg = {
type: proto.CircuitRelay.Type.HOP,
srcPeer: {
id: 'QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE',
addrs: ['/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE']
},
dstPeer: {
id: 'QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy',
addrs: ['/ipfs/QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy']
}
}
listener.hopHandler.handle = (message, conn) => {
expect(message.type).to.equal(proto.CircuitRelay.Type.HOP)
expect(message.srcPeer.id.toString()).to.equal(relayMsg.srcPeer.id)
expect(message.srcPeer.addrs[0].toString()).to.equal(relayMsg.srcPeer.addrs[0])
expect(message.dstPeer.id.toString()).to.equal(relayMsg.dstPeer.id)
expect(message.dstPeer.addrs[0].toString()).to.equal(relayMsg.dstPeer.addrs[0])
done()
}
pull(
values([proto.CircuitRelay.encode(relayMsg)]),
lp.encode(),
collect((err, encoded) => {
expect(err).to.not.exist()
encoded.forEach((e) => shake.write(e))
})
)
})
it('should handle STOP', function (done) {
handlerSpy(multicodec.relay, conn)
const relayMsg = {
type: proto.CircuitRelay.Type.STOP,
srcPeer: {
id: 'QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE',
addrs: ['/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE']
},
dstPeer: {
id: 'QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy',
addrs: ['/ipfs/QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy']
}
}
listener.stopHandler.handle = (message, conn) => {
expect(message.type).to.equal(proto.CircuitRelay.Type.STOP)
expect(message.srcPeer.id.toString()).to.equal(relayMsg.srcPeer.id)
expect(message.srcPeer.addrs[0].toString()).to.equal(relayMsg.srcPeer.addrs[0])
expect(message.dstPeer.id.toString()).to.equal(relayMsg.dstPeer.id)
expect(message.dstPeer.addrs[0].toString()).to.equal(relayMsg.dstPeer.addrs[0])
done()
}
pull(
values([proto.CircuitRelay.encode(relayMsg)]),
lp.encode(),
collect((err, encoded) => {
expect(err).to.not.exist()
encoded.forEach((e) => shake.write(e))
})
)
})
it('should emit \'connection\'', function (done) {
handlerSpy(multicodec.relay, conn)
const relayMsg = {
type: proto.CircuitRelay.Type.STOP,
srcPeer: {
id: 'QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE',
addrs: ['/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE']
},
dstPeer: {
id: 'QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy',
addrs: ['/ipfs/QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy']
}
}
listener.stopHandler.handle = (message, sh) => {
const newConn = new Connection(sh.rest())
listener.stopHandler.emit('connection', newConn)
}
listener.on('connection', (conn) => {
expect(conn).to.be.instanceof(Connection)
done()
})
pull(
values([proto.CircuitRelay.encode(relayMsg)]),
lp.encode(),
collect((err, encoded) => {
expect(err).to.not.exist()
encoded.forEach((e) => shake.write(e))
})
)
})
it('should handle CAN_HOP', function (done) {
handlerSpy(multicodec.relay, conn)
const relayMsg = {
type: proto.CircuitRelay.Type.CAN_HOP,
srcPeer: {
id: 'QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE',
addrs: ['/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE']
},
dstPeer: {
id: 'QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy',
addrs: ['/ipfs/QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy']
}
}
listener.hopHandler.handle = (message, conn) => {
expect(message.type).to.equal(proto.CircuitRelay.Type.CAN_HOP)
expect(message.srcPeer.id.toString()).to.equal(relayMsg.srcPeer.id)
expect(message.srcPeer.addrs[0].toString()).to.equal(relayMsg.srcPeer.addrs[0])
expect(message.dstPeer.id.toString()).to.equal(relayMsg.dstPeer.id)
expect(message.dstPeer.addrs[0].toString()).to.equal(relayMsg.dstPeer.addrs[0])
done()
}
pull(
values([proto.CircuitRelay.encode(relayMsg)]),
lp.encode(),
collect((err, encoded) => {
expect(err).to.not.exist()
encoded.forEach((e) => shake.write(e))
})
)
})
it('should handle invalid message correctly', function (done) {
handlerSpy(multicodec.relay, conn)
const relayMsg = {
type: 100000,
srcPeer: {
id: Buffer.from('QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE'),
addrs: [multiaddr('/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE').buffer]
},
dstPeer: {
id: Buffer.from('QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy'),
addrs: [multiaddr('/ipfs/QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy').buffer]
}
}
pull(
values([Buffer.from([relayMsg])]),
lp.encode(),
collect((err, encoded) => {
expect(err).to.not.exist()
encoded.forEach((e) => shake.write(e))
}),
lp.decodeFromReader(shake, { maxLength: this.maxLength }, (err, msg) => {
expect(err).to.not.exist()
expect(proto.CircuitRelay.decode(msg).type).to.equal(proto.CircuitRelay.Type.STATUS)
expect(proto.CircuitRelay.decode(msg).code).to.equal(proto.CircuitRelay.Status.MALFORMED_MESSAGE)
done()
})
)
})
})
describe('getAddrs', function () {
let swarm = null
let listener = null
let peerInfo = null
beforeEach(function (done) {
waterfall([
(cb) => PeerId.createFromJSON(nodes.node4, cb),
(peerId, cb) => PeerInfo.create(peerId, cb),
(peer, cb) => {
swarm = {
_peerInfo: peer
}
peerInfo = peer
listener = Listener(swarm, {}, () => {})
cb()
}
], done)
})
afterEach(() => {
peerInfo = null
})
it('should return correct addrs', function () {
peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/4002')
peerInfo.multiaddrs.add('/ip4/127.0.0.1/tcp/4003/ws')
listener.getAddrs((err, addrs) => {
expect(err).to.not.exist()
expect(addrs).to.deep.equal([
multiaddr('/p2p-circuit/ip4/0.0.0.0/tcp/4002/ipfs/QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy'),
multiaddr('/p2p-circuit/ip4/127.0.0.1/tcp/4003/ws/ipfs/QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy')])
})
})
it('doesn\'t return default addrs in an explicit p2p-circuit address', function () {
peerInfo.multiaddrs.add('/ip4/127.0.0.1/tcp/4003/ws')
peerInfo.multiaddrs.add('/p2p-circuit/ip4/0.0.0.0/tcp/4002')
listener.getAddrs((err, addrs) => {
expect(err).to.not.exist()
expect(addrs[0]
.toString())
.to.equal('/p2p-circuit/ip4/0.0.0.0/tcp/4002/ipfs/QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy')
})
})
})
})


@ -1,50 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
const dirtyChai = require('dirty-chai')
const expect = chai.expect
chai.use(dirtyChai)
const multiaddr = require('multiaddr')
const proto = require('../../src/circuit/protocol')
describe('protocol', function () {
let msgObject = null
let message = null
before(() => {
msgObject = {
type: proto.CircuitRelay.Type.HOP,
srcPeer: {
id: Buffer.from('QmSource'),
addrs: [
multiaddr('/p2p-circuit/ipfs/QmSource').buffer,
multiaddr('/p2p-circuit/ip4/0.0.0.0/tcp/9000/ipfs/QmSource').buffer,
multiaddr('/ip4/0.0.0.0/tcp/9000/ipfs/QmSource').buffer
]
},
dstPeer: {
id: Buffer.from('QmDest'),
addrs: [
multiaddr('/p2p-circuit/ipfs/QmDest').buffer,
multiaddr('/p2p-circuit/ip4/1.1.1.1/tcp/9000/ipfs/QmDest').buffer,
multiaddr('/ip4/1.1.1.1/tcp/9000/ipfs/QmDest').buffer
]
}
}
const buff = proto.CircuitRelay.encode(msgObject)
message = proto.CircuitRelay.decode(buff)
})
it('should round-trip source and dest peers', () => {
expect(message.srcPeer).to.deep.equal(msgObject.srcPeer)
expect(message.dstPeer).to.deep.equal(msgObject.dstPeer)
})
it('should encode message', () => {
expect(message.message).to.deep.equal(msgObject.message)
})
})


@ -1,85 +0,0 @@
/* eslint-env mocha */
'use strict'
const Stop = require('../../src/circuit/circuit/stop')
const nodes = require('./fixtures/nodes')
const Connection = require('interface-connection').Connection
const handshake = require('pull-handshake')
const waterfall = require('async/waterfall')
const PeerInfo = require('peer-info')
const PeerId = require('peer-id')
const StreamHandler = require('../../src/circuit/circuit/stream-handler')
const proto = require('../../src/circuit/protocol')
const chai = require('chai')
const dirtyChai = require('dirty-chai')
const expect = chai.expect
chai.use(dirtyChai)
describe('stop', function () {
describe('handle relayed connections', function () {
let stopHandler
let swarm
let conn
let stream
beforeEach(function (done) {
stream = handshake({ timeout: 1000 * 60 })
conn = new Connection(stream)
const peerId = PeerId.createFromB58String('QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE')
conn.setPeerInfo(new PeerInfo(peerId))
waterfall([
(cb) => PeerId.createFromJSON(nodes.node4, cb),
(peerId, cb) => PeerInfo.create(peerId, cb),
(peer, cb) => {
peer.multiaddrs.add('/p2p-circuit/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE')
swarm = {
_peerInfo: peer,
conns: {
QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE: new Connection()
}
}
stopHandler = new Stop(swarm)
cb()
}
], done)
})
it('handle request with a valid multiaddr', function (done) {
stopHandler.handle({
type: proto.CircuitRelay.Type.STOP,
srcPeer: {
id: 'QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE',
addrs: ['/ipfs/QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE']
},
dstPeer: {
id: 'QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy',
addrs: ['/ipfs/QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy']
}
}, new StreamHandler(conn), (conn) => { // multistream handler doesn't expect errors...
expect(conn).to.be.instanceOf(Connection)
done()
})
})
it('handle request with invalid multiaddr', function (done) {
stopHandler.handle({
type: proto.CircuitRelay.Type.STOP,
srcPeer: {
id: 'QmSswe1dCFRepmhjAMR5VfHeokGLcvVggkuDJm7RMfJSrE',
addrs: ['dsfsdfsdf']
},
dstPeer: {
id: 'QmQvM2mpqkjyXWbTHSUidUAWN26GgdMphTh9iGDdjgVXCy',
addrs: ['sdflksdfndsklfnlkdf']
}
}, new StreamHandler(conn), (conn) => {
expect(conn).to.not.exist()
done()
})
})
})
})


@ -1,358 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const PeerInfo = require('peer-info')
const PeerId = require('peer-id')
const waterfall = require('async/waterfall')
const WS = require('libp2p-websockets')
const Bootstrap = require('libp2p-bootstrap')
const DelegatedPeerRouter = require('libp2p-delegated-peer-routing')
const DelegatedContentRouter = require('libp2p-delegated-content-routing')
const DHT = require('libp2p-kad-dht')
const validateConfig = require('../src/config').validate
describe('configuration', () => {
let peerInfo
before((done) => {
waterfall([
(cb) => PeerId.create({ bits: 512 }, cb),
(peerId, cb) => PeerInfo.create(peerId, cb),
(info, cb) => {
peerInfo = info
cb()
}
], () => done())
})
it('should throw an error if peerInfo is missing', () => {
expect(() => {
validateConfig({
modules: {
transport: [WS]
}
})
}).to.throw()
})
it('should throw an error if modules is missing', () => {
expect(() => {
validateConfig({
peerInfo
})
}).to.throw()
})
it('should throw an error if there are no transports', () => {
expect(() => {
validateConfig({
peerInfo,
modules: {
transport: []
}
})
}).to.throw('ERROR_EMPTY')
})
it('should add defaults to config', () => {
const options = {
peerInfo,
modules: {
transport: [WS],
peerDiscovery: [Bootstrap],
dht: DHT
}
}
const expected = {
peerInfo,
connectionManager: {
minPeers: 25
},
modules: {
transport: [WS],
peerDiscovery: [Bootstrap],
dht: DHT
},
config: {
peerDiscovery: {
autoDial: true
},
pubsub: {
enabled: true,
emitSelf: true,
signMessages: true,
strictSigning: true
},
dht: {
kBucketSize: 20,
enabled: false,
randomWalk: {
enabled: false,
queriesPerPeriod: 1,
interval: 300000,
timeout: 10000
}
},
relay: {
enabled: true,
hop: {
active: false,
enabled: false
}
}
}
}
expect(validateConfig(options)).to.deep.equal(expected)
})
it('should add defaults to missing items', () => {
const options = {
peerInfo,
modules: {
transport: [WS],
peerDiscovery: [Bootstrap],
dht: DHT
},
config: {
peerDiscovery: {
bootstrap: {
interval: 1000,
enabled: true
}
},
dht: {
enabled: false
},
relay: {
enabled: true
},
pubsub: {
enabled: true
}
}
}
const expected = {
peerInfo,
connectionManager: {
minPeers: 25
},
modules: {
transport: [WS],
peerDiscovery: [Bootstrap],
dht: DHT
},
config: {
peerDiscovery: {
autoDial: true,
bootstrap: {
interval: 1000,
enabled: true
}
},
pubsub: {
enabled: true,
emitSelf: true,
signMessages: true,
strictSigning: true
},
dht: {
kBucketSize: 20,
enabled: false,
randomWalk: {
enabled: false,
queriesPerPeriod: 1,
interval: 300000,
timeout: 10000
}
},
relay: {
enabled: true,
hop: {
active: false,
enabled: false
}
}
}
}
expect(validateConfig(options)).to.deep.equal(expected)
})
it('should allow for configuring the switch', () => {
const options = {
peerInfo,
switch: {
denyTTL: 60e3,
denyAttempts: 5,
maxParallelDials: 100,
maxColdCalls: 50,
dialTimeout: 30e3
},
modules: {
transport: [WS],
peerDiscovery: []
}
}
expect(validateConfig(options)).to.deep.include({
switch: {
denyTTL: 60e3,
denyAttempts: 5,
maxParallelDials: 100,
maxColdCalls: 50,
dialTimeout: 30e3
}
})
})
it('should allow for delegated content and peer routing', () => {
const peerRouter = new DelegatedPeerRouter()
const contentRouter = new DelegatedContentRouter(peerInfo)
const options = {
peerInfo,
modules: {
transport: [WS],
peerDiscovery: [Bootstrap],
peerRouting: [peerRouter],
contentRouting: [contentRouter],
dht: DHT
},
config: {
peerDiscovery: {
bootstrap: {
interval: 1000,
enabled: true
}
}
}
}
expect(validateConfig(options).modules).to.deep.include({
peerRouting: [peerRouter],
contentRouting: [contentRouter]
})
})
it('should not allow for dht to be enabled without it being provided', () => {
const options = {
peerInfo,
modules: {
transport: [WS]
},
config: {
dht: {
enabled: true
}
}
}
expect(() => validateConfig(options)).to.throw()
})
it('should be able to add validators and selectors for dht', () => {
const selectors = {}
const validators = {}
const options = {
peerInfo,
modules: {
transport: [WS],
dht: DHT
},
config: {
dht: {
selectors,
validators
}
}
}
const expected = {
peerInfo,
connectionManager: {
minPeers: 25
},
modules: {
transport: [WS],
dht: DHT
},
config: {
pubsub: {
enabled: true,
emitSelf: true,
signMessages: true,
strictSigning: true
},
peerDiscovery: {
autoDial: true
},
relay: {
enabled: true,
hop: {
active: false,
enabled: false
}
},
dht: {
kBucketSize: 20,
enabled: false,
randomWalk: {
enabled: false,
queriesPerPeriod: 1,
interval: 300000,
timeout: 10000
},
selectors,
validators
}
}
}
expect(validateConfig(options)).to.deep.equal(expected)
})
it('should support new properties for the dht config', () => {
const options = {
peerInfo,
modules: {
transport: [WS],
dht: DHT
},
config: {
dht: {
kBucketSize: 20,
enabled: false,
myNewDHTConfigProperty: true,
randomWalk: {
enabled: false,
queriesPerPeriod: 1,
interval: 300000,
timeout: 10000
}
}
}
}
const expected = {
kBucketSize: 20,
enabled: false,
myNewDHTConfigProperty: true,
randomWalk: {
enabled: false,
queriesPerPeriod: 1,
interval: 300000,
timeout: 10000
}
}
const actual = validateConfig(options).config.dht
expect(actual).to.deep.equal(expected)
})
})
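For reference, a condensed sketch (illustrative, not part of this diff) of the behaviour these assertions pin down: validateConfig requires peerInfo plus at least one transport and fills in the remaining defaults.

const validateConfig = require('../src/config').validate
const WS = require('libp2p-websockets')
const Bootstrap = require('libp2p-bootstrap')
const DHT = require('libp2p-kad-dht')

// `peerInfo` is assumed to be a PeerInfo instance created beforehand.
const validated = validateConfig({
  peerInfo,
  modules: { transport: [WS], peerDiscovery: [Bootstrap], dht: DHT }
})

console.log(validated.connectionManager.minPeers)    // 25
console.log(validated.config.dht.kBucketSize)        // 20
console.log(validated.config.relay.enabled)          // true
console.log(validated.config.peerDiscovery.autoDial) // true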


@ -1,19 +0,0 @@
/* eslint-env mocha */
'use strict'
const Prepare = require('./utils/prepare')
describe('default', function () {
const prepare = Prepare(3, { pollInterval: 1000 })
before(prepare.before)
after(prepare.after)
it('does not kick out any peer', (done) => {
prepare.connManagers().forEach((connManager) => {
connManager.on('disconnected', () => {
throw new Error('should not have disconnected')
})
})
setTimeout(done, 1900)
})
})


@ -1,36 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const Prepare = require('./utils/prepare')
const PEER_COUNT = 3
describe('maxData', function () {
const prepare = Prepare(PEER_COUNT, {
maxData: 100,
minPeers: 1
})
before(prepare.create)
after(prepare.after)
it('kicks out peer after maxData reached', function (done) {
this.timeout(10000)
let disconnects = 0
const manager = prepare.connManagers()[0]
manager.on('disconnected', () => {
disconnects++
expect(disconnects).to.be.most(PEER_COUNT - 2)
manager.removeAllListeners('disconnected')
done()
})
prepare.tryConnectAll((err) => {
expect(err).to.not.exist()
})
})
})


@ -1,59 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const Prepare = require('./utils/prepare')
const PEER_COUNT = 3
describe('maxEventLoopDelay', function () {
const prepare = Prepare(PEER_COUNT, [{
pollInterval: 1000,
maxEventLoopDelay: 5,
minPeers: 1
}])
before(prepare.create)
after(prepare.after)
it('kicks out peer after maxEventLoopDelay reached', function (done) {
this.timeout(10000)
let stopped = false
let disconnects = 0
const manager = prepare.connManagers()[0]
manager.on('disconnected', () => {
disconnects++
expect(disconnects).to.be.most(PEER_COUNT - 2)
manager.removeAllListeners('disconnected')
stopped = true
done()
})
prepare.tryConnectAll((err) => {
expect(err).to.not.exist()
makeDelay()
})
function makeDelay () {
let sum = 0
for (let i = 0; i < 1000000; i++) {
sum += Math.random()
}
debug(sum)
if (!stopped) {
setTimeout(makeDelay, 0)
}
}
})
})
function debug (what) {
if (what === 0) {
// never true but the compiler doesn't know that
throw new Error('something went wrong')
}
}


@ -1,37 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const Prepare = require('./utils/prepare')
const PEER_COUNT = 3
describe('maxPeersPerProtocol', function () {
const prepare = Prepare(PEER_COUNT, [{
maxPeersPerProtocol: {
tcp: 1
}
}])
before(prepare.create)
after(prepare.after)
it('kicks out peers in excess', function (done) {
this.timeout(10000)
let disconnects = 0
const manager = prepare.connManagers()[0]
manager.on('disconnected', () => {
disconnects++
expect(disconnects).to.be.most(PEER_COUNT - 2)
manager.removeAllListeners('disconnected')
done()
})
prepare.tryConnectAll((err) => {
expect(err).to.not.exist()
})
})
})


@ -1,35 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const Prepare = require('./utils/prepare')
const PEER_COUNT = 3
describe('maxPeers', function () {
const prepare = Prepare(PEER_COUNT, [{
maxPeers: 1
}])
before(prepare.create)
after(prepare.after)
it('kicks out peers in excess', function (done) {
this.timeout(10000)
let disconnects = 0
const manager = prepare.connManagers()[0]
manager.on('disconnected', () => {
disconnects++
expect(disconnects).to.be.most(PEER_COUNT - 2)
manager.removeAllListeners('disconnected')
done()
})
prepare.tryConnectAll((err, eachNodeConnections) => {
expect(err).to.not.exist()
})
})
})


@ -1,36 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const Prepare = require('./utils/prepare')
const PEER_COUNT = 3
describe('maxReceivedData', function () {
const prepare = Prepare(PEER_COUNT, {
maxReceivedData: 50,
minPeers: 1
})
before(prepare.create)
after(prepare.after)
it('kicks out peer after maxReceivedData reached', function (done) {
this.timeout(10000)
let disconnects = 0
const manager = prepare.connManagers()[0]
manager.on('disconnected', () => {
disconnects++
expect(disconnects).to.be.most(PEER_COUNT - 2)
manager.removeAllListeners('disconnected')
done()
})
prepare.tryConnectAll((err, eachNodeConnections) => {
expect(err).to.not.exist()
})
})
})


@ -1,36 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const Prepare = require('./utils/prepare')
const PEER_COUNT = 3
describe('maxSentData', function () {
const prepare = Prepare(PEER_COUNT, [{
maxSentData: 50,
minPeers: 1
}])
before(prepare.create)
after(prepare.after)
it('kicks out peer after maxSentData reached', function (done) {
this.timeout(10000)
let disconnects = 0
const manager = prepare.connManagers()[0]
manager.on('disconnected', () => {
disconnects++
expect(disconnects).to.be.most(PEER_COUNT - 2)
manager.removeAllListeners('disconnected')
done()
})
prepare.tryConnectAll((err, eachNodeConnections) => {
expect(err).to.not.exist()
})
})
})


@ -1,10 +0,0 @@
'use strict'
require('./default')
require('./max-data')
require('./max-event-loop-delay')
require('./max-peer-per-protocol')
require('./max-peers')
require('./max-received-data')
require('./max-sent-data')
require('./set-peer-value')


@ -1,44 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const Prepare = require('./utils/prepare')
const PEER_COUNT = 3
describe('setPeerValue', function () {
const prepare = Prepare(PEER_COUNT, [{
maxPeers: 1,
defaultPeerValue: 0
}])
before(prepare.create)
after(prepare.after)
it('kicks out lower valued peer first', function (done) {
let disconnects = 0
let firstConnectedPeer
const manager = prepare.connManagers()[0]
manager.once('connected', (peerId) => {
if (!firstConnectedPeer) {
firstConnectedPeer = peerId
manager.setPeerValue(peerId, 1)
}
})
manager.on('disconnected', (peerId) => {
disconnects++
expect(disconnects).to.be.most(PEER_COUNT - 2)
expect(peerId).to.not.be.equal(firstConnectedPeer)
manager.removeAllListeners('disconnected')
done()
})
prepare.tryConnectAll((err) => {
expect(err).to.not.exist()
})
})
})


@ -1,17 +0,0 @@
'use strict'
const eachSeries = require('async/eachSeries')
module.exports = (nodes, callback) => {
eachSeries(
nodes,
(node, cb) => {
eachSeries(
nodes.filter(n => node !== n),
(otherNode, cb) => node.dial(otherNode.peerInfo, cb),
cb
)
},
callback
)
}


@ -1,50 +0,0 @@
'use strict'
const TCP = require('libp2p-tcp')
const Multiplex = require('libp2p-mplex')
const SECIO = require('libp2p-secio')
const libp2p = require('../../../src')
const waterfall = require('async/waterfall')
const PeerInfo = require('peer-info')
const PeerId = require('peer-id')
const ConnManager = require('../../../src/connection-manager')
class Node extends libp2p {
constructor (peerInfo) {
const modules = {
transport: [TCP],
streamMuxer: [Multiplex],
connEncryption: [SECIO]
}
super({
peerInfo,
modules,
config: {
peerDiscovery: {
autoDial: false
}
}
})
}
}
function createLibp2pNode (options, callback) {
let node
waterfall([
(cb) => PeerId.create({ bits: 1024 }, cb),
(id, cb) => PeerInfo.create(id, cb),
(peerInfo, cb) => {
peerInfo.multiaddrs.add('/ip4/127.0.0.1/tcp/0')
node = new Node(peerInfo)
// Replace the connection manager so we use source code instead of dep code
node.connectionManager = new ConnManager(node, options)
node.start(cb)
}
], (err) => callback(err, node))
}
exports = module.exports = createLibp2pNode
exports.bundle = Node
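For reference, a usage sketch (illustrative, not part of this diff) of this helper. Whatever options are passed end up as the in-tree ConnectionManager's options, which is how the max-* tests above configure their limits.

const createLibp2pNode = require('./create-libp2p-node')

createLibp2pNode({ maxPeers: 1, minPeers: 1 }, (err, node) => {
  if (err) throw err
  // node is started and listening on an ephemeral TCP port
  console.log(node.peerInfo.id.toB58String())
  node.stop(() => {})
})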


@ -1,83 +0,0 @@
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const series = require('async/series')
const each = require('async/each')
const createLibp2pNode = require('./create-libp2p-node')
const connectAll = require('./connect-all')
const tryConnectAll = require('./try-connect-all')
module.exports = (count, options) => {
let nodes
if (!Array.isArray(options)) {
const opts = options
options = []
for (let n = 0; n < count; n++) {
options[n] = opts
}
}
const create = (done) => {
const tasks = []
for (let i = 0; i < count; i++) {
tasks.push((cb) => createLibp2pNode(options.shift() || {}, cb))
}
series(tasks, (err, things) => {
if (!err) {
nodes = things
expect(things.length).to.equal(count)
}
done(err)
})
}
const connect = function (done) {
if (this && this.timeout) {
this.timeout(10000)
}
connectAll(nodes, done)
}
const tryConnectAllFn = function (done) {
if (this && this.timeout) {
this.timeout(10000)
}
tryConnectAll(nodes, done)
}
const before = function (done) {
if (this && this.timeout) {
this.timeout(10000)
}
series([create, connect], done)
}
const after = function (done) {
if (this && this.timeout) {
this.timeout(10000)
}
if (!nodes) { return done() }
each(nodes, (node, cb) => {
series([
(cb) => node.stop(cb)
], cb)
}, done)
}
return {
create,
connect,
tryConnectAll: tryConnectAllFn,
before,
after,
things: () => nodes,
connManagers: () => nodes.map((node) => node.connectionManager)
}
}


@ -1,27 +0,0 @@
'use strict'
const mapSeries = require('async/mapSeries')
const eachSeries = require('async/eachSeries')
module.exports = (nodes, callback) => {
mapSeries(
nodes,
(node, cb) => {
const connectedTo = []
eachSeries(
nodes.filter(n => node !== n),
(otherNode, cb) => {
const otherNodePeerInfo = otherNode.peerInfo
node.dial(otherNodePeerInfo, (err) => {
if (!err) {
connectedTo.push(otherNodePeerInfo.id.toB58String())
}
cb()
})
},
(err) => cb(err, connectedTo)
)
},
callback
)
}


@ -1,404 +0,0 @@
/* eslint-env mocha */
/* eslint max-nested-callbacks: ["error", 8] */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const parallel = require('async/parallel')
const waterfall = require('async/waterfall')
const _times = require('lodash.times')
const CID = require('cids')
const DelegatedContentRouter = require('libp2p-delegated-content-routing')
const sinon = require('sinon')
const nock = require('nock')
const ma = require('multiaddr')
const Node = require('./utils/bundle-nodejs')
const createNode = require('./utils/create-node')
const createPeerInfo = createNode.createPeerInfo
describe('.contentRouting', () => {
describe('via the dht', () => {
let nodeA
let nodeB
let nodeC
let nodeD
let nodeE
before(function (done) {
this.timeout(5 * 1000)
const tasks = _times(5, () => (cb) => {
createNode('/ip4/0.0.0.0/tcp/0', (err, node) => {
expect(err).to.not.exist()
node.start((err) => cb(err, node))
})
})
parallel(tasks, (err, nodes) => {
expect(err).to.not.exist()
nodeA = nodes[0]
nodeB = nodes[1]
nodeC = nodes[2]
nodeD = nodes[3]
nodeE = nodes[4]
parallel([
(cb) => nodeA.dial(nodeB.peerInfo, cb),
(cb) => nodeB.dial(nodeC.peerInfo, cb),
(cb) => nodeC.dial(nodeD.peerInfo, cb),
(cb) => nodeD.dial(nodeE.peerInfo, cb),
(cb) => nodeE.dial(nodeA.peerInfo, cb)
], done)
})
})
after((done) => {
parallel([
(cb) => nodeA.stop(cb),
(cb) => nodeB.stop(cb),
(cb) => nodeC.stop(cb),
(cb) => nodeD.stop(cb),
(cb) => nodeE.stop(cb)
], done)
})
it('should use the nodes dht to provide', (done) => {
const stub = sinon.stub(nodeA._dht, 'provide').callsFake(() => {
stub.restore()
done()
})
nodeA.contentRouting.provide()
})
it('should use the nodes dht to find providers', (done) => {
const stub = sinon.stub(nodeA._dht, 'findProviders').callsFake(() => {
stub.restore()
done()
})
nodeA.contentRouting.findProviders()
})
describe('le ring', () => {
const cid = new CID('QmTp9VkYvnHyrqKQuFPiuZkiX9gPcqj6x5LJ1rmWuSySnL')
it('let kbucket get filled', (done) => {
setTimeout(() => done(), 250)
})
it('nodeA.contentRouting.provide', (done) => {
nodeA.contentRouting.provide(cid, done)
})
it('nodeE.contentRouting.findProviders for existing record', (done) => {
nodeE.contentRouting.findProviders(cid, { maxTimeout: 5000 }, (err, providers) => {
expect(err).to.not.exist()
expect(providers).to.have.length.above(0)
done()
})
})
it('nodeE.contentRouting.findProviders with limited number of providers', (done) => {
parallel([
(cb) => nodeA.contentRouting.provide(cid, cb),
(cb) => nodeB.contentRouting.provide(cid, cb),
(cb) => nodeC.contentRouting.provide(cid, cb)
], (err) => {
expect(err).to.not.exist()
nodeE.contentRouting.findProviders(cid, { maxNumProviders: 2 }, (err, providers) => {
expect(err).to.not.exist()
expect(providers).to.have.length(2)
done()
})
})
})
it('nodeC.contentRouting.findProviders for non existing record (timeout)', (done) => {
const cid = new CID('QmTp9VkYvnHyrqKQuFPiuZkiX9gPcqj6x5LJ1rmWuSnnnn')
nodeE.contentRouting.findProviders(cid, { maxTimeout: 5000 }, (err, providers) => {
expect(err).to.exist()
expect(err.code).to.eql('ERR_NOT_FOUND')
expect(providers).to.not.exist()
done()
})
})
})
})
describe('via a delegate', () => {
let nodeA
let delegate
before((done) => {
waterfall([
(cb) => {
createPeerInfo(cb)
},
// Create the node using the delegate
(peerInfo, cb) => {
delegate = new DelegatedContentRouter(peerInfo.id, {
host: '0.0.0.0',
protocol: 'http',
port: 60197
}, [
ma('/ip4/0.0.0.0/tcp/60194')
])
nodeA = new Node({
peerInfo,
modules: {
contentRouting: [delegate]
},
config: {
dht: {
enabled: false
},
relay: {
enabled: true,
hop: {
enabled: true,
active: false
}
}
}
})
nodeA.start(cb)
}
], done)
})
after((done) => nodeA.stop(done))
afterEach(() => nock.cleanAll())
describe('provide', () => {
it('should use the delegate router to provide', (done) => {
const stub = sinon.stub(delegate, 'provide').callsFake(() => {
stub.restore()
done()
})
nodeA.contentRouting.provide()
})
it('should be able to register as a provider', (done) => {
const cid = new CID('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB')
const mockApi = nock('http://0.0.0.0:60197')
// mock the refs call
.post('/api/v0/refs')
.query({
recursive: false,
arg: cid.toBaseEncodedString(),
'stream-channels': true
})
.reply(200, null, [
'Content-Type', 'application/json',
'X-Chunked-Output', '1'
])
nodeA.contentRouting.provide(cid, (err) => {
expect(err).to.not.exist()
expect(mockApi.isDone()).to.equal(true)
done()
})
})
it('should handle errors when registering as a provider', (done) => {
const cid = new CID('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB')
const mockApi = nock('http://0.0.0.0:60197')
// mock the refs call
.post('/api/v0/refs')
.query({
recursive: false,
arg: cid.toBaseEncodedString(),
'stream-channels': true
})
.reply(502, 'Bad Gateway', ['Content-Type', 'application/json'])
nodeA.contentRouting.provide(cid, (err) => {
expect(err).to.exist()
expect(mockApi.isDone()).to.equal(true)
done()
})
})
})
describe('find providers', () => {
it('should use the delegate router to find providers', (done) => {
const stub = sinon.stub(delegate, 'findProviders').callsFake(() => {
stub.restore()
done()
})
nodeA.contentRouting.findProviders()
})
it('should be able to find providers', (done) => {
const cid = new CID('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB')
const provider = 'QmZNgCqZCvTsi3B4Vt7gsSqpkqDpE7M2Y9TDmEhbDb4ceF'
const mockApi = nock('http://0.0.0.0:60197')
.post('/api/v0/dht/findprovs')
.query({
arg: cid.toBaseEncodedString(),
timeout: '1000ms',
'stream-channels': true
})
.reply(200, `{"Extra":"","ID":"QmWKqWXCtRXEeCQTo3FoZ7g4AfnGiauYYiczvNxFCHicbB","Responses":[{"Addrs":["/ip4/0.0.0.0/tcp/0"],"ID":"${provider}"}],"Type":4}\n`, [
'Content-Type', 'application/json',
'X-Chunked-Output', '1'
])
nodeA.contentRouting.findProviders(cid, 1000, (err, response) => {
expect(err).to.not.exist()
expect(response).to.have.length(1)
expect(response[0].id.toB58String()).to.equal(provider)
expect(mockApi.isDone()).to.equal(true)
done()
})
})
it('should handle errors when finding providers', (done) => {
const cid = new CID('QmU621oD8AhHw6t25vVyfYKmL9VV3PTgc52FngEhTGACFB')
const mockApi = nock('http://0.0.0.0:60197')
.post('/api/v0/dht/findprovs')
.query({
arg: cid.toBaseEncodedString(),
timeout: '30000ms',
'stream-channels': true
})
.reply(502, 'Bad Gateway', [
'X-Chunked-Output', '1'
])
nodeA.contentRouting.findProviders(cid, (err) => {
expect(err).to.exist()
expect(mockApi.isDone()).to.equal(true)
done()
})
})
})
})
describe('via the dht and a delegate', () => {
let nodeA
let delegate
before((done) => {
waterfall([
(cb) => {
createPeerInfo(cb)
},
// Create the node using the delegate
(peerInfo, cb) => {
delegate = new DelegatedContentRouter(peerInfo.id, {
host: '0.0.0.0',
protocol: 'http',
port: 60197
}, [
ma('/ip4/0.0.0.0/tcp/60194')
])
nodeA = new Node({
peerInfo,
modules: {
contentRouting: [delegate]
},
config: {
relay: {
enabled: true,
hop: {
enabled: true,
active: false
}
}
}
})
nodeA.start(cb)
}
], done)
})
after((done) => nodeA.stop(done))
describe('provide', () => {
it('should use both the dht and delegate router to provide', (done) => {
const dhtStub = sinon.stub(nodeA._dht, 'provide').callsFake(() => {})
const delegateStub = sinon.stub(delegate, 'provide').callsFake(() => {
expect(dhtStub.calledOnce).to.equal(true)
expect(delegateStub.calledOnce).to.equal(true)
delegateStub.restore()
dhtStub.restore()
done()
})
nodeA.contentRouting.provide()
})
})
describe('findProviders', () => {
it('should only use the dht if it finds providers', (done) => {
const results = [true]
const dhtStub = sinon.stub(nodeA._dht, 'findProviders').callsArgWith(2, null, results)
const delegateStub = sinon.stub(delegate, 'findProviders').throws(() => {
return new Error('the delegate should not have been called')
})
nodeA.contentRouting.findProviders('a cid', { maxTimeout: 5000 }, (err, providers) => {
expect(err).to.not.exist()
expect(providers).to.equal(results)
expect(dhtStub.calledOnce).to.equal(true)
expect(delegateStub.notCalled).to.equal(true)
delegateStub.restore()
dhtStub.restore()
done()
})
})
it('should use the delegate if the dht fails to find providers', (done) => {
const results = [true]
const dhtStub = sinon.stub(nodeA._dht, 'findProviders').callsArgWith(2, null, [])
const delegateStub = sinon.stub(delegate, 'findProviders').callsArgWith(2, null, results)
nodeA.contentRouting.findProviders('a cid', { maxTimeout: 5000 }, (err, providers) => {
expect(err).to.not.exist()
expect(providers).to.deep.equal(results)
expect(dhtStub.calledOnce).to.equal(true)
expect(delegateStub.calledOnce).to.equal(true)
delegateStub.restore()
dhtStub.restore()
done()
})
})
})
})
describe('no routers', () => {
let nodeA
before((done) => {
createNode('/ip4/0.0.0.0/tcp/0', {
config: {
dht: {
enabled: false
}
}
}, (err, node) => {
expect(err).to.not.exist()
nodeA = node
done()
})
})
it('.findProviders should return an error with no options', (done) => {
nodeA.contentRouting.findProviders('a cid', (err) => {
expect(err).to.exist()
done()
})
})
it('.findProviders should return an error with options', (done) => {
nodeA.contentRouting.findProviders('a cid', { maxTimeout: 5000 }, (err) => {
expect(err).to.exist()
done()
})
})
})
})
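For reference, a condensed sketch (illustrative, not part of this diff) of the content-routing facade these tests exercise, assuming node was built with a DHT and/or a delegated content router:

const CID = require('cids')
const cid = new CID('QmTp9VkYvnHyrqKQuFPiuZkiX9gPcqj6x5LJ1rmWuSySnL')

node.contentRouting.provide(cid, (err) => {
  if (err) throw err
  node.contentRouting.findProviders(cid, { maxTimeout: 5000 }, (err, providers) => {
    if (err) throw err
    console.log(providers.map((p) => p.id.toB58String()))
  })
})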


@ -1,143 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const series = require('async/series')
const createNode = require('./utils/create-node')
const sinon = require('sinon')
const { createLibp2p } = require('../src')
const WS = require('libp2p-websockets')
const PeerInfo = require('peer-info')
describe('libp2p creation', () => {
afterEach(() => {
sinon.restore()
})
it('should be able to start and stop successfully', (done) => {
createNode([], {
config: {
pubsub: {
enabled: true
},
dht: {
enabled: true
}
}
}, (err, node) => {
expect(err).to.not.exist()
const sw = node._switch
const cm = node.connectionManager
const dht = node._dht
const pub = node.pubsub
sinon.spy(sw, 'start')
sinon.spy(cm, 'start')
sinon.spy(dht, 'start')
sinon.spy(dht.randomWalk, 'start')
sinon.spy(pub, 'start')
sinon.spy(sw, 'stop')
sinon.spy(cm, 'stop')
sinon.spy(dht, 'stop')
sinon.spy(dht.randomWalk, 'stop')
sinon.spy(pub, 'stop')
sinon.spy(node, 'emit')
series([
(cb) => node.start(cb),
(cb) => {
expect(sw.start.calledOnce).to.equal(true)
expect(cm.start.calledOnce).to.equal(true)
expect(dht.start.calledOnce).to.equal(true)
expect(dht.randomWalk.start.calledOnce).to.equal(true)
expect(pub.start.calledOnce).to.equal(true)
expect(node.emit.calledWith('start')).to.equal(true)
cb()
},
(cb) => node.stop(cb)
], (err) => {
expect(err).to.not.exist()
expect(sw.stop.calledOnce).to.equal(true)
expect(cm.stop.calledOnce).to.equal(true)
expect(dht.stop.calledOnce).to.equal(true)
expect(dht.randomWalk.stop.called).to.equal(true)
expect(pub.stop.calledOnce).to.equal(true)
expect(node.emit.calledWith('stop')).to.equal(true)
done()
})
})
})
it('should not create disabled modules', (done) => {
createNode([], {
config: {
pubsub: {
enabled: false
}
}
}, (err, node) => {
expect(err).to.not.exist()
expect(node._pubsub).to.not.exist()
done()
})
})
it('should not throw errors from switch if node has no error listeners', (done) => {
createNode([], {}, (err, node) => {
expect(err).to.not.exist()
node._switch.emit('error', new Error('bad things'))
done()
})
})
it('should emit errors from switch if node has error listeners', (done) => {
const error = new Error('bad things')
createNode([], {}, (err, node) => {
expect(err).to.not.exist()
node.once('error', (err) => {
expect(err).to.eql(error)
done()
})
node._switch.emit('error', error)
})
})
it('createLibp2p should create a peerInfo instance', function (done) {
this.timeout(10e3)
createLibp2p({
modules: {
transport: [WS]
}
}, (err, libp2p) => {
expect(err).to.not.exist()
expect(libp2p).to.exist()
done()
})
})
it('createLibp2p should allow for a provided peerInfo instance', function (done) {
this.timeout(10e3)
PeerInfo.create((err, peerInfo) => {
expect(err).to.not.exist()
sinon.spy(PeerInfo, 'create')
createLibp2p({
peerInfo,
modules: {
transport: [WS]
}
}, (err, libp2p) => {
expect(err).to.not.exist()
expect(libp2p).to.exist()
expect(PeerInfo.create.callCount).to.eql(0)
done()
})
})
})
})


@ -1,168 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const MemoryStore = require('interface-datastore').MemoryDatastore
const createNode = require('./utils/create-node')
describe('.dht', () => {
describe('enabled', () => {
let nodeA
const datastore = new MemoryStore()
before(function (done) {
createNode('/ip4/0.0.0.0/tcp/0', {
datastore
}, (err, node) => {
expect(err).to.not.exist()
nodeA = node
// Rewrite validators
nodeA._dht.validators.v = {
func (key, publicKey, callback) {
setImmediate(callback)
},
sign: false
}
// Rewrite selectors
nodeA._dht.selectors.v = () => 0
// Start
nodeA.start(done)
})
})
after((done) => {
nodeA.stop(done)
})
it('should be able to dht.put a value to the DHT', (done) => {
const key = Buffer.from('key')
const value = Buffer.from('value')
nodeA.dht.put(key, value, (err) => {
expect(err).to.not.exist()
done()
})
})
it('should be able to dht.get a value from the DHT with options', (done) => {
const key = Buffer.from('/v/hello')
const value = Buffer.from('world')
nodeA.dht.put(key, value, (err) => {
expect(err).to.not.exist()
nodeA.dht.get(key, { maxTimeout: 3000 }, (err, res) => {
expect(err).to.not.exist()
expect(res).to.eql(value)
done()
})
})
})
it('should be able to dht.get a value from the DHT with no options defined', (done) => {
const key = Buffer.from('/v/hello')
const value = Buffer.from('world')
nodeA.dht.put(key, value, (err) => {
expect(err).to.not.exist()
nodeA.dht.get(key, (err, res) => {
expect(err).to.not.exist()
expect(res).to.eql(value)
done()
})
})
})
it('should be able to dht.getMany a value from the DHT with options', (done) => {
const key = Buffer.from('/v/hello')
const value = Buffer.from('world')
nodeA.dht.put(key, value, (err) => {
expect(err).to.not.exist()
nodeA.dht.getMany(key, 1, { maxTimeout: 3000 }, (err, res) => {
expect(err).to.not.exist()
expect(res).to.exist()
done()
})
})
})
it('should be able to dht.getMany a value from the DHT with no options defined', (done) => {
const key = Buffer.from('/v/hello')
const value = Buffer.from('world')
nodeA.dht.put(key, value, (err) => {
expect(err).to.not.exist()
nodeA.dht.getMany(key, 1, (err, res) => {
expect(err).to.not.exist()
expect(res).to.exist()
done()
})
})
})
})
describe('disabled', () => {
let nodeA
before(function (done) {
createNode('/ip4/0.0.0.0/tcp/0', {
config: {
dht: {
enabled: false
}
}
}, (err, node) => {
expect(err).to.not.exist()
nodeA = node
nodeA.start(done)
})
})
after((done) => {
nodeA.stop(done)
})
it('should receive an error on dht.put if the dht is disabled', (done) => {
const key = Buffer.from('key')
const value = Buffer.from('value')
nodeA.dht.put(key, value, (err) => {
expect(err).to.exist()
expect(err.code).to.equal('ERR_DHT_DISABLED')
done()
})
})
it('should receive an error on dht.get if the dht is disabled', (done) => {
const key = Buffer.from('key')
nodeA.dht.get(key, (err) => {
expect(err).to.exist()
expect(err.code).to.equal('ERR_DHT_DISABLED')
done()
})
})
it('should receive an error on dht.getMany if the dht is disabled', (done) => {
const key = Buffer.from('key')
nodeA.dht.getMany(key, 10, (err) => {
expect(err).to.exist()
expect(err.code).to.equal('ERR_DHT_DISABLED')
done()
})
})
})
})

test/dialing/direct.node.js (new file, 258 lines)

@@ -0,0 +1,258 @@
'use strict'
/* eslint-env mocha */
const chai = require('chai')
chai.use(require('dirty-chai'))
const { expect } = chai
const sinon = require('sinon')
const Transport = require('libp2p-tcp')
const Muxer = require('libp2p-mplex')
const Crypto = require('../../src/insecure/plaintext')
const multiaddr = require('multiaddr')
const PeerId = require('peer-id')
const PeerInfo = require('peer-info')
const delay = require('delay')
const pDefer = require('p-defer')
const pipe = require('it-pipe')
const Libp2p = require('../../src')
const Dialer = require('../../src/dialer')
const TransportManager = require('../../src/transport-manager')
const { codes: ErrorCodes } = require('../../src/errors')
const Protector = require('../../src/pnet')
const swarmKeyBuffer = Buffer.from(require('../fixtures/swarm.key'))
const mockUpgrader = require('../utils/mockUpgrader')
const Peers = require('../fixtures/peers')
const listenAddr = multiaddr('/ip4/127.0.0.1/tcp/0')
const unsupportedAddr = multiaddr('/ip4/127.0.0.1/tcp/9999/ws')
describe('Dialing (direct, TCP)', () => {
let remoteTM
let localTM
let remoteAddr
before(async () => {
remoteTM = new TransportManager({
libp2p: {},
upgrader: mockUpgrader
})
remoteTM.add(Transport.prototype[Symbol.toStringTag], Transport)
localTM = new TransportManager({
libp2p: {},
upgrader: mockUpgrader
})
localTM.add(Transport.prototype[Symbol.toStringTag], Transport)
await remoteTM.listen([listenAddr])
remoteAddr = remoteTM.getAddrs()[0]
})
after(() => remoteTM.close())
afterEach(() => {
sinon.restore()
})
it('should be able to connect to a remote node via its multiaddr', async () => {
const dialer = new Dialer({ transportManager: localTM })
const connection = await dialer.connectToMultiaddr(remoteAddr)
expect(connection).to.exist()
await connection.close()
})
it('should be able to connect to a remote node via its stringified multiaddr', async () => {
const dialer = new Dialer({ transportManager: localTM })
const connection = await dialer.connectToMultiaddr(remoteAddr.toString())
expect(connection).to.exist()
await connection.close()
})
it('should fail to connect to an unsupported multiaddr', async () => {
const dialer = new Dialer({ transportManager: localTM })
try {
await dialer.connectToMultiaddr(unsupportedAddr)
} catch (err) {
expect(err).to.satisfy((err) => err.code === ErrorCodes.ERR_TRANSPORT_UNAVAILABLE)
return
}
expect.fail('Dial should have failed')
})
it('should be able to connect to a given peer', async () => {
const dialer = new Dialer({ transportManager: localTM })
const peerId = await PeerId.createFromJSON(Peers[0])
const peerInfo = new PeerInfo(peerId)
peerInfo.multiaddrs.add(remoteAddr)
const connection = await dialer.connectToPeer(peerInfo)
expect(connection).to.exist()
await connection.close()
})
it('should fail to connect to a given peer with unsupported addresses', async () => {
const dialer = new Dialer({ transportManager: localTM })
const peerId = await PeerId.createFromJSON(Peers[0])
const peerInfo = new PeerInfo(peerId)
peerInfo.multiaddrs.add(unsupportedAddr)
try {
await dialer.connectToPeer(peerInfo)
} catch (err) {
expect(err).to.satisfy((err) => err.code === ErrorCodes.ERR_CONNECTION_FAILED)
return
}
expect.fail('Dial should have failed')
})
it('should abort dials on queue task timeout', async () => {
const dialer = new Dialer({
transportManager: localTM,
timeout: 50
})
sinon.stub(localTM, 'dial').callsFake(async (addr, options) => {
expect(options.signal).to.exist()
expect(options.signal.aborted).to.equal(false)
expect(addr.toString()).to.eql(remoteAddr.toString())
await delay(60)
expect(options.signal.aborted).to.equal(true)
})
try {
await dialer.connectToMultiaddr(remoteAddr)
} catch (err) {
expect(err).to.satisfy((err) => err.code === ErrorCodes.ERR_TIMEOUT)
return
}
expect.fail('Dial should have failed')
})
it('should dial to the max concurrency', async () => {
const dialer = new Dialer({
transportManager: localTM,
concurrency: 2
})
const deferredDial = pDefer()
sinon.stub(localTM, 'dial').callsFake(async () => {
await deferredDial.promise
})
// Add 3 dials
Promise.all([
dialer.connectToMultiaddr(remoteAddr),
dialer.connectToMultiaddr(remoteAddr),
dialer.connectToMultiaddr(remoteAddr)
])
// Let the call stack run
await delay(0)
// We should have 2 in progress, and 1 waiting
expect(localTM.dial.callCount).to.equal(2)
expect(dialer.queue.pending).to.equal(2)
expect(dialer.queue.size).to.equal(1)
deferredDial.resolve()
// Let the call stack run
await delay(0)
// All dials should have executed
expect(localTM.dial.callCount).to.equal(3)
expect(dialer.queue.pending).to.equal(0)
expect(dialer.queue.size).to.equal(0)
})
describe('libp2p.dialer', () => {
let peerInfo
let remotePeerInfo
let libp2p
let remoteLibp2p
let remoteAddr
before(async () => {
const [peerId, remotePeerId] = await Promise.all([
PeerId.createFromJSON(Peers[0]),
PeerId.createFromJSON(Peers[1])
])
peerInfo = new PeerInfo(peerId)
remotePeerInfo = new PeerInfo(remotePeerId)
remoteLibp2p = new Libp2p({
peerInfo: remotePeerInfo,
modules: {
transport: [Transport],
streamMuxer: [Muxer],
connEncryption: [Crypto]
}
})
remoteLibp2p.handle('/echo/1.0.0', ({ stream }) => pipe(stream, stream))
await remoteLibp2p.transportManager.listen([listenAddr])
remoteAddr = remoteLibp2p.transportManager.getAddrs()[0]
})
afterEach(async () => {
sinon.restore()
libp2p && await libp2p.stop()
libp2p = null
})
after(() => remoteLibp2p.stop())
it('should use the dialer for connecting', async () => {
libp2p = new Libp2p({
peerInfo,
modules: {
transport: [Transport],
streamMuxer: [Muxer],
connEncryption: [Crypto]
}
})
sinon.spy(libp2p.dialer, 'connectToMultiaddr')
const connection = await libp2p.dial(remoteAddr)
expect(connection).to.exist()
const { stream, protocol } = await connection.newStream('/echo/1.0.0')
expect(stream).to.exist()
expect(protocol).to.equal('/echo/1.0.0')
await connection.close()
expect(libp2p.dialer.connectToMultiaddr.callCount).to.equal(1)
})
it('should use the protectors when provided for connecting', async () => {
const protector = new Protector(swarmKeyBuffer)
libp2p = new Libp2p({
peerInfo,
modules: {
transport: [Transport],
streamMuxer: [Muxer],
connEncryption: [Crypto],
connProtector: protector
}
})
sinon.spy(libp2p.upgrader.protector, 'protect')
sinon.stub(remoteLibp2p.upgrader, 'protector').value(new Protector(swarmKeyBuffer))
const connection = await libp2p.dialer.connectToMultiaddr(remoteAddr)
expect(connection).to.exist()
const { stream, protocol } = await connection.newStream('/echo/1.0.0')
expect(stream).to.exist()
expect(protocol).to.equal('/echo/1.0.0')
await connection.close()
expect(libp2p.upgrader.protector.protect.callCount).to.equal(1)
})
})
})
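
For reference, the flow these TCP tests exercise, reduced to a standalone sketch. The wrapper function name is illustrative, and the require paths plus the mockUpgrader helper are the same test-relative ones used above, so this assumes it sits alongside these suites.

'use strict'
const multiaddr = require('multiaddr')
const Transport = require('libp2p-tcp')
const TransportManager = require('../../src/transport-manager')
const Dialer = require('../../src/dialer')
const mockUpgrader = require('../utils/mockUpgrader')

async function dialOnce () {
  // Register the TCP transport, exactly as the suite above does
  const tm = new TransportManager({ libp2p: {}, upgrader: mockUpgrader })
  tm.add(Transport.prototype[Symbol.toStringTag], Transport)

  // Listen on an ephemeral port, then dial our own address through the Dialer
  await tm.listen([multiaddr('/ip4/127.0.0.1/tcp/0')])
  const dialer = new Dialer({ transportManager: tm })
  const connection = await dialer.connectToMultiaddr(tm.getAddrs()[0])

  await connection.close()
  await tm.close()
}

dialOnce()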

test/dialing/direct.spec.js (new file, 245 lines)

@@ -0,0 +1,245 @@
'use strict'
/* eslint-env mocha */
const chai = require('chai')
chai.use(require('dirty-chai'))
const { expect } = chai
const sinon = require('sinon')
const pDefer = require('p-defer')
const delay = require('delay')
const Transport = require('libp2p-websockets')
const Muxer = require('libp2p-mplex')
const Crypto = require('../../src/insecure/plaintext')
const multiaddr = require('multiaddr')
const PeerId = require('peer-id')
const PeerInfo = require('peer-info')
const { codes: ErrorCodes } = require('../../src/errors')
const Constants = require('../../src/constants')
const Dialer = require('../../src/dialer')
const TransportManager = require('../../src/transport-manager')
const Libp2p = require('../../src')
const Peers = require('../fixtures/peers')
const { MULTIADDRS_WEBSOCKETS } = require('../fixtures/browser')
const mockUpgrader = require('../utils/mockUpgrader')
const unsupportedAddr = multiaddr('/ip4/127.0.0.1/tcp/9999/ws')
const remoteAddr = MULTIADDRS_WEBSOCKETS[0]
describe('Dialing (direct, WebSockets)', () => {
let localTM
before(() => {
localTM = new TransportManager({
libp2p: {},
upgrader: mockUpgrader,
onConnection: () => {}
})
localTM.add(Transport.prototype[Symbol.toStringTag], Transport)
})
afterEach(() => {
sinon.restore()
})
it('should have appropriate defaults', () => {
const dialer = new Dialer({ transportManager: localTM })
expect(dialer.concurrency).to.equal(Constants.MAX_PARALLEL_DIALS)
expect(dialer.timeout).to.equal(Constants.DIAL_TIMEOUT)
})
it('should be able to connect to a remote node via its multiaddr', async () => {
const dialer = new Dialer({ transportManager: localTM })
const connection = await dialer.connectToMultiaddr(remoteAddr)
expect(connection).to.exist()
await connection.close()
})
it('should be able to connect to a remote node via its stringified multiaddr', async () => {
const dialer = new Dialer({ transportManager: localTM })
const connection = await dialer.connectToMultiaddr(remoteAddr.toString())
expect(connection).to.exist()
await connection.close()
})
it('should fail to connect to an unsupported multiaddr', async () => {
const dialer = new Dialer({ transportManager: localTM })
try {
await dialer.connectToMultiaddr(unsupportedAddr)
} catch (err) {
expect(err).to.satisfy((err) => err.code === ErrorCodes.ERR_TRANSPORT_DIAL_FAILED)
return
}
expect.fail('Dial should have failed')
})
it('should be able to connect to a given peer', async () => {
const dialer = new Dialer({ transportManager: localTM })
const peerId = await PeerId.createFromJSON(Peers[0])
const peerInfo = new PeerInfo(peerId)
peerInfo.multiaddrs.add(remoteAddr)
const connection = await dialer.connectToPeer(peerInfo)
expect(connection).to.exist()
await connection.close()
})
it('should fail to connect to a given peer with unsupported addresses', async () => {
const dialer = new Dialer({ transportManager: localTM })
const peerId = await PeerId.createFromJSON(Peers[0])
const peerInfo = new PeerInfo(peerId)
peerInfo.multiaddrs.add(unsupportedAddr)
try {
await dialer.connectToPeer(peerInfo)
} catch (err) {
expect(err).to.satisfy((err) => err.code === ErrorCodes.ERR_CONNECTION_FAILED)
return
}
expect.fail('Dial should have failed')
})
it('should abort dials on queue task timeout', async () => {
const dialer = new Dialer({
transportManager: localTM,
timeout: 50
})
sinon.stub(localTM, 'dial').callsFake(async (addr, options) => {
expect(options.signal).to.exist()
expect(options.signal.aborted).to.equal(false)
expect(addr.toString()).to.eql(remoteAddr.toString())
await delay(60)
expect(options.signal.aborted).to.equal(true)
})
try {
await dialer.connectToMultiaddr(remoteAddr)
} catch (err) {
expect(err).to.satisfy((err) => err.code === ErrorCodes.ERR_TIMEOUT)
return
}
expect.fail('Dial should have failed')
})
it('should dial to the max concurrency', async () => {
const dialer = new Dialer({
transportManager: localTM,
concurrency: 2
})
const deferredDial = pDefer()
sinon.stub(localTM, 'dial').callsFake(async () => {
await deferredDial.promise
})
// Add 3 dials
Promise.all([
dialer.connectToMultiaddr(remoteAddr),
dialer.connectToMultiaddr(remoteAddr),
dialer.connectToMultiaddr(remoteAddr)
])
// Let the call stack run
await delay(0)
// We should have 2 in progress, and 1 waiting
expect(localTM.dial.callCount).to.equal(2)
expect(dialer.queue.pending).to.equal(2)
expect(dialer.queue.size).to.equal(1)
deferredDial.resolve()
// Let the call stack run
await delay(0)
// All dials should have executed
expect(localTM.dial.callCount).to.equal(3)
expect(dialer.queue.pending).to.equal(0)
expect(dialer.queue.size).to.equal(0)
})
describe('libp2p.dialer', () => {
let peerInfo
let libp2p
let remoteLibp2p
before(async () => {
const peerId = await PeerId.createFromJSON(Peers[0])
peerInfo = new PeerInfo(peerId)
})
afterEach(async () => {
sinon.restore()
libp2p && await libp2p.stop()
libp2p = null
})
after(async () => {
remoteLibp2p && await remoteLibp2p.stop()
})
it('should create a dialer', () => {
libp2p = new Libp2p({
peerInfo,
modules: {
transport: [Transport],
streamMuxer: [Muxer],
connEncryption: [Crypto]
}
})
expect(libp2p.dialer).to.exist()
// Ensure the dialer also has the transport manager
expect(libp2p.transportManager).to.equal(libp2p.dialer.transportManager)
})
it('should use the dialer for connecting', async () => {
libp2p = new Libp2p({
peerInfo,
modules: {
transport: [Transport],
streamMuxer: [Muxer],
connEncryption: [Crypto]
}
})
sinon.spy(libp2p.dialer, 'connectToMultiaddr')
const connection = await libp2p.dialer.connectToMultiaddr(remoteAddr)
expect(connection).to.exist()
const { stream, protocol } = await connection.newStream('/echo/1.0.0')
expect(stream).to.exist()
expect(protocol).to.equal('/echo/1.0.0')
await connection.close()
expect(libp2p.dialer.connectToMultiaddr.callCount).to.equal(1)
})
it('should run identify automatically after connecting', async () => {
libp2p = new Libp2p({
peerInfo,
modules: {
transport: [Transport],
streamMuxer: [Muxer],
connEncryption: [Crypto]
}
})
sinon.spy(libp2p.dialer.identifyService, 'identify')
sinon.spy(libp2p.peerStore, 'update')
const connection = await libp2p.dialer.connectToMultiaddr(remoteAddr)
expect(connection).to.exist()
// Wait for setImmediate to trigger the identify call
await delay(1)
expect(libp2p.dialer.identifyService.identify.callCount).to.equal(1)
await libp2p.dialer.identifyService.identify.firstCall.returnValue
expect(libp2p.peerStore.update.callCount).to.equal(1)
})
})
})

test/fixtures/browser.js (new vendored file, 7 lines)

@@ -0,0 +1,7 @@
'use strict'
const multiaddr = require('multiaddr')
module.exports.MULTIADDRS_WEBSOCKETS = [
multiaddr('/ip4/127.0.0.1/tcp/15001/ws/p2p/QmckxVrJw1Yo8LqvmDJNUmdAsKtSbiKWmrXJFyKmUraBoN')
]

test/fixtures/peers.js (new vendored file, 27 lines)

@@ -0,0 +1,27 @@
'use strict'
module.exports = [{
id: 'QmNMMAqSxPetRS1cVMmutW5BCN1qQQyEr4u98kUvZjcfEw',
privKey: 'CAASpQkwggShAgEAAoIBAQDPek2aeHMa0blL42RTKd6xgtkk4Zkldvq4LHxzcag5uXepiQzWANEUvoD3KcUTmMRmx14PvsxdLCNst7S2JSa0R2n5wSRs14zGy6892lx4H4tLBD1KSpQlJ6vabYM1CJhIQRG90BtzDPrJ/X1iJ2HA0PPDz0Mflam2QUMDDrU0IuV2m7gSCJ5r4EmMs3U0xnH/1gShkVx4ir0WUdoWf5KQUJOmLn1clTRHYPv4KL9A/E38+imNAXfkH3c2T7DrCcYRkZSpK+WecjMsH1dCX15hhhggNqfp3iulO1tGPxHjm7PDGTPUjpCWKpD5e50sLqsUwexac1ja6ktMfszIR+FPAgMBAAECggEAB2H2uPRoRCAKU+T3gO4QeoiJaYKNjIO7UCplE0aMEeHDnEjAKC1HQ1G0DRdzZ8sb0fxuIGlNpFMZv5iZ2ZFg2zFfV//DaAwTek9tIOpQOAYHUtgHxkj5FIlg2BjlflGb+ZY3J2XsVB+2HNHkUEXOeKn2wpTxcoJE07NmywkO8Zfr1OL5oPxOPlRN1gI4ffYH2LbfaQVtRhwONR2+fs5ISfubk5iKso6BX4moMYkxubYwZbpucvKKi/rIjUA3SK86wdCUnno1KbDfdXSgCiUlvxt/IbRFXFURQoTV6BOi3sP5crBLw8OiVubMr9/8WE6KzJ0R7hPd5+eeWvYiYnWj4QKBgQD6jRlAFo/MgPO5NZ/HRAk6LUG+fdEWexA+GGV7CwJI61W/Dpbn9ZswPDhRJKo3rquyDFVZPdd7+RlXYg1wpmp1k54z++L1srsgj72vlg4I8wkZ4YLBg0+zVgHlQ0kxnp16DvQdOgiRFvMUUMEgetsoIx1CQWTd67hTExGsW+WAZQKBgQDT/WaHWvwyq9oaZ8G7F/tfeuXvNTk3HIJdfbWGgRXB7lJ7Gf6FsX4x7PeERfL5a67JLV6JdiLLVuYC2CBhipqLqC2DB962aKMvxobQpSljBBZvZyqP1IGPoKskrSo+2mqpYkeCLbDMuJ1nujgMP7gqVjabs2zj6ACKmmpYH/oNowJ/T0ZVtvFsjkg+1VsiMupUARRQuPUWMwa9HOibM1NIZcoQV2NGXB5Z++kR6JqxQO0DZlKArrviclderUdY+UuuY4VRiSEprpPeoW7ZlbTku/Ap8QZpWNEzZorQDro7bnfBW91fX9/81ets/gCPGrfEn+58U3pdb9oleCOQc/ifpQKBgBTYGbi9bYbd9vgZs6bd2M2um+VFanbMytS+g5bSIn2LHXkVOT2UEkB+eGf9KML1n54QY/dIMmukA8HL1oNAyalpw+/aWj+9Ui5kauUhGEywHjSeBEVYM9UXizxz+m9rsoktLLLUI0o97NxCJzitG0Kub3gn0FEogsUeIc7AdinZAoGBANnM1vcteSQDs7x94TDEnvvqwSkA2UWyLidD2jXgE0PG4V6tTkK//QPBmC9eq6TIqXkzYlsErSw4XeKO91knFofmdBzzVh/ddgx/NufJV4tXF+a2iTpqYBUJiz9wpIKgf43/Ob+P1EA99GAhSdxz1ess9O2aTqf3ANzn6v6g62Pv',
pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPek2aeHMa0blL42RTKd6xgtkk4Zkldvq4LHxzcag5uXepiQzWANEUvoD3KcUTmMRmx14PvsxdLCNst7S2JSa0R2n5wSRs14zGy6892lx4H4tLBD1KSpQlJ6vabYM1CJhIQRG90BtzDPrJ/X1iJ2HA0PPDz0Mflam2QUMDDrU0IuV2m7gSCJ5r4EmMs3U0xnH/1gShkVx4ir0WUdoWf5KQUJOmLn1clTRHYPv4KL9A/E38+imNAXfkH3c2T7DrCcYRkZSpK+WecjMsH1dCX15hhhggNqfp3iulO1tGPxHjm7PDGTPUjpCWKpD5e50sLqsUwexac1ja6ktMfszIR+FPAgMBAAE='
}, {
id: 'QmW8rAgaaA6sRydK1k6vonShQME47aDxaFidbtMevWs73t',
privKey: 'CAASpwkwggSjAgEAAoIBAQCTU3gVDv3SRXLOsFln9GEf1nJ/uCEDhOG10eC0H9l9IPpVxjuPT1ep+ykFUdvefq3D3q+W3hbmiHm81o8dYv26RxZIEioToUWp7Ec5M2B/niYoE93za9/ZDwJdl7eh2hNKwAdxTmdbXUPjkIU4vLyHKRFbJIn9X8w9djldz8hoUvC1BK4L1XrT6F2l0ruJXErH2ZwI1youfSzo87TdXIoFKdrQLuW6hOtDCGKTiS+ab/DkMODc6zl8N47Oczv7vjzoWOJMUJs1Pg0ZsD1zmISY38P0y/QyEhatZn0B8BmSWxlLQuukatzOepQI6k+HtfyAAjn4UEqnMaXTP1uwLldVAgMBAAECggEAHq2f8MqpYjLiAFZKl9IUs3uFZkEiZsgx9BmbMAb91Aec+WWJG4OLHrNVTG1KWp+IcaQablEa9bBvoToQnS7y5OpOon1d066egg7Ymfmv24NEMM5KRpktCNcOSA0CySpPIB6yrg6EiUr3ixiaFUGABKkxmwgVz/Q15IqM0ZMmCUsC174PMAz1COFZxD0ZX0zgHblOJQW3dc0X3XSzhht8vU02SMoVObQHQfeXEHv3K/RiVj/Ax0bTc5JVkT8dm8xksTtsFCNOzRBqFS6MYqX6U/u0Onz3Jm5Jt7fLWb5n97gZR4SleyGrqxYNb46d9X7mP0ie7E6bzFW0DsWBIeAqVQKBgQDW0We2L1n44yOvJaMs3evpj0nps13jWidt2I3RlZXjWzWHiYQfvhWUWqps/xZBnAYgnN/38xbKzHZeRNhrqOo+VB0WK1IYl0lZVE4l6TNKCsLsUfQzsb1pePkd1eRZA+TSqsi+I/IOQlQU7HA0bMrah/5FYyUBP0jYvCOvYTlZuwKBgQCvkcVRydVlzjUgv7lY5lYvT8IHV5iYO4Qkk2q6Wjv9VUKAJZauurMdiy05PboWfs5kbETdwFybXMBcknIvZO4ihxmwL8mcoNwDVZHI4bXapIKMTCyHgUKvJ9SeTcKGC7ZuQJ8mslRmYox/HloTOXEJgQgPRxXcwa3amzvdZI+6LwKBgQCLsnQqgxKUi0m6bdR2qf7vzTH4258z6X34rjpT0F5AEyF1edVFOz0XU/q+lQhpNEi7zqjLuvbYfSyA026WXKuwSsz7jMJ/oWqev/duKgAjp2npesY/E9gkjfobD+zGgoS9BzkyhXe1FCdP0A6L2S/1+zg88WOwMvJxl6/xLl24XwKBgCm60xSajX8yIQyUpWBM9yUtpueJ2Xotgz4ST+bVNbcEAddll8gWFiaqgug9FLLuFu5lkYTHiPtgc1RNdphvO+62/9MRuLDixwh/2TPO+iNqwKDKJjda8Nei9vVddCPaOtU/xNQ0xLzFJbG9LBmvqH9izOCcu8SJwGHaTcNUeJj/AoGADCJ26cY30c13F/8awAAmFYpZWCuTP5ppTsRmjd63ixlrqgkeLGpJ7kYb5fXkcTycRGYgP0e1kssBGcmE7DuG955fx3ZJESX3GQZ+XfMHvYGONwF1EiK1f0p6+GReC2VlQ7PIkoD9o0hojM6SnWvv9EXNjCPALEbfPFFvcniKVsE=',
pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCTU3gVDv3SRXLOsFln9GEf1nJ/uCEDhOG10eC0H9l9IPpVxjuPT1ep+ykFUdvefq3D3q+W3hbmiHm81o8dYv26RxZIEioToUWp7Ec5M2B/niYoE93za9/ZDwJdl7eh2hNKwAdxTmdbXUPjkIU4vLyHKRFbJIn9X8w9djldz8hoUvC1BK4L1XrT6F2l0ruJXErH2ZwI1youfSzo87TdXIoFKdrQLuW6hOtDCGKTiS+ab/DkMODc6zl8N47Oczv7vjzoWOJMUJs1Pg0ZsD1zmISY38P0y/QyEhatZn0B8BmSWxlLQuukatzOepQI6k+HtfyAAjn4UEqnMaXTP1uwLldVAgMBAAE='
}, {
id: 'QmZqCdSzgpsmB3Qweb9s4fojAoqELWzqku21UVrqtVSKi4',
privKey: 'CAASpgkwggSiAgEAAoIBAQCdbSEsTmw7lp5HagRcx57DaLiSUEkh4iBcKc7Y+jHICEIA8NIVi9FlfGEZj9G21FpiTR4Cy+BLVEuf8Nm90bym4iV+cSumeS21fvD8xGTEbeKGljs6OYHy3M45JhWF85gqHQJOqZufI2NRDuRgMZEO2+qGEXmSlv9mMXba/+9ecze8nSpB7bG2Z2pnKDeYwhF9Cz+ElMyn7TBWDjJERGVgFbTpdM3rBnbhB/TGpvs732QqZmIBlxnDb/Jn0l1gNZCgkEDcJ/0NDMBJTQ8vbvcdmaw3eaMPLkn1ix4wdu9QWCA0IBtuY1R7vSUtf4irnLJG7DnAw2GfM5QrF3xF1GLXAgMBAAECggEAQ1N0qHoxl5pmvqv8iaFlqLSUmx5y6GbI6CGJMQpvV9kQQU68yjItr3VuIXx8d/CBZyEMAK4oko7OeOyMcr3MLKLy3gyQWnXgsopDjhZ/8fH8uwps8g2+IZuFJrO+6LaxEPGvFu06fOiphPUVfn40R2KN/iBjGeox+AaXijmCqaV2vEdNJJPpMfz6VKZBDLTrbiqvo/3GN1U99PUqfPWpOWR29oAhh/Au6blSqvqTUPXB2+D/X6e1JXv31mxMPK68atDHSUjZWKB9lE4FMK1bkSKJRbyXmNIlbZ9V8X4/0r8/6T7JnW7ZT8ugRkquohmwgG7KkDXB1YsOCKXYUqzVYQKBgQDtnopFXWYl7XUyePJ/2MA5i7eoko9jmF44L31irqmHc5unNf6JlNBjlxTNx3WyfzhUzrn3c18psnGkqtow0tkBj5hmqn8/WaPbc5UA/5R1FNaNf8W5khn7MDm6KtYRPjN9djqTDiVHyC6ljONYd+5S+MqyKVWZ3t/xvG60sw85qwKBgQCpmpDtL+2JBwkfeUr3LyDcQxvbfzcv8lXj2otopWxWiLiZF1HzcqgAa2CIwu9kCGEt9Zr+9E4uINbe1To0b01/FhvR6xKO/ukceGA/mBB3vsKDcRmvpBUp+3SmnhY0nOk+ArQl4DhJ34k8pDM3EDPrixPf8SfVdU/8IM32lsdHhQKBgHLgpvCKCwxjFLnmBzcPzz8C8TOqR3BbBZIcQ34l+wflOGdKj1hsfaLoM8KYn6pAHzfBCd88A9Hg11hI0VuxVACRL5jS7NnvuGwsIOluppNEE8Ys86aXn7/0vLPoab3EWJhbRE48FIHzobmft3nZ4XpzlWs02JGfUp1IAC2UM9QpAoGAeWy3pZhSr2/iEC5+hUmwdQF2yEbj8+fDpkWo2VrVnX506uXPPkQwE1zM2Bz31t5I9OaJ+U5fSpcoPpDaAwBMs1fYwwlRWB8YNdHY1q6/23svN3uZsC4BGPV2JnO34iMUudilsRg+NGVdk5TbNejbwx7nM8Urh59djFzQGGMKeSECgYA0QMCARPpdMY50Mf2xQaCP7HfMJhESSPaBq9V3xY6ToEOEnXgAR5pNjnU85wnspHp+82r5XrKfEQlFxGpj2YA4DRRmn239sjDa29qP42UNAFg1+C3OvXTht1d5oOabaGhU0udwKmkEKUbb0bG5xPQJ5qeSJ5T1gLzLk3SIP0GlSw==',
pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCdbSEsTmw7lp5HagRcx57DaLiSUEkh4iBcKc7Y+jHICEIA8NIVi9FlfGEZj9G21FpiTR4Cy+BLVEuf8Nm90bym4iV+cSumeS21fvD8xGTEbeKGljs6OYHy3M45JhWF85gqHQJOqZufI2NRDuRgMZEO2+qGEXmSlv9mMXba/+9ecze8nSpB7bG2Z2pnKDeYwhF9Cz+ElMyn7TBWDjJERGVgFbTpdM3rBnbhB/TGpvs732QqZmIBlxnDb/Jn0l1gNZCgkEDcJ/0NDMBJTQ8vbvcdmaw3eaMPLkn1ix4wdu9QWCA0IBtuY1R7vSUtf4irnLJG7DnAw2GfM5QrF3xF1GLXAgMBAAE='
}, {
id: 'QmR5VwgsL7jyfZHAGyp66tguVrQhCRQuRc3NokocsCZ3fA',
privKey: 'CAASpwkwggSjAgEAAoIBAQCGXYU+uc2nn1zuJhfdFOl34upztnrD1gpHu58ousgHdGlGgYgbqLBAvIAauXdEL0+e30HofjA634SQxE+9nV+0FQBam1DDzHQlXsuwHV+2SKvSDkk4bVllMFpu2SJtts6VH+OXC/2ANJOm+eTALykQPYXgLIBxrhp/eD+Jz5r6wW2nq3k6OmYyK/4pgGzFjo5UyX+fa/171AJ68UPboFpDy6BZCcUjS0ondxPvD7cv5jMNqqMKIB/7rpi8n+Q3oeccRqVL56wH+FE3/QLjwYHwY6ILNRyvNXRqHjwBEXB2R5moXN0AFUWTw9rt3KhFiEjR1U81BTw5/xS7W2Iu0FgZAgMBAAECggEAS64HK8JZfE09eYGJNWPe8ECmD1C7quw21BpwVe+GVPSTizvQHswPohbKDMNj0srXDMPxCnNw1OgqcaOwyjsGuZaOoXoTroTM8nOHRIX27+PUqzaStS6aCG2IsiCozKUHjGTuupftS7XRaF4eIsUtWtFcQ1ytZ9pJYHypRQTi5NMSrTze5ThjnWxtHilK7gnBXik+aR0mYEVfSn13czQEC4rMOs+b9RAc/iibDNoLopfIdvmCCvfxzmySnR7Cu1iSUAONkir7PB+2Mt/qRFCH6P+jMamtCgQ8AmifXgVmDUlun+4MnKg3KrPd6ZjOEKhVe9mCHtGozk65RDREShfDdQKBgQDi+x2MuRa9peEMOHnOyXTS+v+MFcfmG0InsO08rFNBKZChLB+c9UHBdIvexpfBHigSyERfuDye4z6lxi8ZnierWMYJP30nxmrnxwTGTk1MQquhfs1A0kpmDnPsjlOS/drEIEIssNx2WbfJ7YtMxLWBtp+BJzGpQmr0LKC+NHRSrwKBgQCXiy2kJESIUkIs2ihV55hhT6/bZo1B1O5DPA2nkjOBXqXF6fvijzMDX82JjLd07lQZlI0n1Q/Hw0p4iYi9YVd2bLkLXF5UIb2qOeHj76enVFOrPHUSkC9Y2g/0Xs+60Ths2xRd8RrrfQU3kl5iVpBywkCIrb2M5+wRnNTk1W3TtwKBgQCvplyrteAfSurpJhs9JzE8w/hWU9SqAZYkWQp91W1oE95Um2yrbjBAoQxMjaqKS+f/APPIjy56Vqj4aHGyhW11b/Fw3qzfxvCcBKtxOs8eoMlo5FO6QgJJEA4tlcafDcvp0nzjUMqK28safLU7503+33B35fjMXxWdd5u9FaKfCQKBgC4W6j6tuRosymuRvgrCcRnHfpify/5loEFallyMnpWOD6Tt0OnK25z/GifnYDRz96gAAh5HMpFy18dpLOlMHamqz2yhHx8/U8vd5tHIJZlCkF/X91M5/uxrBccwvsT2tM6Got8fYSyVzWxlW8dUxIHiinYHQUsFjkqdBDLEpq5pAoGASoTw5RBEWFM0GuAZdXsyNyxU+4S+grkTS7WdW/Ymkukh+bJZbnvF9a6MkSehqXnknthmufonds2AFNS//63gixENsoOhzT5+2cdfc6tJECvJ9xXVXkf85AoQ6T/RrXF0W4m9yQyCngNJUrKUOIH3oDIfdZITlYzOC3u1ojj7VuQ=',
pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCGXYU+uc2nn1zuJhfdFOl34upztnrD1gpHu58ousgHdGlGgYgbqLBAvIAauXdEL0+e30HofjA634SQxE+9nV+0FQBam1DDzHQlXsuwHV+2SKvSDkk4bVllMFpu2SJtts6VH+OXC/2ANJOm+eTALykQPYXgLIBxrhp/eD+Jz5r6wW2nq3k6OmYyK/4pgGzFjo5UyX+fa/171AJ68UPboFpDy6BZCcUjS0ondxPvD7cv5jMNqqMKIB/7rpi8n+Q3oeccRqVL56wH+FE3/QLjwYHwY6ILNRyvNXRqHjwBEXB2R5moXN0AFUWTw9rt3KhFiEjR1U81BTw5/xS7W2Iu0FgZAgMBAAE='
}, {
id: 'QmScLDqRg7H6ipCYxm9fVk152UWavQFKscTdoT4YNHxgqp',
privKey: 'CAASpwkwggSjAgEAAoIBAQCWEHaTZ6LBLFP5OPrUqjDM/cF4b2zrfh1Zm3kd02ZtgQB3iYtZqRPJT5ctT3A7WdVF/7dCxPGOCkJlLekTx4Y4gD8JtjA+EfN9fR/2RBKbti2N3CD4vkGp9ss4hbBFcXIhl8zuD/ELHutbV6b8b4QXJGnxfp/B+1kNPnyd7SJznS0QyvI8OLI1nAkVKdYLDRW8kPKeHyx1xhdNDuTQVTFyAjRGQ4e3UYFB7bYIHW3E6kCtCoJDlj+JPC02Yt1LHzIzZVLvPvNFnYY2mag6OiGFuh/oMBIqvnPc1zRZ3eLUqeGZjQVaoR0kdgZUKz7Q2TBeNldxK/s6XO0DnkQTlelNAgMBAAECggEAdmt1dyswR2p4tdIeNpY7Pnj9JNIhTNDPznefI0dArCdBvBMhkVaYk6MoNIxcj6l7YOrDroAF8sXr0TZimMY6B/pERKCt/z1hPWTxRQBBAvnHhwvwRPq2jK6BfhAZoyM8IoBNKowP9mum5QUNdGV4Al8s73KyFX0IsCfgZSvNpRdlt+DzPh+hu/CyoZaMpRchJc1UmK8Fyk3KfO+m0DZNfHP5P08lXNfM6MZLgTJVVgERHyG+vBOzTd2RElMe19nVCzHwb3dPPRZSQ7Fnz3rA+GeLqsM2Zi4HNhfbD1OcD9C4wDj5tYL6hWTkdz4IlfVcjCeUHxgIOhdDV2K+OwbuAQKBgQD0FjUZ09UW2FQ/fitbvIB5f1SkXWPxTF9l6mAeuXhoGv2EtQUO4vq/PK6N08RjrZdWQy6UsqHgffi7lVQ8o3hvCKdbtf4sP+cM92OrY0WZV89os79ndj4tyvmnP8WojwRjt/2XEfgdoWcgWxW9DiYINTOQVimZX+X/3on4s8hEgQKBgQCdY3kOMbyQeLTRkqHXjVTY4ddO+v4S4wOUa1l4rTqAbq1W3JYWwoDQgFuIu3limIHmjnSJpCD4EioXFsM7p6csenoc20sHxsaHnJ6Mn5Te41UYmY9EW0otkQ0C3KbXM0hwQkjyplnEmZawGKmjEHW8DJ3vRYTv9TUCgYKxDHgOzQKBgB4A/NYH7BG61eBYKgxEx6YnuMfbkwV+Vdu5S8d7FQn3B2LgvZZu4FPRqcNVXLbEB+5ao8czjiKCWaj1Wj15+rvrXGcxn+Tglg5J+r5+nXeUC7LbJZQaPNp0MOwWMr3dlrSLUWjYlJ9Pz9VyXOG4c4Rexc/gR4zK9QLW4C7qKpwBAoGAZzyUb0cYlPtYQA+asTU3bnvVKy1f8yuNcZFowst+EDiI4u0WVh+HNzy6zdmLKa03p+/RaWeLaK0hhrubnEnAUmCUMNF3ScaM+u804LDcicc8TkKLwx7ObU0z56isl4RAA8K27tNHFrpYKXJD834cfBkaj5ReOrfw6Y/iFhhDuBECgYEA8gbC76uz7LSHhW30DSRTcqOzTyoe2oYKQaxuxYNp7vSSOkcdRen+mrdflDvud2q/zN2QdL4pgqdldHlR35M/lJ0f0B6zp74jlzbO9700wzsOqreezGc5eWiroDL100U9uIZ50BKb8CKtixIHpinUSPIUcVDkSAZ2y7mbfCxQwqQ=',
pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCWEHaTZ6LBLFP5OPrUqjDM/cF4b2zrfh1Zm3kd02ZtgQB3iYtZqRPJT5ctT3A7WdVF/7dCxPGOCkJlLekTx4Y4gD8JtjA+EfN9fR/2RBKbti2N3CD4vkGp9ss4hbBFcXIhl8zuD/ELHutbV6b8b4QXJGnxfp/B+1kNPnyd7SJznS0QyvI8OLI1nAkVKdYLDRW8kPKeHyx1xhdNDuTQVTFyAjRGQ4e3UYFB7bYIHW3E6kCtCoJDlj+JPC02Yt1LHzIzZVLvPvNFnYY2mag6OiGFuh/oMBIqvnPc1zRZ3eLUqeGZjQVaoR0kdgZUKz7Q2TBeNldxK/s6XO0DnkQTlelNAgMBAAE='
}, {
id: 'QmckxVrJw1Yo8LqvmDJNUmdAsKtSbiKWmrXJFyKmUraBoN',
privKey: 'CAASpwkwggSjAgEAAoIBAQC1/GFud/7xutux7qRfMj1sIdMRh99/chR6HqVj6LQqrgk4jil0mdN/LCk/tqPqmDtObHdmEhCoybzuhLbCKgUqryKDwO6yBJHSKWY9QqrKZtLJ37SgKwGjE3+NUD4r1dJHhtQrICFdOdSCBzs/v8gi+J+KZLHo7+Nms4z09ysy7qZh94Pd7cW4gmSMergqUeANLD9C0ERw1NXolswOW7Bi7UGr7yuBxejICLO3nkxe0OtpQBrYrqdCD9vs3t/HQZbPWVoiRj4VO7fxkAPKLl30HzcIfxj/ayg8NHcH59d08D+N2v5Sdh28gsiYKIPE9CXvuw//HUY2WVRY5fDC5JglAgMBAAECggEBAKb5aN/1w3pBqz/HqRMbQpYLNuD33M3PexBNPAy+P0iFpDo63bh5Rz+A4lvuFNmzUX70MFz7qENlzi6+n/zolxMB29YtWBUH8k904rTEjXXl//NviQgITZk106tx+4k2x5gPEm57LYGfBOdFAUzNhzDnE2LkXwRNzkS161f7zKwOEsaGWRscj6UvhO4MIFxjb32CVwt5eK4yOVqtyMs9u30K4Og+AZYTlhtm+bHg6ndCCBO6CQurCQ3jD6YOkT+L3MotKqt1kORpvzIB0ujZRf49Um8wlcjC5G9aexBeGriXaVdPF62zm7GA7RMsbQM/6aRbA1fEQXvJhHUNF9UFeaECgYEA8wCjKqQA7UQnHjRwTsktdwG6szfxd7z+5MTqHHTWhWzgcQLgdh5/dO/zanEoOThadMk5C1Bqjq96gH2xim8dg5XQofSVtV3Ui0dDa+XRB3E3fyY4D3RF5hHv85O0GcvQc6DIb+Ja1oOhvHowFB1C+CT3yEgwzX/EK9xpe+KtYAkCgYEAv7hCnj/DcZFU3fAfS+unBLuVoVJT/drxv66P686s7J8UM6tW+39yDBZ1IcwY9vHFepBvxY2fFfEeLI02QFM+lZXVhNGzFkP90agNHK01psGgrmIufl9zAo8WOKgkLgbYbSHzkkDeqyjEPU+B0QSsZOCE+qLCHSdsnTmo/TjQhj0CgYAz1+j3yfGgrS+jVBC53lXi0+2fGspbf2jqKdDArXSvFqFzuudki/EpY6AND4NDYfB6hguzjD6PnoSGMUrVfAtR7X6LbwEZpqEX7eZGeMt1yQPMDr1bHrVi9mS5FMQR1NfuM1lP9Xzn00GIUpE7WVrWUhzDEBPJY/7YVLf0hFH08QKBgDWBRQZJIVBmkNrHktRrVddaSq4U/d/Q5LrsCrpymYwH8WliHgpeTQPWmKXwAd+ZJdXIzYjCt202N4eTeVqGYOb6Q/anV2WVYBbM4avpIxoA28kPGY6nML+8EyWIt2ApBOmgGgvtEreNzwaVU9NzjHEyv6n7FlVwlT1jxCe3XWq5AoGASYPKQoPeDlW+NmRG7z9EJXJRPVtmLL40fmGgtju9QIjLnjuK8XaczjAWT+ySI93Whu+Eujf2Uj7Q+NfUjvAEzJgwzuOd3jlQvoALq11kuaxlNQTn7rx0A1QhBgUJE8AkvShPC9FEnA4j/CLJU0re9H/8VvyN6qE0Mho0+YbjpP8=',
pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1/GFud/7xutux7qRfMj1sIdMRh99/chR6HqVj6LQqrgk4jil0mdN/LCk/tqPqmDtObHdmEhCoybzuhLbCKgUqryKDwO6yBJHSKWY9QqrKZtLJ37SgKwGjE3+NUD4r1dJHhtQrICFdOdSCBzs/v8gi+J+KZLHo7+Nms4z09ysy7qZh94Pd7cW4gmSMergqUeANLD9C0ERw1NXolswOW7Bi7UGr7yuBxejICLO3nkxe0OtpQBrYrqdCD9vs3t/HQZbPWVoiRj4VO7fxkAPKLl30HzcIfxj/ayg8NHcH59d08D+N2v5Sdh28gsiYKIPE9CXvuw//HUY2WVRY5fDC5JglAgMBAAE='
}]

test/fixtures/swarm.key.js (new vendored file, 5 lines)

@@ -0,0 +1,5 @@
'use strict'
module.exports = '/key/swarm/psk/1.0.0/\n' +
'/base16/\n' +
'411f0a244cbbc25ecbb2b070d00a1832516ded521eb3ee3aa13189efe2e2b9a2'
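
This fixture is the pre-shared swarm key consumed by the private network test in test/dialing/direct.node.js above. A short sketch of how it is wired in there, assuming the same test-relative paths:

'use strict'
const Protector = require('../../src/pnet')
const swarmKeyBuffer = Buffer.from(require('../fixtures/swarm.key'))

// Handed to libp2p as modules.connProtector; the dialing suite above spies on
// libp2p.upgrader.protector.protect to check it runs once per protected dial
const connProtector = new Protector(swarmKeyBuffer)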


@@ -1,5 +0,0 @@
{
"id": "QmaG17D4kfTB2RNUCr16bSfVvUVt2Xn3rPYeqQDvnVcXFr",
"privKey": "CAASqAkwggSkAgEAAoIBAQDBpXRrSLoVhP8C4YI0nm+YTb7UIe+xT9dwaMzKcGsH2zzz1lfxl54e1XNO+6Ut+If5jswpydgHhn9nGPod53sUIR2m+BiHOAH/Blgfa1nUKUkspts1MH3z5ZaO6Xo336Y0Uaw7UqfeIzKliTM6bpev2XIHyu0v/VJ2mylzfbDLMWqZs/shE3xwCJCD/PxoVpTlr/SzzF7MdgDMxkyiC3iLZ5Zkm+baPbi3mpKM0ue25Thedcc0KFjhQrjBfy5FPamrsMn5fnnoHwnQl9u7UWigzeC+7X+38EML1sVrV37ExxHPtM6882Ivjc7VN6zFHOHD2c9eVfbShkIf8YkVQUcFAgMBAAECggEAVE1mgGo58LJknml0WNn8tS5rfEiF5AhhPyOwvBTy04nDYFgZEykxgjTkrSbqgzfmYmOjSDICJUyNXGHISYqDz4CXOyBY9U0RuWeWp58BjVan75N4bRB+VNbHk9HbDkYEQlSoCW9ze0aRfvVa4v5QdRLSDMhwN+stokrsYcX/WIWYTM2e2jW+qQOzS8SJl7wYsgtd3WikrxwXkRL3sCMHEcgcPhoKacoD5Yr9cB0IC5vzhu4t/WMa+N2UEndcKGAbXsh8kA7BPFM6lqnEpOHpWEVEAYasAwFGUvUN9GwhtqpaNNS2sG6Nrz95cC99Nqx58uIXcTAJm3Fh/WfKJ6I1xQKBgQD+g7A5OSWw+i/zhTKVPJg93/eohViL0dGZT9Tf0/VslsFl00FwnZmBKA6BJ6ZL3hD00OcqIL3m6EzZ4q38U97XZxf2OUsPPJtl+Avqtlk16AHRHB9I17LGXJ30xZRkxL665oLms0D2T4NIZZX/uVMoS18lRvCZj1aEYQFCrZYgowKBgQDCxtA695S0vl6E3Q4G6MrDZK+2JqjaGL0XTnpHWiAjnk2lnV2CCZnWpEHT+ebF2fWx5nYQo5sugc6bS+4k9jRNUgxf2sQieZYCBjbnjCEVrPTm/dPTkaw1CQ/ox5/R1/Elbw8vteF9uUAvR0FL8Ss1Dqw6B2SxdTowxMy6qQ7sNwKBgG2N3eMj2DeP2egm45kdliK8L2yYyX6V+HTXyjf2kuQFGIZuIvMIw7S2u1eY65ooon/fFEIsCdJFGB+J1X6R05BAzi2sh8StP+7qkKadi1UK4w1R352JS2jbIRrlmXSuw7LL2njXnBTqMQaOw7xp14O2vePb32EaNBGTd+ltsvulAoGBALGIc4370oA4MIDb2Ag2MXKNmJbnf+piuB/BOTVGEZtFlDKLUArR43W/+/xRgKX/97FyhVS/OxfV21Kzj9oCy0NasMrB5RojRraLoYnFsPZH0mWlIGlsEtG4c9bR9XtYX4WmR+pN1r04mCc/xGWK6b4PpK2zxXT2i9ad2pmctGxbAoGBAIcp0UML5QCqvLmcob2/6PCRaYAxJBb9lDqOHredMgQih2hGnHFCyKk9eBAbFf/KN0guJTBDaAJRclcxsLLn7rV6grMNt+0EUepm7tWT0z5j8gNGbGGhuGDdqcmfJTc2EMdQrfhzYDN3rL1v3l+Ujwla2khL2ozE7SQ/KVeA1saY",
"pubKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDBpXRrSLoVhP8C4YI0nm+YTb7UIe+xT9dwaMzKcGsH2zzz1lfxl54e1XNO+6Ut+If5jswpydgHhn9nGPod53sUIR2m+BiHOAH/Blgfa1nUKUkspts1MH3z5ZaO6Xo336Y0Uaw7UqfeIzKliTM6bpev2XIHyu0v/VJ2mylzfbDLMWqZs/shE3xwCJCD/PxoVpTlr/SzzF7MdgDMxkyiC3iLZ5Zkm+baPbi3mpKM0ue25Thedcc0KFjhQrjBfy5FPamrsMn5fnnoHwnQl9u7UWigzeC+7X+38EML1sVrV37ExxHPtM6882Ivjc7VN6zFHOHD2c9eVfbShkIf8YkVQUcFAgMBAAE="
}


@@ -1,5 +0,0 @@
{
"id": "Qmex1SSsueWFsUfjdkugJ5zhcnjddAt8TxcnDLUXKD9Sx7",
"privKey": "CAASqAkwggSkAgEAAoIBAQCXzV127CvVHOGMzvsn/U+/32JM58KA6k0FSCCeNFzNowiDS/vV5eezGN5AFoxsF6icWLoaczz7l9RdVD+I/t6PEt9X7XUdrDCtSS8WmAcCgvZWSSf7yAd3jT4GSZDUIgIEeRZsERDt/yVqTLwsZ1G9dMIeh8sbf2zwjTXZIWaRM6o4lq3DYFfzLvJUXlJodxPogU7l7nLkITPUv+yQAMcVHizbNwJvwiETKYeUj73/m/wEPAlnFESexDstxNiIwE/FH8Ao50QPZRO6E6Jb0hhYSI/4CLRdrzDFm/Vzplei3Wr2DokSROaNyeG37VAueyA+pDqn84um+L9uXLwbv5FbAgMBAAECggEAdBUzV/GaQ0nmoQrWvOnUxmFIho7kCjkh1NwnNVPNc+Msa1r7pcI9wJNPwap8j1w4L/cZuYhOJgcg+o2mWFiuULKZ4F9Ro/M89gZ038457g2/2pPu43c/Xoi/2YcAHXg0Gr+OCe2zCIyITBWKAFqyAzL6DubAxrJW2Ezj1LrZ+EZgMyzbh/go/eEGSJaaGkINeAkY144DqDWWWvzyhKhryipsGkZGEkVy9xJgMEI3ipVvuPez2XAvoyyeuinBBLe+Z2vY5G50XXzbIMhIQGLncHf9MwTv6wt1ilyOSLOXK0BoQbB76J3R3is5dSULXXP9r8VocjLBEkmBuf4FXAKzoQKBgQDNNS4F1XE1gxD8LPkL+aB/hi6eVHVPhr+w0I/9ATikcLGeUfBM2Gd6cZRPFtNVrv1p6ZF1D1UyGDknGbDBSQd9wLUgb0fDoo3jKYMGWq6G+VvaP5rzWQeBV8YV2EhSmUk1i6kiYe2ZE8WyrPie7iwpQIY60e2A8Ly0GKZiBZUcHQKBgQC9YDAVsGnEHFVFkTDpvw5HwEzCgTb2A3NgkGY3rTYZ7L6AFjqCYmUwFB8Fmbyc4kdFWNh8wfmq5Qrvl49NtaeukiqWKUUlB8uPdztB1P0IahA2ks0owStZlRifmwfgYyMd4xE17lhaOgQQJZZPxmP0F6mdOvb3YJafNURCdMS51wKBgEvvIM+h0tmFXXSjQ6kNvzlRMtD92ccKysYn9xAdMpOO6/r0wSH+dhQWEVZO0PcE4NsfReb2PIVj90ojtIdhebcr5xpQc1LORQjJJKXmSmzBux6AqNrhl+hhzXfp56FA/Zkly/lgGWaqrV5XqUxOP+Mn8EO1yNgMvRc7g94DyNB1AoGBAKLBuXHalXwDsdHBUB2Eo3xNLGt6bEcRfia+0+sEBdxQGQWylQScFkU09dh1YaIf44sZKa5HdBFJGpYCVxo9hmjFnK5Dt/Z0daHOonIY4INLzLVqg8KECoLKXkhGEIXsDjFQhukn+G1LMVTDSSU055DQiWjlVX4UWD9qo0jOXIkvAoGBAMP50p2X6PsWWZUuuR7i1JOJHRyQZPWdHh9p8SSLnCtEpHYZfJr4INXNmhnSiB/3TUnHix2vVKjosjMTCk/CjfzXV2H41WPOLZ2/Pi3SxCicWIRj4kCcWhkEuIF2jGkg1+jmNiCl/zNMaBOAIP3QbDPtqOWbYlPd2YIzdj6WQ6R4",
"pubKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCXzV127CvVHOGMzvsn/U+/32JM58KA6k0FSCCeNFzNowiDS/vV5eezGN5AFoxsF6icWLoaczz7l9RdVD+I/t6PEt9X7XUdrDCtSS8WmAcCgvZWSSf7yAd3jT4GSZDUIgIEeRZsERDt/yVqTLwsZ1G9dMIeh8sbf2zwjTXZIWaRM6o4lq3DYFfzLvJUXlJodxPogU7l7nLkITPUv+yQAMcVHizbNwJvwiETKYeUj73/m/wEPAlnFESexDstxNiIwE/FH8Ao50QPZRO6E6Jb0hhYSI/4CLRdrzDFm/Vzplei3Wr2DokSROaNyeG37VAueyA+pDqn84um+L9uXLwbv5FbAgMBAAE="
}


@@ -1,168 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
chai.use(require('chai-checkmark'))
const expect = chai.expect
const sinon = require('sinon')
const series = require('async/series')
const createNode = require('./utils/create-node')
describe('libp2p state machine (fsm)', () => {
describe('starting and stopping', () => {
let node
beforeEach((done) => {
createNode([], {
config: {
dht: {
enabled: false
}
}
}, (err, _node) => {
node = _node
done(err)
})
})
afterEach(() => {
node.removeAllListeners()
sinon.restore()
})
after((done) => {
node.stop(done)
node = null
})
it('should be able to start and stop several times', (done) => {
node.on('start', (err) => {
expect(err).to.not.exist().mark()
})
node.on('stop', (err) => {
expect(err).to.not.exist().mark()
})
expect(4).checks(done)
series([
(cb) => node.start(cb),
(cb) => node.stop(cb),
(cb) => node.start(cb),
(cb) => node.stop(cb)
], () => {})
})
it('should noop when stopping a stopped node', (done) => {
node.once('start', node.stop)
node.once('stop', () => {
node.state.on('STOPPING', () => {
throw new Error('should not stop a stopped node')
})
node.once('stop', done)
// stop the stopped node
node.stop(() => {})
})
node.start(() => {})
})
it('should callback with an error when it occurs on stop', (done) => {
const error = new Error('some error starting')
node.once('start', () => {
node.once('error', (err) => {
expect(err).to.eql(error).mark()
})
node.stop((err) => {
expect(err).to.eql(error).mark()
})
})
expect(2).checks(done)
sinon.stub(node._switch, 'stop').callsArgWith(0, error)
node.start(() => {})
})
it('should noop when starting a started node', (done) => {
node.once('start', () => {
node.state.on('STARTING', () => {
throw new Error('should not start a started node')
})
node.once('start', () => {
node.once('stop', done)
node.stop(() => {})
})
// start the started node
node.start(() => {})
})
node.start(() => {})
})
it('should error on start with no transports', (done) => {
const transports = node._modules.transport
node._modules.transport = null
node.on('stop', () => {
node._modules.transport = transports
expect(node._modules.transport).to.exist().mark()
})
node.on('error', (err) => {
expect(err).to.exist().mark()
})
node.on('start', () => {
throw new Error('should not start')
})
expect(2).checks(done)
node.start(() => {})
})
it('should not start if the switch fails to start', (done) => {
const error = new Error('switch didnt start')
const stub = sinon.stub(node._switch, 'start')
.callsArgWith(0, error)
node.on('stop', () => {
expect(stub.calledOnce).to.eql(true).mark()
stub.restore()
})
node.on('error', (err) => {
expect(err).to.eql(error).mark()
})
node.on('start', () => {
throw new Error('should not start')
})
expect(3).checks(done)
node.start((err) => {
expect(err).to.eql(error).mark()
})
})
it('should not dial when the node is stopped', (done) => {
node.on('stop', () => {
node.dial(null, (err) => {
expect(err).to.exist()
expect(err.code).to.eql('ERR_NODE_NOT_STARTED')
done()
})
})
node.stop(() => {})
})
it('should not dial (fsm) when the node is stopped', (done) => {
node.on('stop', () => {
node.dialFSM(null, null, (err) => {
expect(err).to.exist()
expect(err.code).to.eql('ERR_NODE_NOT_STARTED')
done()
})
})
node.stop(() => {})
})
})
})


@@ -1,134 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const PeerBook = require('peer-book')
const PeerInfo = require('peer-info')
const PeerId = require('peer-id')
const MultiAddr = require('multiaddr')
const TestPeerInfos = require('./switch/test-data/ids.json').infos
const { getPeerInfo, getPeerInfoRemote } = require('../src/get-peer-info')
describe('Get Peer Info', () => {
describe('getPeerInfo', () => {
let peerBook
let peerInfoA
let multiaddrA
let peerIdA
before((done) => {
peerBook = new PeerBook()
PeerId.createFromJSON(TestPeerInfos[0].id, (err, id) => {
peerIdA = id
peerInfoA = new PeerInfo(peerIdA)
multiaddrA = MultiAddr('/ipfs/QmdWYwTywvXBeLKWthrVNjkq9SafEDn1PbAZdz4xZW7Jd9')
peerInfoA.multiaddrs.add(multiaddrA)
peerBook.put(peerInfoA)
done(err)
})
})
it('should be able get peer info from multiaddr', () => {
const _peerInfo = getPeerInfo(multiaddrA, peerBook)
expect(peerBook.has(_peerInfo)).to.equal(true)
expect(peerInfoA).to.deep.equal(_peerInfo)
})
it('should return a new PeerInfo with a multiAddr not in the PeerBook', () => {
const wrongMultiAddr = MultiAddr('/ipfs/QmckZzdVd72h9QUFuJJpQqhsZqGLwjhh81qSvZ9BhB2FQi')
const _peerInfo = getPeerInfo(wrongMultiAddr, peerBook)
expect(PeerInfo.isPeerInfo(_peerInfo)).to.equal(true)
})
it('should be able get peer info from peer id', () => {
const _peerInfo = getPeerInfo(multiaddrA, peerBook)
expect(peerBook.has(_peerInfo)).to.equal(true)
expect(peerInfoA).to.deep.equal(_peerInfo)
})
it('should add a peerInfo to the book', (done) => {
PeerId.createFromJSON(TestPeerInfos[1].id, (err, id) => {
const peerInfo = new PeerInfo(id)
expect(peerBook.has(peerInfo.id.toB58String())).to.eql(false)
expect(getPeerInfo(peerInfo, peerBook)).to.exist()
expect(peerBook.has(peerInfo.id.toB58String())).to.eql(true)
done(err)
})
})
it('should return the most up to date version of the peer', (done) => {
const ma1 = MultiAddr('/ip4/0.0.0.0/tcp/8080')
const ma2 = MultiAddr('/ip6/::/tcp/8080')
PeerId.createFromJSON(TestPeerInfos[1].id, (err, id) => {
const peerInfo = new PeerInfo(id)
peerInfo.multiaddrs.add(ma1)
expect(getPeerInfo(peerInfo, peerBook)).to.exist()
const peerInfo2 = new PeerInfo(id)
peerInfo2.multiaddrs.add(ma2)
const returnedPeerInfo = getPeerInfo(peerInfo2, peerBook)
expect(returnedPeerInfo.multiaddrs.toArray()).to.contain.members([
ma1, ma2
])
done(err)
})
})
it('an invalid peer type should throw an error', () => {
let error
try {
getPeerInfo('/ip4/127.0.0.1/tcp/1234', peerBook)
} catch (err) {
error = err
}
expect(error.code).to.eql('ERR_INVALID_MULTIADDR')
})
})
describe('getPeerInfoRemote', () => {
it('should callback with error for invalid string multiaddr', async () => {
let error
try {
await getPeerInfoRemote('INVALID MULTIADDR')
} catch (err) {
error = err
}
expect(error.code).to.eql('ERR_INVALID_PEER_TYPE')
})
it('should callback with error for invalid non-peer multiaddr', async () => {
let error
try {
await getPeerInfoRemote('/ip4/8.8.8.8/tcp/1080')
} catch (err) {
error = err
}
expect(error.code).to.eql('ERR_INVALID_PEER_TYPE')
})
it('should callback with error for invalid non-peer multiaddr', async () => {
let error
try {
await getPeerInfoRemote(undefined)
} catch (err) {
error = err
}
expect(error.code).to.eql('ERR_INVALID_PEER_TYPE')
})
it('should callback with error for invalid non-peer multiaddr (promise)', () => {
return getPeerInfoRemote(undefined)
.then(expect.fail, (err) => {
expect(err.code).to.eql('ERR_INVALID_PEER_TYPE')
})
})
})
})


@@ -1,15 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
const dirtyChai = require('dirty-chai')
const expect = chai.expect
chai.use(dirtyChai)
const identify = require('../../src/identify')
describe('basic', () => {
it('multicodec', () => {
expect(identify.multicodec).to.eql('/ipfs/id/1.0.0')
})
})


@@ -1,192 +0,0 @@
/* eslint-env mocha */
'use strict'
const pull = require('pull-stream/pull')
const values = require('pull-stream/sources/values')
const chai = require('chai')
const dirtyChai = require('dirty-chai')
const expect = chai.expect
chai.use(dirtyChai)
const pair = require('pull-pair/duplex')
const PeerInfo = require('peer-info')
const lp = require('pull-length-prefixed')
const multiaddr = require('multiaddr')
const identify = require('../../src/identify')
const msg = identify.message
describe('identify.dialer', () => {
let original
before(function (done) {
this.timeout(20 * 1000)
PeerInfo.create((err, info) => {
if (err) {
return done(err)
}
original = info
done()
})
})
afterEach(() => {
original.multiaddrs.clear()
original.protocols.clear()
})
it('works', (done) => {
const p = pair()
original.multiaddrs.add(multiaddr('/ip4/127.0.0.1/tcp/5002'))
original.protocols.add('/echo/1.0.0')
original.protocols.add('/ping/1.0.0')
const input = msg.encode({
protocolVersion: 'ipfs/0.1.0',
agentVersion: 'na',
publicKey: original.id.pubKey.bytes,
listenAddrs: [multiaddr('/ip4/127.0.0.1/tcp/5002').buffer],
observedAddr: multiaddr('/ip4/127.0.0.1/tcp/5001').buffer,
protocols: Array.from(original.protocols)
})
pull(
values([input]),
lp.encode(),
p[0]
)
identify.dialer(p[1], (err, info, observedAddrs) => {
expect(err).to.not.exist()
expect(info.id.pubKey.bytes)
.to.eql(original.id.pubKey.bytes)
expect(info.multiaddrs.has(original.multiaddrs.toArray()[0]))
.to.eql(true)
expect(multiaddr('/ip4/127.0.0.1/tcp/5001').equals(observedAddrs[0]))
.to.eql(true)
expect(info.protocols).to.eql(original.protocols)
done()
})
})
it('should handle missing protocols', (done) => {
const p = pair()
original.multiaddrs.add(multiaddr('/ip4/127.0.0.1/tcp/5002'))
const input = msg.encode({
protocolVersion: 'ipfs/0.1.0',
agentVersion: 'na',
publicKey: original.id.pubKey.bytes,
listenAddrs: [multiaddr('/ip4/127.0.0.1/tcp/5002').buffer],
observedAddr: multiaddr('/ip4/127.0.0.1/tcp/5001').buffer,
protocols: Array.from(original.protocols)
})
pull(
values([input]),
lp.encode(),
p[0]
)
identify.dialer(p[1], (err, info, observedAddrs) => {
expect(err).to.not.exist()
expect(info.id.pubKey.bytes)
.to.eql(original.id.pubKey.bytes)
expect(info.multiaddrs.has(original.multiaddrs.toArray()[0]))
.to.eql(true)
expect(multiaddr('/ip4/127.0.0.1/tcp/5001').equals(observedAddrs[0]))
.to.eql(true)
expect(Array.from(info.protocols)).to.eql([])
done()
})
})
it('does not crash with invalid listen addresses', (done) => {
const p = pair()
original.multiaddrs.add(multiaddr('/ip4/127.0.0.1/tcp/5002'))
const input = msg.encode({
protocolVersion: 'ipfs/0.1.0',
agentVersion: 'na',
publicKey: original.id.pubKey.bytes,
listenAddrs: [Buffer.from('ffac010203')],
observedAddr: Buffer.from('ffac010203')
})
pull(
values([input]),
lp.encode(),
p[0]
)
identify.dialer(p[1], (err, info, observedAddrs) => {
expect(err).to.exist()
done()
})
})
it('does not crash with invalid observed address', (done) => {
const p = pair()
original.multiaddrs.add(multiaddr('/ip4/127.0.0.1/tcp/5002'))
const input = msg.encode({
protocolVersion: 'ipfs/0.1.0',
agentVersion: 'na',
publicKey: original.id.pubKey.bytes,
listenAddrs: [multiaddr('/ip4/127.0.0.1/tcp/5002').buffer],
observedAddr: Buffer.from('ffac010203')
})
pull(
values([input]),
lp.encode(),
p[0]
)
identify.dialer(p[1], (err, info, observedAddrs) => {
expect(err).to.exist()
done()
})
})
it('should return an error with mismatched peerInfo data', function (done) {
this.timeout(10e3)
const p = pair()
original.multiaddrs.add(multiaddr('/ip4/127.0.0.1/tcp/5002'))
const input = msg.encode({
protocolVersion: 'ipfs/0.1.0',
agentVersion: 'na',
publicKey: original.id.pubKey.bytes,
listenAddrs: [multiaddr('/ip4/127.0.0.1/tcp/5002').buffer],
observedAddr: multiaddr('/ip4/127.0.0.1/tcp/5001').buffer
})
PeerInfo.create((err, info) => {
if (err) {
return done(err)
}
pull(
values([input]),
lp.encode(),
p[0]
)
identify.dialer(p[1], info, (err, peerInfo) => {
expect(err).to.exist()
expect(peerInfo).to.not.exist()
done()
})
})
})
})

test/identify/index.spec.js (new file, 247 lines)

@@ -0,0 +1,247 @@
'use strict'
/* eslint-env mocha */
const chai = require('chai')
chai.use(require('dirty-chai'))
const { expect } = chai
const sinon = require('sinon')
const delay = require('delay')
const PeerId = require('peer-id')
const PeerInfo = require('peer-info')
const duplexPair = require('it-pair/duplex')
const multiaddr = require('multiaddr')
const { codes: Errors } = require('../../src/errors')
const { IdentifyService, multicodecs } = require('../../src/identify')
const Peers = require('../fixtures/peers')
const Libp2p = require('../../src')
const baseOptions = require('../utils/base-options.browser')
const { MULTIADDRS_WEBSOCKETS } = require('../fixtures/browser')
const remoteAddr = MULTIADDRS_WEBSOCKETS[0]
describe('Identify', () => {
let localPeer
let remotePeer
const protocols = new Map([
[multicodecs.IDENTIFY, () => {}],
[multicodecs.IDENTIFY_PUSH, () => {}]
])
before(async () => {
[localPeer, remotePeer] = (await Promise.all([
PeerId.createFromJSON(Peers[0]),
PeerId.createFromJSON(Peers[1])
])).map(id => new PeerInfo(id))
})
afterEach(() => {
sinon.restore()
})
it('should be able to identify another peer', async () => {
const localIdentify = new IdentifyService({
peerInfo: localPeer,
protocols,
registrar: {
peerStore: {
update: () => {}
}
}
})
const remoteIdentify = new IdentifyService({
peerInfo: remotePeer,
protocols
})
const observedAddr = multiaddr('/ip4/127.0.0.1/tcp/1234')
const localConnectionMock = { newStream: () => {} }
const remoteConnectionMock = { remoteAddr: observedAddr }
const [local, remote] = duplexPair()
sinon.stub(localConnectionMock, 'newStream').returns({ stream: local, protocol: multicodecs.IDENTIFY })
sinon.spy(localIdentify.registrar.peerStore, 'update')
// Run identify
await Promise.all([
localIdentify.identify(localConnectionMock, remotePeer.id),
remoteIdentify.handleMessage({
connection: remoteConnectionMock,
stream: remote,
protocol: multicodecs.IDENTIFY
})
])
expect(localIdentify.registrar.peerStore.update.callCount).to.equal(1)
// Validate the remote peer gets updated in the peer store
const call = localIdentify.registrar.peerStore.update.firstCall
expect(call.args[0].id.bytes).to.equal(remotePeer.id.bytes)
})
it('should throw if identified peer is the wrong peer', async () => {
const localIdentify = new IdentifyService({
peerInfo: localPeer,
protocols
})
const remoteIdentify = new IdentifyService({
peerInfo: remotePeer,
protocols
})
const observedAddr = multiaddr('/ip4/127.0.0.1/tcp/1234')
const localConnectionMock = { newStream: () => {} }
const remoteConnectionMock = { remoteAddr: observedAddr }
const [local, remote] = duplexPair()
sinon.stub(localConnectionMock, 'newStream').returns({ stream: local, protocol: multicodecs.IDENTIFY })
// Run identify
try {
await Promise.all([
localIdentify.identify(localConnectionMock, localPeer.id),
remoteIdentify.handleMessage({
connection: remoteConnectionMock,
stream: remote,
protocol: multicodecs.IDENTIFY
})
])
expect.fail('should have thrown')
} catch (err) {
expect(err).to.exist()
expect(err.code).to.eql(Errors.ERR_INVALID_PEER)
}
})
describe('push', () => {
it('should be able to push identify updates to another peer', async () => {
const localIdentify = new IdentifyService({
peerInfo: localPeer,
registrar: { getConnection: () => {} },
protocols: new Map([
[multicodecs.IDENTIFY],
[multicodecs.IDENTIFY_PUSH],
['/echo/1.0.0']
])
})
const remoteIdentify = new IdentifyService({
peerInfo: remotePeer,
registrar: {
peerStore: {
update: () => {}
}
}
})
// Setup peer protocols and multiaddrs
const localProtocols = new Set([multicodecs.IDENTIFY, multicodecs.IDENTIFY_PUSH, '/echo/1.0.0'])
const listeningAddr = multiaddr('/ip4/127.0.0.1/tcp/1234')
sinon.stub(localPeer.multiaddrs, 'toArray').returns([listeningAddr])
sinon.stub(localPeer, 'protocols').value(localProtocols)
sinon.stub(remotePeer, 'protocols').value(new Set([multicodecs.IDENTIFY, multicodecs.IDENTIFY_PUSH]))
const localConnectionMock = { newStream: () => {} }
const remoteConnectionMock = { remotePeer: localPeer.id }
const [local, remote] = duplexPair()
sinon.stub(localConnectionMock, 'newStream').returns({ stream: local, protocol: multicodecs.IDENTIFY_PUSH })
sinon.spy(IdentifyService, 'updatePeerAddresses')
sinon.spy(IdentifyService, 'updatePeerProtocols')
sinon.spy(remoteIdentify.registrar.peerStore, 'update')
// Run identify
await Promise.all([
localIdentify.push([localConnectionMock]),
remoteIdentify.handleMessage({
connection: remoteConnectionMock,
stream: remote,
protocol: multicodecs.IDENTIFY_PUSH
})
])
expect(IdentifyService.updatePeerAddresses.callCount).to.equal(1)
expect(IdentifyService.updatePeerProtocols.callCount).to.equal(1)
expect(remoteIdentify.registrar.peerStore.update.callCount).to.equal(1)
const [peerInfo] = remoteIdentify.registrar.peerStore.update.firstCall.args
expect(peerInfo.id.bytes).to.eql(localPeer.id.bytes)
expect(peerInfo.multiaddrs.toArray()).to.eql([listeningAddr])
expect(peerInfo.protocols).to.eql(localProtocols)
})
})
describe('libp2p.dialer.identifyService', () => {
let peerInfo
let libp2p
let remoteLibp2p
before(async () => {
const peerId = await PeerId.createFromJSON(Peers[0])
peerInfo = new PeerInfo(peerId)
})
afterEach(async () => {
sinon.restore()
libp2p && await libp2p.stop()
libp2p = null
})
after(async () => {
remoteLibp2p && await remoteLibp2p.stop()
})
it('should run identify automatically after connecting', async () => {
libp2p = new Libp2p({
...baseOptions,
peerInfo
})
sinon.spy(libp2p.dialer.identifyService, 'identify')
sinon.spy(libp2p.peerStore, 'update')
const connection = await libp2p.dialer.connectToMultiaddr(remoteAddr)
expect(connection).to.exist()
// Wait for nextTick to trigger the identify call
await delay(1)
expect(libp2p.dialer.identifyService.identify.callCount).to.equal(1)
await libp2p.dialer.identifyService.identify.firstCall.returnValue
expect(libp2p.peerStore.update.callCount).to.equal(1)
await connection.close()
})
it('should push protocol updates to an already connected peer', async () => {
libp2p = new Libp2p({
...baseOptions,
peerInfo
})
sinon.spy(libp2p.dialer.identifyService, 'identify')
sinon.spy(libp2p.dialer.identifyService, 'push')
sinon.spy(libp2p.peerStore, 'update')
const connection = await libp2p.dialer.connectToMultiaddr(remoteAddr)
expect(connection).to.exist()
// Wait for nextTick to trigger the identify call
await delay(1)
// Wait for identify to finish
await libp2p.dialer.identifyService.identify.firstCall.returnValue
libp2p.handle('/echo/2.0.0', () => {})
libp2p.unhandle('/echo/2.0.0')
// Verify the remote peer is notified of both changes
expect(libp2p.dialer.identifyService.push.callCount).to.equal(2)
for (const call of libp2p.dialer.identifyService.push.getCalls()) {
const [connections] = call.args
expect(connections.length).to.equal(1)
expect(connections[0].remotePeer.toB58String()).to.equal(remoteAddr.getPeerId())
const results = await call.returnValue
expect(results.length).to.equal(1)
}
})
})
})
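
The unit tests above drive IdentifyService directly with mocked connections. Reduced to a standalone sketch, where the wrapper name is illustrative and the registrar and connection shapes are the same mocks the suite uses, not a public API:

'use strict'
const PeerId = require('peer-id')
const PeerInfo = require('peer-info')
const multiaddr = require('multiaddr')
const duplexPair = require('it-pair/duplex')
const Peers = require('../fixtures/peers')
const { IdentifyService, multicodecs } = require('../../src/identify')

async function runIdentify () {
  const [localPeer, remotePeer] = (await Promise.all([
    PeerId.createFromJSON(Peers[0]),
    PeerId.createFromJSON(Peers[1])
  ])).map(id => new PeerInfo(id))

  const protocols = new Map([[multicodecs.IDENTIFY, () => {}]])
  const localIdentify = new IdentifyService({
    peerInfo: localPeer,
    protocols,
    registrar: { peerStore: { update: () => {} } }
  })
  const remoteIdentify = new IdentifyService({ peerInfo: remotePeer, protocols })

  // An in-memory duplex pair stands in for the connection between the two peers
  const [local, remote] = duplexPair()
  const localConnectionMock = {
    newStream: () => ({ stream: local, protocol: multicodecs.IDENTIFY })
  }
  const remoteConnectionMock = { remoteAddr: multiaddr('/ip4/127.0.0.1/tcp/1234') }

  // The dialing side runs identify(); the listening side answers via handleMessage()
  await Promise.all([
    localIdentify.identify(localConnectionMock, remotePeer.id),
    remoteIdentify.handleMessage({
      connection: remoteConnectionMock,
      stream: remote,
      protocol: multicodecs.IDENTIFY
    })
  ])
}

runIdentify()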


@@ -1,70 +0,0 @@
/* eslint-env mocha */
'use strict'
const pull = require('pull-stream/pull')
const collect = require('pull-stream/sinks/collect')
const chai = require('chai')
const dirtyChai = require('dirty-chai')
const expect = chai.expect
chai.use(dirtyChai)
const pair = require('pull-pair/duplex')
const PeerInfo = require('peer-info')
const lp = require('pull-length-prefixed')
const multiaddr = require('multiaddr')
const identify = require('../../src/identify')
const msg = identify.message
describe('identify.listener', () => {
let info
beforeEach(function (done) {
this.timeout(20 * 1000)
PeerInfo.create((err, _info) => {
if (err) {
return done(err)
}
_info.protocols.add('/echo/1.0.0')
_info.protocols.add('/chat/1.0.0')
info = _info
done()
})
})
it('works', (done) => {
const p = pair()
info.multiaddrs.add(multiaddr('/ip4/127.0.0.1/tcp/5002'))
pull(
p[1],
lp.decode(),
collect((err, result) => {
expect(err).to.not.exist()
const input = msg.decode(result[0])
expect(
input
).to.be.eql({
protocolVersion: 'ipfs/0.1.0',
agentVersion: 'na',
publicKey: info.id.pubKey.bytes,
listenAddrs: [multiaddr('/ip4/127.0.0.1/tcp/5002').buffer],
observedAddr: multiaddr('/ip4/127.0.0.1/tcp/5001').buffer,
protocols: ['/echo/1.0.0', '/chat/1.0.0']
})
done()
})
)
const conn = p[0]
conn.getObservedAddrs = (cb) => {
cb(null, [multiaddr('/ip4/127.0.0.1/tcp/5001')])
}
identify.listener(conn, info)
})
})


@@ -0,0 +1,13 @@
'use strict'
/* eslint-env mocha */
const tests = require('libp2p-interfaces/src/crypto/tests')
const plaintext = require('../../src/insecure/plaintext')
describe('plaintext compliance', () => {
tests({
setup () {
return plaintext
}
})
})


@@ -0,0 +1,69 @@
'use strict'
/* eslint-env mocha */
const chai = require('chai')
chai.use(require('dirty-chai'))
const { expect } = chai
const sinon = require('sinon')
const PeerId = require('peer-id')
const duplexPair = require('it-pair/duplex')
const peers = require('../fixtures/peers')
const plaintext = require('../../src/insecure/plaintext')
const {
InvalidCryptoExchangeError,
UnexpectedPeerError
} = require('libp2p-interfaces/src/crypto/errors')
describe('plaintext', () => {
let localPeer
let remotePeer
let wrongPeer
before(async () => {
[localPeer, remotePeer, wrongPeer] = await Promise.all([
PeerId.createFromJSON(peers[0]),
PeerId.createFromJSON(peers[1]),
PeerId.createFromJSON(peers[2])
])
})
afterEach(() => {
sinon.restore()
})
it('should verify the public key and id match', () => {
const [localConn, remoteConn] = duplexPair()
// When we attempt to get the remote peer's key, return the wrong peer's public key
sinon.stub(remotePeer, 'marshalPubKey').callsFake(() => {
return wrongPeer.marshalPubKey()
})
return Promise.all([
plaintext.secureInbound(remotePeer, localConn),
plaintext.secureOutbound(localPeer, remoteConn, remotePeer)
]).then(() => expect.fail('should have failed'), (err) => {
expect(err).to.exist()
expect(err).to.have.property('code', UnexpectedPeerError.code)
})
})
it('should fail if the peer does not provide its public key', () => {
const [localConn, remoteConn] = duplexPair()
// When we attempt to get the remote peer's key, return an empty buffer to simulate a missing public key
sinon.stub(remotePeer, 'marshalPubKey').callsFake(() => {
return Buffer.alloc(0)
})
return Promise.all([
plaintext.secureInbound(remotePeer, localConn),
plaintext.secureOutbound(localPeer, remoteConn, remotePeer)
]).then(() => expect.fail('should have failed'), (err) => {
expect(err).to.exist()
expect(err).to.have.property('code', InvalidCryptoExchangeError.code)
})
})
})
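
Both plaintext suites above focus on compliance and failure handling. The successful exchange they build on looks roughly like this, a sketch reusing the same fixtures and it-pair helper; the wrapper function name is illustrative.

'use strict'
const PeerId = require('peer-id')
const duplexPair = require('it-pair/duplex')
const peers = require('../fixtures/peers')
const plaintext = require('../../src/insecure/plaintext')

async function handshake () {
  const [localPeer, remotePeer] = await Promise.all([
    PeerId.createFromJSON(peers[0]),
    PeerId.createFromJSON(peers[1])
  ])

  // An in-memory duplex pair stands in for a real transport connection
  const [localConn, remoteConn] = duplexPair()

  // The dialer secures outbound while the listener secures inbound; both
  // resolve once the exchange completes and the peer ids have been verified
  return Promise.all([
    plaintext.secureInbound(remotePeer, localConn),
    plaintext.secureOutbound(localPeer, remoteConn, remotePeer)
  ])
}

handshake()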


@@ -1,41 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const series = require('async/series')
const createNode = require('./utils/create-node')
describe('multiaddr trim', () => {
it('non used multiaddrs get trimmed', (done) => {
let node
series([
(cb) => createNode([
'/ip4/0.0.0.0/tcp/999/wss/p2p-webrtc-direct',
'/ip4/127.0.0.1/tcp/55555/ws',
'/ip4/0.0.0.0/tcp/0/'
], (err, _node) => {
expect(err).to.not.exist()
node = _node
const multiaddrs = node.peerInfo.multiaddrs.toArray()
expect(multiaddrs).to.have.length(3)
cb()
}),
(cb) => node.start(cb)
], (err) => {
expect(err).to.not.exist()
const multiaddrs = node.peerInfo.multiaddrs.toArray()
expect(multiaddrs.length).to.be.at.least(2)
// ensure the p2p-webrtc-direct address has been trimmed
multiaddrs.forEach((addr) => {
expect(() => addr.decapsulate('/ip4/0.0.0.0/tcp/999/wss/p2p-webrtc-direct')).to.throw()
})
node.stop(done)
})
})
})


@@ -1,18 +1,14 @@
'use strict'
require('./pnet.node')
require('./transports.node')
require('./stream-muxing.node')
require('./peer-discovery.node')
require('./peer-routing.node')
require('./ping.node')
require('./promisify.node')
require('./pubsub.node')
require('./content-routing.node')
require('./circuit-relay.node')
require('./multiaddr-trim.node')
require('./stats')
require('./dht.node')
const glob = require('glob')
const path = require('path')
require('./ping/node')
require('./switch/node')
// Automatically require test files so we don't have to worry about adding new ones
glob('test/**/*.node.js', function (err, testPaths) {
if (err) throw err
if (testPaths.length < 1) throw new Error('Could not find any node test files')
testPaths.forEach(file => {
require(path.resolve(file))
})
})


@@ -1,494 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const sinon = require('sinon')
const parallel = require('async/parallel')
const crypto = require('crypto')
const createNode = require('./utils/create-node')
const echo = require('./utils/echo')
const { WRTC_RENDEZVOUS_MULTIADDR } = require('./utils/constants')
describe('peer discovery', () => {
let nodeA
let nodeB
let nodeC
function setup (options) {
before((done) => {
parallel([
(cb) => createNode([
'/ip4/0.0.0.0/tcp/0',
`${WRTC_RENDEZVOUS_MULTIADDR.toString()}/p2p-webrtc-star`
], options, (err, node) => {
expect(err).to.not.exist()
nodeA = node
node.handle('/echo/1.0.0', echo)
node.start(cb)
}),
(cb) => createNode([
'/ip4/0.0.0.0/tcp/0',
`${WRTC_RENDEZVOUS_MULTIADDR.toString()}/p2p-webrtc-star`
], options, (err, node) => {
expect(err).to.not.exist()
nodeB = node
node.handle('/echo/1.0.0', echo)
node.start(cb)
}),
(cb) => createNode([
'/ip4/0.0.0.0/tcp/0',
`${WRTC_RENDEZVOUS_MULTIADDR.toString()}/p2p-webrtc-star`
], options, (err, node) => {
expect(err).to.not.exist()
nodeC = node
node.handle('/echo/1.0.0', echo)
node.start(cb)
})
], done)
})
after((done) => {
parallel([
(cb) => nodeA.stop(cb),
(cb) => nodeB.stop(cb),
(cb) => nodeC.stop(cb)
], done)
})
afterEach(() => {
sinon.restore()
})
}
describe('module registration', () => {
it('should enable by default a module passed as an object', (done) => {
const mockDiscovery = {
on: sinon.stub(),
removeListener: sinon.stub(),
start: sinon.stub().callsArg(0),
stop: sinon.stub().callsArg(0)
}
const options = { modules: { peerDiscovery: [mockDiscovery] } }
createNode(['/ip4/0.0.0.0/tcp/0'], options, (err, node) => {
expect(err).to.not.exist()
node.start((err) => {
expect(err).to.not.exist()
expect(mockDiscovery.start.called).to.be.true()
node.stop(done)
})
})
})
it('should enable by default a module passed as a function', (done) => {
const mockDiscovery = {
on: sinon.stub(),
removeListener: sinon.stub(),
start: sinon.stub().callsArg(0),
stop: sinon.stub().callsArg(0)
}
const MockDiscovery = sinon.stub().returns(mockDiscovery)
const options = { modules: { peerDiscovery: [MockDiscovery] } }
createNode(['/ip4/0.0.0.0/tcp/0'], options, (err, node) => {
expect(err).to.not.exist()
node.start((err) => {
expect(err).to.not.exist()
expect(mockDiscovery.start.called).to.be.true()
node.stop(done)
})
})
})
it('should enable module by configuration', (done) => {
const mockDiscovery = {
on: sinon.stub(),
removeListener: sinon.stub(),
start: sinon.stub().callsArg(0),
stop: sinon.stub().callsArg(0),
tag: 'mockDiscovery'
}
const enabled = sinon.stub().returns(true)
const options = {
modules: { peerDiscovery: [mockDiscovery] },
config: {
peerDiscovery: {
mockDiscovery: {
get enabled () {
return enabled()
}
}
}
}
}
createNode(['/ip4/0.0.0.0/tcp/0'], options, (err, node) => {
expect(err).to.not.exist()
node.start((err) => {
expect(err).to.not.exist()
expect(mockDiscovery.start.called).to.be.true()
expect(enabled.called).to.be.true()
node.stop(done)
})
})
})
it('should disable module by configuration', (done) => {
const mockDiscovery = {
on: sinon.stub(),
removeListener: sinon.stub(),
start: sinon.stub().callsArg(0),
stop: sinon.stub().callsArg(0),
tag: 'mockDiscovery'
}
const disabled = sinon.stub().returns(false)
const options = {
modules: { peerDiscovery: [mockDiscovery] },
config: {
peerDiscovery: {
mockDiscovery: {
get enabled () {
return disabled()
}
}
}
}
}
createNode(['/ip4/0.0.0.0/tcp/0'], options, (err, node) => {
expect(err).to.not.exist()
node.start((err) => {
expect(err).to.not.exist()
expect(mockDiscovery.start.called).to.be.false()
expect(disabled.called).to.be.true()
node.stop(done)
})
})
})
it('should register module passed as function', (done) => {
const mockDiscovery = {
on: sinon.stub(),
removeListener: sinon.stub(),
start: sinon.stub().callsArg(0),
stop: sinon.stub().callsArg(0)
}
const MockDiscovery = sinon.stub().returns(mockDiscovery)
MockDiscovery.tag = 'mockDiscovery'
const options = {
modules: { peerDiscovery: [MockDiscovery] },
config: {
peerDiscovery: {
mockDiscovery: {
enabled: true,
time: Date.now()
}
}
}
}
createNode(['/ip4/0.0.0.0/tcp/0'], options, (err, node) => {
expect(err).to.not.exist()
node.start((err) => {
expect(err).to.not.exist()
expect(mockDiscovery.start.called).to.be.true()
expect(MockDiscovery.called).to.be.true()
// Ensure configuration was passed
expect(MockDiscovery.firstCall.args[0])
.to.deep.include(options.config.peerDiscovery.mockDiscovery)
node.stop(done)
})
})
})
it('should register module passed as object', (done) => {
const mockDiscovery = {
on: sinon.stub(),
removeListener: sinon.stub(),
start: sinon.stub().callsArg(0),
stop: sinon.stub().callsArg(0),
tag: 'mockDiscovery'
}
const options = {
modules: { peerDiscovery: [mockDiscovery] },
config: {
peerDiscovery: {
mockDiscovery: { enabled: true }
}
}
}
createNode(['/ip4/0.0.0.0/tcp/0'], options, (err, node) => {
expect(err).to.not.exist()
node.start((err) => {
expect(err).to.not.exist()
expect(mockDiscovery.start.called).to.be.true()
node.stop(done)
})
})
})
})
describe('discovery scenarios', () => {
setup({
config: {
dht: {
enabled: false
},
peerDiscovery: {
autoDial: false,
bootstrap: {
enabled: true,
list: []
}
}
}
})
it('should ignore self on discovery', function () {
const discoverySpy = sinon.spy()
nodeA.on('peer:discovery', discoverySpy)
nodeA._discovery[0].emit('peer', nodeA.peerInfo)
expect(discoverySpy.called).to.eql(false)
expect(nodeA.peerBook.getAllArray()).to.have.length(0)
})
})
describe('MulticastDNS', () => {
setup({
config: {
dht: {
enabled: false
},
peerDiscovery: {
autoDial: true,
mdns: {
enabled: true,
interval: 200, // discover quickly
// use a random tag to prevent CI collision
serviceTag: crypto.randomBytes(10).toString('hex')
}
}
}
})
it('find peers', function (done) {
const expectedPeers = new Set([
nodeB.peerInfo.id.toB58String(),
nodeC.peerInfo.id.toB58String()
])
function finish () {
nodeA.removeAllListeners('peer:discovery')
expect(expectedPeers.size).to.eql(0)
done()
}
nodeA.on('peer:discovery', (peerInfo) => {
expectedPeers.delete(peerInfo.id.toB58String())
if (expectedPeers.size === 0) {
finish()
}
})
})
})
// TODO needs a delay (this test is already long)
describe.skip('WebRTCStar', () => {
setup({
config: {
dht: {
enabled: false
},
peerDiscovery: {
autoDial: true,
webRTCStar: {
enabled: true
}
}
}
})
it('find peers', function (done) {
this.timeout(20e3)
const expectedPeers = new Set([
nodeB.peerInfo.id.toB58String(),
nodeC.peerInfo.id.toB58String()
])
function finish () {
nodeA.removeAllListeners('peer:discovery')
expect(expectedPeers.size).to.eql(0)
done()
}
nodeA.on('peer:discovery', (peerInfo) => {
expectedPeers.delete(peerInfo.id.toB58String())
if (expectedPeers.size === 0) {
finish()
}
})
})
})
describe('MulticastDNS + WebRTCStar', () => {
setup({
config: {
dht: {
enabled: false
},
peerDiscovery: {
autoDial: true,
mdns: {
enabled: true,
interval: 200, // discover quickly
// use a random tag to prevent CI collision
serviceTag: crypto.randomBytes(10).toString('hex')
},
webRTCStar: {
enabled: true
}
}
}
})
it('find peers', function (done) {
const expectedPeers = new Set([
nodeB.peerInfo.id.toB58String(),
nodeC.peerInfo.id.toB58String()
])
function finish () {
nodeA.removeAllListeners('peer:discovery')
expect(expectedPeers.size).to.eql(0)
done()
}
nodeA.on('peer:discovery', (peerInfo) => {
expectedPeers.delete(peerInfo.id.toB58String())
if (expectedPeers.size === 0) {
finish()
}
})
})
})
describe('dht', () => {
setup({
config: {
peerDiscovery: {
autoDial: true,
mdns: {
enabled: false
},
webRTCStar: {
enabled: false
}
},
dht: {
enabled: true,
kBucketSize: 20,
randomWalk: {
enabled: true,
queriesPerPeriod: 1,
delay: 100,
interval: 200, // start the query sooner
timeout: 3000
}
}
}
})
it('find peers through the dht', function (done) {
const expectedPeers = new Set([
nodeB.peerInfo.id.toB58String(),
nodeC.peerInfo.id.toB58String()
])
function finish () {
nodeA.removeAllListeners('peer:discovery')
expect(expectedPeers.size).to.eql(0)
done()
}
nodeA.on('peer:discovery', (peerInfo) => {
expectedPeers.delete(peerInfo.id.toB58String())
if (expectedPeers.size === 0) {
finish()
}
})
// Topology:
// A -> B
// C -> B
nodeA.dial(nodeB.peerInfo, (err) => {
expect(err).to.not.exist()
})
nodeC.dial(nodeB.peerInfo, (err) => {
expect(err).to.not.exist()
})
})
})
describe('auto dial', () => {
setup({
connectionManager: {
minPeers: 1
},
config: {
peerDiscovery: {
autoDial: true,
mdns: {
enabled: false
},
webRTCStar: {
enabled: false
},
bootstrap: {
enabled: true,
list: []
}
},
dht: {
enabled: false
}
}
})
it('should only dial when the peer count is below the low watermark', (done) => {
const bootstrap = nodeA._discovery[0]
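// Stub the dialer and fake an established connection so the minPeers watermark is reached after the first dial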
sinon.stub(nodeA._switch.dialer, 'connect').callsFake((peerInfo) => {
nodeA._switch.connection.connections[peerInfo.id.toB58String()] = []
})
bootstrap.emit('peer', nodeB.peerInfo)
bootstrap.emit('peer', nodeC.peerInfo)
// Only nodeB should get dialed
expect(nodeA._switch.dialer.connect.callCount).to.eql(1)
expect(nodeA._switch.dialer.connect.getCall(0).args[0]).to.eql(nodeB.peerInfo)
done()
})
})
})


@ -1,294 +0,0 @@
/* eslint-env mocha */
/* eslint max-nested-callbacks: ["error", 8] */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const parallel = require('async/parallel')
const _times = require('lodash.times')
const DelegatedPeerRouter = require('libp2p-delegated-peer-routing')
const sinon = require('sinon')
const nock = require('nock')
const createNode = require('./utils/create-node')
describe('.peerRouting', () => {
describe('via the dht', () => {
let nodeA
let nodeB
let nodeC
let nodeD
let nodeE
before('create the outer ring of connections', (done) => {
const tasks = _times(5, () => (cb) => {
createNode('/ip4/0.0.0.0/tcp/0', (err, node) => {
expect(err).to.not.exist()
node.start((err) => cb(err, node))
})
})
parallel(tasks, (err, nodes) => {
expect(err).to.not.exist()
nodeA = nodes[0]
nodeB = nodes[1]
nodeC = nodes[2]
nodeD = nodes[3]
nodeE = nodes[4]
parallel([
(cb) => nodeA.dial(nodeB.peerInfo, cb),
(cb) => nodeB.dial(nodeC.peerInfo, cb),
(cb) => nodeC.dial(nodeD.peerInfo, cb),
(cb) => nodeD.dial(nodeE.peerInfo, cb),
(cb) => nodeE.dial(nodeA.peerInfo, cb)
], (err) => {
expect(err).to.not.exist()
// Give the kbucket time to fill in the dht
setTimeout(done, 250)
})
})
})
after((done) => {
parallel([
(cb) => nodeA.stop(cb),
(cb) => nodeB.stop(cb),
(cb) => nodeC.stop(cb),
(cb) => nodeD.stop(cb),
(cb) => nodeE.stop(cb)
], done)
})
it('should use the nodes dht', (done) => {
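// Stub the dht's findPeer so the test resolves as soon as peerRouting delegates to it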
const stub = sinon.stub(nodeA._dht, 'findPeer').callsFake(() => {
stub.restore()
done()
})
nodeA.peerRouting.findPeer()
})
describe('connected in a ring', () => {
it('should be able to find a peer we are not directly connected to', (done) => {
parallel([
(cb) => nodeA.dial(nodeC.peerInfo.id, cb),
(cb) => nodeB.dial(nodeD.peerInfo.id, cb),
(cb) => nodeC.dial(nodeE.peerInfo.id, cb)
], (err) => {
if (err) throw err
expect(err).to.not.exist()
nodeB.peerRouting.findPeer(nodeE.peerInfo.id, (err, peerInfo) => {
expect(err).to.not.exist()
expect(nodeE.peerInfo.id.toB58String()).to.equal(peerInfo.id.toB58String())
done()
})
})
})
})
})
describe('via a delegate', () => {
let nodeA
let delegate
before((done) => {
parallel([
// Create the node using the delegate
(cb) => {
delegate = new DelegatedPeerRouter({
host: 'ipfs.io',
protocol: 'https',
port: '443'
})
createNode('/ip4/0.0.0.0/tcp/0', {
modules: {
peerRouting: [delegate]
},
config: {
dht: {
enabled: false
}
}
}, (err, node) => {
expect(err).to.not.exist()
nodeA = node
nodeA.start(cb)
})
}
], done)
})
after((done) => nodeA.stop(done))
afterEach(() => nock.cleanAll())
it('should use the delegate router to find peers', (done) => {
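// Stub the delegate's findPeer so the test resolves as soon as peerRouting delegates to it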
const stub = sinon.stub(delegate, 'findPeer').callsFake(() => {
stub.restore()
done()
})
nodeA.peerRouting.findPeer()
})
it('should be able to find a peer', (done) => {
const peerKey = 'QmTp9VkYvnHyrqKQuFPiuZkiX9gPcqj6x5LJ1rmWuSySnL'
const mockApi = nock('https://ipfs.io')
.post('/api/v0/dht/findpeer')
.query({
arg: peerKey,
timeout: '30000ms',
'stream-channels': true
})
.reply(200, `{"Extra":"","ID":"some other id","Responses":null,"Type":0}\n{"Extra":"","ID":"","Responses":[{"Addrs":["/ip4/127.0.0.1/tcp/4001"],"ID":"${peerKey}"}],"Type":2}\n`, [
'Content-Type', 'application/json',
'X-Chunked-Output', '1'
])
nodeA.peerRouting.findPeer(peerKey, (err, peerInfo) => {
expect(err).to.not.exist()
expect(peerInfo.id.toB58String()).to.equal(peerKey)
expect(mockApi.isDone()).to.equal(true)
done()
})
})
it('should error when a peer cannot be found', (done) => {
const peerKey = 'key of a peer not on the network'
const mockApi = nock('https://ipfs.io')
.post('/api/v0/dht/findpeer')
.query({
arg: peerKey,
timeout: '30000ms',
'stream-channels': true
})
.reply(200, '{"Extra":"","ID":"some other id","Responses":null,"Type":6}\n{"Extra":"","ID":"yet another id","Responses":null,"Type":0}\n{"Extra":"routing:not found","ID":"","Responses":null,"Type":3}\n', [
'Content-Type', 'application/json',
'X-Chunked-Output', '1'
])
nodeA.peerRouting.findPeer(peerKey, (err, peerInfo) => {
expect(err).to.exist()
expect(peerInfo).to.not.exist()
expect(mockApi.isDone()).to.equal(true)
done()
})
})
it('should handle errors from the api', (done) => {
const peerKey = 'key of a peer not on the network'
const mockApi = nock('https://ipfs.io')
.post('/api/v0/dht/findpeer')
.query({
arg: peerKey,
timeout: '30000ms',
'stream-channels': true
})
.reply(502)
nodeA.peerRouting.findPeer(peerKey, (err, peerInfo) => {
expect(err).to.exist()
expect(peerInfo).to.not.exist()
expect(mockApi.isDone()).to.equal(true)
done()
})
})
})
describe('via the dht and a delegate', () => {
let nodeA
let delegate
before((done) => {
parallel([
// Create the node using the delegate
(cb) => {
delegate = new DelegatedPeerRouter({
host: 'ipfs.io',
protocol: 'https',
port: '443'
})
createNode('/ip4/0.0.0.0/tcp/0', {
modules: {
peerRouting: [delegate]
}
}, (err, node) => {
expect(err).to.not.exist()
nodeA = node
nodeA.start(cb)
})
}
], done)
})
after((done) => nodeA.stop(done))
describe('findPeer', () => {
it('should only use the dht if it finds the peer', (done) => {
const results = [true]
const dhtStub = sinon.stub(nodeA._dht, 'findPeer').callsArgWith(2, null, results)
const delegateStub = sinon.stub(delegate, 'findPeer').throws(() => {
return new Error('the delegate should not have been called')
})
nodeA.peerRouting.findPeer('a peer id', (err, peer) => {
expect(err).to.not.exist()
expect(peer).to.equal(results)
expect(dhtStub.calledOnce).to.equal(true)
expect(delegateStub.notCalled).to.equal(true)
delegateStub.restore()
dhtStub.restore()
done()
})
})
it('should use the delegate if the dht fails to find the peer', (done) => {
const results = [true]
const dhtStub = sinon.stub(nodeA._dht, 'findPeer').callsArgWith(2, null, undefined)
const delegateStub = sinon.stub(delegate, 'findPeer').callsArgWith(2, null, results)
nodeA.peerRouting.findPeer('a peer id', (err, peer) => {
expect(err).to.not.exist()
expect(peer).to.deep.equal(results)
expect(dhtStub.calledOnce).to.equal(true)
expect(delegateStub.calledOnce).to.equal(true)
delegateStub.restore()
dhtStub.restore()
done()
})
})
})
})
describe('no routers', () => {
let nodeA
before((done) => {
createNode('/ip4/0.0.0.0/tcp/0', {
config: {
dht: {
enabled: false
}
}
}, (err, node) => {
expect(err).to.not.exist()
nodeA = node
done()
})
})
it('.findPeer should return an error with no options', (done) => {
nodeA.peerRouting.findPeer('a cid', (err) => {
expect(err).to.exist()
done()
})
})
it('.findPeer should return an error with options', (done) => {
nodeA.peerRouting.findPeer('a cid', { maxTimeout: 5000 }, (err) => {
expect(err).to.exist()
done()
})
})
})
})


@ -0,0 +1,220 @@
'use strict'
/* eslint-env mocha */
const chai = require('chai')
chai.use(require('dirty-chai'))
const { expect } = chai
const sinon = require('sinon')
const pDefer = require('p-defer')
const mergeOptions = require('merge-options')
const Libp2p = require('../../src')
const PeerStore = require('../../src/peer-store')
const multiaddr = require('multiaddr')
const baseOptions = require('../utils/base-options')
const peerUtils = require('../utils/creators/peer')
const mockConnection = require('../utils/mockConnection')
const addr = multiaddr('/ip4/127.0.0.1/tcp/8000')
const listenAddr = multiaddr('/ip4/127.0.0.1/tcp/0')
describe('peer-store', () => {
let peerStore
beforeEach(() => {
peerStore = new PeerStore()
})
it('should add a new peer and emit it when it does not exist', async () => {
const defer = pDefer()
sinon.spy(peerStore, 'put')
sinon.spy(peerStore, 'add')
sinon.spy(peerStore, 'update')
const [peerInfo] = await peerUtils.createPeerInfo(1)
peerStore.on('peer', (peer) => {
expect(peer).to.exist()
defer.resolve()
})
peerStore.put(peerInfo)
// Wait for peerStore to emit the peer
await defer.promise
expect(peerStore.put.callCount).to.equal(1)
expect(peerStore.add.callCount).to.equal(1)
expect(peerStore.update.callCount).to.equal(0)
})
it('should update peer when it is already in the store', async () => {
const [peerInfo] = await peerUtils.createPeerInfo(1)
// Put the peer in the store
peerStore.put(peerInfo)
sinon.spy(peerStore, 'put')
sinon.spy(peerStore, 'add')
sinon.spy(peerStore, 'update')
// When updating, peer event must not be emitted
peerStore.on('peer', () => {
throw new Error('should not emit twice')
})
// If no multiaddrs change, the event should not be emitted
peerStore.on('change:multiaddrs', () => {
throw new Error('should not emit change:multiaddrs')
})
// If no protocols change, the event should not be emitted
peerStore.on('change:protocols', () => {
throw new Error('should not emit change:protocols')
})
peerStore.put(peerInfo)
expect(peerStore.put.callCount).to.equal(1)
expect(peerStore.add.callCount).to.equal(0)
expect(peerStore.update.callCount).to.equal(1)
})
it('should emit the "change:multiaddrs" event when a peer has new multiaddrs', async () => {
const defer = pDefer()
const [createdPeerInfo] = await peerUtils.createPeerInfo(1)
// Put the peer in the store
peerStore.put(createdPeerInfo)
// When updating, "change:multiaddrs" event must not be emitted
peerStore.on('change:multiaddrs', ({ peerInfo, multiaddrs }) => {
expect(peerInfo).to.exist()
expect(peerInfo.id).to.eql(createdPeerInfo.id)
expect(peerInfo.protocols).to.eql(createdPeerInfo.protocols)
expect(multiaddrs).to.exist()
expect(multiaddrs).to.eql(createdPeerInfo.multiaddrs.toArray())
defer.resolve()
})
// If no protocols change, the event should not be emitted
peerStore.on('change:protocols', () => {
throw new Error('should not emit change:protocols')
})
createdPeerInfo.multiaddrs.add(addr)
peerStore.put(createdPeerInfo)
// Wait for peerStore to emit the event
await defer.promise
})
it('should emit the "change:protocols" event when a peer has new protocols', async () => {
const defer = pDefer()
const [createdPeerInfo] = await peerUtils.createPeerInfo(1)
// Put the peer in the store
peerStore.put(createdPeerInfo)
// If no multiaddrs change, the event should not be emitted
peerStore.on('change:multiaddrs', () => {
throw new Error('should not emit change:multiaddrs')
})
// When updating, "change:protocols" event must be emitted
peerStore.on('change:protocols', ({ peerInfo, protocols }) => {
expect(peerInfo).to.exist()
expect(peerInfo.id).to.eql(createdPeerInfo.id)
expect(peerInfo.multiaddrs).to.eql(createdPeerInfo.multiaddrs)
expect(protocols).to.exist()
expect(protocols).to.eql(Array.from(createdPeerInfo.protocols))
defer.resolve()
})
createdPeerInfo.protocols.add('/new-protocol/1.0.0')
peerStore.put(createdPeerInfo)
// Wait for peerStore to emit the event
await defer.promise
})
it('should be able to retrieve a peer from store through its b58str id', async () => {
const [peerInfo] = await peerUtils.createPeerInfo(1)
const id = peerInfo.id.toB58String()
let retrievedPeer = peerStore.get(id)
expect(retrievedPeer).to.not.exist()
// Put the peer in the store
peerStore.put(peerInfo)
retrievedPeer = peerStore.get(id)
expect(retrievedPeer).to.exist()
expect(retrievedPeer.id).to.equal(peerInfo.id)
expect(retrievedPeer.multiaddrs).to.eql(peerInfo.multiaddrs)
expect(retrievedPeer.protocols).to.eql(peerInfo.protocols)
})
it('should be able to remove a peer from store through its b58str id', async () => {
const [peerInfo] = await peerUtils.createPeerInfo(1)
const id = peerInfo.id.toB58String()
let removed = peerStore.remove(id)
expect(removed).to.eql(false)
// Put the peer in the store
peerStore.put(peerInfo)
expect(peerStore.peers.size).to.equal(1)
removed = peerStore.remove(id)
expect(removed).to.eql(true)
expect(peerStore.peers.size).to.equal(0)
})
})
describe('peer-store on dial', () => {
let peerInfo
let remotePeerInfo
let libp2p
let remoteLibp2p
before(async () => {
[peerInfo, remotePeerInfo] = await peerUtils.createPeerInfoFromFixture(2)
remoteLibp2p = new Libp2p(mergeOptions(baseOptions, {
peerInfo: remotePeerInfo
}))
})
after(async () => {
sinon.restore()
await remoteLibp2p.stop()
libp2p && await libp2p.stop()
})
it('should put the remote peerInfo after dial and emit event', async () => {
const remoteId = remotePeerInfo.id.toB58String()
libp2p = new Libp2p(mergeOptions(baseOptions, {
peerInfo
}))
sinon.spy(libp2p.peerStore, 'put')
sinon.spy(libp2p.peerStore, 'add')
sinon.spy(libp2p.peerStore, 'update')
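// Stub the dialer to return a mock connection so no real transport is used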
sinon.stub(libp2p.dialer, 'connectToMultiaddr').returns(mockConnection({
remotePeer: remotePeerInfo.id
}))
const connection = await libp2p.dial(listenAddr)
await connection.close()
expect(libp2p.peerStore.put.callCount).to.equal(1)
expect(libp2p.peerStore.add.callCount).to.equal(1)
expect(libp2p.peerStore.update.callCount).to.equal(0)
const storedPeer = libp2p.peerStore.get(remoteId)
expect(storedPeer).to.exist()
})
})
describe('peer-store on discovery', () => {
// TODO: implement with discovery
})


@ -1,61 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const parallel = require('async/parallel')
const createNode = require('./utils/create-node.js')
const echo = require('./utils/echo')
describe('ping', () => {
let nodeA
let nodeB
before((done) => {
parallel([
(cb) => createNode('/ip4/0.0.0.0/tcp/0', (err, node) => {
expect(err).to.not.exist()
nodeA = node
node.handle('/echo/1.0.0', echo)
node.start(cb)
}),
(cb) => createNode('/ip4/0.0.0.0/tcp/0', (err, node) => {
expect(err).to.not.exist()
nodeB = node
node.handle('/echo/1.0.0', echo)
node.start(cb)
})
], done)
})
after((done) => {
parallel([
(cb) => nodeA.stop(cb),
(cb) => nodeB.stop(cb)
], done)
})
it('should be able to ping another node', (done) => {
nodeA.ping(nodeB.peerInfo, (err, ping) => {
expect(err).to.not.exist()
ping.once('ping', (time) => {
expect(time).to.exist()
ping.stop()
done()
})
ping.start()
})
})
it('should not be able to ping when stopped', (done) => {
nodeA.stop(() => {
nodeA.ping(nodeB.peerInfo, (err) => {
expect(err).to.exist()
done()
})
})
})
})


@ -1,3 +0,0 @@
'use strict'
require('./test-ping.js')


@ -1,118 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
const dirtyChai = require('dirty-chai')
const expect = chai.expect
chai.use(dirtyChai)
const PeerInfo = require('peer-info')
const PeerBook = require('peer-book')
const Swarm = require('../../src/switch')
const TCP = require('libp2p-tcp')
const series = require('async/series')
const parallel = require('async/parallel')
const Ping = require('../../src/ping')
describe('libp2p ping', () => {
let swarmA
let swarmB
let peerA
let peerB
before(function (done) {
this.timeout(20 * 1000)
series([
(cb) => PeerInfo.create((err, peerInfo) => {
expect(err).to.not.exist()
peerA = peerInfo
peerA.multiaddrs.add('/ip4/127.0.0.1/tcp/0')
cb()
}),
(cb) => PeerInfo.create((err, peerInfo) => {
expect(err).to.not.exist()
peerB = peerInfo
peerB.multiaddrs.add('/ip4/127.0.0.1/tcp/0')
cb()
}),
(cb) => {
swarmA = new Swarm(peerA, new PeerBook())
swarmB = new Swarm(peerB, new PeerBook())
swarmA.transport.add('tcp', new TCP())
swarmB.transport.add('tcp', new TCP())
cb()
},
(cb) => swarmA.start(cb),
(cb) => swarmB.start(cb),
(cb) => {
Ping.mount(swarmA)
Ping.mount(swarmB)
cb()
}
], done)
})
after((done) => {
parallel([
(cb) => swarmA.stop(cb),
(cb) => swarmB.stop(cb)
], done)
})
it('ping once from peerA to peerB', (done) => {
const p = new Ping(swarmA, peerB)
p.on('error', (err) => {
expect(err).to.not.exist()
})
p.on('ping', (time) => {
expect(time).to.be.a('Number')
p.stop()
done()
})
p.start()
})
it('ping 5 times from peerB to peerA', (done) => {
const p = new Ping(swarmB, peerA)
p.on('error', (err) => {
expect(err).to.not.exist()
})
let counter = 0
p.on('ping', (time) => {
expect(time).to.be.a('Number')
if (++counter === 5) {
p.stop()
done()
}
})
p.start()
})
it('cannot ping itself', (done) => {
const p = new Ping(swarmA, peerA)
p.on('error', (err) => {
expect(err).to.exist()
done()
})
p.on('ping', () => {
expect.fail('should not be called')
})
p.start()
})
it('unmount PING protocol', () => {
Ping.unmount(swarmA)
Ping.unmount(swarmB)
})
})


@ -1,92 +0,0 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const PeerInfo = require('peer-info')
const PeerId = require('peer-id')
const waterfall = require('async/waterfall')
const WS = require('libp2p-websockets')
const defaultsDeep = require('@nodeutils/defaults-deep')
const DHT = require('libp2p-kad-dht')
const Libp2p = require('../src')
describe('private network', () => {
let config
before((done) => {
waterfall([
(cb) => PeerId.create({ bits: 512 }, cb),
(peerId, cb) => PeerInfo.create(peerId, cb),
(peerInfo, cb) => {
config = {
peerInfo,
modules: {
transport: [WS],
dht: DHT
}
}
cb()
}
], () => done())
})
describe('enforced network protection', () => {
before(() => {
process.env.LIBP2P_FORCE_PNET = 1
})
after(() => {
delete process.env.LIBP2P_FORCE_PNET
})
it('should throw an error without a provided protector', () => {
expect(() => {
return new Libp2p(config)
}).to.throw('Private network is enforced, but no protector was provided')
})
it('should create a libp2p node with a provided protector', () => {
let node
const protector = {
psk: '123',
tag: '/psk/1.0.0',
protect: () => { }
}
expect(() => {
const options = defaultsDeep(config, {
modules: {
connProtector: protector
}
})
node = new Libp2p(options)
return node
}).to.not.throw()
expect(node._switch.protector).to.deep.equal(protector)
})
it('should throw an error if the protector does not have a protect method', () => {
expect(() => {
const options = defaultsDeep(config, {
modules: {
connProtector: { }
}
})
return new Libp2p(options)
}).to.throw()
})
})
describe('network protection not enforced', () => {
it('should not throw an error with no provided protector', () => {
expect(() => {
return new Libp2p(config)
}).to.not.throw()
})
})
})


@ -1,5 +0,0 @@
{
"id": "QmeS1ou3mrjCFGoFtRx3MwrGDzqKD6xbuYJU1CKtMrtFFu",
"privKey": "CAASqAkwggSkAgEAAoIBAQChwzYwCNIyUkzEK3sILqq9ChAKZ9eU+ribY+B/xwAwDKPfvuqHq0hjauJBMcjiQyKAWz9xEBR3WupOM7h9M8oU+/e0xJUTt/CDOrtJ0PCgUXeBLkqsApbBoXW3yomHEDHxYttKzrtoTimiP1bhrxurcpVNC4CUYD+q8gw3sRZlsrqpeYYAfU04kS0BM75W/sUT90znnHvOxFXrEdMMdenEFhZOsDyEK9ENzwhkKgOGb18MBY4kN5DoW4bVd4ItfZnNwdkQtpP/X99tMWJxO4yqpngbywZGnkfirLeuRwt/xRGFVbLOigjBpTVpcbBqe1t2Flhuf/bfWYX4FbyElA5FAgMBAAECggEAJnDTcbrG6LpyD7QdeqZMYLwBb9eZfYfPUu37LaJGwyRd1Q/zf+YOP8HonoGMMWiuzD3i56Vgl7R9NbRIxUgHX9E43jZRDuyJNUZBt5r1c8OoWIR9rj63QLBz3wc8g2Iv3CMX5cEW/ASHFE1lAiCwvJ9wJ2zyU1BEEQWQLbPhlKzw7SLhr4fee45/7pnrKZMllt5vwC9pM6lrpIkICO5gUu0OWu5wfzzlTvfmCgfTb11VqKESEPbDBMUtpJibRqegE4xvipLklJ8VV8jz7NFs9bhgCpNM74Ngt5vGHcddeqtj//86UsClEw5YgWAdRe29ZjMApWvKIkginLjZEO8eiQKBgQDoDWii0rmlgBl1/8fENUSWxYvknGmWO7eWjVqMjDvA+waWUVDpTE+eHT1QAaPofM+nFz5PG+SpB55o4rXdxDesq+DqnaRAI9WtSHdgRtjgETyqoBAiahQ0zGWmSEYHGDB+xGctTMr8GxdhZxqZjjfyptp6oXXqZkmxgcogrx+WTwKBgQCydNDmCDpeH0kSvhAPxaNx5c9WkFEFSA0OCZOx57Y+Mt0MVamRILFrUrcMz095w8BQZkjlHjSHfsRgKa/b2eOd+3BhoMLZVtxRqBdpdqq1KTAcRRG4yA2KA39rttpVzaTV5SPfdDf3tsVlBtV784W63gVpN9gNfajyyrpeffiBKwKBgDnDrLprbl8uZigjhdznza0ie9JqxTXqo6bMhS/bcLx3QIqGr3eD0YXwjWSvI9gpyZ80gAQ9U0xoYxyE4vTTdXB8UL7Wgx6cTQKXuW+z8yTD5bArrBiFA4apItyjvRrjAJ9t0KlMJnNfYxCSE+MJrg+vTU+dhbbVw552SpScQ2atAoGBAKMu3rb6XyUiRpe05MsHVuYX1vi5Dt1dfVKQv1W3JJbLvAZDbsMeuh4BjRFRoMMflQPwBEg+zpn3+WpVtFG9dL5J5gHgF0zWeLDSnFX8BS2TdELlhccKaBcEC8hbdFtxqIFO/vaeN2902hv/m8e0b1zpGNmWDyKG/a7GYpV1a3/xAoGBAJtgGANDVk6qqcWGEVk56FH1ZksvgF3SPXWaXpzbZ5KLCcV5ooRyhowylKUZBBPowMeZ46tem2xwJbraB5kDg6WiSjBsXcbN95ivb8AuoRa6gDqAszjokQUSdpY7FTgMaL046AuihrKsQSly1jrQqbQu8JBgmnnBzus3s77inL/j",
"pubKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChwzYwCNIyUkzEK3sILqq9ChAKZ9eU+ribY+B/xwAwDKPfvuqHq0hjauJBMcjiQyKAWz9xEBR3WupOM7h9M8oU+/e0xJUTt/CDOrtJ0PCgUXeBLkqsApbBoXW3yomHEDHxYttKzrtoTimiP1bhrxurcpVNC4CUYD+q8gw3sRZlsrqpeYYAfU04kS0BM75W/sUT90znnHvOxFXrEdMMdenEFhZOsDyEK9ENzwhkKgOGb18MBY4kN5DoW4bVd4ItfZnNwdkQtpP/X99tMWJxO4yqpngbywZGnkfirLeuRwt/xRGFVbLOigjBpTVpcbBqe1t2Flhuf/bfWYX4FbyElA5FAgMBAAE="
}


@ -1,5 +0,0 @@
{
"id": "QmYWHGZ9y1Bzx59bBzn85JsJxwmpBy5bpXDWDfwMfsHsxz",
"privKey": "CAASqQkwggSlAgEAAoIBAQDLVaPqWFA8WgK6ixuPvhTHeQfBblmEFLEmraLlIDSWbMUPva6aJ1V/hi2I5QLXNeeiig5sco+nF+RKhGnzQ9NpgHRVZ7Ze+LWq3Q4YxONdzFeNUjTvJrDSKgkubA5EKC/LI6pU33WZbjyKkomGo+Gzuqvlj4Rx1dLVXRIOjxUYcIQw3vpLQgwPpiz52eWCeoCpzn06DcsF6aNPjhlp9uJRZCRxZ4yeiwh/A0xxiQtnB4fdZuUPmia1r62+oaxrDl4hUwR7kzHYl0YGfXxAW9GT17KGtjES2yO4kAUgquelNh0hgBKZRvny9imwsObG7ntw5ZG7H62sP7UySIUJqoNRAgMBAAECggEBAKLVU25BCQg7wQGokwra2wMfPoG+IDuw4mkqFlBNKS/prSo86c2TgFmel2qQk2TLS1OUIZbha38RmAXA4qQohe5wKzmV06tcmwdY/YgCbF5aXSbUVYXLQ0Ea3r1pVUdps1SHnElZpnCXoi4Kyc2kAgSPkkdFVnhfFvc9EE/Ob8NgMkdFhlosE5WVNqm4BKQ+mqONddSz4JDbDOApPs/rRpgYm7pJKc3vkrYwniPjyQGYb5EoSbSWuu31RzIcn3Bhte3wKtfMMlpn8MMpPiYo2WJ2eVG6hlUOxhHgS93Y6czCfAgsDtD3C2JpteewuBjg8N0d6WRArKxny83J34q0qy0CgYEA6YSo5UDEq1TF8sbtSVYg6MKSX92NO5MQI/8fTjU4tEwxn/yxpGsnqUu0WGYIc2qVaZuxtcnk2CQxEilxQTbWSIxKuTt7qofEcpSjLLQ4f4chk4DpPsba+S8zSUdWdjthPHZT9IYzobylGBLfbPxyXXiYn1VuqAJfFy8iV9XqmdcCgYEA3ukROQQZCJcgsNTc5uFAKUeQvzv1iae3fGawgJmIJW3Bl8+4dSm1diqG3ZXP1WU31no2aX50PqOZjoIpbl1ggT76cnBDuu3pItR3dNJFQyMEpQOWOjO+NBWF7sRswCvlqbyjofWkzsdd0BioL7vWMjPftiusyyAFA55HRoeStxcCgYEA0tP7rKdSKKFr6inhl+GT6rGod7bOSSgYXXd7qx9v55AXCauaMqiv8TAxTdIo9RMYfHWd91OlMeNTDmOuJcO9qVhIKn5iw266VPyPac/4ZmL5VHQBobTlhC4yLomirTIlMvJeEBmNygtIPrjjUUGGe49itA/szPD/Ky5Z4lV27pcCgYAWU3mqIELxnVFk5K0LYtwuRkC1Jqg9FVNHXnGnL7l3JjsRnXh4I6lNII1JfEvIr86b6LmybzvtWi1zHI5Rw4B68XfcJmpiOpnzJxyf0r+lLci1Tlqpka0nQlCbzYim5r6l9YLeIeBT5Zv7z7xoq4OUm6V4dX9lCNv3tM6mvcVwGQKBgQC9hhjD64/VKXL8wYKZyTAOVO5xYCcqylrpI39qdzl+sS8oqmLUbXnKsGY4If9U61XdULld41BJCRlv6CsKreynm6ZN41j9YRuWWLu8STJcniV9Ef9uVl1M1zo8kfnCHMCym9LkTfJY+Ow/kYhqPukJJL6ve1CVmIuA4rnZlshjbg==",
"pubKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLVaPqWFA8WgK6ixuPvhTHeQfBblmEFLEmraLlIDSWbMUPva6aJ1V/hi2I5QLXNeeiig5sco+nF+RKhGnzQ9NpgHRVZ7Ze+LWq3Q4YxONdzFeNUjTvJrDSKgkubA5EKC/LI6pU33WZbjyKkomGo+Gzuqvlj4Rx1dLVXRIOjxUYcIQw3vpLQgwPpiz52eWCeoCpzn06DcsF6aNPjhlp9uJRZCRxZ4yeiwh/A0xxiQtnB4fdZuUPmia1r62+oaxrDl4hUwR7kzHYl0YGfXxAW9GT17KGtjES2yO4kAUgquelNh0hgBKZRvny9imwsObG7ntw5ZG7H62sP7UySIUJqoNRAgMBAAE="
}

test/pnet/index.spec.js (new file, 94 lines)

@ -0,0 +1,94 @@
/* eslint-env mocha */
'use strict'
const chai = require('chai')
const dirtyChai = require('dirty-chai')
chai.use(dirtyChai)
const expect = chai.expect
const duplexPair = require('it-pair/duplex')
const pipe = require('it-pipe')
const { collect } = require('streaming-iterables')
const Protector = require('../../src/pnet')
const Errors = Protector.errors
const generate = Protector.generate
const swarmKeyBuffer = Buffer.alloc(95)
const wrongSwarmKeyBuffer = Buffer.alloc(95)
// Write new psk files to the buffers
generate(swarmKeyBuffer)
generate(wrongSwarmKeyBuffer)
describe('private network', () => {
it('should accept a valid psk buffer', () => {
const protector = new Protector(swarmKeyBuffer)
expect(protector.tag).to.equal('/key/swarm/psk/1.0.0/')
expect(protector.psk.byteLength).to.equal(32)
})
it('should protect a simple connection', async () => {
const [inbound, outbound] = duplexPair()
const protector = new Protector(swarmKeyBuffer)
const [aToB, bToA] = await Promise.all([
protector.protect(inbound),
protector.protect(outbound)
])
pipe(
[Buffer.from('hello world'), Buffer.from('doo dah')],
aToB
)
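// Collect the output, copying each chunk to a plain Buffer so it can be deep-compared below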
const output = await pipe(
bToA,
source => (async function * () {
for await (const chunk of source) {
yield chunk.slice()
}
})(),
collect
)
expect(output).to.eql([Buffer.from('hello world'), Buffer.from('doo dah')])
})
it('should not be able to share correct data with different keys', async () => {
const [inbound, outbound] = duplexPair()
const protector = new Protector(swarmKeyBuffer)
const protectorB = new Protector(wrongSwarmKeyBuffer)
const [aToB, bToA] = await Promise.all([
protector.protect(inbound),
protectorB.protect(outbound)
])
pipe(
[Buffer.from('hello world'), Buffer.from('doo dah')],
aToB
)
const output = await pipe(
bToA,
collect
)
expect(output).to.not.eql([Buffer.from('hello world'), Buffer.from('doo dah')])
})
describe('invalid psks', () => {
it('should not accept a bad psk', () => {
expect(() => {
return new Protector(Buffer.from('not-a-key'))
}).to.throw(Errors.INVALID_PSK)
})
it('should not accept a psk of incorrect length', () => {
expect(() => {
return new Protector(Buffer.from('/key/swarm/psk/1.0.0/\n/base16/\ndffb7e'))
}).to.throw(Errors.INVALID_PSK)
})
})
})


@ -1,105 +0,0 @@
/* eslint max-nested-callbacks: ["error", 8] */
/* eslint-env mocha */
'use strict'
const chai = require('chai')
const dirtyChai = require('dirty-chai')
chai.use(dirtyChai)
const expect = chai.expect
const parallel = require('async/parallel')
const PeerId = require('peer-id')
const Connection = require('interface-connection').Connection
const pair = require('pull-pair/duplex')
const pull = require('pull-stream')
const Protector = require('../../src/pnet')
const Errors = Protector.errors
const generate = Protector.generate
const swarmKeyBuffer = Buffer.alloc(95)
const wrongSwarmKeyBuffer = Buffer.alloc(95)
// Write new psk files to the buffers
generate(swarmKeyBuffer)
generate(wrongSwarmKeyBuffer)
describe('private network', () => {
before((done) => {
parallel([
(cb) => PeerId.createFromJSON(require('./fixtures/peer-a'), cb),
(cb) => PeerId.createFromJSON(require('./fixtures/peer-b'), cb)
], (err) => {
expect(err).to.not.exist()
done()
})
})
it('should accept a valid psk buffer', () => {
const protector = new Protector(swarmKeyBuffer)
expect(protector.tag).to.equal('/key/swarm/psk/1.0.0/')
expect(protector.psk.byteLength).to.equal(32)
})
it('should protect a simple connection', (done) => {
const p = pair()
const protector = new Protector(swarmKeyBuffer)
const aToB = protector.protect(new Connection(p[0]), (err) => {
expect(err).to.not.exist()
})
const bToA = protector.protect(new Connection(p[1]), (err) => {
expect(err).to.not.exist()
})
pull(
pull.values([Buffer.from('hello world'), Buffer.from('doo dah')]),
aToB
)
pull(
bToA,
pull.collect((err, chunks) => {
expect(err).to.not.exist()
expect(chunks).to.eql([Buffer.from('hello world'), Buffer.from('doo dah')])
done()
})
)
})
it('should not connect to a peer with a different key', (done) => {
const p = pair()
const protector = new Protector(swarmKeyBuffer)
const protectorB = new Protector(wrongSwarmKeyBuffer)
const aToB = protector.protect(new Connection(p[0]), () => { })
const bToA = protectorB.protect(new Connection(p[1]), () => { })
pull(
pull.values([Buffer.from('hello world'), Buffer.from('doo dah')]),
aToB
)
pull(
bToA,
pull.collect((values) => {
expect(values).to.equal(null)
done()
})
)
})
describe('invalid psks', () => {
it('should not accept a bad psk', () => {
expect(() => {
return new Protector(Buffer.from('not-a-key'))
}).to.throw(Errors.INVALID_PSK)
})
it('should not accept a psk of incorrect length', () => {
expect(() => {
return new Protector(Buffer.from('/key/swarm/psk/1.0.0/\n/base16/\ndffb7e'))
}).to.throw(Errors.INVALID_PSK)
})
})
})


@ -1,87 +0,0 @@
/* eslint-env mocha */
'use strict'
/**
* This test suite is intended to validate compatibility of
* the promisified API, until libp2p has been fully migrated to
* async/await. Once the migration is complete and all tests
* are using async/await, this file can be removed.
*/
const chai = require('chai')
chai.use(require('dirty-chai'))
const expect = chai.expect
const promisify = require('promisify-es6')
const createNode = promisify(require('./utils/create-node'))
const { createPeerInfo } = require('./utils/create-node')
const Node = require('./utils/bundle-nodejs')
const pull = require('pull-stream')
const Ping = require('../src/ping')
/**
* As libp2p is currently promisified, when extending libp2p,
* method arguments must be passed to `super` to ensure the
* promisify callbacks are properly resolved
*/
class AsyncLibp2p extends Node {
async start (...args) {
await super.start(...args)
}
async stop (...args) {
await super.stop(...args)
}
}
async function createAsyncNode () {
const peerInfo = await promisify(createPeerInfo)()
peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
return new AsyncLibp2p({ peerInfo })
}
describe('promisified libp2p', () => {
let libp2p
let otherNode
const ECHO_PROTO = '/echo/1.0.0'
before('Create and Start', async () => {
[libp2p, otherNode] = await Promise.all([
createNode('/ip4/0.0.0.0/tcp/0'),
createAsyncNode()
])
return [libp2p, otherNode].map(node => {
node.handle(ECHO_PROTO, (_, conn) => pull(conn, conn))
return node.start()
})
})
after('Stop', () => {
return [libp2p, otherNode].map(node => node.stop())
})
afterEach('Hang up', () => {
return libp2p.hangUp(otherNode.peerInfo)
})
it('dial', async () => {
const stream = await libp2p.dial(otherNode.peerInfo)
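// dial without a protocol is not expected to yield a stream; dialProtocol is used for that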
expect(stream).to.not.exist()
expect(libp2p._switch.connection.getAll()).to.have.length(1)
})
it('dialFSM', async () => {
const connectionFSM = await libp2p.dialFSM(otherNode.peerInfo, ECHO_PROTO)
expect(connectionFSM).to.exist()
})
it('dialProtocol', async () => {
const stream = await libp2p.dialProtocol(otherNode.peerInfo, ECHO_PROTO)
expect(stream).to.exist()
})
it('ping', async () => {
const ping = await libp2p.ping(otherNode.peerInfo)
expect(ping).to.be.an.instanceOf(Ping)
})
})


@ -1,467 +0,0 @@
/* eslint-env mocha */
/* eslint max-nested-callbacks: ["error", 8] */
'use strict'
const chai = require('chai')
chai.use(require('dirty-chai'))
chai.use(require('chai-checkmark'))
const expect = chai.expect
const parallel = require('async/parallel')
const series = require('async/series')
const _times = require('lodash.times')
const promisify = require('promisify-es6')
const delay = require('delay')
const Floodsub = require('libp2p-floodsub')
const mergeOptions = require('merge-options')
const { codes } = require('../src/errors')
const createNode = require('./utils/create-node')
function startTwo (options, callback) {
if (typeof options === 'function') {
callback = options
options = {}
}
const tasks = _times(2, () => (cb) => {
createNode('/ip4/0.0.0.0/tcp/0', mergeOptions({
config: {
peerDiscovery: {
mdns: {
enabled: false
}
},
pubsub: {
enabled: true
}
}
}, options), (err, node) => {
expect(err).to.not.exist()
node.start((err) => cb(err, node))
})
})
parallel(tasks, (err, nodes) => {
expect(err).to.not.exist()
nodes[0].dial(nodes[1].peerInfo, (err) => callback(err, nodes))
})
}
function stopTwo (nodes, callback) {
parallel([
(cb) => nodes[0].stop(cb),
(cb) => nodes[1].stop(cb)
], callback)
}
describe('.pubsub', () => {
describe('.pubsub on (default)', () => {
it('start two nodes and send one message, then unsubscribe', (done) => {
// Check the final series error, and the publish handler
expect(2).checks(done)
let nodes
const data = 'test'
const handler = (msg) => {
// verify the data is correct and mark the expect
expect(msg.data.toString()).to.eql(data).mark()
}
series([
// Start the nodes
(cb) => startTwo((err, _nodes) => {
nodes = _nodes
cb(err)
}),
// subscribe on the first
(cb) => nodes[0].pubsub.subscribe('pubsub', handler, null, cb),
// Wait a moment before publishing
(cb) => setTimeout(cb, 500),
// publish on the second
(cb) => nodes[1].pubsub.publish('pubsub', data, cb),
// Wait a moment before unsubscribing
(cb) => setTimeout(cb, 500),
// unsubscribe on the first
(cb) => nodes[0].pubsub.unsubscribe('pubsub', handler, cb),
// Stop both nodes
(cb) => stopTwo(nodes, cb)
], (err) => {
// Verify there was no error, and mark the expect
expect(err).to.not.exist().mark()
})
})
it('start two nodes and send one message, then unsubscribe without handler', (done) => {
// Check the final series error, and the publish handler
expect(3).checks(done)
let nodes
const data = Buffer.from('test')
const handler = (msg) => {
// verify the data is correct and mark the expect
expect(msg.data).to.eql(data).mark()
}
series([
// Start the nodes
(cb) => startTwo((err, _nodes) => {
nodes = _nodes
cb(err)
}),
// subscribe on the first
(cb) => nodes[0].pubsub.subscribe('pubsub', handler, {}, cb),
// Wait a moment before publishing
(cb) => setTimeout(cb, 500),
// publish on the second
(cb) => nodes[1].pubsub.publish('pubsub', data, cb),
// ls subscriptions
(cb) => nodes[1].pubsub.ls(cb),
// get subscribed peers
(cb) => nodes[1].pubsub.peers('pubsub', cb),
// Wait a moment before unsubscribing
(cb) => setTimeout(cb, 500),
// unsubscribe from all
(cb) => nodes[0].pubsub.unsubscribe('pubsub', null, cb),
// Verify unsubscribed
(cb) => {
nodes[0].pubsub.ls((err, topics) => {
expect(topics.length).to.eql(0).mark()
cb(err)
})
},
// Stop both nodes
(cb) => stopTwo(nodes, cb)
], (err) => {
// Verify there was no error, and mark the expect
expect(err).to.not.exist().mark()
})
})
it('publish should fail if data is neither a buffer nor a string', (done) => {
createNode('/ip4/0.0.0.0/tcp/0', {
config: {
peerDiscovery: {
mdns: {
enabled: false
}
},
pubsub: {
enabled: true
}
}
}, (err, node) => {
expect(err).to.not.exist()
node.start((err) => {
expect(err).to.not.exist()
node.pubsub.publish('pubsub', 10, (err) => {
expect(err).to.exist()
expect(err.code).to.equal('ERR_DATA_IS_NOT_VALID')
done()
})
})
})
})
})
describe('.pubsub on using floodsub', () => {
it('start two nodes and send one message, then unsubscribe', (done) => {
// Check the final series error, and the publish handler
expect(2).checks(done)
let nodes
const data = Buffer.from('test')
const handler = (msg) => {
// verify the data is correct and mark the expect
expect(msg.data).to.eql(data).mark()
}
series([
// Start the nodes
(cb) => startTwo({
modules: {
pubsub: Floodsub
}
}, (err, _nodes) => {
nodes = _nodes
cb(err)
}),
// subscribe on the first
(cb) => nodes[0].pubsub.subscribe('pubsub', handler, cb),
// Wait a moment before publishing
(cb) => setTimeout(cb, 500),
// publish on the second
(cb) => nodes[1].pubsub.publish('pubsub', data, cb),
// Wait a moment before unsubscribing
(cb) => setTimeout(cb, 500),
// unsubscribe on the first
(cb) => nodes[0].pubsub.unsubscribe('pubsub', handler, cb),
// Stop both nodes
(cb) => stopTwo(nodes, cb)
], (err) => {
// Verify there was no error, and mark the expect
expect(err).to.not.exist().mark()
})
})
it('start two nodes and send one message, then unsubscribe (promises)', async () => {
let messageReceived
const data = Buffer.from('test')
const handler = (msg) => {
expect(msg.data).to.eql(data)
messageReceived = true
}
// Start the nodes
const nodes = await promisify(startTwo)({
modules: {
pubsub: Floodsub
}
})
// subscribe on the first
await nodes[0].pubsub.subscribe('pubsub', handler)
// Wait a moment before publishing
await delay(500)
// publish on the second
await nodes[1].pubsub.publish('pubsub', data)
// Wait a moment before unsubscribing
await delay(500)
// unsubscribe on the first
await nodes[0].pubsub.unsubscribe('pubsub', handler)
// Stop both nodes
await promisify(stopTwo)(nodes)
expect(messageReceived).to.be.true()
})
it('start two nodes and send one message, then unsubscribe without handler', (done) => {
// Check the final series error, and the publish handler
expect(3).checks(done)
let nodes
const data = Buffer.from('test')
const handler = (msg) => {
// verify the data is correct and mark the expect
expect(msg.data).to.eql(data).mark()
}
series([
// Start the nodes
(cb) => startTwo({
modules: {
pubsub: Floodsub
}
}, (err, _nodes) => {
nodes = _nodes
cb(err)
}),
// subscribe on the first
(cb) => nodes[0].pubsub.subscribe('pubsub', handler, cb),
// Wait a moment before publishing
(cb) => setTimeout(cb, 500),
// publish on the second
(cb) => nodes[1].pubsub.publish('pubsub', data, cb),
// Wait a moment before unsubscribing
(cb) => setTimeout(cb, 500),
// unsubscribe from all
(cb) => nodes[0].pubsub.unsubscribe('pubsub', null, cb),
// Verify unsubscribed
(cb) => {
nodes[0].pubsub.ls((err, topics) => {
expect(topics.length).to.eql(0).mark()
cb(err)
})
},
// Stop both nodes
(cb) => stopTwo(nodes, cb)
], (err) => {
// Verify there was no error, and mark the expect
expect(err).to.not.exist().mark()
})
})
it('publish should fail if data is not a buffer', (done) => {
createNode('/ip4/0.0.0.0/tcp/0', {
config: {
peerDiscovery: {
mdns: {
enabled: false
}
},
pubsub: {
enabled: true
}
},
modules: {
pubsub: Floodsub
}
}, (err, node) => {
expect(err).to.not.exist()
node.start((err) => {
expect(err).to.not.exist()
node.pubsub.publish('pubsub', 10, (err) => {
expect(err).to.exist()
expect(err.code).to.equal('ERR_DATA_IS_NOT_VALID')
done()
})
})
})
})
})
describe('.pubsub off', () => {
it('fail to use pubsub if disabled', (done) => {
createNode('/ip4/0.0.0.0/tcp/0', {
config: {
peerDiscovery: {
mdns: {
enabled: false
}
}
}
}, (err, node) => {
expect(err).to.not.exist()
expect(node.pubsub).to.not.exist()
done()
})
})
})
describe('.pubsub on and node not started', () => {
let libp2pNode
before(function (done) {
createNode('/ip4/0.0.0.0/tcp/0', {
config: {
peerDiscovery: {
mdns: {
enabled: false
}
},
pubsub: {
enabled: true
}
}
}, (err, node) => {
expect(err).to.not.exist()
libp2pNode = node
done()
})
})
it('fail to subscribe if node not started yet', (done) => {
libp2pNode.pubsub.subscribe('pubsub', () => { }, (err) => {
expect(err).to.exist()
expect(err.code).to.equal(codes.PUBSUB_NOT_STARTED)
done()
})
})
it('fail to unsubscribe if node not started yet', (done) => {
libp2pNode.pubsub.unsubscribe('pubsub', () => { }, (err) => {
expect(err).to.exist()
expect(err.code).to.equal(codes.PUBSUB_NOT_STARTED)
done()
})
})
it('fail to publish if node not started yet', (done) => {
libp2pNode.pubsub.publish('pubsub', Buffer.from('data'), (err) => {
expect(err).to.exist()
expect(err.code).to.equal(codes.PUBSUB_NOT_STARTED)
done()
})
})
it('fail to ls if node not started yet', (done) => {
libp2pNode.pubsub.ls((err) => {
expect(err).to.exist()
expect(err.code).to.equal(codes.PUBSUB_NOT_STARTED)
done()
})
})
it('fail to get subscribed peers to a topic if node not started yet', (done) => {
libp2pNode.pubsub.peers('pubsub', (err) => {
expect(err).to.exist()
expect(err.code).to.equal(codes.PUBSUB_NOT_STARTED)
done()
})
})
})
describe('.pubsub config', () => {
it('toggle all pubsub options off (except enabled)', done => {
expect(3).checks(done)
class PubSubSpy {
constructor (node, config) {
expect(config).to.be.eql({
enabled: true,
emitSelf: false,
signMessages: false,
strictSigning: false
}).mark()
}
}
createNode('/ip4/0.0.0.0/tcp/0', {
modules: {
pubsub: PubSubSpy
},
config: {
pubsub: {
enabled: true,
emitSelf: false,
signMessages: false,
strictSigning: false
}
}
}, (err, node) => {
expect(err).to.not.exist().mark()
expect(node).to.exist().mark()
})
})
it('toggle all pubsub options on', done => {
expect(3).checks(done)
class PubSubSpy {
constructor (node, config) {
expect(config).to.be.eql({
enabled: true,
emitSelf: true,
signMessages: true,
strictSigning: true
}).mark()
}
}
createNode('/ip4/0.0.0.0/tcp/0', {
modules: {
pubsub: PubSubSpy
},
config: {
pubsub: {
enabled: true,
emitSelf: true,
signMessages: true,
strictSigning: true
}
}
}, (err, node) => {
expect(err).to.not.exist().mark()
expect(node).to.exist().mark()
})
})
})
})


@ -0,0 +1,92 @@
'use strict'
/* eslint-env mocha */
const chai = require('chai')
chai.use(require('dirty-chai'))
const { expect } = chai
const mergeOptions = require('merge-options')
const multiaddr = require('multiaddr')
const { create } = require('../../src')
const { baseOptions, subsystemOptions } = require('./utils')
const peerUtils = require('../utils/creators/peer')
const listenAddr = multiaddr('/ip4/127.0.0.1/tcp/0')
describe('Pubsub subsystem is configurable', () => {
let libp2p
afterEach(async () => {
libp2p && await libp2p.stop()
})
it('should not exist if no module is provided', async () => {
libp2p = await create(baseOptions)
expect(libp2p.pubsub).to.not.exist()
})
it('should exist if the module is provided', async () => {
libp2p = await create(subsystemOptions)
expect(libp2p.pubsub).to.exist()
})
it('should start and stop by default once libp2p starts', async () => {
const [peerInfo] = await peerUtils.createPeerInfoFromFixture(1)
peerInfo.multiaddrs.add(listenAddr)
const customOptions = mergeOptions(subsystemOptions, {
peerInfo
})
libp2p = await create(customOptions)
expect(libp2p.pubsub._pubsub.started).to.equal(false)
await libp2p.start()
expect(libp2p.pubsub._pubsub.started).to.equal(true)
await libp2p.stop()
expect(libp2p.pubsub._pubsub.started).to.equal(false)
})
it('should not start if disabled once libp2p starts', async () => {
const [peerInfo] = await peerUtils.createPeerInfoFromFixture(1)
peerInfo.multiaddrs.add(listenAddr)
const customOptions = mergeOptions(subsystemOptions, {
peerInfo,
config: {
pubsub: {
enabled: false
}
}
})
libp2p = await create(customOptions)
expect(libp2p.pubsub._pubsub.started).to.equal(false)
await libp2p.start()
expect(libp2p.pubsub._pubsub.started).to.equal(false)
})
it('should allow a manual start', async () => {
const [peerInfo] = await peerUtils.createPeerInfoFromFixture(1)
peerInfo.multiaddrs.add(listenAddr)
const customOptions = mergeOptions(subsystemOptions, {
peerInfo,
config: {
pubsub: {
enabled: false
}
}
})
libp2p = await create(customOptions)
await libp2p.start()
expect(libp2p.pubsub._pubsub.started).to.equal(false)
await libp2p.pubsub.start()
expect(libp2p.pubsub._pubsub.started).to.equal(true)
})
})


@ -0,0 +1,95 @@
'use strict'
/* eslint-env mocha */
const chai = require('chai')
chai.use(require('dirty-chai'))
const { expect } = chai
const pWaitFor = require('p-wait-for')
const pDefer = require('p-defer')
const mergeOptions = require('merge-options')
const Floodsub = require('libp2p-floodsub')
const Gossipsub = require('libp2p-gossipsub')
const { multicodec: floodsubMulticodec } = require('libp2p-floodsub')
const { multicodec: gossipsubMulticodec } = require('libp2p-gossipsub')
const multiaddr = require('multiaddr')
const { create } = require('../../src')
const { baseOptions } = require('./utils')
const peerUtils = require('../utils/creators/peer')
const listenAddr = multiaddr('/ip4/127.0.0.1/tcp/0')
const remoteListenAddr = multiaddr('/ip4/127.0.0.1/tcp/0')
describe('Pubsub subsystem is able to use different implementations', () => {
let peerInfo, remotePeerInfo
let libp2p, remoteLibp2p
let remAddr
beforeEach(async () => {
[peerInfo, remotePeerInfo] = await peerUtils.createPeerInfoFromFixture(2)
peerInfo.multiaddrs.add(listenAddr)
remotePeerInfo.multiaddrs.add(remoteListenAddr)
})
afterEach(() => Promise.all([
libp2p && libp2p.stop(),
remoteLibp2p && remoteLibp2p.stop()
]))
it('Floodsub nodes', () => {
return pubsubTest(floodsubMulticodec, Floodsub)
})
it('Gossipsub nodes', () => {
return pubsubTest(gossipsubMulticodec, Gossipsub)
})
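// Shared scenario: create two nodes with the given pubsub implementation, connect them, subscribe on one and publish from the other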
const pubsubTest = async (multicodec, pubsub) => {
const defer = pDefer()
const topic = 'test-topic'
const data = 'hey!'
libp2p = await create(mergeOptions(baseOptions, {
peerInfo,
modules: {
pubsub: pubsub
}
}))
remoteLibp2p = await create(mergeOptions(baseOptions, {
peerInfo: remotePeerInfo,
modules: {
pubsub: pubsub
}
}))
await Promise.all([
libp2p.start(),
remoteLibp2p.start()
])
const libp2pId = libp2p.peerInfo.id.toB58String()
remAddr = remoteLibp2p.transportManager.getAddrs()[0]
const connection = await libp2p.dialProtocol(remAddr, multicodec)
expect(connection).to.exist()
libp2p.pubsub.subscribe(topic, (msg) => {
expect(msg.data.toString()).to.equal(data)
defer.resolve()
})
// wait for remoteLibp2p to know about libp2p subscription
await pWaitFor(() => {
const subscribedPeers = remoteLibp2p.pubsub.getPeersSubscribed(topic)
return subscribedPeers.includes(libp2pId)
})
remoteLibp2p.pubsub.publish(topic, data)
await defer.promise
}
})


@ -0,0 +1,184 @@
'use strict'
/* eslint-env mocha */
const chai = require('chai')
chai.use(require('dirty-chai'))
const { expect } = chai
const sinon = require('sinon')
const pWaitFor = require('p-wait-for')
const pDefer = require('p-defer')
const mergeOptions = require('merge-options')
const multiaddr = require('multiaddr')
const { create } = require('../../src')
const { subsystemOptions, subsystemMulticodecs } = require('./utils')
const peerUtils = require('../utils/creators/peer')
const listenAddr = multiaddr('/ip4/127.0.0.1/tcp/0')
const remoteListenAddr = multiaddr('/ip4/127.0.0.1/tcp/0')
describe('Pubsub subsystem operates correctly', () => {
let peerInfo, remotePeerInfo
let libp2p, remoteLibp2p
let remAddr
beforeEach(async () => {
[peerInfo, remotePeerInfo] = await peerUtils.createPeerInfoFromFixture(2)
peerInfo.multiaddrs.add(listenAddr)
remotePeerInfo.multiaddrs.add(remoteListenAddr)
})
describe('pubsub started before connect', () => {
beforeEach(async () => {
libp2p = await create(mergeOptions(subsystemOptions, {
peerInfo
}))
remoteLibp2p = await create(mergeOptions(subsystemOptions, {
peerInfo: remotePeerInfo
}))
await Promise.all([
libp2p.start(),
remoteLibp2p.start()
])
remAddr = remoteLibp2p.transportManager.getAddrs()[0]
})
afterEach(() => Promise.all([
libp2p && libp2p.stop(),
remoteLibp2p && remoteLibp2p.stop()
]))
afterEach(() => {
sinon.restore()
})
it('should get notified of connected peers on dial', async () => {
sinon.spy(libp2p.registrar, 'onConnect')
sinon.spy(remoteLibp2p.registrar, 'onConnect')
const connection = await libp2p.dialProtocol(remAddr, subsystemMulticodecs)
expect(connection).to.exist()
expect(libp2p.pubsub._pubsub.peers.size).to.be.eql(1)
expect(remoteLibp2p.pubsub._pubsub.peers.size).to.be.eql(1)
expect(libp2p.registrar.onConnect.callCount).to.equal(1)
expect(remoteLibp2p.registrar.onConnect.callCount).to.equal(1)
})
it('should receive pubsub messages', async () => {
const defer = pDefer()
const topic = 'test-topic'
const data = 'hey!'
const libp2pId = libp2p.peerInfo.id.toB58String()
await libp2p.dialProtocol(remAddr, subsystemMulticodecs)
let subscribedTopics = libp2p.pubsub.getTopics()
expect(subscribedTopics).to.not.include(topic)
libp2p.pubsub.subscribe(topic, (msg) => {
expect(msg.data.toString()).to.equal(data)
defer.resolve()
})
subscribedTopics = libp2p.pubsub.getTopics()
expect(subscribedTopics).to.include(topic)
// wait for remoteLibp2p to know about libp2p subscription
await pWaitFor(() => {
const subscribedPeers = remoteLibp2p.pubsub.getPeersSubscribed(topic)
return subscribedPeers.includes(libp2pId)
})
remoteLibp2p.pubsub.publish(topic, data)
await defer.promise
})
})
describe('pubsub started after connect', () => {
beforeEach(async () => {
libp2p = await create(mergeOptions(subsystemOptions, {
peerInfo
}))
remoteLibp2p = await create(mergeOptions(subsystemOptions, {
peerInfo: remotePeerInfo,
config: {
pubsub: {
enabled: false
}
}
}))
await libp2p.start()
await remoteLibp2p.start()
remAddr = remoteLibp2p.transportManager.getAddrs()[0]
})
afterEach(() => Promise.all([
libp2p && libp2p.stop(),
remoteLibp2p && remoteLibp2p.stop()
]))
afterEach(() => {
sinon.restore()
})
it('should get notified of connected peers after starting', async () => {
const connection = await libp2p.dial(remAddr)
expect(connection).to.exist()
expect(libp2p.pubsub._pubsub.peers.size).to.be.eql(0)
expect(remoteLibp2p.pubsub._pubsub.peers.size).to.be.eql(0)
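// Start the remote pubsub router; the registrar should notify it of the already established connection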
remoteLibp2p.pubsub.start()
await pWaitFor(() => libp2p.pubsub._pubsub.peers.size === 1)
expect(libp2p.pubsub._pubsub.peers.size).to.be.eql(1)
expect(remoteLibp2p.pubsub._pubsub.peers.size).to.be.eql(1)
})
it('should receive pubsub messages', async function () {
this.timeout(10e3)
const defer = pDefer()
const libp2pId = libp2p.peerInfo.id.toB58String()
const topic = 'test-topic'
const data = 'hey!'
await libp2p.dial(remAddr)
remoteLibp2p.pubsub.start()
await pWaitFor(() => libp2p.pubsub._pubsub.peers.size === 1)
let subscribedTopics = libp2p.pubsub.getTopics()
expect(subscribedTopics).to.not.include(topic)
libp2p.pubsub.subscribe(topic, (msg) => {
expect(msg.data.toString()).to.equal(data)
defer.resolve()
})
subscribedTopics = libp2p.pubsub.getTopics()
expect(subscribedTopics).to.include(topic)
// wait for remoteLibp2p to know about libp2p subscription
await pWaitFor(() => {
const subscribedPeers = remoteLibp2p.pubsub.getPeersSubscribed(topic)
return subscribedPeers.includes(libp2pId)
})
remoteLibp2p.pubsub.publish(topic, data)
await defer.promise
})
})
})

test/pubsub/utils.js (new file, 29 lines)

@ -0,0 +1,29 @@
'use strict'
const Gossipsub = require('libp2p-gossipsub')
const { multicodec } = require('libp2p-gossipsub')
const Crypto = require('../../src/insecure/plaintext')
const Muxer = require('libp2p-mplex')
const Transport = require('libp2p-tcp')
const mergeOptions = require('merge-options')
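// Base options: TCP transport, mplex stream muxing and plaintext connection encryption (no pubsub module)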
const baseOptions = {
modules: {
transport: [Transport],
streamMuxer: [Muxer],
connEncryption: [Crypto]
}
}
module.exports.baseOptions = baseOptions
const subsystemOptions = mergeOptions(baseOptions, {
modules: {
pubsub: Gossipsub
}
})
module.exports.subsystemOptions = subsystemOptions
module.exports.subsystemMulticodecs = [multicodec]


@ -0,0 +1,57 @@
'use strict'
/* eslint-env mocha */
const chai = require('chai')
chai.use(require('dirty-chai'))
const { expect } = chai
const sinon = require('sinon')
const mergeOptions = require('merge-options')
const multiaddr = require('multiaddr')
const Libp2p = require('../../src')
const baseOptions = require('../utils/base-options')
const peerUtils = require('../utils/creators/peer')
const listenAddr = multiaddr('/ip4/127.0.0.1/tcp/0')
describe('registrar on dial', () => {
let peerInfo
let remotePeerInfo
let libp2p
let remoteLibp2p
let remoteAddr
before(async () => {
[peerInfo, remotePeerInfo] = await peerUtils.createPeerInfoFromFixture(2)
remoteLibp2p = new Libp2p(mergeOptions(baseOptions, {
peerInfo: remotePeerInfo
}))
await remoteLibp2p.transportManager.listen([listenAddr])
remoteAddr = remoteLibp2p.transportManager.getAddrs()[0]
})
after(async () => {
sinon.restore()
await remoteLibp2p.stop()
libp2p && await libp2p.stop()
})
it('should inform registrar of a new connection', async () => {
libp2p = new Libp2p(mergeOptions(baseOptions, {
peerInfo
}))
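// Spy on the remote registrar to verify it is informed of the inbound connection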
sinon.spy(remoteLibp2p.registrar, 'onConnect')
await libp2p.dial(remoteAddr)
expect(remoteLibp2p.registrar.onConnect.callCount).to.equal(1)
const libp2pConn = libp2p.registrar.getConnection(remotePeerInfo)
expect(libp2pConn).to.exist()
const remoteConn = remoteLibp2p.registrar.getConnection(peerInfo)
expect(remoteConn).to.exist()
})
})
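The registrar calls asserted in this test are the same ones a subsystem uses at runtime; a rough sketch, where `node` is a hypothetical started libp2p instance, `remotePeerInfo` a peer it has already dialled, and '/my/protocol/1.0.0' a placeholder multicodec:

// Sketch only: reuse an existing connection tracked by the registrar
// instead of dialling the peer again.
const connection = node.registrar.getConnection(remotePeerInfo)
if (connection) {
  const { stream, protocol } = await connection.newStream(['/my/protocol/1.0.0'])
  // `stream` is an iterable duplex negotiated for `protocol`
}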


@@ -0,0 +1,186 @@
'use strict'
/* eslint-env mocha */
const chai = require('chai')
chai.use(require('dirty-chai'))
const { expect } = chai
const pDefer = require('p-defer')
const PeerInfo = require('peer-info')
const Topology = require('libp2p-interfaces/src/topology/multicodec-topology')
const PeerStore = require('../../src/peer-store')
const Registrar = require('../../src/registrar')
const { createMockConnection } = require('./utils')
const multicodec = '/test/1.0.0'
describe('registrar', () => {
let peerStore, registrar
describe('errors', () => {
beforeEach(() => {
peerStore = new PeerStore()
registrar = new Registrar({ peerStore })
})
it('should fail to register a protocol if no multicodec is provided', () => {
try {
registrar.register()
} catch (err) {
expect(err).to.exist()
return
}
throw new Error('should fail to register a protocol if no multicodec is provided')
})
it('should fail to register a protocol if an invalid topology is provided', () => {
const fakeTopology = {
random: 1
}
try {
registrar.register(fakeTopology)
} catch (err) {
expect(err).to.exist()
return
}
throw new Error('should fail to register a protocol if an invalid topology is provided')
})
})
describe('registration', () => {
beforeEach(() => {
peerStore = new PeerStore()
registrar = new Registrar({ peerStore })
})
it('should be able to register a protocol', () => {
const topologyProps = new Topology({
multicodecs: multicodec,
handlers: {
onConnect: () => { },
onDisconnect: () => { }
}
})
const identifier = registrar.register(topologyProps)
expect(identifier).to.exist()
})
it('should be able to unregister a protocol', () => {
const topologyProps = new Topology({
multicodecs: multicodec,
handlers: {
onConnect: () => { },
onDisconnect: () => { }
}
})
const identifier = registrar.register(topologyProps)
const success = registrar.unregister(identifier)
expect(success).to.eql(true)
})
it('should fail to unregister if no register was made', () => {
const success = registrar.unregister('bad-identifier')
expect(success).to.eql(false)
})
it('should call onConnect handler for connected peers after register', async () => {
const onConnectDefer = pDefer()
const onDisconnectDefer = pDefer()
// Setup connections before registrar
const conn = await createMockConnection()
const remotePeerInfo = await PeerInfo.create(conn.remotePeer)
// Add protocol to peer
remotePeerInfo.protocols.add(multicodec)
// Add connected peer to peerStore and registrar
peerStore.put(remotePeerInfo)
registrar.onConnect(remotePeerInfo, conn)
expect(registrar.connections.size).to.eql(1)
const topologyProps = new Topology({
multicodecs: multicodec,
handlers: {
onConnect: (peerInfo, connection) => {
expect(peerInfo.id.toB58String()).to.eql(remotePeerInfo.id.toB58String())
expect(connection.id).to.eql(conn.id)
onConnectDefer.resolve()
},
onDisconnect: (peerInfo) => {
expect(peerInfo.id.toB58String()).to.eql(remotePeerInfo.id.toB58String())
onDisconnectDefer.resolve()
}
}
})
// Register protocol
const identifier = registrar.register(topologyProps)
const topology = registrar.topologies.get(identifier)
// Topology created
expect(topology).to.exist()
registrar.onDisconnect(remotePeerInfo)
expect(registrar.connections.size).to.eql(0)
// Wait for handlers to be called
return Promise.all([
onConnectDefer.promise,
onDisconnectDefer.promise
])
})
it('should call onConnect handler after register, once a peer is connected and protocols are updated', async () => {
const onConnectDefer = pDefer()
const onDisconnectDefer = pDefer()
const topologyProps = new Topology({
multicodecs: multicodec,
handlers: {
onConnect: () => {
onConnectDefer.resolve()
},
onDisconnect: () => {
onDisconnectDefer.resolve()
}
}
})
// Register protocol
const identifier = registrar.register(topologyProps)
const topology = registrar.topologies.get(identifier)
// Topology created
expect(topology).to.exist()
expect(registrar.connections.size).to.eql(0)
// Setup connections before registrar
const conn = await createMockConnection()
const peerInfo = await PeerInfo.create(conn.remotePeer)
// Add connected peer to peerStore and registrar
peerStore.put(peerInfo)
registrar.onConnect(peerInfo, conn)
// Add protocol to peer and update it
peerInfo.protocols.add(multicodec)
peerStore.put(peerInfo)
await onConnectDefer.promise
// Remove protocol to peer and update it
peerInfo.protocols.delete(multicodec)
peerStore.put(peerInfo)
await onDisconnectDefer.promise
})
})
})
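Outside the test suite, a protocol implementation registers its interest in peers in much the same way; a minimal sketch with a placeholder multicodec and no-op handlers, where `registrar` is an existing Registrar instance:

// Sketch of a protocol registering a multicodec topology (mirrors the tests above).
const Topology = require('libp2p-interfaces/src/topology/multicodec-topology')

const topology = new Topology({
  multicodecs: '/my/protocol/1.0.0', // placeholder multicodec
  handlers: {
    onConnect: (peerInfo, connection) => { /* start using the peer */ },
    onDisconnect: (peerInfo) => { /* drop per-peer state */ }
  }
})

const id = registrar.register(topology) // keep the identifier to unregister later
// ...
registrar.unregister(id)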

50
test/registrar/utils.js Normal file

@@ -0,0 +1,50 @@
'use strict'
const { Connection } = require('libp2p-interfaces/src/connection')
const multiaddr = require('multiaddr')
const pair = require('it-pair')
const peerUtils = require('../utils/creators/peer')
module.exports.createMockConnection = async (properties = {}) => {
const localAddr = multiaddr('/ip4/127.0.0.1/tcp/8080')
const remoteAddr = multiaddr('/ip4/127.0.0.1/tcp/8081')
const [localPeer, remotePeer] = await peerUtils.createPeerInfoFromFixture(2)
const openStreams = []
let streamId = 0
return new Connection({
localPeer: localPeer.id,
remotePeer: remotePeer.id,
localAddr,
remoteAddr,
stat: {
timeline: {
open: Date.now() - 10,
upgraded: Date.now()
},
direction: 'outbound',
encryption: '/secio/1.0.0',
multiplexer: '/mplex/6.7.0'
},
newStream: (protocols) => {
const id = streamId++
const stream = pair()
stream.close = () => stream.sink([])
stream.id = id
openStreams.push(stream)
return {
stream,
protocol: protocols[0]
}
},
close: () => { },
getStreams: () => openStreams,
...properties
})
}
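A sketch of how this helper is typically used from a spec, mirroring the registrar tests earlier in this diff; `peerStore`, `registrar`, and `PeerInfo` are assumed to be set up as in those tests:

// Hypothetical usage of createMockConnection (not part of the diff).
const { createMockConnection } = require('./utils')

const conn = await createMockConnection()
const remotePeerInfo = await PeerInfo.create(conn.remotePeer)

peerStore.put(remotePeerInfo)             // make the peer known to the peer store
registrar.onConnect(remotePeerInfo, conn) // simulate the upgrader notifying the registrar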

Some files were not shown because too many files have changed in this diff.