mirror of https://github.com/fluencelabs/js-libp2p
synced 2025-07-24 13:01:58 +00:00

Compare commits: fix/return ... feat/keybo (22 commits)

Commits (SHA1):

- 14bd946990
- 7969151458
- 1c2c4324bf
- 6610ef33f9
- 1059c87925
- b880d5de97
- da7ddbb160
- 7d3d6dec68
- 8c92e8b3d8
- 0252eaf960
- de05e70820
- beb6b8090d
- 52f45d0319
- bc9760319c
- 261c99dd18
- 31d8a929ea
- 67fda7e106
- e96f01ae74
- ea149cd1fe
- daf980e2ba
- ac124644d5
- a08a725123
@@ -45,7 +45,7 @@ const after = async () => {
}

module.exports = {
  bundlesize: { maxSize: '202kB' },
  bundlesize: { maxSize: '185kB' },
  hooks: {
    pre: before,
    post: after
CHANGELOG.md (132 lines changed)

@@ -1,135 +1,3 @@
<a name="0.28.7"></a>
## [0.28.7](https://github.com/libp2p/js-libp2p/compare/v0.28.6...v0.28.7) (2020-07-14)

### Bug Fixes

* retimer reschedule does not work as interval ([#710](https://github.com/libp2p/js-libp2p/issues/710)) ([999c1b7](https://github.com/libp2p/js-libp2p/commit/999c1b7))

<a name="0.28.6"></a>
## [0.28.6](https://github.com/libp2p/js-libp2p/compare/v0.28.5...v0.28.6) (2020-07-14)

### Bug Fixes

* not dial all known peers in parallel on startup ([#698](https://github.com/libp2p/js-libp2p/issues/698)) ([9ccab40](https://github.com/libp2p/js-libp2p/commit/9ccab40))

<a name="0.28.5"></a>
## [0.28.5](https://github.com/libp2p/js-libp2p/compare/v0.28.4...v0.28.5) (2020-07-10)

### Bug Fixes

* pass libp2p to the dht ([#700](https://github.com/libp2p/js-libp2p/issues/700)) ([5a84dd5](https://github.com/libp2p/js-libp2p/commit/5a84dd5))

<a name="0.28.4"></a>
## [0.28.4](https://github.com/libp2p/js-libp2p/compare/v0.28.3...v0.28.4) (2020-07-03)

<a name="0.28.3"></a>
## [0.28.3](https://github.com/libp2p/js-libp2p/compare/v0.28.2...v0.28.3) (2020-06-18)

### Bug Fixes

* catch pipe errors ([#678](https://github.com/libp2p/js-libp2p/issues/678)) ([a8219e6](https://github.com/libp2p/js-libp2p/commit/a8219e6))

<a name="0.28.2"></a>
## [0.28.2](https://github.com/libp2p/js-libp2p/compare/v0.28.1...v0.28.2) (2020-06-15)

### Reverts

* "fix: throw if no conn encryption module provided ([#665](https://github.com/libp2p/js-libp2p/issues/665))" ([b621fbd](https://github.com/libp2p/js-libp2p/commit/b621fbd))

<a name="0.28.1"></a>
## [0.28.1](https://github.com/libp2p/js-libp2p/compare/v0.28.0...v0.28.1) (2020-06-12)

### Bug Fixes

* throw if no conn encryption module provided ([#665](https://github.com/libp2p/js-libp2p/issues/665)) ([c038550](https://github.com/libp2p/js-libp2p/commit/c038550))

### Features

* add ConnectionManager#getAll ([8f680e2](https://github.com/libp2p/js-libp2p/commit/8f680e2))

<a name="0.28.0"></a>
# [0.28.0](https://github.com/libp2p/js-libp2p/compare/v0.28.0-rc.0...v0.28.0) (2020-06-05)

<a name="0.28.0-rc.0"></a>
# [0.28.0-rc.0](https://github.com/libp2p/js-libp2p/compare/v0.27.8...v0.28.0-rc.0) (2020-05-28)

### Bug Fixes

* always emit when a connection is made ([72f37ac](https://github.com/libp2p/js-libp2p/commit/72f37ac))
* expose the muxed stream interface on inbound streams ([52a615f](https://github.com/libp2p/js-libp2p/commit/52a615f))
* libp2p connections getter ([aaf62a4](https://github.com/libp2p/js-libp2p/commit/aaf62a4))
* onConnect should not add addr to the addressBook ([2b45fee](https://github.com/libp2p/js-libp2p/commit/2b45fee))
* use libp2p.multiaddrs instead of listen ([7fbd155](https://github.com/libp2p/js-libp2p/commit/7fbd155))
* **example:** rename misleading variable ([#645](https://github.com/libp2p/js-libp2p/issues/645)) ([b781911](https://github.com/libp2p/js-libp2p/commit/b781911))

### Chores

* deprecate old peer store api ([#598](https://github.com/libp2p/js-libp2p/issues/598)) ([ed6d5bb](https://github.com/libp2p/js-libp2p/commit/ed6d5bb))
* remove peer-info usage ([12e48ad](https://github.com/libp2p/js-libp2p/commit/12e48ad))

### Features

* address and proto books ([#590](https://github.com/libp2p/js-libp2p/issues/590)) ([e9d225c](https://github.com/libp2p/js-libp2p/commit/e9d225c))
* address manager ([2a7967c](https://github.com/libp2p/js-libp2p/commit/2a7967c))
* keybook ([ce38033](https://github.com/libp2p/js-libp2p/commit/ce38033))
* metadata book ([#638](https://github.com/libp2p/js-libp2p/issues/638)) ([84b935f](https://github.com/libp2p/js-libp2p/commit/84b935f))
* peerStore persistence ([5123a83](https://github.com/libp2p/js-libp2p/commit/5123a83))
* support dial only on transport manager to tolerate errors ([#643](https://github.com/libp2p/js-libp2p/issues/643)) ([698c1df](https://github.com/libp2p/js-libp2p/commit/698c1df))

### BREAKING CHANGES

* all API methods with peer-info parameters or return values were changed. Check the API.md document for the new values to use.
* the peer-store api changed. Check the API docs for the new specification.

* chore: apply suggestions from code review

Co-Authored-By: Jacob Heun <jacobheun@gmail.com>

* chore: apply suggestions from code review

Co-Authored-By: Jacob Heun <jacobheun@gmail.com>

Co-authored-by: Jacob Heun <jacobheun@gmail.com>

<a name="0.27.8"></a>
## [0.27.8](https://github.com/libp2p/js-libp2p/compare/v0.27.7...v0.27.8) (2020-05-06)

### Bug Fixes

* reset discovery services upon stop ([#618](https://github.com/libp2p/js-libp2p/issues/618)) ([ea0621b](https://github.com/libp2p/js-libp2p/commit/ea0621b))

<a name="0.27.7"></a>
## [0.27.7](https://github.com/libp2p/js-libp2p/compare/v0.27.6...v0.27.7) (2020-04-24)
@@ -1,45 +0,0 @@
|
||||
<!--Specify versions for migration below-->
|
||||
# Migrating to libp2p@__
|
||||
|
||||
A migration guide for refactoring your application code from libp2p v__ to v__.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [API](#api)
|
||||
- [Module Updates](#module-updates)
|
||||
|
||||
## API
|
||||
|
||||
<!--Describe breaking APIs with examples for Before and After
|
||||
Example:
|
||||
|
||||
### Peer Discovery
|
||||
|
||||
__Describe__
|
||||
|
||||
**Before**
|
||||
|
||||
```js
|
||||
|
||||
```
|
||||
|
||||
**After**
|
||||
|
||||
```js
|
||||
|
||||
```
|
||||
|
||||
-->
|
||||
|
||||
## Module Updates
|
||||
|
||||
With this release you should update the following libp2p modules if you are relying on them:
|
||||
|
||||
<!--Specify module versions in JSON for migration below.
|
||||
It's recommended to check package.json changes for this:
|
||||
`git diff <release> <prev> -- package.json`
|
||||
-->
|
||||
|
||||
```json
|
||||
|
||||
```
|
README.md (11 lines changed)

@@ -35,11 +35,9 @@ We've come a long way, but this project is still in Alpha, lots of development i
The documentation in the master branch may contain changes from a pre-release.
If you are looking for the documentation of the latest release, you can view the latest release on [**npm**](https://www.npmjs.com/package/libp2p), or select the tag in github that matches the version you are looking for.

**Want to get started?** Check our [GETTING_STARTED.md](./doc/GETTING_STARTED.md) guide and [examples folder](/examples).
**Want to get started?** Check our [examples folder](/examples).

**Want to update libp2p in your project?** Check our [migrations folder](./doc/migrations).

[**`Weekly Core Dev Calls`**](https://github.com/libp2p/team-mgmt/issues/16)
[**`Weekly Core Dev Calls`**](https://github.com/ipfs/pm/issues/650)

## Lead Maintainer

@@ -155,7 +153,7 @@ List of packages currently in existence for libp2p
|
||||
| [`libp2p-kad-dht`](//github.com/libp2p/js-libp2p-kad-dht) | [](//github.com/libp2p/js-libp2p-kad-dht/releases) | [](https://david-dm.org/libp2p/js-libp2p-kad-dht) | [](https://travis-ci.com/libp2p/js-libp2p-kad-dht) | [](https://codecov.io/gh/libp2p/js-libp2p-kad-dht) | [Vasco Santos](mailto:vasco.santos@moxy.studio) |
|
||||
| [`libp2p-mdns`](//github.com/libp2p/js-libp2p-mdns) | [](//github.com/libp2p/js-libp2p-mdns/releases) | [](https://david-dm.org/libp2p/js-libp2p-mdns) | [](https://travis-ci.com/libp2p/js-libp2p-mdns) | [](https://codecov.io/gh/libp2p/js-libp2p-mdns) | [Jacob Heun](mailto:jacobheun@gmail.com) |
|
||||
| [`libp2p-webrtc-star`](//github.com/libp2p/js-libp2p-webrtc-star) | [](//github.com/libp2p/js-libp2p-webrtc-star/releases) | [](https://david-dm.org/libp2p/js-libp2p-webrtc-star) | [](https://travis-ci.com/libp2p/js-libp2p-webrtc-star) | [](https://codecov.io/gh/libp2p/js-libp2p-webrtc-star) | [Vasco Santos](mailto:vasco.santos@moxy.studio) |
|
||||
| [`@chainsafe/discv5`](//github.com/ChainSafe/discv5) | [](//github.com/ChainSafe/discv5/releases) | [](https://david-dm.org/ChainSafe/discv5) | [](https://travis-ci.com/ChainSafe/discv5) | [](https://codecov.io/gh/ChainSafe/discv5) | [Cayman Nava](mailto:caymannava@gmail.com) |
|
||||
| [`discv5`](//github.com/ChainSafe/discv5) | [](//github.com/ChainSafe/discv5/releases) | [](https://david-dm.org/ChainSafe/discv5) | [](https://travis-ci.com/ChainSafe/discv5) | [](https://codecov.io/gh/ChainSafe/discv5) | [Cayman Nava](mailto:caymannava@gmail.com) |
|
||||
| **content routing** |
|
||||
| [`libp2p-delegated-content-routing`](//github.com/libp2p/js-libp2p-delegated-content-routing) | [](//github.com/libp2p/js-libp2p-delegated-content-routing/releases) | [](https://david-dm.org/libp2p/js-libp2p-delegated-content-routing) | [](https://travis-ci.com/libp2p/js-libp2p-delegated-content-routing) | [](https://codecov.io/gh/libp2p/js-libp2p-delegated-content-routing) | [Jacob Heun](mailto:jacobheun@gmail.com) |
|
||||
| [`libp2p-kad-dht`](//github.com/libp2p/js-libp2p-kad-dht) | [](//github.com/libp2p/js-libp2p-kad-dht/releases) | [](https://david-dm.org/libp2p/js-libp2p-kad-dht) | [](https://travis-ci.com/libp2p/js-libp2p-kad-dht) | [](https://codecov.io/gh/libp2p/js-libp2p-kad-dht) | [Vasco Santos](mailto:vasco.santos@moxy.studio) |
|
||||
@@ -167,10 +165,11 @@ List of packages currently in existence for libp2p
|
||||
| [`libp2p-crypto-secp256k1`](//github.com/libp2p/js-libp2p-crypto-secp256k1) | [](//github.com/libp2p/js-libp2p-crypto-secp256k1/releases) | [](https://david-dm.org/libp2p/js-libp2p-crypto-secp256k1) | [](https://travis-ci.com/libp2p/js-libp2p-crypto-secp256k1) | [](https://codecov.io/gh/libp2p/js-libp2p-crypto-secp256k1) | [Friedel Ziegelmayer](mailto:dignifiedquire@gmail.com) |
|
||||
| **data types** |
|
||||
| [`peer-id`](//github.com/libp2p/js-peer-id) | [](//github.com/libp2p/js-peer-id/releases) | [](https://david-dm.org/libp2p/js-peer-id) | [](https://travis-ci.com/libp2p/js-peer-id) | [](https://codecov.io/gh/libp2p/js-peer-id) | [Vasco Santos](mailto:santos.vasco10@gmail.com) |
|
||||
| [`peer-info`](//github.com/libp2p/js-peer-info) | [](//github.com/libp2p/js-peer-info/releases) | [](https://david-dm.org/libp2p/js-peer-info) | [](https://travis-ci.com/libp2p/js-peer-info) | [](https://codecov.io/gh/libp2p/js-peer-info) | [Vasco Santos](mailto:vasco.santos@moxy.studio) |
|
||||
| **pubsub** |
|
||||
| [`libp2p-pubsub`](//github.com/libp2p/js-libp2p-pubsub) | [](//github.com/libp2p/js-libp2p-pubsub/releases) | [](https://david-dm.org/libp2p/js-libp2p-pubsub) | [](https://travis-ci.com/libp2p/js-libp2p-pubsub) | [](https://codecov.io/gh/libp2p/js-libp2p-pubsub) | [Vasco Santos](mailto:santos.vasco10@gmail.com) |
|
||||
| [`libp2p-floodsub`](//github.com/libp2p/js-libp2p-floodsub) | [](//github.com/libp2p/js-libp2p-floodsub/releases) | [](https://david-dm.org/libp2p/js-libp2p-floodsub) | [](https://travis-ci.com/libp2p/js-libp2p-floodsub) | [](https://codecov.io/gh/libp2p/js-libp2p-floodsub) | [Vasco Santos](mailto:vasco.santos@moxy.studio) |
|
||||
| [`libp2p-gossipsub`](//github.com/ChainSafe/js-libp2p-gossipsub) | [](//github.com/ChainSafe/js-libp2p-gossipsub/releases) | [](https://david-dm.org/ChainSafe/js-libp2p-gossipsub) | [](https://travis-ci.com/ChainSafe/js-libp2p-gossipsub) | [](https://codecov.io/gh/ChainSafe/js-libp2p-gossipsub) | [Cayman Nava](mailto:caymannava@gmail.com) |
|
||||
| [`libp2p-gossipsub`](//github.com/ChainSafe/gossipsub-js) | [](//github.com/ChainSafe/gossipsub-js/releases) | [](https://david-dm.org/ChainSafe/gossipsub-js) | [](https://travis-ci.com/ChainSafe/gossipsub-js) | [](https://codecov.io/gh/ChainSafe/gossipsub-js) | [Cayman Nava](mailto:caymannava@gmail.com) |
|
||||
| **extensions** |
|
||||
| [`libp2p-nat-mgnr`](//github.com/libp2p/js-libp2p-nat-mgnr) | [](//github.com/libp2p/js-libp2p-nat-mgnr/releases) | [](https://david-dm.org/libp2p/js-libp2p-nat-mgnr) | [](https://travis-ci.com/libp2p/js-libp2p-nat-mgnr) | [](https://codecov.io/gh/libp2p/js-libp2p-nat-mgnr) | N/A |
|
||||
| [`libp2p-utils`](//github.com/libp2p/js-libp2p-utils) | [](//github.com/libp2p/js-libp2p-utils/releases) | [](https://david-dm.org/libp2p/js-libp2p-utils) | [](https://travis-ci.com/libp2p/js-libp2p-utils) | [](https://codecov.io/gh/libp2p/js-libp2p-utils) | [Vasco Santos](mailto:santos.vasco10@gmail.com) |
|
||||
|
@@ -26,7 +26,6 @@
- Documentation
  - [ ] Ensure that README.md is up to date
  - [ ] Ensure that all the examples run
  - [ ] Ensure that [libp2p/docs](https://github.com/libp2p/docs) is updated
- Communication
  - [ ] Create the release issue
  - [ ] Take a snapshot of everyone that has contributed to this release (including its subdeps in IPFS, libp2p, IPLD and multiformats) using [`name-your-contributors`](https://www.npmjs.com/package/name-your-contributors). Generate a nice markdown list with [this script](https://gist.github.com/jacobheun/d2ff479ca991733c13cdcf688a1317e5)
doc/API.md (481 lines changed)
@@ -29,11 +29,6 @@
|
||||
* [`peerStore.keyBook.delete`](#peerstorekeybookdelete)
|
||||
* [`peerStore.keyBook.get`](#peerstorekeybookget)
|
||||
* [`peerStore.keyBook.set`](#peerstorekeybookset)
|
||||
* [`peerStore.metadataBook.delete`](#peerstoremetadatabookdelete)
|
||||
* [`peerStore.metadataBook.deleteValue`](#peerstoremetadatabookdeletevalue)
|
||||
* [`peerStore.metadataBook.get`](#peerstoremetadatabookget)
|
||||
* [`peerStore.metadataBook.getValue`](#peerstoremetadatabookgetvalue)
|
||||
* [`peerStore.metadataBook.set`](#peerstoremetadatabookset)
|
||||
* [`peerStore.protoBook.add`](#peerstoreprotobookadd)
|
||||
* [`peerStore.protoBook.delete`](#peerstoreprotobookdelete)
|
||||
* [`peerStore.protoBook.get`](#peerstoreprotobookget)
|
||||
@@ -49,17 +44,6 @@
|
||||
* [`connectionManager.get`](#connectionmanagerget)
|
||||
* [`connectionManager.setPeerValue`](#connectionmanagersetpeervalue)
|
||||
* [`connectionManager.size`](#connectionmanagersize)
|
||||
* [`keychain.createKey`](#keychaincreatekey)
|
||||
* [`keychain.renameKey`](#keychainrenamekey)
|
||||
* [`keychain.removeKey`](#keychainremovekey)
|
||||
* [`keychain.exportKey`](#keychainexportkey)
|
||||
* [`keychain.importKey`](#keychainimportkey)
|
||||
* [`keychain.importPeer`](#keychainimportpeer)
|
||||
* [`keychain.listKeys`](#keychainlistkeys)
|
||||
* [`keychain.findKeyById`](#keychainfindkeybyid)
|
||||
* [`keychain.findKeyByName`](#keychainfindkeybyname)
|
||||
* [`keychain.cms.encrypt`](#keychaincmsencrypt)
|
||||
* [`keychain.cms.decrypt`](#keychaincmsdecrypt)
|
||||
* [`metrics.global`](#metricsglobal)
|
||||
* [`metrics.peers`](#metricspeers)
|
||||
* [`metrics.protocols`](#metricsprotocols)
|
||||
@@ -85,17 +69,15 @@ Creates an instance of Libp2p.
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| options | `object` | libp2p options |
|
||||
| options.modules | [`Array<object>`](./CONFIGURATION.md#modules) | libp2p modules to use |
|
||||
| options.modules | `Array<object>` | libp2p modules to use |
|
||||
| [options.addresses] | `{ listen: Array<string>, announce: Array<string>, noAnnounce: Array<string> }` | Addresses for transport listening and to advertise to the network |
|
||||
| [options.config] | `object` | libp2p modules configuration and core configuration |
|
||||
| [options.connectionManager] | [`object`](./CONFIGURATION.md#configuring-connection-manager) | libp2p Connection Manager configuration |
|
||||
| [options.transportManager] | [`object`](./CONFIGURATION.md#configuring-transport-manager) | libp2p transport manager configuration |
|
||||
| [options.connectionManager] | `object` | libp2p Connection Manager configuration |
|
||||
| [options.datastore] | `object` | must implement [ipfs/interface-datastore](https://github.com/ipfs/interface-datastore) (in memory datastore will be used if not provided) |
|
||||
| [options.dialer] | [`object`](./CONFIGURATION.md#configuring-dialing) | libp2p Dialer configuration
|
||||
| [options.keychain] | [`object`](./CONFIGURATION.md#setup-with-keychain) | keychain configuration |
|
||||
| [options.metrics] | [`object`](./CONFIGURATION.md#configuring-metrics) | libp2p Metrics configuration
|
||||
| [options.dialer] | `object` | libp2p Dialer configuration
|
||||
| [options.metrics] | `object` | libp2p Metrics configuration
|
||||
| [options.peerId] | [`PeerId`][peer-id] | peerId instance (it will be created if not provided) |
|
||||
| [options.peerStore] | [`object`](./CONFIGURATION.md#configuring-peerstore) | libp2p PeerStore configuration |
|
||||
| [options.peerStore] | `object` | libp2p PeerStore configuration |
|
||||
|
||||
For Libp2p configurations and modules details read the [Configuration Document](./CONFIGURATION.md).
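For a quick point of reference, the sketch below shows a minimal `Libp2p.create` call using the options above. The module choices (libp2p-tcp, libp2p-mplex, libp2p-secio) and the listen address are illustrative assumptions, not requirements:

```js
const Libp2p = require('libp2p')
const TCP = require('libp2p-tcp')
const MPLEX = require('libp2p-mplex')
const SECIO = require('libp2p-secio')

// Minimal node: one transport, one stream muxer and one connection encrypter,
// listening on an ephemeral TCP port on the loopback interface
const libp2p = await Libp2p.create({
  addresses: {
    listen: ['/ip4/127.0.0.1/tcp/0']
  },
  modules: {
    transport: [TCP],
    streamMuxer: [MPLEX],
    connEncryption: [SECIO]
  }
})

await libp2p.start()
```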
|
||||
|
||||
@@ -143,36 +125,6 @@ Required keys in the `options` object:
|
||||
|
||||
## Libp2p Instance Methods
|
||||
|
||||
### loadKeychain
|
||||
|
||||
Load keychain keys from the datastore, importing the private key as 'self', if needed.
|
||||
|
||||
`libp2p.loadKeychain()`
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Promise` | Promise resolves when the keychain is ready |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
const Libp2p = require('libp2p')
|
||||
|
||||
// ...
|
||||
|
||||
const libp2p = await Libp2p.create({
|
||||
// ...
|
||||
keychain: {
|
||||
pass: '0123456789pass1234567890'
|
||||
}
|
||||
})
|
||||
|
||||
// load keychain
|
||||
await libp2p.loadKeychain()
|
||||
```
|
||||
|
||||
### start
|
||||
|
||||
Starts the libp2p node.
|
||||
@@ -907,7 +859,7 @@ Get the known `PublicKey` of a provided peer.
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| [`RsaPublicKey\|Ed25519PublicKey\|Secp256k1PublicKey`][keys] | Peer PublicKey |
|
||||
| `RsaPublicKey|Ed25519PublicKey|Secp256k1PublicKey` | Peer PublicKey |
|
||||
|
||||
#### Example
|
||||
|
||||
@@ -930,7 +882,7 @@ Set known `peerId`. This can include its Public Key.
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| peerId | [`PeerId`][peer-id] | peerId to set |
|
||||
| publicKey | [`RsaPublicKey\|Ed25519PublicKey\|Secp256k1PublicKey`][keys] | peer's public key |
|
||||
| publicKey | [`RsaPublicKey|Ed25519PublicKey|Secp256k1PublicKey`][keys] | peer's public key |
|
||||
|
||||
#### Returns
|
||||
|
||||
@@ -945,146 +897,6 @@ const publicKey = peerId.pubKey
|
||||
peerStore.keyBook.set(peerId, publicKey)
|
||||
```
|
||||
|
||||
### peerStore.metadataBook.delete
|
||||
|
||||
Delete the provided peer from the book.
|
||||
|
||||
`peerStore.metadataBook.delete(peerId)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| peerId | [`PeerId`][peer-id] | peerId to remove |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `boolean` | true if found and removed |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
peerStore.metadataBook.delete(peerId)
|
||||
// false
|
||||
peerStore.metadataBook.set(peerId, 'nickname', Buffer.from('homePeer'))
|
||||
peerStore.metadataBook.delete(peerId)
|
||||
// true
|
||||
```
|
||||
|
||||
### peerStore.metadataBook.deleteValue
|
||||
|
||||
Deletes the provided peer metadata key-value pair from the book.
|
||||
|
||||
`peerStore.metadataBook.deleteValue(peerId, key)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| peerId | [`PeerId`][peer-id] | peerId to remove |
|
||||
| key | `string` | key of the metadata value to remove |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `boolean` | true if found and removed |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
peerStore.metadataBook.deleteValue(peerId, 'location')
|
||||
// false
|
||||
peerStore.metadataBook.set(peerId, 'location', Buffer.from('Berlin'))
|
||||
peerStore.metadataBook.deleteValue(peerId, 'location')
|
||||
// true
|
||||
```
|
||||
|
||||
### peerStore.metadataBook.get
|
||||
|
||||
Get the known metadata of a provided peer.
|
||||
|
||||
`peerStore.metadataBook.get(peerId)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| peerId | [`PeerId`][peer-id] | peerId to get |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Map<string, Buffer>` | Peer Metadata |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
peerStore.metadataBook.get(peerId)
|
||||
// undefined
|
||||
peerStore.metadataBook.set(peerId, 'location', Buffer.from('Berlin'))
|
||||
peerStore.metadataBook.get(peerId)
|
||||
// Metadata Map
|
||||
```
|
||||
|
||||
### peerStore.metadataBook.getValue
|
||||
|
||||
Get specific metadata of a provided peer.
|
||||
|
||||
`peerStore.metadataBook.getValue(peerId, key)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| peerId | [`PeerId`][peer-id] | peerId to get |
|
||||
| key | `string` | key of the metadata value to get |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Buffer` | Peer metadata value |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
peerStore.metadataBook.getValue(peerId, 'location')
|
||||
// undefined
|
||||
peerStore.metadataBook.set(peerId, 'location', Buffer.from('Berlin'))
|
||||
peerStore.metadataBook.getValue(peerId, 'location')
|
||||
// Metadata value (Buffer)
|
||||
```
|
||||
|
||||
### peerStore.metadataBook.set
|
||||
|
||||
Set known metadata of a given `peerId`.
|
||||
|
||||
`peerStore.metadataBook.set(peerId, key, value)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| peerId | [`PeerId`][peer-id] | peerId to set |
|
||||
| key | `string` | key of the metadata value to store |
|
||||
| value | `Buffer` | metadata value to store |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `MetadataBook` | Returns the Metadata Book component |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
peerStore.metadataBook.set(peerId, 'location', Buffer.from('Berlin'))
|
||||
```
|
||||
|
||||
### peerStore.protoBook.delete
|
||||
|
||||
Delete the provided peer from the book.
|
||||
@@ -1442,283 +1254,6 @@ libp2p.connectionManager.size
|
||||
// 10
|
||||
```
|
||||
|
||||
### keychain.createKey
|
||||
|
||||
Create a key in the keychain.
|
||||
|
||||
`libp2p.keychain.createKey(name, type, size)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| name | `string` | The local key name. It cannot already exist. |
|
||||
| type | `string` | One of the key types; 'rsa' |
|
||||
| size | `number` | The key size in bits. |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Promise<{ id, name }>` | Key info object |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
const keyInfo = await libp2p.keychain.createKey('keyTest', 'rsa', 4096)
|
||||
```
|
||||
|
||||
### keychain.renameKey
|
||||
|
||||
Rename a key in the keychain.
|
||||
|
||||
`libp2p.keychain.renameKey(oldName, newName)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| oldName | `string` | The old local key name. It must already exist. |
| newName | `string` | The new local key name. It must not already exist. |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Promise<{ id, name }>` | Key info object |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
await libp2p.keychain.createKey('keyTest', 'rsa', 4096)
|
||||
const keyInfo = await libp2p.keychain.renameKey('keyTest', 'keyNewNtest')
|
||||
```
|
||||
|
||||
### keychain.removeKey
|
||||
|
||||
Removes a key from the keychain.
|
||||
|
||||
`libp2p.keychain.removeKey(name)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| name | `string` | The local key name. It must already exist. |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Promise<{ id, name }>` | Key info object |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
await libp2p.keychain.createKey('keyTest', 'rsa', 4096)
|
||||
const keyInfo = await libp2p.keychain.removeKey('keyTest')
|
||||
```
|
||||
|
||||
### keychain.exportKey
|
||||
|
||||
Export an existing key as a PEM encrypted PKCS #8 string.
|
||||
|
||||
`libp2p.keychain.exportKey(name, password)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| name | `string` | The local key name. It must already exist. |
|
||||
| password | `string` | The password to use. |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Promise<string>` | Key as a PEM encrypted PKCS #8 |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
await libp2p.keychain.createKey('keyTest', 'rsa', 4096)
|
||||
const pemKey = await libp2p.keychain.exportKey('keyTest', 'password123')
|
||||
```
|
||||
|
||||
### keychain.importKey
|
||||
|
||||
Import a new key from a PEM encoded PKCS #8 string.
|
||||
|
||||
`libp2p.keychain.importKey(name, pem, password)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| name | `string` | The local key name. It must not exist. |
|
||||
| pem | `string` | The PEM encoded PKCS #8 string. |
|
||||
| password | `string` | The password to use. |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Promise<{ id, name }>` | Key info object |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
await libp2p.keychain.createKey('keyTest', 'rsa', 4096)
|
||||
const pemKey = await libp2p.keychain.exportKey('keyTest', 'password123')
|
||||
const keyInfo = await libp2p.keychain.importKey('keyTestImport', pemKey, 'password123')
|
||||
```
|
||||
|
||||
### keychain.importPeer
|
||||
|
||||
Import a new key from a PeerId.
|
||||
|
||||
`libp2p.keychain.importPeer(name, peerId)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| name | `string` | The local key name. It must not exist. |
|
||||
| peerId | [`PeerId`][peer-id] | The PeerId whose private key should be imported. |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Promise<{ id, name }>` | Key info object |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
const keyInfo = await libp2p.keychain.importPeer('keyTestImport', peerId)
|
||||
```
|
||||
|
||||
### keychain.listKeys
|
||||
|
||||
List all the keys.
|
||||
|
||||
`libp2p.keychain.listKeys()`
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Promise<Array<{ id, name }>>` | Array of Key info |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
const keyInfos = await libp2p.keychain.listKeys()
|
||||
```
|
||||
|
||||
### keychain.findKeyById
|
||||
|
||||
Find a key by its id.
|
||||
|
||||
`libp2p.keychain.findKeyById(id)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| id | `string` | The universally unique key identifier. |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Promise<{ id, name }>` | Key info object |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
const keyInfo = await libp2p.keychain.createKey('keyTest', 'rsa', 4096)
|
||||
const keyInfo2 = await libp2p.keychain.findKeyById(keyInfo.id)
|
||||
```
|
||||
|
||||
### keychain.findKeyByName
|
||||
|
||||
Find a key by its name.
|
||||
|
||||
`libp2p.keychain.findKeyByName(name)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| name | `string` | The local key name. |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Promise<{ id, name }>` | Key info object |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
const keyInfo = await libp2p.keychain.createKey('keyTest', 'rsa', 4096)
|
||||
const keyInfo2 = await libp2p.keychain.findKeyByName('keyTest')
|
||||
```
|
||||
|
||||
### keychain.cms.encrypt
|
||||
|
||||
Encrypt protected data using the Cryptographic Message Syntax (CMS).
|
||||
|
||||
`libp2p.keychain.cms.encrypt(name, data)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| name | `string` | The local key name. |
|
||||
| data | `Buffer` | The data to encrypt. |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Promise<Buffer>` | Encrypted data as a PKCS #7 message in DER. |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
const keyInfo = await libp2p.keychain.createKey('keyTest', 'rsa', 4096)
|
||||
const enc = await libp2p.keychain.cms.encrypt('keyTest', Buffer.from('data'))
|
||||
```
|
||||
|
||||
### keychain.cms.decrypt
|
||||
|
||||
Decrypt protected data using the Cryptographic Message Syntax (CMS).
|
||||
The keychain must contain one of the keys used to encrypt the data. If none of the keys exists, an Error is returned with the property 'missingKeys'.
|
||||
|
||||
`libp2p.keychain.cms.decrypt(cmsData)`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| cmsData | `string` | The CMS encrypted data to decrypt. |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Promise<Buffer>` | Decrypted data. |
|
||||
|
||||
#### Example
|
||||
|
||||
```js
|
||||
const keyInfo = await libp2p.keychain.createKey('keyTest', 'rsa', 4096)
|
||||
const enc = await libp2p.keychain.cms.encrypt('keyTest', Buffer.from('data'))
|
||||
const decData = await libp2p.keychain.cms.decrypt(enc)
|
||||
```
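Continuing from the example above, a sketch of handling the failure case described for `decrypt`. Only the `missingKeys` property name comes from the description above; its exact contents (treated here as a list of key identifiers) are an assumption:

```js
try {
  const decData = await libp2p.keychain.cms.decrypt(enc)
} catch (err) {
  if (err.missingKeys) {
    // None of the keys that encrypted this data are present in the keychain
    console.error('Cannot decrypt, missing keys:', err.missingKeys)
  } else {
    throw err
  }
}
```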
|
||||
|
||||
### metrics.global
|
||||
|
||||
A [`Stats`](#stats) object tracking the global bandwidth of the libp2p node.
|
||||
@@ -1885,4 +1420,4 @@ This event will be triggered anytime we are disconnected from another peer, rega
|
||||
[connection]: https://github.com/libp2p/js-interfaces/tree/master/src/connection
|
||||
[multiaddr]: https://github.com/multiformats/js-multiaddr
|
||||
[peer-id]: https://github.com/libp2p/js-peer-id
|
||||
[keys]: https://github.com/libp2p/js-libp2p-crypto/tree/master/src/keys
|
||||
[keys]: https://github.com/libp2p/js-libp2p-crypto/tree/master/src/keys
|
@@ -20,12 +20,9 @@
|
||||
- [Customizing DHT](#customizing-dht)
|
||||
- [Setup with Content and Peer Routing](#setup-with-content-and-peer-routing)
|
||||
- [Setup with Relay](#setup-with-relay)
|
||||
- [Setup with Keychain](#setup-with-keychain)
|
||||
- [Configuring Dialing](#configuring-dialing)
|
||||
- [Configuring Connection Manager](#configuring-connection-manager)
|
||||
- [Configuring Transport Manager](#configuring-transport-manager)
|
||||
- [Configuring Metrics](#configuring-metrics)
|
||||
- [Configuring PeerStore](#configuring-peerstore)
|
||||
- [Customizing Transports](#customizing-transports)
|
||||
- [Configuration examples](#configuration-examples)
|
||||
|
||||
@@ -177,7 +174,7 @@ If you want to know more about libp2p DHT, you should read the following content
|
||||
Some available pubsub routers are:
|
||||
|
||||
- [libp2p/js-libp2p-floodsub](https://github.com/libp2p/js-libp2p-floodsub)
|
||||
- [ChainSafe/js-libp2p-gossipsub](https://github.com/ChainSafe/js-libp2p-gossipsub)
|
||||
- [ChainSafe/gossipsub-js](https://github.com/ChainSafe/gossipsub-js)
|
||||
|
||||
If none of the available pubsub routers fulfills your needs, you can create a libp2p compatible one. A libp2p pubsub router just needs to be created on top of [libp2p/js-libp2p-pubsub](https://github.com/libp2p/js-libp2p-pubsub), which ensures `js-libp2p` API expectations.
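For orientation, the sketch below shows how a chosen router is wired into the libp2p configuration; this applies both to the routers listed above and to a custom implementation built on `libp2p/js-libp2p-pubsub`. Gossipsub and the other module choices here are illustrative assumptions:

```js
const Libp2p = require('libp2p')
const TCP = require('libp2p-tcp')
const MPLEX = require('libp2p-mplex')
const SECIO = require('libp2p-secio')
const Gossipsub = require('libp2p-gossipsub')

const node = await Libp2p.create({
  modules: {
    transport: [TCP],
    streamMuxer: [MPLEX],
    connEncryption: [SECIO],
    pubsub: Gossipsub // any libp2p-pubsub compatible router can be dropped in here
  },
  config: {
    pubsub: {
      enabled: true
    }
  }
})
```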
|
||||
|
||||
@@ -270,7 +267,7 @@ const node = await Libp2p.create({
|
||||
},
|
||||
config: {
|
||||
peerDiscovery: {
|
||||
autoDial: true, // Auto connect to discovered peers (limited by ConnectionManager minConnections)
|
||||
autoDial: true, // Auto connect to discovered peers (limited by ConnectionManager minPeers)
|
||||
// The `tag` property will be searched when creating the instance of your Peer Discovery service.
|
||||
// The associated object will be passed to the service when it is instantiated.
|
||||
[MulticastDNS.tag]: {
|
||||
@@ -378,10 +375,10 @@ const MPLEX = require('libp2p-mplex')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const DelegatedPeerRouter = require('libp2p-delegated-peer-routing')
|
||||
const DelegatedContentRouter = require('libp2p-delegated-content-routing')
|
||||
const PeerId = require('peer-id')
|
||||
const PeerInfo = require('peer-info')
|
||||
|
||||
// create a peerId
|
||||
const peerId = await PeerId.create()
|
||||
// create a peerInfo
|
||||
const peerInfo = await PeerInfo.create()
|
||||
|
||||
const node = await Libp2p.create({
|
||||
modules: {
|
||||
@@ -389,13 +386,13 @@ const node = await Libp2p.create({
|
||||
streamMuxer: [MPLEX],
|
||||
connEncryption: [SECIO],
|
||||
contentRouting: [
|
||||
new DelegatedContentRouter(peerId)
|
||||
new DelegatedContentRouter(peerInfo.id)
|
||||
],
|
||||
peerRouting: [
|
||||
new DelegatedPeerRouter()
|
||||
],
|
||||
},
|
||||
peerId
|
||||
peerInfo
|
||||
})
|
||||
```
|
||||
|
||||
@@ -425,48 +422,9 @@ const node = await Libp2p.create({
|
||||
})
|
||||
```
|
||||
|
||||
#### Setup with Keychain
|
||||
|
||||
Libp2p allows you to set up a secure keychain to manage your keys. The keychain configuration object should have the following properties:
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| pass | `string` | Passphrase to use in the keychain (minimum of 20 characters). |
|
||||
| datastore | `object` | must implement [ipfs/interface-datastore](https://github.com/ipfs/interface-datastore) |
|
||||
|
||||
```js
|
||||
const Libp2p = require('libp2p')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const MPLEX = require('libp2p-mplex')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const LevelStore = require('datastore-level')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
streamMuxer: [MPLEX],
|
||||
connEncryption: [SECIO]
|
||||
},
|
||||
keychain: {
|
||||
pass: 'notsafepassword123456789',
|
||||
datastore: new LevelStore('path/to/store')
|
||||
}
|
||||
})
|
||||
|
||||
await libp2p.loadKeychain()
|
||||
```
|
||||
|
||||
#### Configuring Dialing
|
||||
|
||||
Dialing in libp2p can be configured to limit the rate of dialing, and how long dials are allowed to take. The dialer configuration object should have the following properties:
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| maxParallelDials | `number` | How many multiaddrs we can dial in parallel. |
|
||||
| maxDialsPerPeer | `number` | How many multiaddrs we can dial per peer, in parallel. |
|
||||
| dialTimeout | `number` | How long a dial attempt to a peer is allowed to take, in ms. |
|
||||
|
||||
The below configuration example shows how the dialer should be configured, with the current defaults:
|
||||
Dialing in libp2p can be configured to limit the rate of dialing, and how long dials are allowed to take. The below configuration example shows the default values for the dialer.
|
||||
|
||||
```js
|
||||
const Libp2p = require('libp2p')
|
||||
@@ -481,9 +439,9 @@ const node = await Libp2p.create({
|
||||
connEncryption: [SECIO]
|
||||
},
|
||||
dialer: {
|
||||
maxParallelDials: 100,
|
||||
maxDialsPerPeer: 4,
|
||||
dialTimeout: 30e3
|
||||
maxParallelDials: 100, // How many multiaddrs we can dial in parallel
|
||||
maxDialsPerPeer: 4, // How many multiaddrs we can dial per peer, in parallel
|
||||
dialTimeout: 30e3 // 30 second dial timeout per peer
|
||||
}
|
||||
```
|
||||
|
||||
@@ -518,43 +476,9 @@ const node = await Libp2p.create({
|
||||
})
|
||||
```
|
||||
|
||||
#### Configuring Transport Manager
|
||||
|
||||
The Transport Manager is responsible for managing the life cycle of the libp2p transports. This includes starting listeners for the provided listen addresses, closing these listeners and dialing using the provided transports. By default, if a libp2p node has a list of multiaddrs to listen on and there are no valid transports for those multiaddrs, libp2p will throw an error on startup and shut down. However, for some applications it is perfectly acceptable for libp2p nodes to start in dial-only mode if all the listen multiaddrs failed. This error tolerance can be enabled as follows:
|
||||
|
||||
```js
|
||||
const Libp2p = require('libp2p')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const MPLEX = require('libp2p-mplex')
|
||||
const SECIO = require('libp2p-secio')
|
||||
|
||||
const { FaultTolerance } = require('libp2p/src/transport-manager')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
streamMuxer: [MPLEX],
|
||||
connEncryption: [SECIO]
|
||||
},
|
||||
transportManager: {
|
||||
faultTolerance: FaultTolerance.NO_FATAL
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
#### Configuring Metrics
|
||||
|
||||
Metrics are disabled in libp2p by default. You can enable and configure them as follows:
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| enabled | `boolean` | Enabled metrics collection. |
|
||||
| computeThrottleMaxQueueSize | `number` | How many messages a stat will queue before processing. |
|
||||
| computeThrottleTimeout | `number` | Time in milliseconds a stat will wait, after the last item was added, before processing. |
|
||||
| movingAverageIntervals | `Array<number>` | The moving averages that will be computed. |
|
||||
| maxOldPeersRetention | `number` | How many disconnected peers we will retain stats for. |
|
||||
|
||||
The below configuration example shows how the metrics should be configured. Aside from enabled being `false` by default, the following default configuration options are listed below:
|
||||
Metrics are disabled in libp2p by default. You can enable and configure them as follows. Aside from enabled being `false` by default, the configuration options listed here are the current defaults.
|
||||
|
||||
```js
|
||||
const Libp2p = require('libp2p')
|
||||
@@ -570,14 +494,14 @@ const node = await Libp2p.create({
|
||||
},
|
||||
metrics: {
|
||||
enabled: true,
|
||||
computeThrottleMaxQueueSize: 1000,
|
||||
computeThrottleTimeout: 2000,
|
||||
movingAverageIntervals: [
|
||||
computeThrottleMaxQueueSize: 1000, // How many messages a stat will queue before processing
|
||||
computeThrottleTimeout: 2000, // Time in milliseconds a stat will wait, after the last item was added, before processing
|
||||
movingAverageIntervals: [ // The moving averages that will be computed
|
||||
60 * 1000, // 1 minute
|
||||
5 * 60 * 1000, // 5 minutes
|
||||
15 * 60 * 1000 // 15 minutes
|
||||
],
|
||||
maxOldPeersRetention: 50
|
||||
maxOldPeersRetention: 50 // How many disconnected peers we will retain stats for
|
||||
}
|
||||
})
|
||||
```
|
||||
@@ -588,13 +512,6 @@ PeerStore persistence is disabled in libp2p by default. You can enable and confi
|
||||
|
||||
The threshold number represents the maximum number of "dirty peers" allowed in the PeerStore, i.e. peers that are not updated in the datastore. In this context, browser nodes should use a threshold of 1, since they might not "stop" properly in several scenarios and the PeerStore might end up with unflushed records when the window is closed.
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| persistence | `boolean` | Is persistence enabled. |
|
||||
| threshold | `number` | Number of dirty peers allowed. |
|
||||
|
||||
The below configuration example shows how the PeerStore should be configured. Aside from persistence being `false` by default, the following default configuration options are listed below:
|
||||
|
||||
```js
|
||||
const Libp2p = require('libp2p')
|
||||
const TCP = require('libp2p-tcp')
|
||||
@@ -611,8 +528,8 @@ const node = await Libp2p.create({
|
||||
},
|
||||
datastore: new LevelStore('path/to/store'),
|
||||
peerStore: {
|
||||
persistence: true,
|
||||
threshold: 5
|
||||
persistence: true, // Is persistence enabled (default: false)
|
||||
threshold: 5 // Number of dirty peers allowed (default: 5)
|
||||
}
|
||||
})
|
||||
```
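For browser nodes, per the note above, a `threshold` of `1` keeps the datastore flushed after every change, e.g. `peerStore: { persistence: true, threshold: 1 }`.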
|
||||
@@ -649,8 +566,8 @@ const node = await Libp2p.create({
|
||||
|
||||
As libp2p is designed to be a modular networking library, its usage will vary based on individual project needs. We've included links to some existing project configurations for your reference, in case you wish to replicate their configuration:
|
||||
|
||||
- [libp2p-ipfs-nodejs](https://github.com/ipfs/js-ipfs/blob/master/packages/ipfs/src/core/runtime/libp2p-nodejs.js) - libp2p configuration used by js-ipfs when running in Node.js
|
||||
- [libp2p-ipfs-browser](https://github.com/ipfs/js-ipfs/blob/master/packages/ipfs/src/core/runtime/libp2p-browser.js) - libp2p configuration used by js-ipfs when running in a Browser (that supports WebRTC)
|
||||
- [libp2p-ipfs-nodejs](https://github.com/ipfs/js-ipfs/tree/master/src/core/runtime/libp2p-nodejs.js) - libp2p configuration used by js-ipfs when running in Node.js
|
||||
- [libp2p-ipfs-browser](https://github.com/ipfs/js-ipfs/tree/master/src/core/runtime/libp2p-browser.js) - libp2p configuration used by js-ipfs when running in a Browser (that supports WebRTC)
|
||||
|
||||
If you have developed a project using `js-libp2p`, please consider submitting your configuration to this list so that it can be found easily by other users.
|
||||
|
||||
|
@@ -217,7 +217,7 @@ const node = await Libp2p.create({
|
||||
},
|
||||
config: {
|
||||
peerDiscovery: {
|
||||
autoDial: true, // Auto connect to discovered peers (limited by ConnectionManager minConnections)
|
||||
autoDial: true, // Auto connect to discovered peers (limited by ConnectionManager minPeers)
|
||||
// The `tag` property will be searched when creating the instance of your Peer Discovery service.
|
||||
// The associated object will be passed to the service when it is instantiated.
|
||||
[Bootstrap.tag]: {
|
||||
|
@@ -161,4 +161,4 @@ const duplex = {
|
||||
[it-pipe]: https://github.com/alanshaw/it-pipe
|
||||
[it-pushable]: https://github.com/alanshaw/it-pushable
|
||||
[it-reader]: https://github.com/alanshaw/it-reader
|
||||
[streaming-iterables]: https://github.com/reconbot/streaming-iterables
|
||||
[streaming-iterables]: https://github.com/bustle/streaming-iterables
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Migrating to the libp2p@0.27 API
|
||||
# Migrating to the new API
|
||||
|
||||
A migration guide for refactoring your application code from libp2p v0.26.x to v0.27.0.
|
||||
|
||||
|
@@ -1,343 +0,0 @@
|
||||
# Migrating to the libp2p@0.28 API
|
||||
|
||||
A migration guide for refactoring your application code from libp2p v0.27.x to v0.28.0.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [PeerStore API](#peerstore-api)
|
||||
- [Migrating from Peer Info](#migrating-from-peer-info)
|
||||
- [Create](#create)
|
||||
- [API Implications](#api-implications)
|
||||
- [Connection Manager and Registrar](#connection-manager-and-registrar)
|
||||
- [Events](#events)
|
||||
- [Module Updates](#module-updates)
|
||||
|
||||
## PeerStore API
|
||||
|
||||
In `libp2p@0.27` we integrated the PeerStore (former [peer-book](https://github.com/libp2p/js-peer-book)) into the codebase. At the time, it was not documented in the [API DOC](../API.md) since it kept the same API as the `peer-book` and it was expected to be completely rewritten in `libp2p@0.28`.
|
||||
|
||||
Moving towards a separation of concerns regarding known peers' data, as well as enabling PeerStore persistence, the PeerStore is now divided into four main components: `AddressBook`, `ProtoBook`, `KeyBook` and `MetadataBook`. This resulted in API changes in the PeerStore, since each type of peer data should now be added in an atomic fashion.
|
||||
|
||||
### Adding a Peer
|
||||
|
||||
**Before**
|
||||
```js
|
||||
const peerId = ...
|
||||
const peerInfo = new PeerInfo(peerId)
|
||||
|
||||
peerInfo.protocols.add('/ping/1.0.0')
|
||||
peerInfo.protocols.add('/ping/2.0.0')
|
||||
peerInfo.multiaddrs.add('/ip4/127.0.0.1/tcp/0')
|
||||
|
||||
libp2p.peerStore.put(peerInfo)
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
const peerId = ...
|
||||
const protocols = ['/ping/1.0.0', '/ping/2.0.0']
|
||||
const multiaddrs = ['/ip4/127.0.0.1/tcp/0']
|
||||
|
||||
libp2p.peerStore.protoBook.add(peerId, protocols)
|
||||
libp2p.peerStore.addressBook.add(peerId, multiaddrs)
|
||||
```
|
||||
|
||||
### Getting a Peer
|
||||
|
||||
**Before**
|
||||
```js
|
||||
const peerId = ...
|
||||
const peerInfo = libp2p.peerStore.get(peerId)
|
||||
// { id: PeerId, multiaddrs: MultiaddrSet, protocols: Set<string>}
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
const peerId = ...
|
||||
const peer = libp2p.peerStore.get(peerId)
|
||||
// { id: PeerId, addresses: Array<{ multiaddr: Multiaddr }>, protocols: Array<string> }
|
||||
```
|
||||
|
||||
### Checking for a Peer
|
||||
|
||||
**Before**
|
||||
```js
|
||||
const peerId = ...
|
||||
const hasData = libp2p.peerStore.has(peerId)
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
const peerId = ...
|
||||
const hasData = Boolean(libp2p.peerStore.get(peerId))
|
||||
```
|
||||
|
||||
### Removing a Peer
|
||||
|
||||
**Before**
|
||||
```js
|
||||
libp2p.peerStore.remove(peerId)
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
// Atomic
|
||||
libp2p.peerStore.protoBook.delete(peerId)
|
||||
libp2p.peerStore.addressBook.delete(peerId)
|
||||
// Remove the peer and ALL of its associated data
|
||||
libp2p.peerStore.delete(peerId)
|
||||
```
|
||||
|
||||
### Get all known Peers
|
||||
|
||||
**Before**
|
||||
```js
|
||||
const peers = libp2p.peerStore.peers
|
||||
// Map<string, PeerInfo>
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
const peers = libp2p.peerStore.peers
|
||||
// Similar to libp2p.peerStore.get()
|
||||
// Map<string, { id: PeerId, addresses: Array<{ multiaddr: Multiaddr }>, protocols: Array<string> }
|
||||
```
|
||||
|
||||
## Migrating from Peer Info
|
||||
|
||||
[`PeerInfo`][peer-info] is a libp2p peer abstraction layer that combines a [`PeerId`][peer-id] with known data of the peer, namely its multiaddrs and protocols. It has been used for a long time by `js-libp2p` and its modules to carry this data around the libp2p stack, as well as by the libp2p API, both for providing this data to the users or to receive it from them.
|
||||
|
||||
Since these PeerInfo instances were passed around the entire codebase, data inconsistencies could be observed in libp2p: different libp2p subsystems ran with different views of the known peers' data. For instance, a libp2p subsystem would receive a copy of this instance with the peer's multiaddrs and protocols, but if new data about the peer was obtained by another subsystem, it would not be updated on the former. Moreover, considering that several subsystems were modifying the peer data, libp2p had no way to determine which data was accurate.
|
||||
|
||||
With the complete revamp of the libp2p PeerStore towards its second version, the PeerStore now acts as the single source of truth and we no longer need to carry [`PeerInfo`][peer-info] instances around. This also solves all the problems stated above, since subsystems report new observations to the PeerStore.
|
||||
|
||||
### Create
|
||||
|
||||
While it was possible to create a libp2p node without providing a [`PeerInfo`][peer-info], there were 2 use cases where a [`PeerInfo`][peer-info] was provided when creating a libp2p node.
|
||||
|
||||
#### Using an existing PeerId
|
||||
|
||||
`libp2p.create` receives a `peerId` property instead of a `peerInfo` property.
|
||||
|
||||
**Before**
|
||||
```js
|
||||
const peerId = ...
|
||||
const peerInfo = new PeerInfo(peerId)
|
||||
|
||||
const libp2p = await Libp2p.create({
|
||||
peerInfo
|
||||
// ...
|
||||
})
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
const peerId = ...
|
||||
|
||||
const libp2p = await Libp2p.create({
|
||||
peerId
|
||||
// ...
|
||||
})
|
||||
```
|
||||
|
||||
#### Providing listen addresses
|
||||
|
||||
**Before**
|
||||
```js
|
||||
const peerId = ...
|
||||
const peerInfo = new PeerInfo(peerId)
|
||||
|
||||
peerInfo.multiaddrs.add('/ip4/127.0.0.1/tcp/0')
|
||||
|
||||
const libp2p = await Libp2p.create({
|
||||
peerInfo
|
||||
// ...
|
||||
})
|
||||
|
||||
await libp2p.start()
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
const peerId = ...
|
||||
|
||||
const libp2p = await Libp2p.create({
|
||||
peerId,
|
||||
addresses: {
|
||||
listen: ['/ip4/127.0.0.1/tcp/0']
|
||||
}
|
||||
// ...
|
||||
})
|
||||
await libp2p.start()
|
||||
```
|
||||
|
||||
There is also a bonus regarding the peer addresses. `libp2p@0.28` comes with an AddressManager that also allows the configuration of `announce` and `noAnnounce` addresses.
|
||||
This was possible before, but only in a hacky way: removing or adding addresses to the `peerInfo` after the node started.
|
||||
|
||||
**Before**
|
||||
```js
|
||||
const peerId = ...
|
||||
const peerInfo = new PeerInfo(peerId)
|
||||
|
||||
peerInfo.multiaddrs.add('/ip4/127.0.0.1/tcp/8000')
|
||||
|
||||
const libp2p = await Libp2p.create({
|
||||
peerInfo
|
||||
// ...
|
||||
})
|
||||
|
||||
await libp2p.start()
|
||||
peerInfo.multiaddrs.add('/dns4/peer.io') // Announce
|
||||
peerInfo.multiaddrs.delete('/ip4/127.0.0.1/tcp/8000') // Not announce
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
const peerId = ...
|
||||
|
||||
const libp2p = await Libp2p.create({
|
||||
peerId,
|
||||
addresses: {
|
||||
listen: ['/ip4/127.0.0.1/tcp/8000'],
|
||||
announce: ['/dns4/peer.io'],
|
||||
noAnnounce: ['/ip4/127.0.0.1/tcp/8000']
|
||||
}
|
||||
// ...
|
||||
})
|
||||
await libp2p.start()
|
||||
```
|
||||
|
||||
### API Implications
|
||||
|
||||
#### Peer Dialing, Hangup and Ping
|
||||
|
||||
`libp2p.dial`, `libp2p.dialProtocol`, `libp2p.hangup` and `libp2p.ping` accepted a [`PeerInfo`][peer-info], a [`PeerId`][peer-id], a [`Multiaddr`][multiaddr] or a string representation of the multiaddr as the target parameter. Now that [`PeerInfo`][peer-info] is removed from libp2p, these methods support the remaining 3 possibilities.
|
||||
|
||||
There is one relevant aspect to consider with this change. When using a [`PeerId`][peer-id], the PeerStore **MUST** have known addresses for that peer in its AddressBook, so that it can perform the request. This was also true in the past, but it is important to point out because it might not be enough to simply switch from using [`PeerInfo`][peer-info] to [`PeerId`][peer-id]: when using a [`PeerInfo`][peer-info], the PeerStore was not required to have the multiaddrs if they already existed on the PeerInfo instance.
|
||||
|
||||
**Before**
|
||||
```js
|
||||
const peerInfo = ... // PeerInfo containing its multiaddrs
|
||||
|
||||
const connection = await libp2p.dial(peerInfo)
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
const peerId = ...
|
||||
|
||||
// Known multiaddrs should be added to the PeerStore
|
||||
libp2p.peerStore.addressBook.add(peerId, multiaddrs)
|
||||
|
||||
const connection = await libp2p.dial(peerId)
|
||||
```
|
||||
|
||||
#### Content Routing and Peer Routing
|
||||
|
||||
Both the [content-routing](https://github.com/libp2p/js-libp2p-interfaces/tree/master/src/content-routing) and [peer-routing](https://github.com/libp2p/js-libp2p-interfaces/tree/master/src/peer-routing) interfaces were modified so that they no longer return a [`PeerInfo`][peer-info] instance.
|
||||
|
||||
**Before**
|
||||
```js
|
||||
for await (const peerInfo of libp2p.contentRouting.findProviders(cid)) {
|
||||
// peerInfo is a PeerInfo instance
|
||||
}
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
for await (const peer of libp2p.contentRouting.findProviders(cid)) {
|
||||
// { id: PeerId, multiaddrs: Multiaddr[] }
|
||||
}
|
||||
```
|
||||
|
||||
**Before**
|
||||
```js
|
||||
const peerInfo = await libp2p.peerRouting.findPeer(peerId)
|
||||
// peerInfo is a PeerInfo instance
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
const peer = await libp2p.peerRouting.findPeer(peerId)
|
||||
// { id: PeerId, multiaddrs: Multiaddr[] }
|
||||
```
|
||||
|
||||
## Connection Manager and Registrar
|
||||
|
||||
Registrar was introduced in `libp2p@0.27` along with [libp2p topologies](https://github.com/libp2p/js-libp2p-interfaces/tree/master/src/topology). `Registrar` and `ConnectionManager` were both listening for new connections and each kept its own record of the open connections with other peers.
|
||||
|
||||
The registrar API was not documented in the [API DOC](../API.md). However, it exposed a method useful to some libp2p users, `libp2p.registrar.getConnection()`. On the other hand, the Connection Manager did not provide any methods to access its stored connections. In `libp2p@0.28` we removed this data duplication and connections are now handled solely by the `ConnectionManager`.
|
||||
|
||||
**Before**
|
||||
```js
|
||||
const connection = libp2p.registrar.getConnection(peerId)
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
const connection = libp2p.connectionManager.get(peerId)
|
||||
```
|
||||
|
||||
## Events
|
||||
|
||||
### Connection Events
|
||||
|
||||
Libp2p emits events whenever new connections are established. These events previously provided the [`PeerInfo`][peer-info] of the peer that connected. In `libp2p@0.28` they are emitted from the Connection Manager and carry the [`connection`][connection] itself.
|
||||
|
||||
**Before**
|
||||
```js
|
||||
libp2p.on('peer:connect', (peerInfo) => {
|
||||
// PeerInfo instance
|
||||
})
|
||||
|
||||
libp2p.on('peer:disconnect', (peerInfo) => {
|
||||
// PeerInfo instance
|
||||
})
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
libp2p.connectionManager.on('peer:connect', (connection) => {
|
||||
// Connection instance
|
||||
})
|
||||
|
||||
libp2p.connectionManager.on('peer:disconnect', (connection) => {
|
||||
// Connection instance
|
||||
})
|
||||
```
|
||||
|
||||
### Peer Discovery
|
||||
|
||||
**Before**
|
||||
```js
|
||||
libp2p.on('peer:discovery', (peerInfo) => {
|
||||
// PeerInfo instance
|
||||
})
|
||||
```
|
||||
|
||||
**After**
|
||||
```js
|
||||
libp2p.on('peer:discovery', (peerId) => {
|
||||
// PeerId instance
|
||||
})
|
||||
```
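Since the addresses of discovered peers are kept in the PeerStore, the emitted `PeerId` is enough to dial them. A minimal sketch (note that with `autoDial` enabled libp2p already dials discovered peers for you):

```js
libp2p.on('peer:discovery', (peerId) => {
  // The discovered peer's multiaddrs are already in the PeerStore,
  // so the PeerId alone is enough to dial
  libp2p.dial(peerId).catch((err) => {
    console.error('could not dial %s', peerId.toB58String(), err)
  })
})
```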
|
||||
|
||||
## Module Updates
|
||||
|
||||
With `libp2p@0.28` you should update the following libp2p modules if you are relying on them:
|
||||
|
||||
```json
|
||||
"libp2p-bootstrap": "^0.11.0",
|
||||
"libp2p-delegated-content-routing": "^0.5.0",
|
||||
"libp2p-delegated-peer-routing": "^0.5.0",
|
||||
"libp2p-floodsub": "^0.21.0",
|
||||
"libp2p-gossipsub": "^0.4.0",
|
||||
"libp2p-kad-dht": "^0.19.1",
|
||||
"libp2p-mdns": "^0.14.1",
|
||||
"libp2p-webrtc-star": "^0.18.0"
|
||||
```
|
||||
|
||||
[connection]: https://github.com/libp2p/js-interfaces/tree/master/src/connection
|
||||
[multiaddr]: https://github.com/multiformats/js-multiaddr
|
||||
[peer-id]: https://github.com/libp2p/js-peer-id
|
||||
[peer-info]: https://github.com/libp2p/js-peer-info
|
@@ -2,7 +2,7 @@
|
||||
/* eslint-disable no-console */
|
||||
|
||||
const PeerId = require('peer-id')
|
||||
const multiaddr = require('multiaddr')
|
||||
const PeerInfo = require('peer-info')
|
||||
const Node = require('./libp2p-bundle')
|
||||
const { stdinToStream, streamToConsole } = require('./stream')
|
||||
|
||||
@@ -13,25 +13,27 @@ async function run() {
|
||||
])
|
||||
|
||||
// Create a new libp2p node on localhost with a randomly chosen port
|
||||
const peerDialer = new PeerInfo(idDialer)
|
||||
peerDialer.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
const nodeDialer = new Node({
|
||||
peerId: idDialer,
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
}
|
||||
peerInfo: peerDialer
|
||||
})
|
||||
|
||||
// Create a PeerInfo with the listening peer's address
|
||||
const peerListener = new PeerInfo(idListener)
|
||||
peerListener.multiaddrs.add('/ip4/127.0.0.1/tcp/10333')
|
||||
|
||||
// Start the libp2p host
|
||||
await nodeDialer.start()
|
||||
|
||||
// Output this node's address
|
||||
console.log('Dialer ready, listening on:')
|
||||
nodeDialer.multiaddrs.forEach((ma) => {
|
||||
console.log(ma.toString() + '/p2p/' + idDialer.toB58String())
|
||||
peerListener.multiaddrs.forEach((ma) => {
|
||||
console.log(ma.toString() + '/p2p/' + idListener.toB58String())
|
||||
})
|
||||
|
||||
// Dial to the remote peer (the "listener")
|
||||
const listenerMa = multiaddr(`/ip4/127.0.0.1/tcp/10333/p2p/${idListener.toB58String()}`)
|
||||
const { stream } = await nodeDialer.dialProtocol(listenerMa, '/chat/1.0.0')
|
||||
const { stream } = await nodeDialer.dialProtocol(peerListener, '/chat/1.0.0')
|
||||
|
||||
console.log('Dialer dialed to listener on protocol: /chat/1.0.0')
|
||||
console.log('Type a message and see what happens')
|
||||
|
@@ -3,8 +3,7 @@
|
||||
const TCP = require('libp2p-tcp')
|
||||
const WS = require('libp2p-websockets')
|
||||
const mplex = require('libp2p-mplex')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const secio = require('libp2p-secio')
|
||||
const defaultsDeep = require('@nodeutils/defaults-deep')
|
||||
const libp2p = require('../../..')
|
||||
|
||||
@@ -17,7 +16,7 @@ class Node extends libp2p {
|
||||
WS
|
||||
],
|
||||
streamMuxer: [ mplex ],
|
||||
connEncryption: [ NOISE, SECIO ]
|
||||
connEncryption: [ secio ]
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1,24 +1,23 @@
|
||||
'use strict'
|
||||
/* eslint-disable no-console */
|
||||
|
||||
const multaddr = require('multiaddr')
|
||||
const PeerId = require('peer-id')
|
||||
const PeerInfo = require('peer-info')
|
||||
const Node = require('./libp2p-bundle.js')
|
||||
const { stdinToStream, streamToConsole } = require('./stream')
|
||||
|
||||
async function run() {
|
||||
// Create a new libp2p node with the given multi-address
|
||||
const idListener = await PeerId.createFromJSON(require('./peer-id-listener'))
|
||||
const peerListener = new PeerInfo(idListener)
|
||||
peerListener.multiaddrs.add('/ip4/0.0.0.0/tcp/10333')
|
||||
const nodeListener = new Node({
|
||||
peerId: idListener,
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/10333']
|
||||
}
|
||||
peerInfo: peerListener
|
||||
})
|
||||
|
||||
// Log a message when a remote peer connects to us
|
||||
nodeListener.connectionManager.on('peer:connect', (connection) => {
|
||||
console.log('connected to: ', connection.remotePeer.toB58String())
|
||||
nodeListener.on('peer:connect', (peerInfo) => {
|
||||
console.log(peerInfo.id.toB58String())
|
||||
})
|
||||
|
||||
// Handle messages for the protocol
|
||||
@@ -34,7 +33,7 @@ async function run() {
|
||||
|
||||
// Output listen addresses to the console
|
||||
console.log('Listener ready, listening on:')
|
||||
nodeListener.multiaddrs.forEach((ma) => {
|
||||
peerListener.multiaddrs.forEach((ma) => {
|
||||
console.log(ma.toString() + '/p2p/' + idListener.toB58String())
|
||||
})
|
||||
}
|
||||
|
@@ -5,7 +5,6 @@ const Libp2p = require('../../')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const Mplex = require('libp2p-mplex')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const Bootstrap = require('libp2p-bootstrap')
|
||||
|
||||
// Find this list at: https://github.com/ipfs/js-ipfs/blob/master/src/core/runtime/config-nodejs.json
|
||||
@@ -23,13 +22,10 @@ const bootstrapers = [
|
||||
|
||||
;(async () => {
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
streamMuxer: [Mplex],
|
||||
connEncryption: [NOISE, SECIO],
|
||||
connEncryption: [SECIO],
|
||||
peerDiscovery: [Bootstrap]
|
||||
},
|
||||
config: {
|
||||
@@ -43,13 +39,15 @@ const bootstrapers = [
|
||||
}
|
||||
})
|
||||
|
||||
node.connectionManager.on('peer:connect', (connection) => {
|
||||
console.log('Connection established to:', connection.remotePeer.toB58String()) // Emitted when a peer has been found
|
||||
node.peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
node.on('peer:connect', (peer) => {
|
||||
console.log('Connection established to:', peer.id.toB58String()) // Emitted when a peer has been found
|
||||
})
|
||||
|
||||
node.on('peer:discovery', (peerId) => {
|
||||
node.on('peer:discovery', (peer) => {
|
||||
// No need to dial, autoDial is on
|
||||
console.log('Discovered:', peerId.toB58String())
|
||||
console.log('Discovered:', peer.id.toB58String())
|
||||
})
|
||||
|
||||
await node.start()
|
||||
|
@@ -5,18 +5,14 @@ const Libp2p = require('../../')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const Mplex = require('libp2p-mplex')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const MulticastDNS = require('libp2p-mdns')
|
||||
|
||||
const createNode = async () => {
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
streamMuxer: [Mplex],
|
||||
connEncryption: [NOISE, SECIO],
|
||||
connEncryption: [SECIO],
|
||||
peerDiscovery: [MulticastDNS]
|
||||
},
|
||||
config: {
|
||||
@@ -28,6 +24,7 @@ const createNode = async () => {
|
||||
}
|
||||
}
|
||||
})
|
||||
node.peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
return node
|
||||
}
|
||||
@@ -38,8 +35,8 @@ const createNode = async () => {
|
||||
createNode()
|
||||
])
|
||||
|
||||
node1.on('peer:discovery', (peerId) => console.log('Discovered:', peerId.toB58String()))
|
||||
node2.on('peer:discovery', (peerId) => console.log('Discovered:', peerId.toB58String()))
|
||||
node1.on('peer:discovery', (peer) => console.log('Discovered:', peer.id.toB58String()))
|
||||
node2.on('peer:discovery', (peer) => console.log('Discovered:', peer.id.toB58String()))
|
||||
|
||||
await Promise.all([
|
||||
node1.start(),
|
||||
|
@@ -4,7 +4,7 @@ A Peer Discovery module enables libp2p to find peers to connect to. Think of the
|
||||
|
||||
With these systems, a libp2p node can have a set of nodes it always connects to on boot (bootstrap nodes), and can discover nodes through locality (e.g. peers connected to the same LAN) or through serendipity (random walks on a DHT).
|
||||
|
||||
These mechanisms save configuration and enable a node to operate without any explicit dials; it will just work. Once new peers are discovered, their known data is stored in the node's PeerStore.
|
||||
These mechanisms save configuration and enable a node to operate without any explicit dials; it will just work.
|
||||
|
||||
## 1. Bootstrap list of Peers when booting a node
|
||||
|
||||
@@ -20,7 +20,7 @@ const node = Libp2p.create({
|
||||
modules: {
|
||||
transport: [ TCP ],
|
||||
streamMuxer: [ Mplex ],
|
||||
connEncryption: [ NOISE, SECIO ],
|
||||
connEncryption: [ SECIO ],
|
||||
peerDiscovery: [ Bootstrap ]
|
||||
},
|
||||
config: {
|
||||
@@ -55,14 +55,11 @@ Now, once we create and start the node, we can listen for events such as `peer:d
|
||||
|
||||
```JavaScript
|
||||
const node = await Libp2p.create({
|
||||
peerId,
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
}
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: [ TCP ],
|
||||
streamMuxer: [ Mplex ],
|
||||
connEncryption: [ NOISE, SECIO ],
|
||||
connEncryption: [ SECIO ],
|
||||
peerDiscovery: [ Bootstrap ]
|
||||
},
|
||||
config: {
|
||||
@@ -76,13 +73,15 @@ const node = await Libp2p.create({
|
||||
}
|
||||
})
|
||||
|
||||
node.connectionManager.on('peer:connect', (connection) => {
|
||||
console.log('Connection established to:', connection.remotePeer.toB58String()) // Emitted when a new connection has been created
|
||||
node.peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
node.on('peer:connect', (peer) => {
|
||||
console.log('Connection established to:', peer.id.toB58String()) // Emitted when a peer has been found
|
||||
})
|
||||
|
||||
node.on('peer:discovery', (peerId) => {
|
||||
// No need to dial, autoDial is on
|
||||
console.log('Discovered:', peerId.toB58String())
|
||||
// Emitted when a peer has been found
|
||||
node.on('peer:discovery', (peer) => {
|
||||
console.log('Discovered:', peer.id.toB58String())
|
||||
})
|
||||
|
||||
await node.start()
|
||||
@@ -101,15 +100,6 @@ Discovered: QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64
|
||||
Discovered: QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd
|
||||
Discovered: QmSoLMeWqB7YGVLJN3pNLQpmmEk35v6wYtsMGLzSr5QBU3
|
||||
Discovered: QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx
|
||||
Connection established to: QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ
|
||||
Connection established to: QmSoLnSGccFuZQJzRadHn95W2CrSFmZuTdDWP8HXaHca9z
|
||||
Connection established to: QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM
|
||||
Connection established to: QmSoLueR4xBeUbY9WZ9xGUUxunbKWcrNFTDAadQJmocnWm
|
||||
Connection established to: QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu
|
||||
Connection established to: QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64
|
||||
Connection established to: QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd
|
||||
Connection established to: QmSoLMeWqB7YGVLJN3pNLQpmmEk35v6wYtsMGLzSr5QBU3
|
||||
Connection established to: QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx
|
||||
```
|
||||
|
||||
## 2. MulticastDNS to find other peers in the network
|
||||
@@ -124,13 +114,10 @@ const MulticastDNS = require('libp2p-mdns')
|
||||
|
||||
const createNode = () => {
|
||||
return Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
}
|
||||
modules: {
|
||||
transport: [ TCP ],
|
||||
streamMuxer: [ Mplex ],
|
||||
connEncryption: [ NOISE, SECIO ],
|
||||
connEncryption: [ SECIO ],
|
||||
peerDiscovery: [ MulticastDNS ]
|
||||
},
|
||||
config: {
|
||||
@@ -170,5 +157,5 @@ Discovered: QmRcXXhtG8vTqwVBRonKWtV4ovDoC1Fe56WYtcrw694eiJ
|
||||
There are plenty more Peer Discovery Mechanisms out there; you can:
|
||||
|
||||
- Find one in [libp2p-webrtc-star](https://github.com/libp2p/js-libp2p-webrtc-star). Yes, a transport with discovery capabilities! This happens because WebRTC requires a rendezvous point for peers to exchange [SDP](https://tools.ietf.org/html/rfc4317) offers, which means we have one or more points that can introduce peers to each other. Think of it as MulticastDNS for the Web, since MulticastDNS only works in a LAN.
|
||||
- Any DHT will offer you a discovery capability. You can simply _random-walk_ the routing tables to find other peers to connect to. For example, [libp2p-kad-dht](https://github.com/libp2p/js-libp2p-kad-dht) can be used for peer discovery. An example of how to configure it to enable random walks can be found [here](https://github.com/libp2p/js-libp2p/blob/v0.28.4/doc/CONFIGURATION.md#customizing-dht).
|
||||
- Any DHT will offer you a discovery capability. You can simply _random-walk_ the routing tables to find other peers to connect to.
|
||||
- You can create your own Discovery service, a registry, a list, a radio beacon, you name it! A rough sketch of the general shape of such a service is shown below.
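As a rough illustration only: a discovery service is essentially an event emitter with `start`/`stop` methods that emits `peer` events. The payload shape assumed here (`{ id, multiaddrs }`) and the exact registration details depend on the js-libp2p-interfaces version you target, so treat the sketch below as an outline rather than the official contract.

```JavaScript
const { EventEmitter } = require('events')

// Hypothetical discovery service that "discovers" a hard-coded list of peers
class ListDiscovery extends EventEmitter {
  constructor (peers = []) {
    super()
    this._peers = peers
  }

  start () {
    // Announce every known peer once we are started
    for (const peer of this._peers) {
      this.emit('peer', peer) // assumed payload: { id: PeerId, multiaddrs: [Multiaddr] }
    }
  }

  stop () {}
}
```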
|
||||
|
@@ -5,8 +5,8 @@
|
||||
* Dialer Node
|
||||
*/
|
||||
|
||||
const multiaddr = require('multiaddr')
|
||||
const PeerId = require('peer-id')
|
||||
const PeerInfo = require('peer-info')
|
||||
const Node = require('./libp2p-bundle')
|
||||
const pipe = require('it-pipe')
|
||||
|
||||
@@ -17,26 +17,28 @@ async function run() {
|
||||
])
|
||||
|
||||
// Dialer
|
||||
const dialerPeerInfo = new PeerInfo(dialerId)
|
||||
dialerPeerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
const dialerNode = new Node({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
peerId: dialerId
|
||||
peerInfo: dialerPeerInfo
|
||||
})
|
||||
|
||||
// Add peer to Dial (the listener) into the PeerStore
|
||||
const listenerMultiaddr = '/ip4/127.0.0.1/tcp/10333/p2p/' + listenerId.toB58String()
|
||||
// Peer to Dial (the listener)
|
||||
const listenerPeerInfo = new PeerInfo(listenerId)
|
||||
const listenerMultiaddr = '/ip4/127.0.0.1/tcp/10333/p2p/' +
|
||||
listenerId.toB58String()
|
||||
listenerPeerInfo.multiaddrs.add(listenerMultiaddr)
|
||||
|
||||
// Start the dialer libp2p node
|
||||
await dialerNode.start()
|
||||
|
||||
console.log('Dialer ready, listening on:')
|
||||
dialerNode.multiaddrs.forEach((ma) => console.log(ma.toString() +
|
||||
dialerPeerInfo.multiaddrs.forEach((ma) => console.log(ma.toString() +
|
||||
'/p2p/' + dialerId.toB58String()))
|
||||
|
||||
// Dial the listener node
|
||||
console.log('Dialing to peer:', listenerMultiaddr)
|
||||
const { stream } = await dialerNode.dialProtocol(listenerMultiaddr, '/echo/1.0.0')
|
||||
console.log('Dialing to peer:', listenerMultiaddr.toString())
|
||||
const { stream } = await dialerNode.dialProtocol(listenerPeerInfo, '/echo/1.0.0')
|
||||
|
||||
console.log('nodeA dialed to nodeB on protocol: /echo/1.0.0')
|
||||
|
||||
|
@@ -3,9 +3,7 @@
|
||||
const TCP = require('libp2p-tcp')
|
||||
const WS = require('libp2p-websockets')
|
||||
const mplex = require('libp2p-mplex')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
|
||||
const secio = require('libp2p-secio')
|
||||
const defaultsDeep = require('@nodeutils/defaults-deep')
|
||||
const libp2p = require('../../..')
|
||||
|
||||
@@ -18,7 +16,7 @@ class Node extends libp2p {
|
||||
WS
|
||||
],
|
||||
streamMuxer: [ mplex ],
|
||||
connEncryption: [ NOISE, SECIO ]
|
||||
connEncryption: [ secio ]
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -6,6 +6,7 @@
|
||||
*/
|
||||
|
||||
const PeerId = require('peer-id')
|
||||
const PeerInfo = require('peer-info')
|
||||
const Node = require('./libp2p-bundle')
|
||||
const pipe = require('it-pipe')
|
||||
|
||||
@@ -13,16 +14,15 @@ async function run() {
|
||||
const listenerId = await PeerId.createFromJSON(require('./id-l'))
|
||||
|
||||
// Listener libp2p node
|
||||
const listenerPeerInfo = new PeerInfo(listenerId)
|
||||
listenerPeerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/10333')
|
||||
const listenerNode = new Node({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/10333']
|
||||
},
|
||||
peerId: listenerId
|
||||
peerInfo: listenerPeerInfo
|
||||
})
|
||||
|
||||
// Log a message when we receive a connection
|
||||
listenerNode.connectionManager.on('peer:connect', (connection) => {
|
||||
console.log('received dial to me from:', connection.remotePeer.toB58String())
|
||||
listenerNode.on('peer:connect', (peerInfo) => {
|
||||
console.log('received dial to me from:', peerInfo.id.toB58String())
|
||||
})
|
||||
|
||||
// Handle incoming connections for the protocol by piping from the stream
|
||||
@@ -33,7 +33,7 @@ async function run() {
|
||||
await listenerNode.start()
|
||||
|
||||
console.log('Listener ready, listening on:')
|
||||
listenerNode.multiaddrs.forEach((ma) => {
|
||||
listenerNode.peerInfo.multiaddrs.forEach((ma) => {
|
||||
console.log(ma.toString() + '/p2p/' + listenerId.toB58String())
|
||||
})
|
||||
}
|
||||
|
@@ -3,20 +3,21 @@
|
||||
const Libp2p = require('../../')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const Mplex = require('libp2p-mplex')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const PeerInfo = require('peer-info')
|
||||
|
||||
const pipe = require('it-pipe')
|
||||
|
||||
const createNode = async () => {
|
||||
const peerInfo = await PeerInfo.create()
|
||||
peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
streamMuxer: [Mplex],
|
||||
connEncryption: [NOISE, SECIO]
|
||||
connEncryption: [SECIO]
|
||||
}
|
||||
})
|
||||
|
||||
@@ -31,8 +32,6 @@ const createNode = async () => {
|
||||
createNode()
|
||||
])
|
||||
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
|
||||
node2.handle('/a-protocol', ({ stream }) => {
|
||||
pipe(
|
||||
stream,
|
||||
@@ -44,7 +43,7 @@ const createNode = async () => {
|
||||
)
|
||||
})
|
||||
|
||||
const { stream } = await node1.dialProtocol(node2.peerId, '/a-protocol')
|
||||
const { stream } = await node1.dialProtocol(node2.peerInfo, '/a-protocol')
|
||||
|
||||
await pipe(
|
||||
['This information is sent out encrypted to the other peer'],
|
||||
|
@@ -6,15 +6,16 @@ We call this usage a _connection upgrade_ where given a connection between peer
|
||||
|
||||
A byproduct of having these encrypted communications modules is that we can authenticate the peers we are dialing. You might have noticed that every time we dial a peer in libp2p space, we always include its PeerId at the end (e.g. /ip4/127.0.0.1/tcp/89765/p2p/QmWCbVw1XZ8hiYBwwshPce2yaTDYTqTaP7GCHGpry3ykWb). This PeerId is generated by hashing the Public Key of the peer. With it, we can create a crypto challenge when dialing another peer and have that peer prove it owns the Private Key that matches the Public Key we know.
|
||||
|
||||
# 1. Set up encrypted communications
|
||||
# 1. Set up encrypted communications with SECIO
|
||||
|
||||
We will build this example on top of example for [Protocol and Stream Multiplexing](../protocol-and-stream-multiplexing). You will need the modules `libp2p-secio`<sup>*</sup> and `libp2p-noise` to complete it, go ahead and `npm install libp2p-secio libp2p-noise`.
|
||||
We will build this example on top of example for [Protocol and Stream Multiplexing](../protocol-and-stream-multiplexing). You will need the module `libp2p-secio` to complete it, go ahead and `npm install libp2p-secio`.
|
||||
|
||||
To add them to your libp2p configuration, all you have to do is:
|
||||
SECIO is the crypto channel developed for IPFS; it is a TLS 1.3-like crypto channel that establishes an encrypted communication channel between two peers.
|
||||
|
||||
To add it to your libp2p configuration, all you have to do is:
|
||||
|
||||
```JavaScript
|
||||
const Libp2p = require('libp2p')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const SECIO = require('libp2p-secio')
|
||||
|
||||
const createNode = () => {
|
||||
@@ -23,7 +24,7 @@ const createNode = () => {
|
||||
transport: [ TCP ],
|
||||
streamMuxer: [ Mplex ],
|
||||
// Attach secio as the crypto channel to use
|
||||
connEncryption: [ NOISE, SECIO ]
|
||||
connEncryption: [ SECIO ]
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -31,8 +32,6 @@ const createNode = () => {
|
||||
|
||||
And that's it, from now on, all your libp2p communications are encrypted. Try running the example [1.js](./1.js) to see it working.
|
||||
|
||||
_<sup>*</sup> SECIO is the crypto channel developed for IPFS; it is a TLS 1.3-like crypto channel that establishes an encrypted communication channel between two peers._
|
||||
|
||||
If you want to learn more about how SECIO works, you can read the [great write-up done by Dominic Tarr](https://github.com/auditdrivencrypto/secure-channel/blob/master/prior-art.md#ipfss-secure-channel).
|
||||
|
||||
Important note: SECIO hasn't been audited, so we do not recommend trusting its security. We intend to move to TLS 1.3 once the specification is finalized and an implementation exists that we can use.
|
||||
|
@@ -11,9 +11,26 @@ cd ./examples/libp2p-in-the-browser
|
||||
npm install
|
||||
```
|
||||
|
||||
## Signaling Server
|
||||
|
||||
This example uses the `libp2p-webrtc-star` module, which enables libp2p browser nodes to establish direct connections to one another via a central signaling server. For this example, we are using the signaling server that ships with `libp2p-webrtc-star`.
|
||||
|
||||
You can start the server by running `npm run server`. This will start a signaling server locally on port `9090`. If you'd like to run a signaling server outside of this example, you can see instructions on how to do so in the [`libp2p-webrtc-star` README](https://github.com/libp2p/js-libp2p-webrtc-star).
|
||||
|
||||
When you run the server, you should see output that looks something like this:
|
||||
|
||||
```log
|
||||
$ npm run server
|
||||
|
||||
> libp2p-in-browser@1.0.0 server
|
||||
> star-signal
|
||||
|
||||
Listening on: http://0.0.0.0:9090
|
||||
```
|
||||
|
||||
## Running the examples
|
||||
|
||||
Start by running the Parcel server:
|
||||
Once you have started the signaling server, you can run the Parcel server.
|
||||
|
||||
```
|
||||
npm start
|
||||
@@ -36,11 +53,3 @@ This will compile the code and start a server listening on port [http://localhos
|
||||
Now, if you open a second browser tab to `http://localhost:1234`, you should discover your node from the previous tab. This is because the `libp2p-webrtc-star` transport also acts as a Peer Discovery interface. Your node will be notified of any peer that connects to the same signaling server you are connected to. Once libp2p discovers this new peer, it will attempt to establish a direct WebRTC connection.
|
||||
|
||||
**Note**: In the example we assign libp2p to `window.libp2p`, in case you would like to play around with the API directly in the browser. You can of course make changes to `index.js` and Parcel will automatically rebuild and reload the browser tabs.
|
||||
|
||||
## Going to production?
|
||||
|
||||
This example uses public `libp2p-webrtc-star` servers. These servers should be used for experimenting and demos; they **MUST** not be used in production, as there is no guarantee of availability.
|
||||
|
||||
You can see how to deploy your own signaling server in [libp2p/js-libp2p-webrtc-star/DEPLOYMENT.md](https://github.com/libp2p/js-libp2p-webrtc-star/blob/master/DEPLOYMENT.md).
|
||||
|
||||
Once you have your own server running, you should add its listen address to your libp2p node configuration, as sketched below.
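For example, a minimal sketch of what that could look like. Depending on your libp2p version the address goes in `addresses.listen` (as assumed here) or is added to `peerInfo.multiaddrs`; `signal.example.com` and the function name are placeholders, and the module list should simply match whatever your `index.js` already uses:

```js
import Libp2p from 'libp2p'
import Websockets from 'libp2p-websockets'
import WebRTCStar from 'libp2p-webrtc-star'
import Mplex from 'libp2p-mplex'
import { NOISE } from 'libp2p-noise'

async function createNodeWithOwnSignalingServer () {
  return Libp2p.create({
    addresses: {
      // Placeholder domain: point this at your own signaling server deployment
      listen: ['/dns4/signal.example.com/tcp/443/wss/p2p-webrtc-star']
    },
    modules: {
      transport: [Websockets, WebRTCStar],
      connEncryption: [NOISE],
      streamMuxer: [Mplex]
    }
  })
}
```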
|
||||
|
@@ -2,7 +2,6 @@ import 'babel-polyfill'
|
||||
import Libp2p from 'libp2p'
|
||||
import Websockets from 'libp2p-websockets'
|
||||
import WebRTCStar from 'libp2p-webrtc-star'
|
||||
import { NOISE } from 'libp2p-noise'
|
||||
import Secio from 'libp2p-secio'
|
||||
import Mplex from 'libp2p-mplex'
|
||||
import Boostrap from 'libp2p-bootstrap'
|
||||
@@ -10,18 +9,9 @@ import Boostrap from 'libp2p-bootstrap'
|
||||
document.addEventListener('DOMContentLoaded', async () => {
|
||||
// Create our libp2p node
|
||||
const libp2p = await Libp2p.create({
|
||||
addresses: {
|
||||
// Add the signaling server address, along with our PeerId to our multiaddrs list
|
||||
// libp2p will automatically attempt to dial to the signaling server so that it can
|
||||
// receive inbound connections from other peers
|
||||
listen: [
|
||||
'/dns4/wrtc-star1.par.dwebops.pub/tcp/443/wss/p2p-webrtc-star',
|
||||
'/dns4/wrtc-star2.sjc.dwebops.pub/tcp/443/wss/p2p-webrtc-star'
|
||||
]
|
||||
},
|
||||
modules: {
|
||||
transport: [Websockets, WebRTCStar],
|
||||
connEncryption: [NOISE, Secio],
|
||||
connEncryption: [Secio],
|
||||
streamMuxer: [Mplex],
|
||||
peerDiscovery: [Boostrap]
|
||||
},
|
||||
@@ -53,24 +43,30 @@ document.addEventListener('DOMContentLoaded', async () => {
|
||||
output.textContent += `${txt.trim()}\n`
|
||||
}
|
||||
|
||||
// Add the signaling server address, along with our PeerId to our multiaddrs list
|
||||
// libp2p will automatically attempt to dial to the signaling server so that it can
|
||||
// receive inbound connections from other peers
|
||||
const webrtcAddr = '/ip4/0.0.0.0/tcp/9090/wss/p2p-webrtc-star'
|
||||
libp2p.peerInfo.multiaddrs.add(webrtcAddr)
|
||||
|
||||
// Listen for new peers
|
||||
libp2p.on('peer:discovery', (peerId) => {
|
||||
log(`Found peer ${peerId.toB58String()}`)
|
||||
libp2p.on('peer:discovery', (peerInfo) => {
|
||||
log(`Found peer ${peerInfo.id.toB58String()}`)
|
||||
})
|
||||
|
||||
// Listen for new connections to peers
|
||||
libp2p.connectionManager.on('peer:connect', (connection) => {
|
||||
log(`Connected to ${connection.remotePeer.toB58String()}`)
|
||||
libp2p.on('peer:connect', (peerInfo) => {
|
||||
log(`Connected to ${peerInfo.id.toB58String()}`)
|
||||
})
|
||||
|
||||
// Listen for peers disconnecting
|
||||
libp2p.connectionManager.on('peer:disconnect', (connection) => {
|
||||
log(`Disconnected from ${connection.remotePeer.toB58String()}`)
|
||||
libp2p.on('peer:disconnect', (peerInfo) => {
|
||||
log(`Disconnected from ${peerInfo.id.toB58String()}`)
|
||||
})
|
||||
|
||||
await libp2p.start()
|
||||
status.innerText = 'libp2p started!'
|
||||
log(`libp2p id is ${libp2p.peerId.toB58String()}`)
|
||||
log(`libp2p id is ${libp2p.peerInfo.id.toB58String()}`)
|
||||
|
||||
// Export libp2p to the window so you can play with the API
|
||||
window.libp2p = libp2p
|
||||
|
@@ -8,7 +8,8 @@
|
||||
],
|
||||
"scripts": {
|
||||
"test": "echo \"Error: no test specified\" && exit 1",
|
||||
"start": "parcel index.html"
|
||||
"start": "parcel index.html",
|
||||
"server": "star-signal"
|
||||
},
|
||||
"keywords": [],
|
||||
"author": "",
|
||||
@@ -16,11 +17,10 @@
|
||||
"dependencies": {
|
||||
"@babel/preset-env": "^7.8.3",
|
||||
"libp2p": "../../",
|
||||
"libp2p-bootstrap": "^0.11",
|
||||
"libp2p-bootstrap": "^0.10.3",
|
||||
"libp2p-mplex": "^0.9.3",
|
||||
"libp2p-noise": "^1.1.0",
|
||||
"libp2p-secio": "^0.12.2",
|
||||
"libp2p-webrtc-star": "^0.18.0",
|
||||
"libp2p-webrtc-star": "^0.17.3",
|
||||
"libp2p-websockets": "^0.13.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
@@ -4,21 +4,22 @@
|
||||
const Libp2p = require('../../')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const Mplex = require('libp2p-mplex')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const PeerInfo = require('peer-info')
|
||||
const KadDHT = require('libp2p-kad-dht')
|
||||
|
||||
const delay = require('delay')
|
||||
|
||||
const createNode = async () => {
|
||||
const peerInfo = await PeerInfo.create()
|
||||
peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
streamMuxer: [Mplex],
|
||||
connEncryption: [NOISE, SECIO],
|
||||
connEncryption: [SECIO],
|
||||
dht: KadDHT
|
||||
},
|
||||
config: {
|
||||
@@ -39,19 +40,16 @@ const createNode = async () => {
|
||||
createNode()
|
||||
])
|
||||
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
node2.peerStore.addressBook.set(node3.peerId, node3.multiaddrs)
|
||||
|
||||
await Promise.all([
|
||||
node1.dial(node2.peerId),
|
||||
node2.dial(node3.peerId)
|
||||
node1.dial(node2.peerInfo),
|
||||
node2.dial(node3.peerInfo)
|
||||
])
|
||||
|
||||
// The DHT routing tables need a moment to populate
|
||||
await delay(100)
|
||||
|
||||
const peer = await node1.peerRouting.findPeer(node3.peerId)
|
||||
const peer = await node1.peerRouting.findPeer(node3.peerInfo.id)
|
||||
|
||||
console.log('Found it, multiaddrs are:')
|
||||
peer.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${peer.id.toB58String()}`))
|
||||
peer.multiaddrs.forEach((ma) => console.log(ma.toString()))
|
||||
})();
|
||||
|
@@ -5,7 +5,7 @@ const Libp2p = require('../../')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const Mplex = require('libp2p-mplex')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const PeerInfo = require('peer-info')
|
||||
const CID = require('cids')
|
||||
const KadDHT = require('libp2p-kad-dht')
|
||||
|
||||
@@ -13,14 +13,15 @@ const all = require('it-all')
|
||||
const delay = require('delay')
|
||||
|
||||
const createNode = async () => {
|
||||
const peerInfo = await PeerInfo.create()
|
||||
peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
streamMuxer: [Mplex],
|
||||
connEncryption: [NOISE, SECIO],
|
||||
connEncryption: [SECIO],
|
||||
dht: KadDHT
|
||||
},
|
||||
config: {
|
||||
@@ -41,21 +42,15 @@ const createNode = async () => {
|
||||
createNode()
|
||||
])
|
||||
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
node2.peerStore.addressBook.set(node3.peerId, node3.multiaddrs)
|
||||
|
||||
await Promise.all([
|
||||
node1.dial(node2.peerId),
|
||||
node2.dial(node3.peerId)
|
||||
node1.dial(node2.peerInfo),
|
||||
node2.dial(node3.peerInfo)
|
||||
])
|
||||
|
||||
// Wait for onConnect handlers in the DHT
|
||||
await delay(100)
|
||||
|
||||
const cid = new CID('QmTp9VkYvnHyrqKQuFPiuZkiX9gPcqj6x5LJ1rmWuSySnL')
|
||||
await node1.contentRouting.provide(cid)
|
||||
|
||||
console.log('Node %s is providing %s', node1.peerId.toB58String(), cid.toBaseEncodedString())
|
||||
console.log('Node %s is providing %s', node1.peerInfo.id.toB58String(), cid.toBaseEncodedString())
|
||||
|
||||
// wait for propagation
|
||||
await delay(300)
|
||||
|
@@ -17,13 +17,10 @@ const Libp2p = require('libp2p')
|
||||
const KadDHT = require('libp2p-kad-dht')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
modules: {
|
||||
transport: [ TCP ],
|
||||
streamMuxer: [ Mplex ],
|
||||
connEncryption: [ NOISE, SECIO ],
|
||||
connEncryption: [ SECIO ],
|
||||
// we add the DHT module that will enable Peer and Content Routing
|
||||
dht: KadDHT
|
||||
},
|
||||
@@ -43,21 +40,18 @@ const node1 = nodes[0]
|
||||
const node2 = nodes[1]
|
||||
const node3 = nodes[2]
|
||||
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
node2.peerStore.addressBook.set(node3.peerId, node3.multiaddrs)
|
||||
|
||||
await Promise.all([
|
||||
node1.dial(node2.peerId),
|
||||
node2.dial(node3.peerId)
|
||||
node1.dial(node2.peerInfo),
|
||||
node2.dial(node3.peerInfo)
|
||||
])
|
||||
|
||||
// Setting up the connections might take some time
|
||||
await delay(100)
|
||||
|
||||
const peer = await node1.peerRouting.findPeer(node3.peerId)
|
||||
const peer = await node1.peerRouting.findPeer(node3.peerInfo.id)
|
||||
|
||||
console.log('Found it, multiaddrs are:')
|
||||
peer.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${peer.id.toB58String()}`))
|
||||
peer.multiaddrs.forEach((ma) => console.log(ma.toString()))
|
||||
```
|
||||
|
||||
You should see the output being something like:
|
||||
@@ -65,8 +59,8 @@ You should see the output being something like:
|
||||
```Bash
|
||||
> node 1.js
|
||||
Found it, multiaddrs are:
|
||||
/ip4/127.0.0.1/tcp/63617
|
||||
/ip4/192.168.86.41/tcp/63617
|
||||
/ip4/127.0.0.1/tcp/63617/p2p/QmWrFXvZr9S4iDqycyoyc2zDdrT1jg9wpdenUTdd1LTar6
|
||||
/ip4/192.168.86.41/tcp/63617/p2p/QmWrFXvZr9S4iDqycyoyc2zDdrT1jg9wpdenUTdd1LTar6
|
||||
```
|
||||
|
||||
You have successfully used Peer Routing to find a peer that you were not directly connected to. Now all you have to do is dial the multiaddrs you discovered.
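For instance, a minimal sketch using the `node1` and `peer` variables from above and the AddressBook API shown elsewhere in these examples:

```JavaScript
// Make sure node1 knows how to reach the peer, then dial it by PeerId
node1.peerStore.addressBook.add(peer.id, peer.multiaddrs)
const connection = await node1.dial(peer.id)

console.log('Dialed peer:', connection.remotePeer.toB58String())
```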
|
||||
@@ -81,7 +75,7 @@ Instead of calling `peerRouting.findPeer`, we will use `contentRouting.provide`
|
||||
|
||||
```JavaScript
|
||||
await node1.contentRouting.provide(cid)
|
||||
console.log('Node %s is providing %s', node1.peerId.toB58String(), cid.toBaseEncodedString())
|
||||
console.log('Node %s is providing %s', node1.peerInfo.id.toB58String(), cid.toBaseEncodedString())
|
||||
|
||||
const provs = await all(node3.contentRouting.findProviders(cid, { timeout: 5000 }))
|
||||
|
||||
|
@@ -29,9 +29,7 @@ generate(otherSwarmKey)
|
||||
|
||||
console.log('nodes started...')
|
||||
|
||||
// Add node 2's data to node1's PeerStore
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
await node1.dial(node2.peerId)
|
||||
await node1.dial(node2.peerInfo)
|
||||
|
||||
node2.handle('/private', ({ stream }) => {
|
||||
pipe(
|
||||
@@ -44,7 +42,7 @@ generate(otherSwarmKey)
|
||||
)
|
||||
})
|
||||
|
||||
const { stream } = await node1.dialProtocol(node2.peerId, '/private')
|
||||
const { stream } = await node1.dialProtocol(node2.peerInfo, '/private')
|
||||
|
||||
await pipe(
|
||||
['This message is sent on a private network'],
|
||||
|
@@ -4,35 +4,32 @@ const Libp2p = require('libp2p')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const MPLEX = require('libp2p-mplex')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const Protector = require('libp2p/src/pnet')
|
||||
|
||||
/**
|
||||
* privateLibp2pNode returns a libp2p node function that will use the swarm
|
||||
* key with the given `swarmKey` to create the Protector
|
||||
* key at the given `swarmKeyPath` to create the Protector
|
||||
*
|
||||
* @param {Buffer} swarmKey
|
||||
* @returns {Promise<libp2p>} Returns a libp2pNode function for use in IPFS creation
|
||||
*/
|
||||
const privateLibp2pNode = async (swarmKey) => {
|
||||
const privateLibp2pNode = async (swarmKeyPath) => {
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
modules: {
|
||||
transport: [TCP], // We're only using the TCP transport for this example
|
||||
streamMuxer: [MPLEX], // We're only using mplex muxing
|
||||
// Let's make sure to use identifying crypto in our pnet since the protector doesn't
|
||||
// care about node identity, and only the presence of private keys
|
||||
connEncryption: [NOISE, SECIO],
|
||||
connEncryption: [SECIO],
|
||||
// Leave peer discovery empty, we don't want to find peers. We could omit the property, but it's
|
||||
// being left in for explicit readability.
|
||||
// We should explicitly dial pnet peers, or use a custom discovery service for finding nodes in our pnet
|
||||
peerDiscovery: [],
|
||||
connProtector: new Protector(swarmKey)
|
||||
connProtector: new Protector(swarmKeyPath)
|
||||
}
|
||||
})
|
||||
|
||||
node.peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
return node
|
||||
}
|
||||
|
||||
|
@@ -13,7 +13,6 @@
|
||||
"dependencies": {
|
||||
"libp2p": "../..",
|
||||
"libp2p-mplex": "^0.9.3",
|
||||
"libp2p-noise": "^1.1.0",
|
||||
"libp2p-secio": "^0.12.1",
|
||||
"libp2p-tcp": "^0.14.2"
|
||||
}
|
||||
|
@@ -3,20 +3,21 @@
|
||||
const Libp2p = require('../../')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const MPLEX = require('libp2p-mplex')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const PeerInfo = require('peer-info')
|
||||
|
||||
const pipe = require('it-pipe')
|
||||
|
||||
const createNode = async () => {
|
||||
const peerInfo = await PeerInfo.create()
|
||||
peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
streamMuxer: [MPLEX],
|
||||
connEncryption: [NOISE, SECIO]
|
||||
connEncryption: [SECIO]
|
||||
}
|
||||
})
|
||||
|
||||
@@ -31,9 +32,6 @@ const createNode = async () => {
|
||||
createNode()
|
||||
])
|
||||
|
||||
// Add node 2's data to the PeerStore
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
|
||||
// exact matching
|
||||
node2.handle('/your-protocol', ({ stream }) => {
|
||||
pipe(
|
||||
@@ -64,14 +62,14 @@ const createNode = async () => {
|
||||
})
|
||||
*/
|
||||
|
||||
const { stream } = await node1.dialProtocol(node2.peerId, ['/your-protocol'])
|
||||
const { stream } = await node1.dialProtocol(node2.peerInfo, ['/your-protocol'])
|
||||
await pipe(
|
||||
['my own protocol, wow!'],
|
||||
stream
|
||||
)
|
||||
|
||||
/*
|
||||
const { stream } = node1.dialProtocol(node2.peerId, ['/another-protocol/1.0.0'])
|
||||
const { stream } = node1.dialProtocol(node2.peerInfo, ['/another-protocol/1.0.0'])
|
||||
|
||||
await pipe(
|
||||
['my own protocol, wow!'],
|
||||
|
@@ -3,20 +3,21 @@
|
||||
const Libp2p = require('../../')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const MPLEX = require('libp2p-mplex')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const PeerInfo = require('peer-info')
|
||||
|
||||
const pipe = require('it-pipe')
|
||||
|
||||
const createNode = async () => {
|
||||
const peerInfo = await PeerInfo.create()
|
||||
peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
streamMuxer: [MPLEX],
|
||||
connEncryption: [NOISE, SECIO]
|
||||
connEncryption: [SECIO]
|
||||
}
|
||||
})
|
||||
|
||||
@@ -31,9 +32,6 @@ const createNode = async () => {
|
||||
createNode()
|
||||
])
|
||||
|
||||
// Add node 2's data to the PeerStore
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
|
||||
node2.handle(['/a', '/b'], ({ protocol, stream }) => {
|
||||
pipe(
|
||||
stream,
|
||||
@@ -45,19 +43,19 @@ const createNode = async () => {
|
||||
)
|
||||
})
|
||||
|
||||
const { stream: stream1 } = await node1.dialProtocol(node2.peerId, ['/a'])
|
||||
const { stream: stream1 } = await node1.dialProtocol(node2.peerInfo, ['/a'])
|
||||
await pipe(
|
||||
['protocol (a)'],
|
||||
stream1
|
||||
)
|
||||
|
||||
const { stream: stream2 } = await node1.dialProtocol(node2.peerId, ['/b'])
|
||||
const { stream: stream2 } = await node1.dialProtocol(node2.peerInfo, ['/b'])
|
||||
await pipe(
|
||||
['protocol (b)'],
|
||||
stream2
|
||||
)
|
||||
|
||||
const { stream: stream3 } = await node1.dialProtocol(node2.peerId, ['/b'])
|
||||
const { stream: stream3 } = await node1.dialProtocol(node2.peerInfo, ['/b'])
|
||||
await pipe(
|
||||
['another stream on protocol (b)'],
|
||||
stream3
|
||||
|
@@ -4,20 +4,21 @@
|
||||
const Libp2p = require('../../')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const MPLEX = require('libp2p-mplex')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const PeerInfo = require('peer-info')
|
||||
|
||||
const pipe = require('it-pipe')
|
||||
|
||||
const createNode = async () => {
|
||||
const peerInfo = await PeerInfo.create()
|
||||
peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
streamMuxer: [MPLEX],
|
||||
connEncryption: [NOISE, SECIO]
|
||||
connEncryption: [SECIO]
|
||||
}
|
||||
})
|
||||
|
||||
@@ -31,9 +32,6 @@ const createNode = async () => {
|
||||
createNode(),
|
||||
createNode()
|
||||
])
|
||||
|
||||
// Add node 2's data to the PeerStore
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
|
||||
node1.handle('/node-1', ({ stream }) => {
|
||||
pipe(
|
||||
@@ -57,13 +55,13 @@ const createNode = async () => {
|
||||
)
|
||||
})
|
||||
|
||||
const { stream: stream1 } = await node1.dialProtocol(node2.peerId, ['/node-2'])
|
||||
const { stream: stream1 } = await node1.dialProtocol(node2.peerInfo, ['/node-2'])
|
||||
await pipe(
|
||||
['from 1 to 2'],
|
||||
stream1
|
||||
)
|
||||
|
||||
const { stream: stream2 } = await node2.dialProtocol(node1.peerId, ['/node-1'])
|
||||
const { stream: stream2 } = await node2.dialProtocol(node1.peerInfo, ['/node-1'])
|
||||
await pipe(
|
||||
['from 2 to 1'],
|
||||
stream2
|
||||
|
@@ -6,7 +6,7 @@ The feature of agreeing on a protocol over an established connection is what we
|
||||
|
||||
# 1. Handle multiple protocols
|
||||
|
||||
Let's see _protocol multiplexing_ in action! You will need the following modules for this example: `libp2p`, `libp2p-tcp`, `peer-id`, `it-pipe`, `it-buffer` and `streaming-iterables`. This example reuses the base left by the [Transports](../transports) example. You can see the complete solution at [1.js](./1.js).
|
||||
Let's see _protocol multiplexing_ in action! You will need the following modules for this example: `libp2p`, `libp2p-tcp`, `peer-info`, `it-pipe`, `it-buffer` and `streaming-iterables`. This example reuses the base left by the [Transports](../transports) example. You can see the complete solution at [1.js](./1.js).
|
||||
|
||||
After creating the nodes, we need to tell libp2p which protocols to handle.
|
||||
|
||||
@@ -19,9 +19,6 @@ const { toBuffer } = require('it-buffer')
|
||||
const node1 = nodes[0]
|
||||
const node2 = nodes[1]
|
||||
|
||||
// Add node 2's data to the PeerStore
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
|
||||
// Here we are telling libp2p that if someone dials this node to talk with the `/your-protocol`
|
||||
// multicodec, the protocol identifier, please call this handler and give it the stream
|
||||
// so that incoming data can be handled
|
||||
@@ -40,7 +37,7 @@ node2.handle('/your-protocol', ({ stream }) => {
|
||||
After the protocol is _handled_, now we can dial to it.
|
||||
|
||||
```JavaScript
|
||||
const { stream } = await node1.dialProtocol(node2.peerId, ['/your-protocol'])
|
||||
const { stream } = await node1.dialProtocol(node2.peerInfo, ['/your-protocol'])
|
||||
|
||||
await pipe(
|
||||
['my own protocol, wow!'],
|
||||
@@ -62,7 +59,7 @@ node2.handle('/another-protocol/1.0.1', ({ stream }) => {
|
||||
)
|
||||
})
|
||||
// ...
|
||||
const { stream } = await node1.dialProtocol(node2.peerId, ['/another-protocol/1.0.0'])
|
||||
const { stream } = await node1.dialProtocol(node2.peerInfo, ['/another-protocol/1.0.0'])
|
||||
|
||||
await pipe(
|
||||
['my own protocol, wow!'],
|
||||
@@ -131,19 +128,19 @@ node2.handle(['/a', '/b'], ({ protocol, stream }) => {
|
||||
)
|
||||
})
|
||||
|
||||
const { stream } = await node1.dialProtocol(node2.peerId, ['/a'])
|
||||
const { stream } = await node1.dialProtocol(node2.peerInfo, ['/a'])
|
||||
await pipe(
|
||||
['protocol (a)'],
|
||||
stream
|
||||
)
|
||||
|
||||
const { stream: stream2 } = await node1.dialProtocol(node2.peerId, ['/b'])
|
||||
const { stream: stream2 } = await node1.dialProtocol(node2.peerInfo, ['/b'])
|
||||
await pipe(
|
||||
['protocol (b)'],
|
||||
stream2
|
||||
)
|
||||
|
||||
const { stream: stream3 } = await node1.dialProtocol(node2.peerId, ['/b'])
|
||||
const { stream: stream3 } = await node1.dialProtocol(node2.peerInfo, ['/b'])
|
||||
await pipe(
|
||||
['another stream on protocol (b)'],
|
||||
stream3
|
||||
|
@@ -5,19 +5,20 @@ const { Buffer } = require('buffer')
|
||||
const Libp2p = require('../../')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const Mplex = require('libp2p-mplex')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const PeerInfo = require('peer-info')
|
||||
const Gossipsub = require('libp2p-gossipsub')
|
||||
|
||||
const createNode = async () => {
|
||||
const peerInfo = await PeerInfo.create()
|
||||
peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
streamMuxer: [Mplex],
|
||||
connEncryption: [NOISE, SECIO],
|
||||
connEncryption: [SECIO],
|
||||
pubsub: Gossipsub
|
||||
}
|
||||
})
|
||||
@@ -34,9 +35,7 @@ const createNode = async () => {
|
||||
createNode()
|
||||
])
|
||||
|
||||
// Add node 2's data to the PeerStore
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
await node1.dial(node2.peerId)
|
||||
await node1.dial(node2.peerInfo)
|
||||
|
||||
await node1.pubsub.subscribe(topic, (msg) => {
|
||||
console.log(`node1 received: ${msg.data.toString()}`)
|
||||
|
@@ -1,6 +1,6 @@
|
||||
# Publish Subscribe
|
||||
|
||||
Publish Subscribe is also included in the stack. Currently, we have two PubSub implementations available: [libp2p-floodsub](https://github.com/libp2p/js-libp2p-floodsub) and [libp2p-gossipsub](https://github.com/ChainSafe/js-libp2p-gossipsub), with many more being researched at [research-pubsub](https://github.com/libp2p/research-pubsub).
|
||||
Publish Subscribe is also included in the stack. Currently, we have two PubSub implementations available: [libp2p-floodsub](https://github.com/libp2p/js-libp2p-floodsub) and [libp2p-gossipsub](https://github.com/ChainSafe/gossipsub-js), with many more being researched at [research-pubsub](https://github.com/libp2p/research-pubsub).
|
||||
|
||||
We've seen many interesting use cases appear with this, here are some highlights:
|
||||
|
||||
@@ -21,13 +21,10 @@ const Libp2p = require('libp2p')
|
||||
const Gossipsub = require('libp2p-gossipsub')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
modules: {
|
||||
transport: [ TCP ],
|
||||
streamMuxer: [ Mplex ],
|
||||
connEncryption: [ NOISE, SECIO ],
|
||||
connEncryption: [ SECIO ],
|
||||
// we add the Pubsub module we want
|
||||
pubsub: Gossipsub
|
||||
}
|
||||
@@ -42,10 +39,7 @@ const topic = 'news'
|
||||
const node1 = nodes[0]
|
||||
const node2 = nodes[1]
|
||||
|
||||
// Add node 2's data to the PeerStore
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
|
||||
await node1.dial(node2.peerId)
|
||||
await node1.dial(node2.peerInfo)
|
||||
|
||||
await node1.pubsub.subscribe(topic, (msg) => {
|
||||
console.log(`node1 received: ${msg.data.toString()}`)
|
||||
|
@@ -3,19 +3,19 @@
|
||||
|
||||
const Libp2p = require('../..')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const PeerInfo = require('peer-info')
|
||||
|
||||
const createNode = async (peerInfo) => {
|
||||
// To signal the addresses we want to be available, we use
|
||||
// the multiaddr format, a self describable address
|
||||
peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
const createNode = async () => {
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
// To signal the addresses we want to be available, we use
|
||||
// the multiaddr format, a self describable address
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
connEncryption: [NOISE, SECIO]
|
||||
connEncryption: [SECIO]
|
||||
}
|
||||
})
|
||||
|
||||
@@ -24,9 +24,10 @@ const createNode = async () => {
|
||||
}
|
||||
|
||||
;(async () => {
|
||||
const node = await createNode()
|
||||
const peerInfo = await PeerInfo.create()
|
||||
const node = await createNode(peerInfo)
|
||||
|
||||
console.log('node has started (true/false):', node.isStarted())
|
||||
console.log('listening on:')
|
||||
node.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${node.peerId.toB58String()}`))
|
||||
node.peerInfo.multiaddrs.forEach((ma) => console.log(ma.toString()))
|
||||
})();
|
||||
|
@@ -3,23 +3,23 @@
|
||||
|
||||
const Libp2p = require('../..')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const MPLEX = require('libp2p-mplex')
|
||||
const PeerInfo = require('peer-info')
|
||||
|
||||
const pipe = require('it-pipe')
|
||||
const concat = require('it-concat')
|
||||
|
||||
const createNode = async () => {
|
||||
const createNode = async (peerInfo) => {
|
||||
// To signal the addresses we want to be available, we use
|
||||
// the multiaddr format, a self describable address
|
||||
peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
// To signal the addresses we want to be available, we use
|
||||
// the multiaddr format, a self describable address
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
connEncryption: [NOISE, SECIO],
|
||||
connEncryption: [SECIO],
|
||||
streamMuxer: [MPLEX]
|
||||
}
|
||||
})
|
||||
@@ -30,13 +30,17 @@ const createNode = async () => {
|
||||
|
||||
function printAddrs (node, number) {
|
||||
console.log('node %s is listening on:', number)
|
||||
node.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${node.peerId.toB58String()}`))
|
||||
node.peerInfo.multiaddrs.forEach((ma) => console.log(ma.toString()))
|
||||
}
|
||||
|
||||
;(async () => {
|
||||
const [peerInfo1, peerInfo2] = await Promise.all([
|
||||
PeerInfo.create(),
|
||||
PeerInfo.create()
|
||||
])
|
||||
const [node1, node2] = await Promise.all([
|
||||
createNode(),
|
||||
createNode()
|
||||
createNode(peerInfo1),
|
||||
createNode(peerInfo2)
|
||||
])
|
||||
|
||||
printAddrs(node1, '1')
|
||||
@@ -50,8 +54,7 @@ function printAddrs (node, number) {
|
||||
console.log(result.toString())
|
||||
})
|
||||
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
const { stream } = await node1.dialProtocol(node2.peerId, '/print')
|
||||
const { stream } = await node1.dialProtocol(node2.peerInfo, '/print')
|
||||
|
||||
await pipe(
|
||||
['Hello', ' ', 'p2p', ' ', 'world', '!'],
|
||||
|
@@ -4,24 +4,24 @@
|
||||
const Libp2p = require('../..')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const WebSockets = require('libp2p-websockets')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const MPLEX = require('libp2p-mplex')
|
||||
const PeerInfo = require('peer-info')
|
||||
|
||||
const pipe = require('it-pipe')
|
||||
|
||||
const createNode = async (transports, addresses = []) => {
|
||||
if (!Array.isArray(addresses)) {
|
||||
addresses = [addresses]
|
||||
const createNode = async (peerInfo, transports, multiaddrs = []) => {
|
||||
if (!Array.isArray(multiaddrs)) {
|
||||
multiaddrs = [multiaddrs]
|
||||
}
|
||||
|
||||
multiaddrs.forEach((addr) => peerInfo.multiaddrs.add(addr))
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: addresses.map((a) => a)
|
||||
},
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: transports,
|
||||
connEncryption: [NOISE, SECIO],
|
||||
connEncryption: [SECIO],
|
||||
streamMuxer: [MPLEX]
|
||||
}
|
||||
})
|
||||
@@ -32,7 +32,7 @@ const createNode = async (transports, addresses = []) => {
|
||||
|
||||
function printAddrs(node, number) {
|
||||
console.log('node %s is listening on:', number)
|
||||
node.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${node.peerId.toB58String()}`))
|
||||
node.peerInfo.multiaddrs.forEach((ma) => console.log(ma.toString()))
|
||||
}
|
||||
|
||||
function print ({ stream }) {
|
||||
@@ -47,10 +47,15 @@ function print ({ stream }) {
|
||||
}
|
||||
|
||||
;(async () => {
|
||||
const [peerInfo1, peerInfo2, peerInfo3] = await Promise.all([
|
||||
PeerInfo.create(),
|
||||
PeerInfo.create(),
|
||||
PeerInfo.create()
|
||||
])
|
||||
const [node1, node2, node3] = await Promise.all([
|
||||
createNode([TCP], '/ip4/0.0.0.0/tcp/0'),
|
||||
createNode([TCP, WebSockets], ['/ip4/0.0.0.0/tcp/0', '/ip4/127.0.0.1/tcp/10000/ws']),
|
||||
createNode([WebSockets], '/ip4/127.0.0.1/tcp/20000/ws')
|
||||
createNode(peerInfo1, [TCP], '/ip4/0.0.0.0/tcp/0'),
|
||||
createNode(peerInfo2, [TCP, WebSockets], ['/ip4/0.0.0.0/tcp/0', '/ip4/127.0.0.1/tcp/10000/ws']),
|
||||
createNode(peerInfo3, [WebSockets], '/ip4/127.0.0.1/tcp/20000/ws')
|
||||
])
|
||||
|
||||
printAddrs(node1, '1')
|
||||
@@ -61,19 +66,15 @@ function print ({ stream }) {
|
||||
node2.handle('/print', print)
|
||||
node3.handle('/print', print)
|
||||
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
node2.peerStore.addressBook.set(node3.peerId, node3.multiaddrs)
|
||||
node3.peerStore.addressBook.set(node1.peerId, node1.multiaddrs)
|
||||
|
||||
// node 1 (TCP) dials to node 2 (TCP+WebSockets)
|
||||
const { stream } = await node1.dialProtocol(node2.peerId, '/print')
|
||||
const { stream } = await node1.dialProtocol(node2.peerInfo, '/print')
|
||||
await pipe(
|
||||
['node 1 dialed to node 2 successfully'],
|
||||
stream
|
||||
)
|
||||
|
||||
// node 2 (TCP+WebSockets) dials to node 2 (WebSockets)
|
||||
const { stream: stream2 } = await node2.dialProtocol(node3.peerId, '/print')
|
||||
const { stream: stream2 } = await node2.dialProtocol(node3.peerInfo, '/print')
|
||||
await pipe(
|
||||
['node 2 dialed to node 3 successfully'],
|
||||
stream2
|
||||
@@ -81,7 +82,7 @@ function print ({ stream }) {
|
||||
|
||||
// node 3 (listening WebSockets) can dial node 1 (TCP)
|
||||
try {
|
||||
await node3.dialProtocol(node1.peerId, '/print')
|
||||
await node3.dialProtocol(node1.peerInfo, '/print')
|
||||
} catch (err) {
|
||||
console.log('node 3 failed to dial to node 1 with:', err.message)
|
||||
}
|
||||
|
@@ -25,19 +25,18 @@ First thing is to create our own libp2p node! Insert:
|
||||
|
||||
const Libp2p = require('libp2p')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const { NOISE } = require('libp2p-noise')
|
||||
const SECIO = require('libp2p-secio')
|
||||
|
||||
const createNode = async () => {
|
||||
const createNode = async (peerInfo) => {
|
||||
// To signal the addresses we want to be available, we use
|
||||
// the multiaddr format, a self describable address
|
||||
peerInfo.multiaddrs.add('/ip4/0.0.0.0/tcp/0')
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
// To signal the addresses we want to be available, we use
|
||||
// the multiaddr format, a self describable address
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: [ TCP ],
|
||||
connEncryption: [ NOISE, SECIO ]
|
||||
connEncryption: [ SECIO ]
|
||||
}
|
||||
})
|
||||
|
||||
@@ -49,7 +48,8 @@ const createNode = async () => {
|
||||
Now that we have a function to create our own libp2p node, let's create a node with it.
|
||||
|
||||
```JavaScript
|
||||
const node = await createNode()
|
||||
const peerInfo = await PeerInfo.create()
|
||||
const node = await createNode(peerInfo)
|
||||
|
||||
// At this point the node has started
|
||||
console.log('node has started (true/false):', node.isStarted())
|
||||
@@ -59,7 +59,7 @@ console.log('node has started (true/false):', node.isStarted())
|
||||
// 0, which means "listen in any network interface and pick
|
||||
// a port for me
|
||||
console.log('listening on:')
|
||||
node.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${node.peerId.toB58String()}`))
|
||||
node.peerInfo.multiaddrs.forEach((ma) => console.log(ma.toString()))
|
||||
```
|
||||
|
||||
Running this should result in something like:
|
||||
@@ -96,7 +96,7 @@ We are going to reuse the `createNode` function from step 1, but this time to ma
|
||||
```JavaScript
|
||||
function printAddrs (node, number) {
|
||||
console.log('node %s is listening on:', number)
|
||||
node.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${node.peerId.toB58String()}`))
|
||||
node.peerInfo.multiaddrs.forEach((ma) => console.log(ma.toString()))
|
||||
}
|
||||
```
|
||||
|
||||
@@ -104,6 +104,10 @@ Then,
|
||||
|
||||
```js
|
||||
;(async () => {
|
||||
const [peerInfo1, peerInfo2] = await Promise.all([
|
||||
PeerInfo.create(),
|
||||
PeerInfo.create()
|
||||
])
|
||||
const [node1, node2] = await Promise.all([
|
||||
createNode(),
|
||||
createNode()
|
||||
@@ -123,8 +127,7 @@ Then,
|
||||
)
|
||||
})
|
||||
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
const { stream } = await node1.dialProtocol(node2.peerId, '/print')
|
||||
const { stream } = await node1.dialProtocol(node2.peerInfo, '/print')
|
||||
|
||||
await pipe(
|
||||
['Hello', ' ', 'p2p', ' ', 'world', '!'],
|
||||
@@ -163,19 +166,18 @@ We want to create 3 nodes, one with TCP, one with TCP+WebSockets and one with ju
|
||||
```JavaScript
|
||||
// ...
|
||||
|
||||
const createNode = async (transports, multiaddrs = []) => {
|
||||
const createNode = async (peerInfo, transports, multiaddrs = []) => {
|
||||
if (!Array.isArray(multiaddrs)) {
|
||||
multiaddrs = [multiaddrs]
|
||||
}
|
||||
|
||||
multiaddrs.forEach((addr) => peerInfo.multiaddrs.add(addr))
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: multiaddrs.map((a) => multiaddr(a))
|
||||
},
|
||||
peerInfo,
|
||||
modules: {
|
||||
transport: transports,
|
||||
connEncryption: [SECIO],
|
||||
streamMuxer: [MPLEX]
|
||||
connEncryption: [ SECIO ]
|
||||
}
|
||||
})
|
||||
|
||||
@@ -192,10 +194,15 @@ Let's update our flow to create nodes and see how they behave when dialing to ea
|
||||
const WebSockets = require('libp2p-websockets')
|
||||
const TCP = require('libp2p-tcp')
|
||||
|
||||
const [peerInfo1, peerInfo2, peerInfo3] = await Promise.all([
|
||||
PeerInfo.create(),
|
||||
PeerInfo.create(),
|
||||
PeerInfo.create()
|
||||
])
|
||||
const [node1, node2, node3] = await Promise.all([
|
||||
createNode([TCP], '/ip4/0.0.0.0/tcp/0'),
|
||||
createNode([TCP, WebSockets], ['/ip4/0.0.0.0/tcp/0', '/ip4/127.0.0.1/tcp/10000/ws']),
|
||||
createNode([WebSockets], '/ip4/127.0.0.1/tcp/20000/ws')
|
||||
createNode(peerInfo1, [TCP], '/ip4/0.0.0.0/tcp/0'),
|
||||
createNode(peerInfo2, [TCP, WebSockets], ['/ip4/0.0.0.0/tcp/0', '/ip4/127.0.0.1/tcp/10000/ws']),
|
||||
createNode(peerInfo3, [WebSockets], '/ip4/127.0.0.1/tcp/20000/ws')
|
||||
])
|
||||
|
||||
printAddrs(node1, '1')
|
||||
@@ -206,19 +213,15 @@ node1.handle('/print', print)
|
||||
node2.handle('/print', print)
|
||||
node3.handle('/print', print)
|
||||
|
||||
node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
|
||||
node2.peerStore.addressBook.set(node3.peerId, node3.multiaddrs)
|
||||
node3.peerStore.addressBook.set(node1.peerId, node1.multiaddrs)
|
||||
|
||||
// node 1 (TCP) dials to node 2 (TCP+WebSockets)
|
||||
const { stream } = await node1.dialProtocol(node2.peerId, '/print')
|
||||
const { stream } = await node1.dialProtocol(node2.peerInfo, '/print')
|
||||
await pipe(
|
||||
['node 1 dialed to node 2 successfully'],
|
||||
stream
|
||||
)
|
||||
|
||||
// node 2 (TCP+WebSockets) dials to node 2 (WebSockets)
|
||||
const { stream: stream2 } = await node2.dialProtocol(node3.peerId, '/print')
|
||||
const { stream: stream2 } = await node2.dialProtocol(node3.peerInfo, '/print')
|
||||
await pipe(
|
||||
['node 2 dialed to node 3 successfully'],
|
||||
stream2
|
||||
@@ -226,7 +229,7 @@ await pipe(
|
||||
|
||||
// node 3 (WebSockets) attempts to dial to node 1 (TCP)
|
||||
try {
|
||||
await node3.dialProtocol(node1.peerId, '/print')
|
||||
await node3.dialProtocol(node1.peerInfo, '/print')
|
||||
} catch (err) {
|
||||
console.log('node 3 failed to dial to node 1 with:', err.message)
|
||||
}
|
||||
|
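Putting the updated API from this tutorial together, here is a minimal end-to-end sketch (not part of the example files) that creates two TCP nodes, shares addresses through the peer store, and dials by `PeerId`. NOISE and MPLEX are assumed as the encryption and stream-muxer modules:

```js
'use strict'
const Libp2p = require('libp2p')
const TCP = require('libp2p-tcp')
const MPLEX = require('libp2p-mplex')
const { NOISE } = require('libp2p-noise')
const pipe = require('it-pipe')

const createNode = () => Libp2p.create({
  addresses: { listen: ['/ip4/127.0.0.1/tcp/0'] },
  modules: { transport: [TCP], streamMuxer: [MPLEX], connEncryption: [NOISE] }
})

;(async () => {
  const [node1, node2] = await Promise.all([createNode(), createNode()])
  await Promise.all([node1.start(), node2.start()])

  // node 2 logs whatever arrives on the /print protocol
  node2.handle('/print', ({ stream }) => {
    pipe(stream, async (source) => {
      for await (const msg of source) console.log(msg.toString())
    })
  })

  // node 1 learns node 2's addresses, then dials it by PeerId
  node1.peerStore.addressBook.set(node2.peerId, node2.multiaddrs)
  const { stream } = await node1.dialProtocol(node2.peerId, '/print')
  await pipe(['Hello', ' ', 'p2p', ' ', 'world', '!'], stream)

  await Promise.all([node1.stop(), node2.stop()])
})()
```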
@@ -33,7 +33,7 @@
["libp2p/js-libp2p-kad-dht", "libp2p-kad-dht"],
["libp2p/js-libp2p-mdns", "libp2p-mdns"],
["libp2p/js-libp2p-webrtc-star", "libp2p-webrtc-star"],
["ChainSafe/discv5", "@chainsafe/discv5"],
["ChainSafe/discv5", "discv5"],

"content routing",
["libp2p/js-libp2p-delegated-content-routing", "libp2p-delegated-content-routing"],
@@ -49,11 +49,12 @@

"data types",
["libp2p/js-peer-id", "peer-id"],
["libp2p/js-peer-info", "peer-info"],

"pubsub",
["libp2p/js-libp2p-pubsub", "libp2p-pubsub"],
["libp2p/js-libp2p-floodsub", "libp2p-floodsub"],
["ChainSafe/js-libp2p-gossipsub", "libp2p-gossipsub"],
["ChainSafe/gossipsub-js", "libp2p-gossipsub"],

"extensions",
["libp2p/js-libp2p-nat-mgnr", "libp2p-nat-mgnr"],
package.json (68 changes)
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "libp2p",
|
||||
"version": "0.28.7",
|
||||
"version": "0.27.7",
|
||||
"description": "JavaScript implementation of libp2p, a modular peer to peer network stack",
|
||||
"leadMaintainer": "Jacob Heun <jacobheun@gmail.com>",
|
||||
"main": "src/index.js",
|
||||
@@ -50,7 +50,7 @@
|
||||
"err-code": "^2.0.0",
|
||||
"events": "^3.1.0",
|
||||
"hashlru": "^2.3.0",
|
||||
"interface-datastore": "^1.0.4",
|
||||
"interface-datastore": "^0.8.3",
|
||||
"ipfs-utils": "^2.2.0",
|
||||
"it-all": "^1.0.1",
|
||||
"it-buffer": "^0.1.2",
|
||||
@@ -59,7 +59,7 @@
|
||||
"it-pipe": "^1.1.0",
|
||||
"it-protocol-buffers": "^0.2.0",
|
||||
"libp2p-crypto": "^0.17.6",
|
||||
"libp2p-interfaces": "^0.3.1",
|
||||
"libp2p-interfaces": "^0.3.0",
|
||||
"libp2p-utils": "^0.1.2",
|
||||
"mafmt": "^7.0.0",
|
||||
"merge-options": "^2.0.0",
|
||||
@@ -67,14 +67,12 @@
|
||||
"multiaddr": "^7.4.3",
|
||||
"multistream-select": "^0.15.0",
|
||||
"mutable-proxy": "^1.0.0",
|
||||
"node-forge": "^0.9.1",
|
||||
"p-any": "^3.0.0",
|
||||
"p-fifo": "^1.0.0",
|
||||
"p-settle": "^4.0.1",
|
||||
"peer-id": "^0.13.11",
|
||||
"protons": "^1.0.1",
|
||||
"retimer": "^2.0.0",
|
||||
"sanitize-filename": "^1.6.3",
|
||||
"streaming-iterables": "^4.1.0",
|
||||
"timeout-abort-controller": "^1.0.0",
|
||||
"xsalsa20": "^1.0.2"
|
||||
@@ -82,42 +80,35 @@
|
||||
"devDependencies": {
|
||||
"@nodeutils/defaults-deep": "^1.1.0",
|
||||
"abortable-iterator": "^3.0.0",
|
||||
"aegir": "^22.0.0",
|
||||
"aegir": "^21.9.0",
|
||||
"chai": "^4.2.0",
|
||||
"chai-as-promised": "^7.1.1",
|
||||
"chai-bytes": "^0.1.2",
|
||||
"chai-string": "^1.5.0",
|
||||
"cids": "^0.8.0",
|
||||
"datastore-fs": "^1.1.0",
|
||||
"datastore-level": "^1.1.0",
|
||||
"delay": "^4.3.0",
|
||||
"dirty-chai": "^2.0.1",
|
||||
"interop-libp2p": "^0.1.0",
|
||||
"interop-libp2p": "libp2p/interop#chore/update-libp2p-daemon-with-peerstore",
|
||||
"ipfs-http-client": "^44.0.0",
|
||||
"it-concat": "^1.0.0",
|
||||
"it-pair": "^1.0.0",
|
||||
"it-pushable": "^1.4.0",
|
||||
"level": "^6.0.1",
|
||||
"libp2p-bootstrap": "^0.11.0",
|
||||
"libp2p-delegated-content-routing": "^0.5.0",
|
||||
"libp2p-delegated-peer-routing": "^0.5.0",
|
||||
"libp2p-floodsub": "^0.21.0",
|
||||
"libp2p-gossipsub": "^0.4.6",
|
||||
"libp2p-gossipsub": "^0.4.0",
|
||||
"libp2p-kad-dht": "^0.19.1",
|
||||
"libp2p-mdns": "^0.14.1",
|
||||
"libp2p-noise": "^1.1.0",
|
||||
"libp2p-mplex": "^0.9.5",
|
||||
"libp2p-noise": "^1.1.1",
|
||||
"libp2p-secio": "^0.12.4",
|
||||
"libp2p-tcp": "^0.14.1",
|
||||
"libp2p-webrtc-star": "^0.18.0",
|
||||
"libp2p-websockets": "^0.13.1",
|
||||
"multihashes": "^0.4.19",
|
||||
"nock": "^12.0.3",
|
||||
"p-defer": "^3.0.0",
|
||||
"p-times": "^3.0.0",
|
||||
"p-wait-for": "^3.1.0",
|
||||
"promisify-es6": "^1.0.3",
|
||||
"rimraf": "^3.0.2",
|
||||
"sinon": "^9.0.2"
|
||||
},
|
||||
"contributors": [
|
||||
@@ -125,44 +116,41 @@
|
||||
"Jacob Heun <jacobheun@gmail.com>",
|
||||
"Vasco Santos <vasco.santos@moxy.studio>",
|
||||
"Alan Shaw <alan@tableflip.io>",
|
||||
"Cayman <caymannava@gmail.com>",
|
||||
"Pedro Teixeira <i@pgte.me>",
|
||||
"Friedel Ziegelmayer <dignifiedquire@gmail.com>",
|
||||
"Alex Potsides <alex@achingbrain.net>",
|
||||
"Maciej Krüger <mkg20001@gmail.com>",
|
||||
"Alex Potsides <alex@achingbrain.net>",
|
||||
"Hugo Dias <mail@hugodias.me>",
|
||||
"Volker Mische <volker.mische@gmail.com>",
|
||||
"dirkmc <dirkmdev@gmail.com>",
|
||||
"Volker Mische <volker.mische@gmail.com>",
|
||||
"Richard Littauer <richard.littauer@gmail.com>",
|
||||
"Thomas Eizinger <thomas@eizinger.io>",
|
||||
"Ryan Bell <ryan@piing.net>",
|
||||
"Giovanni T. Parra <fiatjaf@gmail.com>",
|
||||
"Andrew Nesbitt <andrewnez@gmail.com>",
|
||||
"ᴠɪᴄᴛᴏʀ ʙᴊᴇʟᴋʜᴏʟᴍ <victorbjelkholm@gmail.com>",
|
||||
"Andrew Nesbitt <andrewnez@gmail.com>",
|
||||
"Cayman <caymannava@gmail.com>",
|
||||
"Elven <mon.samuel@qq.com>",
|
||||
"Didrik Nordström <didrik.nordstrom@gmail.com>",
|
||||
"Giovanni T. Parra <fiatjaf@gmail.com>",
|
||||
"Ryan Bell <ryan@piing.net>",
|
||||
"Thomas Eizinger <thomas@eizinger.io>",
|
||||
"Kevin Kwok <antimatter15@gmail.com>",
|
||||
"Henrique Dias <hacdias@gmail.com>",
|
||||
"Nuno Nogueira <nunofmn@gmail.com>",
|
||||
"RasmusErik Voel Jensen <github@solsort.com>",
|
||||
"Florian-Merle <florian.david.merle@gmail.com>",
|
||||
"Soeren <nikorpoulsen@gmail.com>",
|
||||
"Sönke Hahn <soenkehahn@gmail.com>",
|
||||
"Tiago Alves <alvesjtiago@gmail.com>",
|
||||
"Fei Liu <liu.feiwood@gmail.com>",
|
||||
"Dmitriy Ryajov <dryajov@gmail.com>",
|
||||
"Diogo Silva <fsdiogo@gmail.com>",
|
||||
"Yusef Napora <yusef@napora.org>",
|
||||
"Zane Starr <zcstarr@gmail.com>",
|
||||
"Didrik Nordström <didrik.nordstrom@gmail.com>",
|
||||
"Daijiro Wachi <daijiro.wachi@gmail.com>",
|
||||
"ebinks <elizabethjbinks@gmail.com>",
|
||||
"Chris Bratlien <chrisbratlien@gmail.com>",
|
||||
"isan_rivkin <isanrivkin@gmail.com>",
|
||||
"robertkiel <robert.kiel@validitylabs.org>",
|
||||
"RasmusErik Voel Jensen <github@solsort.com>",
|
||||
"Bernd Strehl <bernd.strehl@gmail.com>",
|
||||
"Chris Bratlien <chrisbratlien@gmail.com>",
|
||||
"Daijiro Wachi <daijiro.wachi@gmail.com>",
|
||||
"Diogo Silva <fsdiogo@gmail.com>",
|
||||
"Dmitriy Ryajov <dryajov@gmail.com>",
|
||||
"Fei Liu <liu.feiwood@gmail.com>",
|
||||
"Florian-Merle <florian.david.merle@gmail.com>",
|
||||
"Francis Gulotta <wizard@roborooter.com>",
|
||||
"Henrique Dias <hacdias@gmail.com>",
|
||||
"Irakli Gozalishvili <rfobic@gmail.com>",
|
||||
"Joel Gustafson <joelg@mit.edu>",
|
||||
"Julien Bouquillon <contact@revolunet.com>",
|
||||
"Kevin Kwok <antimatter15@gmail.com>",
|
||||
"Nuno Nogueira <nunofmn@gmail.com>",
|
||||
"Soeren <nikorpoulsen@gmail.com>",
|
||||
"Sönke Hahn <soenkehahn@gmail.com>"
|
||||
"Joel Gustafson <joelg@mit.edu>"
|
||||
]
|
||||
}
|
||||
|
@@ -1,20 +1,12 @@
|
||||
# js-libp2p-circuit
|
||||
|
||||
> Node.js implementation of the Circuit module that libp2p uses, which implements the [interface-connection](https://github.com/libp2p/js-libp2p-interfaces/tree/master/src/connection) interface for dial/listen.
|
||||
> Node.js implementation of the Circuit module that libp2p uses, which implements the [interface-connection](https://github.com/libp2p/js-interfaces/tree/master/src/connection) interface for dial/listen.
|
||||
|
||||
**Note**: git history prior to merging into js-libp2p can be found in the original repository, https://github.com/libp2p/js-libp2p-circuit.
|
||||
|
||||
`libp2p-circuit` implements the circuit-relay mechanism that allows nodes that don't speak the same protocol to communicate using a third _relay_ node. You can read more about this in its [spec](https://github.com/libp2p/specs/tree/master/relay).
|
||||
`libp2p-circuit` implements the circuit-relay mechanism that allows nodes that don't speak the same protocol to communicate using a third _relay_ node.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [js-libp2p-circuit](#js-libp2p-circuit)
|
||||
- [Why?](#why)
|
||||
- [libp2p-circuit and IPFS](#libp2p-circuit-and-ipfs)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Usage](#usage)
|
||||
- [API](#api)
|
||||
- [Implementation rationale](#implementation-rationale)
|
||||
This module uses [pull-streams](https://pull-stream.github.io) for all stream based interfaces.
|
||||
|
||||
### Why?
|
||||
|
||||
@@ -24,42 +16,77 @@ The use of circuit-relaying is not limited to routing traffic between browser no
|
||||
- routing traffic between private nets and circumventing NAT layers
|
||||
- route mangling for better privacy (matreshka/shallot dialing).
|
||||
|
||||
It's also possible to use it for clients that implement exotic transports such as devices that only have bluetooth radios to be reachable over bluetooth enabled relays and become full p2p nodes.
|
||||
It's also possible to use it for clients that implement exotic transports such as devices that only have bluetooth radios to be reachable over bluetooth enabled relays and become full p2p nodes.
|
||||
|
||||
### libp2p-circuit and IPFS
|
||||
|
||||
Prior to `libp2p-circuit` there was a rift in the IPFS network, where IPFS nodes could only access content from nodes that speak the same protocol; for example, TCP-only nodes could only dial other TCP-only nodes, and the same applied to any other protocol combination. In practice, this limitation was most visible in JS-IPFS browser nodes, since they can only dial out but not be dialed in over WebRTC or WebSockets, so any content that the browser node held was not reachable by the rest of the network even though it was announced on the DHT. Non-browser IPFS nodes usually speak more than one protocol, such as TCP, WebSockets and/or WebRTC, which made the problem less severe outside of the browser. `libp2p-circuit` solves this problem completely: as long as there are `relay nodes` capable of routing traffic between those nodes, their content is available to the rest of the IPFS network.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [js-libp2p-circuit](#js-libp2p-circuit)
|
||||
- [Why?](#why)
|
||||
- [libp2p-circuit and IPFS](#libp2p-circuit-and-ipfs)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Usage](#usage)
|
||||
- [Example](#example)
|
||||
- [Create dialer/listener](#create-dialerlistener)
|
||||
- [Create `relay`](#create-relay)
|
||||
- [API](#api)
|
||||
- [Implementation rationale](#implementation-rationale)
|
||||
|
||||
## Usage
|
||||
|
||||
Libp2p circuit configuration can be seen at [Setup with Relay](../../doc/CONFIGURATION.md#setup-with-relay).
|
||||
### Example
|
||||
|
||||
Once you have a circuit relay node running, you can configure other nodes to use it as a relay as follows:
|
||||
#### Create dialer/listener
|
||||
|
||||
```js
|
||||
const Circuit = require('libp2p-circuit')
|
||||
const multiaddr = require('multiaddr')
|
||||
const Libp2p = require('libp2p')
|
||||
const TCP = require('libp2p-tcp')
|
||||
const MPLEX = require('libp2p-mplex')
|
||||
const SECIO = require('libp2p-secio')
|
||||
const pull = require('pull-stream')
|
||||
|
||||
const relayAddr = ...
|
||||
const mh1 = multiaddr('/p2p-circuit/p2p/QmHash') // dial /p2p/QmHash over any circuit
|
||||
|
||||
const node = await Libp2p.create({
|
||||
addresses: {
|
||||
listen: [multiaddr(`${relayAddr}/p2p-circuit`)]
|
||||
},
|
||||
modules: {
|
||||
transport: [TCP],
|
||||
streamMuxer: [MPLEX],
|
||||
connEncryption: [SECIO]
|
||||
},
|
||||
config: {
|
||||
relay: { // Circuit Relay options (this config is part of libp2p core configurations)
|
||||
enabled: true // Allows you to dial and accept relayed connections. Does not make you a relay.
|
||||
}
|
||||
}
|
||||
const circuit = new Circuit(swarmInstance, options) // pass swarm instance and options
|
||||
|
||||
const listener = circuit.createListener(mh1, (connection) => {
|
||||
console.log('new connection opened')
|
||||
pull(
|
||||
pull.values(['hello']),
|
||||
socket
|
||||
)
|
||||
})
|
||||
|
||||
listener.listen(() => {
|
||||
console.log('listening')
|
||||
|
||||
pull(
|
||||
circuit.dial(mh1),
|
||||
pull.log,
|
||||
pull.onEnd(() => {
|
||||
circuit.close()
|
||||
})
|
||||
)
|
||||
})
|
||||
```
|
||||
|
||||
Outputs:
|
||||
|
||||
```sh
|
||||
listening
|
||||
new connection opened
|
||||
hello
|
||||
```
|
||||
|
||||
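An equivalent of the dial above using the current async-iterables API might look like the following sketch; the relay address, target peer id, and `/echo/1.0.0` protocol are placeholders, and `node` is assumed to be a started libp2p instance with relayed connections enabled:

```js
const pipe = require('it-pipe')

;(async () => {
  // Placeholders: a relay the node can reach, and the peer to dial through it.
  const relayAddr = '/ip4/127.0.0.1/tcp/4001/ws/p2p/QmRelay...' // hypothetical
  const targetPeerId = 'QmHash...'                              // hypothetical

  // `node` is assumed to be a started libp2p node with relay.enabled = true
  const { stream } = await node.dialProtocol(
    `${relayAddr}/p2p-circuit/p2p/${targetPeerId}`,
    '/echo/1.0.0' // any protocol the remote peer handles
  )

  await pipe(['hello'], stream, async (source) => {
    for await (const msg of source) console.log(msg.toString())
  })
})()
```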
#### Create `relay`
|
||||
|
||||
```js
|
||||
const Relay = require('libp2p-circuit').Relay
|
||||
|
||||
const relay = new Relay(options)
|
||||
|
||||
relay.mount(swarmInstance) // start relaying traffic
|
||||
```
|
||||
|
||||
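The snippets above use the older pull-streams API. With the current libp2p core configuration referenced in [Setup with Relay](../../doc/CONFIGURATION.md#setup-with-relay), a relay is just a libp2p node with the relay `hop` option enabled; the exact flags below follow that document and should be read as a sketch rather than as this README's API:

```js
const Libp2p = require('libp2p')
const TCP = require('libp2p-tcp')
const MPLEX = require('libp2p-mplex')
const SECIO = require('libp2p-secio')

;(async () => {
  // A relay ("hop") node that other peers can route their traffic through.
  const relay = await Libp2p.create({
    addresses: { listen: ['/ip4/0.0.0.0/tcp/4001'] },
    modules: { transport: [TCP], streamMuxer: [MPLEX], connEncryption: [SECIO] },
    config: {
      relay: {
        enabled: true,     // dial and accept relayed connections
        hop: {
          enabled: true,   // relay traffic for other peers
          active: false    // do not actively dial out to reach the destination
        }
      }
    }
  })

  await relay.start()
  console.log('relay listening on:')
  relay.multiaddrs.forEach((ma) => console.log(`${ma.toString()}/p2p/${relay.peerId.toB58String()}`))
})()
```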
## API
|
||||
@@ -74,7 +101,7 @@ Both for dialing and listening.
|
||||
|
||||
### Implementation rationale
|
||||
|
||||
This module is not a transport; however, it implements the `interface-transport` interface in order to allow circuit to be plugged into `libp2p`. The rationale behind it is that `libp2p-circuit` has a dial and listen flow, which fits nicely with other transports; moreover, it requires the _raw_ connection to be encrypted and muxed just as a regular transport's connection does. All in all, `interface-transport` ended up being the correct level of abstraction for circuit, and it allowed us to reuse existing integration points in `libp2p` without adding any ad-hoc logic. All parts of `interface-transport` are used, including `.getAddr`, which returns a list of `/p2p-circuit` addresses that circuit is currently listening on.
This module is not a transport; however, it implements the `interface-transport` interface in order to allow circuit to be plugged into `libp2p-swarm`. The rationale behind it is that `libp2p-circuit` has a dial and listen flow, which fits nicely with other transports; moreover, it requires the _raw_ connection to be encrypted and muxed just as a regular transport's connection does. All in all, `interface-transport` ended up being the correct level of abstraction for circuit, and it allowed us to reuse existing integration points in `libp2p-swarm` and `libp2p` without adding any ad-hoc logic. All parts of `interface-transport` are used, including `.getAddr`, which returns a list of `/p2p-circuit` addresses that circuit is currently listening on.
|
||||
|
||||
```
|
||||
libp2p libp2p-circuit (transport)
|
||||
@@ -82,13 +109,13 @@ libp2p
|
||||
| +---------------------------------+ | | |
|
||||
| | | | | +------------------+ |
|
||||
| | | | circuit-relay listens for the HOP | | | |
|
||||
| | libp2p <------------------------------------------------| circuit-relay | |
|
||||
| | libp2p-swarm <------------------------------------------------| circuit-relay | |
|
||||
| | | | message to handle incoming relay | | | |
|
||||
| | | | requests from other nodes | +------------------+ |
|
||||
| +---------------------------------+ | | |
|
||||
| ^ ^ ^ ^ ^ ^ | | +------------------+ |
|
||||
| | | | | | | | | | +-------------+ | |
|
||||
| | | | | | | | dialer uses libp2p to dial | | | | | |
|
||||
| | | | | | | | dialer uses libp2p-swarm to dial | | | | | |
|
||||
| | | | +----------------------------------------------------------------------> dialer | | |
|
||||
| | | transports | | to a circuit-relay node using the | | | | | |
|
||||
| | | | | | | HOP message | | +-------------+ | |
|
||||
|
@@ -3,8 +3,6 @@
const mergeOptions = require('merge-options')
const Constants = require('./constants')

const { FaultTolerance } = require('./transport-manager')

const DefaultConfig = {
  addresses: {
    listen: [],
@@ -12,10 +10,7 @@ const DefaultConfig = {
    noAnnounce: []
  },
  connectionManager: {
    minConnections: 25
  },
  transportManager: {
    faultTolerance: FaultTolerance.FATAL_ALL
    minPeers: 25
  },
  dialer: {
    maxParallelDials: Constants.MAX_PARALLEL_DIALS,
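These defaults can be overridden per node through the corresponding top-level options passed to `Libp2p.create`. A sketch follows; the values are illustrative only, and `transportManager.faultTolerance` can likewise be overridden as the hunk above shows:

```js
const Libp2p = require('libp2p')
const TCP = require('libp2p-tcp')
const MPLEX = require('libp2p-mplex')
const SECIO = require('libp2p-secio')

;(async () => {
  const node = await Libp2p.create({
    addresses: {
      listen: ['/ip4/0.0.0.0/tcp/0']
    },
    modules: {
      transport: [TCP],
      streamMuxer: [MPLEX],
      connEncryption: [SECIO]
    },
    connectionManager: {
      minConnections: 10 // overrides the default of 25 shown above
    },
    dialer: {
      maxParallelDials: 50 // overrides Constants.MAX_PARALLEL_DIALS
    }
  })
  await node.start()
})()
```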
@@ -1,12 +1,9 @@
|
||||
'use strict'
|
||||
|
||||
const debug = require('debug')
|
||||
const log = debug('libp2p:connection-manager')
|
||||
log.error = debug('libp2p:connection-manager:error')
|
||||
|
||||
const errcode = require('err-code')
|
||||
const mergeOptions = require('merge-options')
|
||||
const LatencyMonitor = require('./latency-monitor')
|
||||
const debug = require('debug')('libp2p:connection-manager')
|
||||
const retimer = require('retimer')
|
||||
|
||||
const { EventEmitter } = require('events')
|
||||
@@ -25,7 +22,6 @@ const defaultOptions = {
|
||||
maxReceivedData: Infinity,
|
||||
maxEventLoopDelay: Infinity,
|
||||
pollInterval: 2000,
|
||||
autoDialInterval: 10000,
|
||||
movingAverageInterval: 60000,
|
||||
defaultPeerValue: 1
|
||||
}
|
||||
@@ -49,8 +45,6 @@ class ConnectionManager extends EventEmitter {
|
||||
* @param {Number} options.pollInterval How often, in milliseconds, metrics and latency should be checked. Default=2000
|
||||
* @param {Number} options.movingAverageInterval How often, in milliseconds, to compute averages. Default=60000
|
||||
* @param {Number} options.defaultPeerValue The value of the peer. Default=1
|
||||
* @param {boolean} options.autoDial Should preemptively guarantee connections are above the low watermark. Default=true
|
||||
* @param {Number} options.autoDialInterval How often, in milliseconds, it should preemptively guarantee connections are above the low watermark. Default=10000
|
||||
*/
|
||||
constructor (libp2p, options) {
|
||||
super()
|
||||
@@ -63,7 +57,7 @@ class ConnectionManager extends EventEmitter {
|
||||
throw errcode(new Error('Connection Manager maxConnections must be greater than minConnections'), ERR_INVALID_PARAMETERS)
|
||||
}
|
||||
|
||||
log('options: %j', this._options)
|
||||
debug('options: %j', this._options)
|
||||
|
||||
this._libp2p = libp2p
|
||||
|
||||
@@ -79,11 +73,8 @@ class ConnectionManager extends EventEmitter {
|
||||
*/
|
||||
this.connections = new Map()
|
||||
|
||||
this._started = false
|
||||
this._timer = null
|
||||
this._autoDialTimeout = null
|
||||
this._checkMetrics = this._checkMetrics.bind(this)
|
||||
this._autoDial = this._autoDial.bind(this)
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -110,11 +101,7 @@ class ConnectionManager extends EventEmitter {
|
||||
})
|
||||
this._onLatencyMeasure = this._onLatencyMeasure.bind(this)
|
||||
this._latencyMonitor.on('data', this._onLatencyMeasure)
|
||||
|
||||
this._started = true
|
||||
log('started')
|
||||
|
||||
this._options.autoDial && this._autoDial()
|
||||
debug('started')
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -122,13 +109,11 @@ class ConnectionManager extends EventEmitter {
|
||||
* @async
|
||||
*/
|
||||
async stop () {
|
||||
this._autoDialTimeout && this._autoDialTimeout.clear()
|
||||
this._timer && this._timer.clear()
|
||||
this._latencyMonitor && this._latencyMonitor.removeListener('data', this._onLatencyMeasure)
|
||||
|
||||
this._started = false
|
||||
await this._close()
|
||||
log('stopped')
|
||||
debug('stopped')
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -172,13 +157,13 @@ class ConnectionManager extends EventEmitter {
|
||||
_checkMetrics () {
|
||||
const movingAverages = this._libp2p.metrics.global.movingAverages
|
||||
const received = movingAverages.dataReceived[this._options.movingAverageInterval].movingAverage()
|
||||
this._checkMaxLimit('maxReceivedData', received)
|
||||
this._checkLimit('maxReceivedData', received)
|
||||
const sent = movingAverages.dataSent[this._options.movingAverageInterval].movingAverage()
|
||||
this._checkMaxLimit('maxSentData', sent)
|
||||
this._checkLimit('maxSentData', sent)
|
||||
const total = received + sent
|
||||
this._checkMaxLimit('maxData', total)
|
||||
log('metrics update', total)
|
||||
this._timer = retimer(this._checkMetrics, this._options.pollInterval)
|
||||
this._checkLimit('maxData', total)
|
||||
debug('metrics update', total)
|
||||
this._timer.reschedule(this._options.pollInterval)
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -190,20 +175,21 @@ class ConnectionManager extends EventEmitter {
|
||||
const peerIdStr = peerId.toB58String()
|
||||
const storedConn = this.connections.get(peerIdStr)
|
||||
|
||||
this.emit('peer:connect', connection)
|
||||
if (storedConn) {
|
||||
storedConn.push(connection)
|
||||
} else {
|
||||
this.connections.set(peerIdStr, [connection])
|
||||
this.emit('peer:connect', connection)
|
||||
}
|
||||
|
||||
this._libp2p.peerStore.addressBook.add(peerId, [connection.remoteAddr])
|
||||
this._libp2p.peerStore.keyBook.set(peerId, peerId.pubKey)
|
||||
|
||||
if (!this._peerValues.has(peerIdStr)) {
|
||||
this._peerValues.set(peerIdStr, this._options.defaultPeerValue)
|
||||
}
|
||||
|
||||
this._checkMaxLimit('maxConnections', this.size)
|
||||
this._checkLimit('maxConnections', this.size)
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -230,19 +216,6 @@ class ConnectionManager extends EventEmitter {
|
||||
* @returns {Connection}
|
||||
*/
|
||||
get (peerId) {
|
||||
const connections = this.getAll(peerId)
|
||||
if (connections.length) {
|
||||
return connections[0]
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all open connections with a peer.
|
||||
* @param {PeerId} peerId
|
||||
* @returns {Array<Connection>}
|
||||
*/
|
||||
getAll (peerId) {
|
||||
if (!PeerId.isPeerId(peerId)) {
|
||||
throw errcode(new Error('peerId must be an instance of peer-id'), ERR_INVALID_PARAMETERS)
|
||||
}
|
||||
@@ -250,11 +223,11 @@ class ConnectionManager extends EventEmitter {
|
||||
const id = peerId.toB58String()
|
||||
const connections = this.connections.get(id)
|
||||
|
||||
// Return all open connections
|
||||
// Return the first, open connection
|
||||
if (connections) {
|
||||
return connections.filter(connection => connection.stat.status === 'open')
|
||||
return connections.find(connection => connection.stat.status === 'open')
|
||||
}
|
||||
return []
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -263,7 +236,7 @@ class ConnectionManager extends EventEmitter {
|
||||
* @param {*} summary The LatencyMonitor summary
|
||||
*/
|
||||
_onLatencyMeasure (summary) {
|
||||
this._checkMaxLimit('maxEventLoopDelay', summary.avgMs)
|
||||
this._checkLimit('maxEventLoopDelay', summary.avgMs)
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -272,61 +245,15 @@ class ConnectionManager extends EventEmitter {
|
||||
* @param {string} name The name of the field to check limits for
|
||||
* @param {number} value The current value of the field
|
||||
*/
|
||||
_checkMaxLimit (name, value) {
|
||||
_checkLimit (name, value) {
|
||||
const limit = this._options[name]
|
||||
log('checking limit of %s. current value: %d of %d', name, value, limit)
|
||||
debug('checking limit of %s. current value: %d of %d', name, value, limit)
|
||||
if (value > limit) {
|
||||
log('%s: limit exceeded: %s, %d', this._peerId, name, value)
|
||||
debug('%s: limit exceeded: %s, %d', this._peerId, name, value)
|
||||
this._maybeDisconnectOne()
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Proactively tries to connect to known peers stored in the PeerStore.
|
||||
* It will keep the number of connections below the upper limit and sort
|
||||
* the peers to connect based on whether we know their keys and protocols.
|
||||
* @async
|
||||
* @private
|
||||
*/
|
||||
async _autoDial () {
|
||||
const minConnections = this._options.minConnections
|
||||
|
||||
// Already has enough connections
|
||||
if (this.size >= minConnections) {
|
||||
this._autoDialTimeout = retimer(this._autoDial, this._options.autoDialInterval)
|
||||
return
|
||||
}
|
||||
|
||||
// Sort peers on whether we know protocols or public keys for them
|
||||
const peers = Array.from(this._libp2p.peerStore.peers.values())
|
||||
.sort((a, b) => {
|
||||
if (b.protocols && b.protocols.length && (!a.protocols || !a.protocols.length)) {
|
||||
return 1
|
||||
} else if (b.id.pubKey && !a.id.pubKey) {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
})
|
||||
|
||||
for (let i = 0; i < peers.length && this.size < minConnections; i++) {
|
||||
if (!this.get(peers[i].id)) {
|
||||
log('connecting to a peerStore stored peer %s', peers[i].id.toB58String())
|
||||
try {
|
||||
await this._libp2p.dialer.connectToPeer(peers[i].id)
|
||||
|
||||
// Connection Manager was stopped
|
||||
if (!this._started) {
|
||||
return
|
||||
}
|
||||
} catch (err) {
|
||||
log.error('could not connect to peerStore stored peer', err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
this._autoDialTimeout = retimer(this._autoDial, this._options.autoDialInterval)
|
||||
}
|
||||
|
||||
/**
|
||||
* If we have more connections than our maximum, close a connection
|
||||
* to the lowest valued peer.
|
||||
@@ -335,12 +262,12 @@ class ConnectionManager extends EventEmitter {
|
||||
_maybeDisconnectOne () {
|
||||
if (this._options.minConnections < this.connections.size) {
|
||||
const peerValues = Array.from(this._peerValues).sort(byPeerValue)
|
||||
log('%s: sorted peer values: %j', this._peerId, peerValues)
|
||||
debug('%s: sorted peer values: %j', this._peerId, peerValues)
|
||||
const disconnectPeer = peerValues[0]
|
||||
if (disconnectPeer) {
|
||||
const peerId = disconnectPeer[0]
|
||||
log('%s: lowest value peer is %s', this._peerId, peerId)
|
||||
log('%s: closing a connection to %j', this._peerId, peerId)
|
||||
debug('%s: lowest value peer is %s', this._peerId, peerId)
|
||||
debug('%s: closing a connection to %j', this._peerId, peerId)
|
||||
for (const connections of this.connections.values()) {
|
||||
if (connections[0].remotePeer.toB58String() === peerId) {
|
||||
connections[0].close()
|
||||
|
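As a usage sketch, the connection manager limits discussed in these hunks are passed through the `connectionManager` option when constructing a node, and existing connections can then be inspected with `get`/`getAll`. The option values below are illustrative:

```js
const Libp2p = require('libp2p')
const TCP = require('libp2p-tcp')
const MPLEX = require('libp2p-mplex')
const SECIO = require('libp2p-secio')
const PeerId = require('peer-id')

;(async () => {
  const node = await Libp2p.create({
    addresses: { listen: ['/ip4/127.0.0.1/tcp/0'] },
    modules: { transport: [TCP], streamMuxer: [MPLEX], connEncryption: [SECIO] },
    connectionManager: {
      maxConnections: 200,     // above this, the lowest-value peer is disconnected
      minConnections: 25,      // autoDial tops connections up to this level
      pollInterval: 2000,      // how often metrics/latency limits are checked
      autoDialInterval: 10000, // how often autoDial re-runs
      defaultPeerValue: 1
    }
  })
  await node.start()

  const somePeerId = await PeerId.create({ bits: 1024 }) // illustrative peer
  console.log(node.connectionManager.getAll(somePeerId)) // all open connections ([])
  console.log(node.connectionManager.get(somePeerId))    // first open connection, or null
  await node.stop()
})()
```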
@@ -208,7 +208,7 @@ class IdentifyService {
   * @param {*} options.stream
   * @param {Connection} options.connection
   */
  async _handleIdentify ({ connection, stream }) {
  _handleIdentify ({ connection, stream }) {
    let publicKey = Buffer.alloc(0)
    if (this.peerId.pubKey) {
      publicKey = this.peerId.pubKey.bytes
@@ -223,16 +223,12 @@ class IdentifyService {
      protocols: Array.from(this._protocols.keys())
    })

    try {
      await pipe(
        [message],
        lp.encode(),
        stream,
        consume
      )
    } catch (err) {
      log.error('could not respond to identify request', err)
    }
    pipe(
      [message],
      lp.encode(),
      stream,
      consume
    )
  }

  /**
@@ -243,16 +239,17 @@ class IdentifyService {
   * @param {Connection} options.connection
   */
  async _handlePush ({ connection, stream }) {
    const [data] = await pipe(
      [],
      stream,
      lp.decode(),
      take(1),
      toBuffer,
      collect
    )

    let message
    try {
      const [data] = await pipe(
        [],
        stream,
        lp.decode(),
        take(1),
        toBuffer,
        collect
      )
      message = Message.decode(data)
    } catch (err) {
      return log.error('received invalid message', err)
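The push handler above consumes exactly one length-prefixed message from the stream before decoding it. As a standalone sketch of that pattern, assuming the same helpers the file imports (`it-pipe`, `it-length-prefixed`, `it-buffer`, `streaming-iterables`):

```js
const pipe = require('it-pipe')
const lp = require('it-length-prefixed')
const { toBuffer } = require('it-buffer')
const { collect, take } = require('streaming-iterables')

// Read a single length-prefixed message from a duplex stream, mirroring the
// pipeline used by _handlePush above.
async function readOneMessage (stream) {
  const [data] = await pipe(
    [],
    stream,
    lp.decode(),
    take(1),
    toBuffer,
    collect
  )
  return data // a Buffer, ready for e.g. Message.decode(data)
}

module.exports = readOneMessage
```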
src/index.js (70 changes)
@@ -19,7 +19,6 @@ const AddressManager = require('./address-manager')
|
||||
const ConnectionManager = require('./connection-manager')
|
||||
const Circuit = require('./circuit')
|
||||
const Dialer = require('./dialer')
|
||||
const Keychain = require('./keychain')
|
||||
const Metrics = require('./metrics')
|
||||
const TransportManager = require('./transport-manager')
|
||||
const Upgrader = require('./upgrader')
|
||||
@@ -65,13 +64,7 @@ class Libp2p extends EventEmitter {
|
||||
this._discovery = new Map() // Discovery service instances/references
|
||||
|
||||
// Create the Connection Manager
|
||||
if (this._options.connectionManager.minPeers) { // Remove in 0.29
|
||||
this._options.connectionManager.minConnections = this._options.connectionManager.minPeers
|
||||
}
|
||||
this.connectionManager = new ConnectionManager(this, {
|
||||
autoDial: this._config.peerDiscovery.autoDial,
|
||||
...this._options.connectionManager
|
||||
})
|
||||
this.connectionManager = new ConnectionManager(this, this._options.connectionManager)
|
||||
|
||||
// Create Metrics
|
||||
if (this._options.metrics.enabled) {
|
||||
@@ -81,21 +74,6 @@ class Libp2p extends EventEmitter {
|
||||
})
|
||||
}
|
||||
|
||||
// Create keychain
|
||||
if (this._options.keychain && this._options.keychain.pass && this._options.keychain.datastore) {
|
||||
log('creating keychain')
|
||||
|
||||
const keychainOpts = Keychain.generateOptions()
|
||||
|
||||
this.keychain = new Keychain(this._options.keychain.datastore, {
|
||||
passPhrase: this._options.keychain.pass,
|
||||
...keychainOpts,
|
||||
...this._options.keychain
|
||||
})
|
||||
|
||||
log('keychain constructed')
|
||||
}
|
||||
|
||||
// Setup the Upgrader
|
||||
this.upgrader = new Upgrader({
|
||||
localPeer: this.peerId,
|
||||
@@ -107,8 +85,7 @@ class Libp2p extends EventEmitter {
|
||||
// Setup the transport manager
|
||||
this.transportManager = new TransportManager({
|
||||
libp2p: this,
|
||||
upgrader: this.upgrader,
|
||||
faultTolerance: this._options.transportManager.faultTolerance
|
||||
upgrader: this.upgrader
|
||||
})
|
||||
|
||||
// Create the Registrar
|
||||
@@ -172,7 +149,6 @@ class Libp2p extends EventEmitter {
|
||||
if (this._modules.dht) {
|
||||
const DHT = this._modules.dht
|
||||
this._dht = new DHT({
|
||||
libp2p: this,
|
||||
dialer: this.dialer,
|
||||
peerId: this.peerId,
|
||||
peerStore: this.peerStore,
|
||||
@@ -273,20 +249,6 @@ class Libp2p extends EventEmitter {
|
||||
log('libp2p has stopped')
|
||||
}
|
||||
|
||||
/**
|
||||
* Load keychain keys from the datastore.
|
||||
* Imports the private key as 'self', if needed.
|
||||
* @async
|
||||
* @returns {void}
|
||||
*/
|
||||
async loadKeychain () {
|
||||
try {
|
||||
await this.keychain.findKeyByName('self')
|
||||
} catch (err) {
|
||||
await this.keychain.importPeer('self', this.peerId)
|
||||
}
|
||||
}
|
||||
|
||||
isStarted () {
|
||||
return this._isStarted
|
||||
}
|
||||
@@ -301,7 +263,7 @@ class Libp2p extends EventEmitter {
|
||||
}
|
||||
|
||||
/**
|
||||
* Dials to the provided peer. If successful, the known metadata of the
|
||||
* Dials to the provided peer. If successful, the known `Peer` data of the
|
||||
* peer will be added to the nodes `peerStore`
|
||||
* @param {PeerId|Multiaddr|string} peer The peer to dial
|
||||
* @param {object} options
|
||||
@@ -314,7 +276,7 @@ class Libp2p extends EventEmitter {
|
||||
|
||||
/**
|
||||
* Dials to the provided peer and handshakes with the given protocol.
|
||||
* If successful, the known metadata of the peer will be added to the nodes `peerStore`,
|
||||
* If successful, the known `Peer` data of the peer will be added to the nodes `peerStore`,
|
||||
* and the `Connection` will be returned
|
||||
* @async
|
||||
* @param {PeerId|Multiaddr|string} peer The peer to dial
|
||||
@@ -466,28 +428,28 @@ class Libp2p extends EventEmitter {
|
||||
async _onDidStart () {
|
||||
this._isStarted = true
|
||||
|
||||
this.connectionManager.start()
|
||||
|
||||
this.peerStore.on('peer', peerId => {
|
||||
this.emit('peer:discovery', peerId)
|
||||
this._maybeConnect(peerId)
|
||||
})
|
||||
|
||||
// Once we start, emit any peers we may have already discovered
|
||||
// TODO: this should be removed, as we already discovered these peers in the past
|
||||
for (const peer of this.peerStore.peers.values()) {
|
||||
this.emit('peer:discovery', peer.id)
|
||||
}
|
||||
|
||||
this.connectionManager.start()
|
||||
|
||||
// Peer discovery
|
||||
await this._setupPeerDiscovery()
|
||||
|
||||
// Once we start, emit and dial any peers we may have already discovered
|
||||
for (const peer of this.peerStore.peers.values()) {
|
||||
this.emit('peer:discovery', peer.id)
|
||||
this._maybeConnect(peer.id)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Called whenever peer discovery services emit `peer` events.
|
||||
* Known peers may be emitted.
|
||||
* @private
|
||||
* @param {{ id: PeerId, multiaddrs: Array<Multiaddr>, protocols: Array<string> }} peer
|
||||
* @param {PeerData} peer
|
||||
*/
|
||||
_onDiscoveryPeer (peer) {
|
||||
if (peer.id.toB58String() === this.peerId.toB58String()) {
|
||||
@@ -502,15 +464,15 @@ class Libp2p extends EventEmitter {
|
||||
/**
|
||||
* Will dial to the given `peerId` if the current number of
|
||||
* connected peers is less than the configured `ConnectionManager`
|
||||
* minConnections.
|
||||
* minPeers.
|
||||
* @private
|
||||
* @param {PeerId} peerId
|
||||
*/
|
||||
async _maybeConnect (peerId) {
|
||||
// If auto dialing is on and we have no connection to the peer, check if we should dial
|
||||
if (this._config.peerDiscovery.autoDial === true && !this.connectionManager.get(peerId)) {
|
||||
const minConnections = this._options.connectionManager.minConnections || 0
|
||||
if (minConnections > this.connectionManager.size) {
|
||||
const minPeers = this._options.connectionManager.minPeers || 0
|
||||
if (minPeers > this.connectionManager.size) {
|
||||
log('connecting to discovered peer %s', peerId.toB58String())
|
||||
try {
|
||||
await this.dialer.connectToPeer(peerId)
|
||||
|
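Tying the keychain hunks above together, here is a sketch of a node that opts into the keychain: it is only constructed when both `pass` and `datastore` are supplied, and `loadKeychain()` imports the node's private key as `'self'` on first use. `MemoryDatastore` is an assumption for brevity; any `interface-datastore` backend (e.g. `datastore-level`) should work:

```js
const Libp2p = require('libp2p')
const TCP = require('libp2p-tcp')
const MPLEX = require('libp2p-mplex')
const SECIO = require('libp2p-secio')
const { MemoryDatastore } = require('interface-datastore')

;(async () => {
  const node = await Libp2p.create({
    addresses: { listen: ['/ip4/127.0.0.1/tcp/0'] },
    modules: { transport: [TCP], streamMuxer: [MPLEX], connEncryption: [SECIO] },
    keychain: {
      pass: 'this-passphrase-must-be-at-least-20-characters',
      datastore: new MemoryDatastore()
    }
  })

  // Import the node's own key as 'self' if it is not stored yet
  await node.loadKeychain()

  const info = await node.keychain.createKey('my-rsa-key', 'rsa', 2048)
  console.log(info) // { name: 'my-rsa-key', id: 'Qm...' }

  await node.start()
})()
```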
@@ -1,55 +0,0 @@
# js-libp2p-keychain

> A secure key chain for libp2p in JavaScript

## Features

- Manages the lifecycle of a key
- Keys are encrypted at rest
- Enforces the use of safe key names
- Uses encrypted PKCS 8 for key storage
- Uses PBKDF2 for a "stretched" key encryption key
- Enforces NIST SP 800-131A and NIST SP 800-132
- Uses PKCS 7: CMS (aka RFC 5652) to provide cryptographically protected messages
- Delays reporting errors to slow down brute force attacks

### KeyInfo

The key management and naming service API all return a `KeyInfo` object. The `id` is a universally unique identifier for the key. The `name` is local to the key chain.

```js
{
  name: 'rsa-key',
  id: 'QmYWYSUZ4PV6MRFYpdtEDJBiGs4UrmE6g8wmAWSePekXVW'
}
```

The **key id** is the SHA-256 [multihash](https://github.com/multiformats/multihash) of its public key. The *public key* is a [protobuf encoding](https://github.com/libp2p/js-libp2p-crypto/blob/master/src/keys/keys.proto.js) containing a type and the [DER encoding](https://en.wikipedia.org/wiki/X.690) of the PKCS [SubjectPublicKeyInfo](https://www.ietf.org/rfc/rfc3279.txt).

### Private key storage

A private key is stored as an encrypted PKCS 8 structure in the PEM format. It is protected by a key generated from the key chain's *passPhrase* using **PBKDF2**.

The default options for generating the derived encryption key are in the `dek` object. This, along with the passPhrase, is the input to a `PBKDF2` function.

```js
const defaultOptions = {
  // See https://cryptosense.com/parameter-choice-for-pbkdf2/
  dek: {
    keyLength: 512 / 8,
    iterationCount: 1000,
    salt: 'at least 16 characters long',
    hash: 'sha2-512'
  }
}
```



### Physical storage

The actual physical storage of an encrypted key is left to implementations of [interface-datastore](https://github.com/ipfs/interface-datastore/). A key benefit is that the key chain can now be used in the browser with the [js-datastore-level](https://github.com/ipfs/js-datastore-level) implementation.

### Cryptographic Message Syntax (CMS)

CMS, aka [PKCS #7](https://en.wikipedia.org/wiki/PKCS) and [RFC 5652](https://tools.ietf.org/html/rfc5652), describes an encapsulation syntax for data protection. It is used to digitally sign, digest, authenticate, or encrypt arbitrary message content. Basically, `cms.encrypt` creates a DER message that can only be read by someone holding the private key.
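A short usage sketch of that CMS round trip, assuming a `keychain` instance that already holds a key named `'rsa-key'`:

```js
;(async () => {
  // `keychain` is assumed to be an existing Keychain instance holding 'rsa-key'
  const plain = Buffer.from('secret message')

  // encrypt() returns a Buffer containing the CMS / PKCS #7 message in DER
  const envelope = await keychain.cms.encrypt('rsa-key', plain)

  // decrypt() succeeds on any keychain that holds one of the recipient keys
  const output = await keychain.cms.decrypt(envelope)
  console.log(output.toString()) // 'secret message'
})()
```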
@@ -1,122 +0,0 @@
|
||||
'use strict'
|
||||
|
||||
require('node-forge/lib/pkcs7')
|
||||
require('node-forge/lib/pbe')
|
||||
const forge = require('node-forge/lib/forge')
|
||||
const { certificateForKey, findAsync } = require('./util')
|
||||
const errcode = require('err-code')
|
||||
|
||||
/**
|
||||
* Cryptographic Message Syntax (aka PKCS #7)
|
||||
*
|
||||
* CMS describes an encapsulation syntax for data protection. It
|
||||
* is used to digitally sign, digest, authenticate, or encrypt
|
||||
* arbitrary message content.
|
||||
*
|
||||
* See RFC 5652 for all the details.
|
||||
*/
|
||||
class CMS {
|
||||
/**
|
||||
* Creates a new instance with a keychain
|
||||
*
|
||||
* @param {Keychain} keychain - the available keys
|
||||
*/
|
||||
constructor (keychain) {
|
||||
if (!keychain) {
|
||||
throw errcode(new Error('keychain is required'), 'ERR_KEYCHAIN_REQUIRED')
|
||||
}
|
||||
|
||||
this.keychain = keychain
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates some protected data.
|
||||
*
|
||||
* The output Buffer contains the PKCS #7 message in DER.
|
||||
*
|
||||
* @param {string} name - The local key name.
|
||||
* @param {Buffer} plain - The data to encrypt.
|
||||
* @returns {undefined}
|
||||
*/
|
||||
async encrypt (name, plain) {
|
||||
if (!Buffer.isBuffer(plain)) {
|
||||
throw errcode(new Error('Plain data must be a Buffer'), 'ERR_INVALID_PARAMS')
|
||||
}
|
||||
|
||||
const key = await this.keychain.findKeyByName(name)
|
||||
const pem = await this.keychain._getPrivateKey(name)
|
||||
const privateKey = forge.pki.decryptRsaPrivateKey(pem, this.keychain._())
|
||||
const certificate = await certificateForKey(key, privateKey)
|
||||
|
||||
// create a p7 enveloped message
|
||||
const p7 = forge.pkcs7.createEnvelopedData()
|
||||
p7.addRecipient(certificate)
|
||||
p7.content = forge.util.createBuffer(plain)
|
||||
p7.encrypt()
|
||||
|
||||
// convert message to DER
|
||||
const der = forge.asn1.toDer(p7.toAsn1()).getBytes()
|
||||
return Buffer.from(der, 'binary')
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads some protected data.
|
||||
*
|
||||
* The keychain must contain one of the keys used to encrypt the data. If none of the keys
|
||||
* exists, an Error is returned with the property 'missingKeys'. It is an array of key ids.
|
||||
*
|
||||
* @param {Buffer} cmsData - The CMS encrypted data to decrypt.
|
||||
* @returns {undefined}
|
||||
*/
|
||||
async decrypt (cmsData) {
|
||||
if (!Buffer.isBuffer(cmsData)) {
|
||||
throw errcode(new Error('CMS data is required'), 'ERR_INVALID_PARAMS')
|
||||
}
|
||||
|
||||
let cms
|
||||
try {
|
||||
const buf = forge.util.createBuffer(cmsData.toString('binary'))
|
||||
const obj = forge.asn1.fromDer(buf)
|
||||
cms = forge.pkcs7.messageFromAsn1(obj)
|
||||
} catch (err) {
|
||||
throw errcode(new Error('Invalid CMS: ' + err.message), 'ERR_INVALID_CMS')
|
||||
}
|
||||
|
||||
// Find a recipient whose key we hold. We only deal with recipient certs
|
||||
// issued by ipfs (O=ipfs).
|
||||
const recipients = cms.recipients
|
||||
.filter(r => r.issuer.find(a => a.shortName === 'O' && a.value === 'ipfs'))
|
||||
.filter(r => r.issuer.find(a => a.shortName === 'CN'))
|
||||
.map(r => {
|
||||
return {
|
||||
recipient: r,
|
||||
keyId: r.issuer.find(a => a.shortName === 'CN').value
|
||||
}
|
||||
})
|
||||
|
||||
const r = await findAsync(recipients, async (recipient) => {
|
||||
try {
|
||||
const key = await this.keychain.findKeyById(recipient.keyId)
|
||||
if (key) return true
|
||||
} catch (err) {
|
||||
return false
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
if (!r) {
|
||||
const missingKeys = recipients.map(r => r.keyId)
|
||||
throw errcode(new Error('Decryption needs one of the key(s): ' + missingKeys.join(', ')), 'ERR_MISSING_KEYS', {
|
||||
missingKeys
|
||||
})
|
||||
}
|
||||
|
||||
const key = await this.keychain.findKeyById(r.keyId)
|
||||
const pem = await this.keychain._getPrivateKey(key.name)
|
||||
const privateKey = forge.pki.decryptRsaPrivateKey(pem, this.keychain._())
|
||||
cms.decrypt(r.recipient, privateKey)
|
||||
return Buffer.from(cms.content.getBytes(), 'binary')
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = CMS
|
Binary file not shown. (A removed image, 25 KiB, is not rendered in this diff.)
@@ -1 +0,0 @@
(Removed file: a single-line draw.io/mxfile diagram source; the encoded XML payload is omitted here.)
@@ -1,469 +0,0 @@
|
||||
/* eslint max-nested-callbacks: ["error", 5] */
|
||||
'use strict'
|
||||
|
||||
const sanitize = require('sanitize-filename')
|
||||
const mergeOptions = require('merge-options')
|
||||
const crypto = require('libp2p-crypto')
|
||||
const DS = require('interface-datastore')
|
||||
const CMS = require('./cms')
|
||||
const errcode = require('err-code')
|
||||
|
||||
const keyPrefix = '/pkcs8/'
|
||||
const infoPrefix = '/info/'
|
||||
|
||||
// NIST SP 800-132
|
||||
const NIST = {
|
||||
minKeyLength: 112 / 8,
|
||||
minSaltLength: 128 / 8,
|
||||
minIterationCount: 1000
|
||||
}
|
||||
|
||||
const defaultOptions = {
|
||||
// See https://cryptosense.com/parameter-choice-for-pbkdf2/
|
||||
dek: {
|
||||
keyLength: 512 / 8,
|
||||
iterationCount: 10000,
|
||||
salt: 'you should override this value with a crypto secure random number',
|
||||
hash: 'sha2-512'
|
||||
}
|
||||
}
|
||||
|
||||
function validateKeyName (name) {
|
||||
if (!name) return false
|
||||
if (typeof name !== 'string') return false
|
||||
return name === sanitize(name.trim())
|
||||
}
|
||||
|
||||
/**
|
||||
* Throws an error after a delay
|
||||
*
|
||||
* This assumes that an error indicates that the keychain is under attack. Delay returning an
|
||||
* error to make brute force attacks harder.
|
||||
*
|
||||
* @param {string | Error} err - The error
|
||||
* @private
|
||||
*/
|
||||
async function throwDelayed (err) {
|
||||
const min = 200
|
||||
const max = 1000
|
||||
const delay = Math.random() * (max - min) + min
|
||||
|
||||
await new Promise(resolve => setTimeout(resolve, delay))
|
||||
throw err
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a key name into a datastore name.
|
||||
*
|
||||
* @param {string} name
|
||||
* @returns {DS.Key}
|
||||
* @private
|
||||
*/
|
||||
function DsName (name) {
|
||||
return new DS.Key(keyPrefix + name)
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a key name into a datastore info name.
|
||||
*
|
||||
* @param {string} name
|
||||
* @returns {DS.Key}
|
||||
* @private
|
||||
*/
|
||||
function DsInfoName (name) {
|
||||
return new DS.Key(infoPrefix + name)
|
||||
}
|
||||
|
||||
/**
|
||||
* Information about a key.
|
||||
*
|
||||
* @typedef {Object} KeyInfo
|
||||
*
|
||||
* @property {string} id - The universally unique key id.
|
||||
* @property {string} name - The local key name.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Manages the lifecycle of a key. Keys are encrypted at rest using PKCS #8.
|
||||
*
|
||||
* A key in the store has two entries
|
||||
* - '/info/*key-name*', contains the KeyInfo for the key
|
||||
* - '/pkcs8/*key-name*', contains the PKCS #8 for the key
|
||||
*
|
||||
*/
|
||||
class Keychain {
|
||||
/**
|
||||
* Creates a new instance of a key chain.
|
||||
*
|
||||
* @param {DS} store - where the key are.
|
||||
* @param {object} options - ???
|
||||
*/
|
||||
constructor (store, options) {
|
||||
if (!store) {
|
||||
throw new Error('store is required')
|
||||
}
|
||||
this.store = store
|
||||
|
||||
this.opts = mergeOptions(defaultOptions, options)
|
||||
|
||||
// Enforce NIST SP 800-132
|
||||
if (!this.opts.passPhrase || this.opts.passPhrase.length < 20) {
  throw new Error('passPhrase must be at least 20 characters')
}
if (this.opts.dek.keyLength < NIST.minKeyLength) {
  throw new Error(`dek.keyLength must be at least ${NIST.minKeyLength} bytes`)
}
if (this.opts.dek.salt.length < NIST.minSaltLength) {
  throw new Error(`dek.saltLength must be at least ${NIST.minSaltLength} bytes`)
}
if (this.opts.dek.iterationCount < NIST.minIterationCount) {
  throw new Error(`dek.iterationCount must be at least ${NIST.minIterationCount}`)
}
|
||||
|
||||
// Create the derived encrypting key
|
||||
const dek = crypto.pbkdf2(
|
||||
this.opts.passPhrase,
|
||||
this.opts.dek.salt,
|
||||
this.opts.dek.iterationCount,
|
||||
this.opts.dek.keyLength,
|
||||
this.opts.dek.hash)
|
||||
Object.defineProperty(this, '_', { value: () => dek })
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets an object that can encrypt/decrypt protected data
|
||||
* using the Cryptographic Message Syntax (CMS).
|
||||
*
|
||||
* CMS describes an encapsulation syntax for data protection. It
|
||||
* is used to digitally sign, digest, authenticate, or encrypt
|
||||
* arbitrary message content.
|
||||
*
|
||||
* @returns {CMS}
|
||||
*/
|
||||
get cms () {
|
||||
return new CMS(this)
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates the options for a keychain. A random salt is produced.
|
||||
*
|
||||
* @returns {object}
|
||||
*/
|
||||
static generateOptions () {
|
||||
const options = Object.assign({}, defaultOptions)
|
||||
const saltLength = Math.ceil(NIST.minSaltLength / 3) * 3 // no base64 padding
|
||||
options.dek.salt = crypto.randomBytes(saltLength).toString('base64')
|
||||
return options
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets an object that can encrypt/decrypt protected data.
|
||||
* The default options for a keychain.
|
||||
*
|
||||
* @returns {object}
|
||||
*/
|
||||
static get options () {
|
||||
return defaultOptions
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new key.
|
||||
*
|
||||
* @param {string} name - The local key name; cannot already exist.
|
||||
* @param {string} type - One of the key types; 'rsa'.
|
||||
* @param {int} size - The key size in bits.
|
||||
* @returns {KeyInfo}
|
||||
*/
|
||||
async createKey (name, type, size) {
|
||||
const self = this
|
||||
|
||||
if (!validateKeyName(name) || name === 'self') {
|
||||
return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), 'ERR_INVALID_KEY_NAME'))
|
||||
}
|
||||
|
||||
if (typeof type !== 'string') {
|
||||
return throwDelayed(errcode(new Error(`Invalid key type '${type}'`), 'ERR_INVALID_KEY_TYPE'))
|
||||
}
|
||||
|
||||
if (!Number.isSafeInteger(size)) {
|
||||
return throwDelayed(errcode(new Error(`Invalid key size '${size}'`), 'ERR_INVALID_KEY_SIZE'))
|
||||
}
|
||||
|
||||
const dsname = DsName(name)
|
||||
const exists = await self.store.has(dsname)
|
||||
if (exists) return throwDelayed(errcode(new Error(`Key '${name}' already exists`), 'ERR_KEY_ALREADY_EXISTS'))
|
||||
|
||||
switch (type.toLowerCase()) {
|
||||
case 'rsa':
|
||||
if (size < 2048) {
|
||||
return throwDelayed(errcode(new Error(`Invalid RSA key size ${size}`), 'ERR_INVALID_KEY_SIZE'))
|
||||
}
|
||||
break
|
||||
default:
|
||||
break
|
||||
}
|
||||
|
||||
let keyInfo
|
||||
try {
|
||||
const keypair = await crypto.keys.generateKeyPair(type, size)
|
||||
const kid = await keypair.id()
|
||||
const pem = await keypair.export(this._())
|
||||
keyInfo = {
|
||||
name: name,
|
||||
id: kid
|
||||
}
|
||||
const batch = self.store.batch()
|
||||
batch.put(dsname, pem)
|
||||
batch.put(DsInfoName(name), JSON.stringify(keyInfo))
|
||||
|
||||
await batch.commit()
|
||||
} catch (err) {
|
||||
return throwDelayed(err)
|
||||
}
|
||||
|
||||
return keyInfo
|
||||
}
|
||||
|
||||
/**
|
||||
* List all the keys.
|
||||
*
|
||||
* @returns {KeyInfo[]}
|
||||
*/
|
||||
async listKeys () {
|
||||
const self = this
|
||||
const query = {
|
||||
prefix: infoPrefix
|
||||
}
|
||||
|
||||
const info = []
|
||||
for await (const value of self.store.query(query)) {
|
||||
info.push(JSON.parse(value.value))
|
||||
}
|
||||
|
||||
return info
|
||||
}
|
||||
|
||||
/**
|
||||
* Find a key by its id.
|
||||
*
|
||||
* @param {string} id - The universally unique key identifier.
|
||||
* @returns {KeyInfo}
|
||||
*/
|
||||
async findKeyById (id) {
|
||||
try {
|
||||
const keys = await this.listKeys()
|
||||
return keys.find((k) => k.id === id)
|
||||
} catch (err) {
|
||||
return throwDelayed(err)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Find a key by its name.
|
||||
*
|
||||
* @param {string} name - The local key name.
|
||||
* @returns {KeyInfo}
|
||||
*/
|
||||
async findKeyByName (name) {
|
||||
if (!validateKeyName(name)) {
|
||||
return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), 'ERR_INVALID_KEY_NAME'))
|
||||
}
|
||||
|
||||
const dsname = DsInfoName(name)
|
||||
try {
|
||||
const res = await this.store.get(dsname)
|
||||
return JSON.parse(res.toString())
|
||||
} catch (err) {
|
||||
return throwDelayed(errcode(new Error(`Key '${name}' does not exist. ${err.message}`), 'ERR_KEY_NOT_FOUND'))
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove an existing key.
|
||||
*
|
||||
* @param {string} name - The local key name; must already exist.
|
||||
* @returns {KeyInfo}
|
||||
*/
|
||||
async removeKey (name) {
|
||||
const self = this
|
||||
if (!validateKeyName(name) || name === 'self') {
|
||||
return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), 'ERR_INVALID_KEY_NAME'))
|
||||
}
|
||||
const dsname = DsName(name)
|
||||
const keyInfo = await self.findKeyByName(name)
|
||||
const batch = self.store.batch()
|
||||
batch.delete(dsname)
|
||||
batch.delete(DsInfoName(name))
|
||||
await batch.commit()
|
||||
return keyInfo
|
||||
}
|
||||
|
||||
/**
|
||||
* Rename a key
|
||||
*
|
||||
* @param {string} oldName - The old local key name; must already exist.
|
||||
* @param {string} newName - The new local key name; must not already exist.
|
||||
* @returns {KeyInfo}
|
||||
*/
|
||||
async renameKey (oldName, newName) {
|
||||
const self = this
|
||||
if (!validateKeyName(oldName) || oldName === 'self') {
|
||||
return throwDelayed(errcode(new Error(`Invalid old key name '${oldName}'`), 'ERR_OLD_KEY_NAME_INVALID'))
|
||||
}
|
||||
if (!validateKeyName(newName) || newName === 'self') {
|
||||
return throwDelayed(errcode(new Error(`Invalid new key name '${newName}'`), 'ERR_NEW_KEY_NAME_INVALID'))
|
||||
}
|
||||
const oldDsname = DsName(oldName)
|
||||
const newDsname = DsName(newName)
|
||||
const oldInfoName = DsInfoName(oldName)
|
||||
const newInfoName = DsInfoName(newName)
|
||||
|
||||
const exists = await self.store.has(newDsname)
|
||||
if (exists) return throwDelayed(errcode(new Error(`Key '${newName}' already exists`), 'ERR_KEY_ALREADY_EXISTS'))
|
||||
|
||||
try {
|
||||
let res = await this.store.get(oldDsname)
|
||||
const pem = res.toString()
|
||||
res = await self.store.get(oldInfoName)
|
||||
|
||||
const keyInfo = JSON.parse(res.toString())
|
||||
keyInfo.name = newName
|
||||
const batch = self.store.batch()
|
||||
batch.put(newDsname, pem)
|
||||
batch.put(newInfoName, JSON.stringify(keyInfo))
|
||||
batch.delete(oldDsname)
|
||||
batch.delete(oldInfoName)
|
||||
await batch.commit()
|
||||
return keyInfo
|
||||
} catch (err) {
|
||||
return throwDelayed(err)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Export an existing key as a PEM encrypted PKCS #8 string
|
||||
*
|
||||
* @param {string} name - The local key name; must already exist.
|
||||
* @param {string} password - The password
|
||||
* @returns {string}
|
||||
*/
|
||||
async exportKey (name, password) {
|
||||
if (!validateKeyName(name)) {
|
||||
return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), 'ERR_INVALID_KEY_NAME'))
|
||||
}
|
||||
if (!password) {
|
||||
return throwDelayed(errcode(new Error('Password is required'), 'ERR_PASSWORD_REQUIRED'))
|
||||
}
|
||||
|
||||
const dsname = DsName(name)
|
||||
try {
|
||||
const res = await this.store.get(dsname)
|
||||
const pem = res.toString()
|
||||
const privateKey = await crypto.keys.import(pem, this._())
|
||||
return privateKey.export(password)
|
||||
} catch (err) {
|
||||
return throwDelayed(err)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Import a new key from a PEM encoded PKCS #8 string
|
||||
*
|
||||
* @param {string} name - The local key name; must not already exist.
|
||||
* @param {string} pem - The PEM encoded PKCS #8 string
|
||||
* @param {string} password - The password.
|
||||
* @returns {KeyInfo}
|
||||
*/
|
||||
async importKey (name, pem, password) {
|
||||
const self = this
|
||||
if (!validateKeyName(name) || name === 'self') {
|
||||
return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), 'ERR_INVALID_KEY_NAME'))
|
||||
}
|
||||
if (!pem) {
|
||||
return throwDelayed(errcode(new Error('PEM encoded key is required'), 'ERR_PEM_REQUIRED'))
|
||||
}
|
||||
const dsname = DsName(name)
|
||||
const exists = await self.store.has(dsname)
|
||||
if (exists) return throwDelayed(errcode(new Error(`Key '${name}' already exists`), 'ERR_KEY_ALREADY_EXISTS'))
|
||||
|
||||
let privateKey
|
||||
try {
|
||||
privateKey = await crypto.keys.import(pem, password)
|
||||
} catch (err) {
|
||||
return throwDelayed(errcode(new Error('Cannot read the key, most likely the password is wrong'), 'ERR_CANNOT_READ_KEY'))
|
||||
}
|
||||
|
||||
let kid
|
||||
try {
|
||||
kid = await privateKey.id()
|
||||
pem = await privateKey.export(this._())
|
||||
} catch (err) {
|
||||
return throwDelayed(err)
|
||||
}
|
||||
|
||||
const keyInfo = {
|
||||
name: name,
|
||||
id: kid
|
||||
}
|
||||
const batch = self.store.batch()
|
||||
batch.put(dsname, pem)
|
||||
batch.put(DsInfoName(name), JSON.stringify(keyInfo))
|
||||
await batch.commit()
|
||||
|
||||
return keyInfo
|
||||
}
|
||||
|
||||
async importPeer (name, peer) {
|
||||
const self = this
|
||||
if (!validateKeyName(name)) {
|
||||
return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), 'ERR_INVALID_KEY_NAME'))
|
||||
}
|
||||
if (!peer || !peer.privKey) {
|
||||
return throwDelayed(errcode(new Error('Peer.privKey is required'), 'ERR_MISSING_PRIVATE_KEY'))
|
||||
}
|
||||
|
||||
const privateKey = peer.privKey
|
||||
const dsname = DsName(name)
|
||||
const exists = await self.store.has(dsname)
|
||||
if (exists) return throwDelayed(errcode(new Error(`Key '${name}' already exists`), 'ERR_KEY_ALREADY_EXISTS'))
|
||||
|
||||
try {
|
||||
const kid = await privateKey.id()
|
||||
const pem = await privateKey.export(this._())
|
||||
const keyInfo = {
|
||||
name: name,
|
||||
id: kid
|
||||
}
|
||||
const batch = self.store.batch()
|
||||
batch.put(dsname, pem)
|
||||
batch.put(DsInfoName(name), JSON.stringify(keyInfo))
|
||||
await batch.commit()
|
||||
return keyInfo
|
||||
} catch (err) {
|
||||
return throwDelayed(err)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the private key as PEM encoded PKCS #8 string.
|
||||
*
|
||||
* @param {string} name
|
||||
* @returns {string}
|
||||
* @private
|
||||
*/
|
||||
async _getPrivateKey (name) {
|
||||
if (!validateKeyName(name)) {
|
||||
return throwDelayed(errcode(new Error(`Invalid key name '${name}'`), 'ERR_INVALID_KEY_NAME'))
|
||||
}
|
||||
|
||||
try {
|
||||
const dsname = DsName(name)
|
||||
const res = await this.store.get(dsname)
|
||||
return res.toString()
|
||||
} catch (err) {
|
||||
return throwDelayed(errcode(new Error(`Key '${name}' does not exist. ${err.message}`), 'ERR_KEY_NOT_FOUND'))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = Keychain
|
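The keychain methods above all validate their inputs and resolve to `KeyInfo` objects (`{ name, id }`). A minimal usage sketch, assuming an in-memory datastore and the test pass phrase used elsewhere in this changeset (the key names and require paths are illustrative only):

```js
const { MemoryDatastore } = require('interface-datastore')
const Keychain = require('libp2p/src/keychain') // path is illustrative

async function keychainRoundTrip () {
  const keychain = new Keychain(new MemoryDatastore(), { passPhrase: 'this is not a secure phrase' })

  // Create an RSA key, then export it protected by a password
  const info = await keychain.createKey('my-key', 'rsa', 2048)
  const pem = await keychain.exportKey('my-key', 'key password')

  // Import it back under a different name; the derived id stays the same
  const copy = await keychain.importKey('my-key-copy', pem, 'key password')
  console.log(copy.id === info.id) // true

  // Rename and finally remove it
  await keychain.renameKey('my-key-copy', 'my-key-renamed')
  await keychain.removeKey('my-key-renamed')
}
```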
@@ -1,89 +0,0 @@
|
||||
'use strict'
|
||||
|
||||
require('node-forge/lib/x509')
|
||||
const forge = require('node-forge/lib/forge')
|
||||
const pki = forge.pki
|
||||
exports = module.exports
|
||||
|
||||
/**
|
||||
* Gets a self-signed X.509 certificate for the key.
|
||||
*
|
||||
* The output Buffer contains the PKCS #7 message in DER.
|
||||
*
|
||||
* TODO: move to libp2p-crypto package
|
||||
*
|
||||
* @param {KeyInfo} key - The id and name of the key
|
||||
* @param {RsaPrivateKey} privateKey - The naked key
|
||||
* @returns {Object} The self-signed certificate
|
||||
*/
|
||||
exports.certificateForKey = (key, privateKey) => {
|
||||
const publicKey = pki.setRsaPublicKey(privateKey.n, privateKey.e)
|
||||
const cert = pki.createCertificate()
|
||||
cert.publicKey = publicKey
|
||||
cert.serialNumber = '01'
|
||||
cert.validity.notBefore = new Date()
|
||||
cert.validity.notAfter = new Date()
|
||||
cert.validity.notAfter.setFullYear(cert.validity.notBefore.getFullYear() + 10)
|
||||
const attrs = [{
|
||||
name: 'organizationName',
|
||||
value: 'ipfs'
|
||||
}, {
|
||||
shortName: 'OU',
|
||||
value: 'keystore'
|
||||
}, {
|
||||
name: 'commonName',
|
||||
value: key.id
|
||||
}]
|
||||
cert.setSubject(attrs)
|
||||
cert.setIssuer(attrs)
|
||||
cert.setExtensions([{
|
||||
name: 'basicConstraints',
|
||||
cA: true
|
||||
}, {
|
||||
name: 'keyUsage',
|
||||
keyCertSign: true,
|
||||
digitalSignature: true,
|
||||
nonRepudiation: true,
|
||||
keyEncipherment: true,
|
||||
dataEncipherment: true
|
||||
}, {
|
||||
name: 'extKeyUsage',
|
||||
serverAuth: true,
|
||||
clientAuth: true,
|
||||
codeSigning: true,
|
||||
emailProtection: true,
|
||||
timeStamping: true
|
||||
}, {
|
||||
name: 'nsCertType',
|
||||
client: true,
|
||||
server: true,
|
||||
email: true,
|
||||
objsign: true,
|
||||
sslCA: true,
|
||||
emailCA: true,
|
||||
objCA: true
|
||||
}])
|
||||
// self-sign certificate
|
||||
cert.sign(privateKey)
|
||||
|
||||
return cert
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds the first item in a collection that is matched in the
|
||||
* `asyncCompare` function.
|
||||
*
|
||||
* `asyncCompare` is an async function that must
|
||||
* resolve to either `true` or `false`.
|
||||
*
|
||||
* @param {Array} array
|
||||
* @param {function(*)} asyncCompare An async function that returns a boolean
|
||||
*/
|
||||
async function findAsync (array, asyncCompare) {
|
||||
const promises = array.map(asyncCompare)
|
||||
const results = await Promise.all(promises)
|
||||
const index = results.findIndex(result => result)
|
||||
return array[index]
|
||||
}
|
||||
|
||||
module.exports.findAsync = findAsync
|
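`findAsync` runs the asynchronous predicate over all items in parallel and resolves to the first matching element, or `undefined` when nothing matches. A small illustrative usage (the require path is assumed):

```js
const { findAsync } = require('libp2p/src/keychain/util') // path is assumed

async function pickKey (keyInfos) {
  // Resolves to the first info whose async predicate returns true
  return findAsync(keyInfos, async (info) => info.name === 'alice')
}
```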
@@ -215,7 +215,7 @@ class Metrics {
|
||||
|
||||
const _sink = stream.sink
|
||||
stream.sink = source => {
|
||||
return pipe(
|
||||
pipe(
|
||||
source,
|
||||
tap(chunk => metrics._onMessage({
|
||||
remotePeer,
|
||||
|
@@ -128,7 +128,11 @@ class Stats extends EventEmitter {
|
||||
* @returns {void}
|
||||
*/
|
||||
_resetComputeTimeout () {
|
||||
this._timeout = retimer(this._update, this._nextTimeout())
|
||||
if (this._timeout) {
|
||||
this._timeout.reschedule(this._nextTimeout())
|
||||
} else {
|
||||
this._timeout = retimer(this._update, this._nextTimeout())
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
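For context, the `retimer` module used here returns a one-shot handle with `reschedule()` and `clear()` methods; rescheduling only delays the single upcoming firing, it does not create a repeating interval, which is what the hunk above is adjusting for. A minimal sketch of the API (callback and delays are illustrative):

```js
const retimer = require('retimer')

// Fires the callback once after roughly 1000 ms
const timer = retimer(() => console.log('compute stats'), 1000)

// Pushes that single upcoming firing back to roughly 2000 ms from now;
// it does not make the callback repeat
timer.reschedule(2000)

// Cancels the pending firing entirely
timer.clear()
```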
@@ -4,38 +4,33 @@ Libp2p's PeerStore is responsible for keeping an updated register with the relev
|
||||
|
||||
The PeerStore manages the high level operations on its inner books. Moreover, the PeerStore should be responsible for notifying interested parties of relevant events, through its Event Emitter.
|
||||
|
||||
## Submitting records to the PeerStore
|
||||
## Data gathering
|
||||
|
||||
Several libp2p subsystems will perform operations that might gather relevant information about peers.
|
||||
Several libp2p subsystems will perform operations, which will gather relevant information about peers. Some operations might not have this as an end goal, but can also gather important data.
|
||||
|
||||
### Identify
|
||||
- The Identify protocol automatically runs on every connection when multiplexing is enabled. The protocol will add the multiaddrs and protocols provided by the peer to the PeerStore.
|
||||
- In the background, the Identify Service is also waiting for protocol change notifications of peers via the IdentifyPush protocol. Peers may leverage the `identify-push` message to communicate protocol changes to all connected peers, so that their PeerStore can be updated with the new protocols.
|
||||
- While it is currently not supported in js-libp2p, future iterations may also support the [IdentifyDelta protocol](https://github.com/libp2p/specs/pull/176).
|
||||
- Taking into account that the Identify protocol records are directly from the peer, they should be considered the source of truth and weighted accordingly.
|
||||
During its life, a libp2p node will discover peers through its discovery protocols. In a typical discovery protocol, addresses of the peer are discovered along with its peer id. Once this happens, the PeerStore should collect this information for future (or immediate) usage by other subsystems. When the information is stored, the PeerStore should inform interested parties of the peer discovered (`peer` event).
|
||||
|
||||
### Peer Discovery
|
||||
- Libp2p discovery protocols aim to discover new peers in the network. In a typical discovery protocol, addresses of the peer are discovered along with its peer id. Once this happens, a libp2p discovery protocol should emit a `peer` event with the information of the discovered peer and this information will be added to the PeerStore by libp2p.
|
||||
Taking into account a different scenario, a peer might perform/receive a dial request to/from an unknown peer. In such a scenario, the PeerStore must store the peer's multiaddr once a connection is established.
|
||||
|
||||
### Dialer
|
||||
- The libp2p API supports dialing a peer given a `multiaddr` and no prior knowledge of the peer. If the node is able to establish a connection with the peer, the peer and its multiaddr are added to the PeerStore.
|
||||
- When a connection is being upgraded, more precisely after its encryption, or even in a discovery protocol, a libp2p node can get to know other parties' public keys. In this scenario, libp2p will add the peer's public key to its `KeyBook`.
|
||||
When a connection is being upgraded, more precisely after its encryption, or even in a discovery protocol, a libp2p node can get to know other parties' public keys. In this scenario, libp2p will add the peer's public key to its `KeyBook`.
|
||||
|
||||
### DHT
|
||||
- On some DHT operations, such as finding providers for a given CID, nodes may exchange peer data as part of the query. This passive peer discovery should result in the DHT emitting the `peer` event in the same way [Peer Discovery](#peerdiscovery) does.
|
||||
After a connection is established with a peer, the Identify protocol will run automatically. A stream is created and peers exchange their information (Multiaddrs, running protocols and their public key). Once this information is obtained, it should be added to the PeerStore. In this specific case, as we are speaking to the source of truth, we should ensure the PeerStore is prioritizing these records. If the recorded `multiaddrs` or `protocols` have changed, interested parties must be informed via the `change:multiaddrs` or `change:protocols` events respectively.
|
||||
|
||||
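A minimal sketch of consuming this flow, assuming a started node `libp2p` configured with at least one discovery module (the variable names are illustrative):

```js
// Discovery modules surface peers through the node itself...
libp2p.on('peer:discovery', (peerId) => {
  console.log('discovered', peerId.toB58String())
})

// ...and once the data is stored, the PeerStore emits its own `peer` event
libp2p.peerStore.on('peer', (peerId) => {
  // Addresses gathered via discovery, dials or identify end up in the AddressBook
  const addresses = libp2p.peerStore.addressBook.get(peerId) || []
  console.log('known addresses for', peerId.toB58String(), addresses.length)
})
```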
## Retrieving records from the PeerStore
|
||||
In the background, the Identify Service is also waiting for protocol change notifications of peers via the IdentifyPush protocol. Peers may leverage the `identify-push` message to communicate protocol changes to all connected peers, so that their PeerStore can be updated with the new protocols. As `identify-push` also sends complete and updated information, the data in the PeerStore can be replaced.
|
||||
|
||||
When data in the PeerStore is updated, the PeerStore will emit events based on the changes, to allow applications and other subsystems to take action on those changes. Any subsystem interested in these notifications should subscribe to the [`PeerStore events`][peer-store-events].
|
||||
(To consider: should we avoid replacing records until we reach multiaddr confidence? We might lose accurate information, as we will talk with older nodes on the network.)
|
||||
|
||||
### Peer
|
||||
- Each time a new peer is discovered, the PeerStore should emit a [`peer` event][peer-store-events], so that interested parties can leverage this peer and establish a connection with it.
|
||||
While it is currently not supported in js-libp2p, future iterations may also support the [IdentifyDelta protocol](https://github.com/libp2p/specs/pull/176).
|
||||
|
||||
### Protocols
|
||||
- When the known protocols of a peer change, the PeerStore emits a [`change:protocols` event][peer-store-events].
|
||||
It is also possible to gather relevant information about peers from other protocols / subsystems. For instance, in `DHT` operations, nodes can exchange peer data as part of the query. In this case, we can learn additional information about a peer we already know. In this scenario, the PeerStore should not replace the existing data it has, but add to it.
|
||||
|
||||
### Multiaddrs
|
||||
- When the known listening `multiaddrs` of a peer change, the PeerStore emits a [`change:multiaddrs` event][peer-store-events].
|
||||
## Data Consumption
|
||||
|
||||
When the PeerStore data is updated, this information might be important for different parties.
|
||||
|
||||
Every time a peer needs to dial another peer, it is essential that it knows the multiaddrs used by the peer, in order to perform a successful dial to it. The same is true for pinging a peer. While the `AddressBook` is going to keep its data updated, it will also emit `change:multiaddrs` events so that subsystems/users interested in knowing these changes can be notified instead of polling the `AddressBook`.
|
||||
|
||||
Every time a peer starts or stops supporting a protocol, libp2p subsystems or users might need to act accordingly. The `js-libp2p` registrar orchestrates known peers, established connections and protocol topologies. This way, once a protocol is supported by a peer, the topology of that protocol should be informed that a new peer may be used and the subsystem can decide whether it should open a new stream with that peer. For these situations, the `ProtoBook` will emit `change:protocols` events whenever the supported protocols of a peer change.
|
||||
|
||||
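A short sketch of subscribing to these notifications instead of polling the books, assuming a constructed node `libp2p` (the protocol name is illustrative):

```js
// React to address changes instead of polling the AddressBook
libp2p.peerStore.on('change:multiaddrs', ({ peerId, multiaddrs }) => {
  console.log(`${peerId.toB58String()} now has ${multiaddrs.length} multiaddrs`)
})

// React to protocol changes, e.g. to decide whether a new stream should be opened
libp2p.peerStore.on('change:protocols', ({ peerId, protocols }) => {
  if (protocols.includes('/my/protocol/1.0.0')) {
    // a topology registered via the registrar would also be notified of this peer
  }
})
```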
## PeerStore implementation
|
||||
|
||||
@@ -75,11 +70,7 @@ A `peerId.toB58String()` identifier mapping to a `Set` of protocol identifier st
|
||||
|
||||
#### Metadata Book
|
||||
|
||||
The `metadataBook` keeps track of the known metadata of a peer. Its metadata is stored in a key-value fashion, where a key identifier (`string`) maps to a metadata value (`Buffer`).
|
||||
|
||||
`Map<string, Map<string, Buffer>>`
|
||||
|
||||
A `peerId.toB58String()` identifier mapping to the peer metadata Map.
|
||||
**Not Yet Implemented**
|
||||
|
||||
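A small sketch of the metadata API described above, assuming access to a node's `peerStore` and a known `peerId` (the metadata key and value are illustrative):

```js
// Values must be Buffers, keys are plain strings
peerStore.metadataBook.set(peerId, 'location', Buffer.from('lisbon'))

// Read back the whole metadata Map or a single value
const all = peerStore.metadataBook.get(peerId) // Map<string, Buffer>
const location = peerStore.metadataBook.getValue(peerId, 'location') // Buffer

// The emitted payload carries the changed metadata key
peerStore.on('change:metadata', ({ peerId, metadata }) => {
  console.log('metadata key changed:', metadata)
})
```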
### API
|
||||
|
||||
@@ -89,7 +80,6 @@ Access to its underlying books:
|
||||
|
||||
- `peerStore.addressBook.*`
|
||||
- `peerStore.keyBook.*`
|
||||
- `peerStore.metadataBook.*`
|
||||
- `peerStore.protoBook.*`
|
||||
|
||||
### Events
|
||||
@@ -97,8 +87,6 @@ Access to its underlying books:
|
||||
- `peer` - emitted when a new peer is added.
|
||||
- `change:multiaddrs` - emitted when a known peer has a different set of multiaddrs.
|
||||
- `change:protocols` - emitted when a known peer supports a different set of protocols.
|
||||
- `change:pubkey` - emitted when a peer's public key is known.
|
||||
- `change:metadata` - emitted when known metadata of a peer changes.
|
||||
|
||||
## Data Persistence
|
||||
|
||||
@@ -130,6 +118,8 @@ All public keys are stored under the following pattern:
|
||||
|
||||
**MetadataBook**
|
||||
|
||||
_NOT_YET_IMPLEMENTED_
|
||||
|
||||
Metadata is stored under the following key pattern:
|
||||
|
||||
`/peers/metadata/<b32 peer id no padding>/<key>`
|
||||
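For illustration, the datastore key for a single metadata entry can be built with `interface-datastore`'s `Key`, mirroring the `_batchMetadataBook` code further below (`peerId` and the metadata key name are placeholders):

```js
const { Key } = require('interface-datastore')

const NAMESPACE_METADATA = '/peers/metadata/'

// `peerId.toString()` is the b32 representation used by the persisted key pattern
function metadataKey (peerId, metadataKeyName) {
  return new Key(`${NAMESPACE_METADATA}${peerId.toString()}/${metadataKeyName}`)
}
```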
@@ -142,4 +132,3 @@ Metadata is stored under the following key pattern:
|
||||
- When improving libp2p configuration, we should think about a possible way of allowing the configuration of Bootstrap to be influenced by the persisted peers, as a way to decrease the load on Bootstrap nodes.
|
||||
|
||||
[peer-id]: https://github.com/libp2p/js-peer-id
|
||||
[peer-store-events]: ../../doc/API.md#libp2ppeerstore
|
||||
|
@@ -97,6 +97,7 @@ class AddressBook extends Book {
|
||||
/**
|
||||
* Add known addresses of a provided peer.
|
||||
* If the peer is not known, it is set with the given addresses.
|
||||
* @override
|
||||
* @param {PeerId} peerId
|
||||
* @param {Array<Multiaddr>} multiaddrs
|
||||
* @returns {AddressBook}
|
||||
@@ -179,7 +180,7 @@ class AddressBook extends Book {
|
||||
const record = this.data.get(peerId.toB58String())
|
||||
|
||||
if (!record) {
|
||||
return []
|
||||
return undefined
|
||||
}
|
||||
|
||||
return record.map((address) => {
|
||||
|
@@ -10,7 +10,6 @@ const PeerId = require('peer-id')
|
||||
|
||||
const AddressBook = require('./address-book')
|
||||
const KeyBook = require('./key-book')
|
||||
const MetadataBook = require('./metadata-book')
|
||||
const ProtoBook = require('./proto-book')
|
||||
|
||||
const {
|
||||
@@ -22,8 +21,6 @@ const {
|
||||
* @fires PeerStore#peer Emitted when a new peer is added.
|
||||
* @fires PeerStore#change:protocols Emitted when a known peer supports a different set of protocols.
|
||||
* @fires PeerStore#change:multiaddrs Emitted when a known peer has a different set of multiaddrs.
|
||||
* @fires PeerStore#change:pubkey Emitted when a peer's public key is known.
|
||||
* @fires PeerStore#change:metadata Emitted when the known metadata of a peer change.
|
||||
*/
|
||||
class PeerStore extends EventEmitter {
|
||||
/**
|
||||
@@ -50,11 +47,6 @@ class PeerStore extends EventEmitter {
|
||||
*/
|
||||
this.keyBook = new KeyBook(this)
|
||||
|
||||
/**
|
||||
* MetadataBook containing a map of peerIdStr to their metadata Map.
|
||||
*/
|
||||
this.metadataBook = new MetadataBook(this)
|
||||
|
||||
/**
|
||||
* ProtoBook containing a map of peerIdStr to supported protocols.
|
||||
*/
|
||||
@@ -76,17 +68,31 @@ class PeerStore extends EventEmitter {
|
||||
* @returns {Map<string, Peer>}
|
||||
*/
|
||||
get peers () {
|
||||
const storedPeers = new Set([
|
||||
...this.addressBook.data.keys(),
|
||||
...this.keyBook.data.keys(),
|
||||
...this.protoBook.data.keys(),
|
||||
...this.metadataBook.data.keys()
|
||||
])
|
||||
|
||||
const peersData = new Map()
|
||||
storedPeers.forEach((idStr) => {
|
||||
peersData.set(idStr, this.get(PeerId.createFromCID(idStr)))
|
||||
})
|
||||
|
||||
// AddressBook
|
||||
for (const [idStr, addresses] of this.addressBook.data.entries()) {
|
||||
const id = this.keyBook.data.get(idStr) || PeerId.createFromCID(idStr)
|
||||
peersData.set(idStr, {
|
||||
id,
|
||||
addresses,
|
||||
protocols: this.protoBook.get(id) || []
|
||||
})
|
||||
}
|
||||
|
||||
// ProtoBook
|
||||
for (const [idStr, protocols] of this.protoBook.data.entries()) {
|
||||
const pData = peersData.get(idStr)
|
||||
const id = this.keyBook.data.get(idStr) || PeerId.createFromCID(idStr)
|
||||
|
||||
if (!pData) {
|
||||
peersData.set(idStr, {
|
||||
id,
|
||||
addresses: [],
|
||||
protocols: Array.from(protocols)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return peersData
|
||||
}
|
||||
@@ -100,9 +106,8 @@ class PeerStore extends EventEmitter {
|
||||
const addressesDeleted = this.addressBook.delete(peerId)
|
||||
const keyDeleted = this.keyBook.delete(peerId)
|
||||
const protocolsDeleted = this.protoBook.delete(peerId)
|
||||
const metadataDeleted = this.metadataBook.delete(peerId)
|
||||
|
||||
return addressesDeleted || keyDeleted || protocolsDeleted || metadataDeleted
|
||||
return addressesDeleted || keyDeleted || protocolsDeleted
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -117,18 +122,16 @@ class PeerStore extends EventEmitter {
|
||||
|
||||
const id = this.keyBook.data.get(peerId.toB58String())
|
||||
const addresses = this.addressBook.get(peerId)
|
||||
const metadata = this.metadataBook.get(peerId)
|
||||
const protocols = this.protoBook.get(peerId)
|
||||
|
||||
if (!id && !addresses && !metadata && !protocols) {
|
||||
if (!addresses && !protocols) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
return {
|
||||
id: id || peerId,
|
||||
addresses: addresses || [],
|
||||
protocols: protocols || [],
|
||||
metadata: metadata
|
||||
protocols: protocols || []
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,161 +0,0 @@
|
||||
'use strict'
|
||||
|
||||
const errcode = require('err-code')
|
||||
const debug = require('debug')
|
||||
const log = debug('libp2p:peer-store:proto-book')
|
||||
log.error = debug('libp2p:peer-store:proto-book:error')
|
||||
|
||||
const { Buffer } = require('buffer')
|
||||
|
||||
const PeerId = require('peer-id')
|
||||
|
||||
const Book = require('./book')
|
||||
|
||||
const {
|
||||
codes: { ERR_INVALID_PARAMETERS }
|
||||
} = require('../errors')
|
||||
|
||||
/**
|
||||
* The MetadataBook is responsible for keeping the known
|
||||
* metadata of a peer.
|
||||
* @fires MetadataBook#change:metadata
|
||||
*/
|
||||
class MetadataBook extends Book {
|
||||
/**
|
||||
* @constructor
|
||||
* @param {PeerStore} peerStore
|
||||
*/
|
||||
constructor (peerStore) {
|
||||
/**
|
||||
* PeerStore Event emitter, used by the MetadataBook to emit:
|
||||
* "change:metadata" - emitted when the known metadata of a peer change.
|
||||
*/
|
||||
super({
|
||||
peerStore,
|
||||
eventName: 'change:metadata',
|
||||
eventProperty: 'metadata'
|
||||
})
|
||||
|
||||
/**
|
||||
* Map known peers to their known metadata.
|
||||
* @type {Map<string, Map<string, Buffer>>}
|
||||
*/
|
||||
this.data = new Map()
|
||||
}
|
||||
|
||||
/**
|
||||
* Set metadata key and value of a provided peer.
|
||||
* @override
|
||||
* @param {PeerId} peerId
|
||||
* @param {string} key metadata key
|
||||
* @param {Buffer} value metadata value
|
||||
* @returns {MetadataBook}
|
||||
*/
|
||||
set (peerId, key, value) {
|
||||
if (!PeerId.isPeerId(peerId)) {
|
||||
log.error('peerId must be an instance of peer-id to store data')
|
||||
throw errcode(new Error('peerId must be an instance of peer-id'), ERR_INVALID_PARAMETERS)
|
||||
}
|
||||
|
||||
if (typeof key !== 'string' || !Buffer.isBuffer(value)) {
|
||||
log.error('valid key and value must be provided to store data')
|
||||
throw errcode(new Error('valid key and value must be provided'), ERR_INVALID_PARAMETERS)
|
||||
}
|
||||
|
||||
this._setValue(peerId, key, value)
|
||||
|
||||
return this
|
||||
}
|
||||
|
||||
/**
|
||||
* Set data into the data structure
|
||||
* @override
|
||||
*/
|
||||
_setValue (peerId, key, value, { emit = true } = {}) {
|
||||
const id = peerId.toB58String()
|
||||
const rec = this.data.get(id) || new Map()
|
||||
const recMap = rec.get(key)
|
||||
|
||||
// Already exists and is equal
|
||||
if (recMap && value.equals(recMap)) {
|
||||
log(`the metadata provided to store is equal to the already stored for ${id} on ${key}`)
|
||||
return
|
||||
}
|
||||
|
||||
rec.set(key, value)
|
||||
this.data.set(id, rec)
|
||||
|
||||
emit && this._emit(peerId, key)
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the known data of a provided peer.
|
||||
* @param {PeerId} peerId
|
||||
* @returns {Map<string, Buffer>}
|
||||
*/
|
||||
get (peerId) {
|
||||
if (!PeerId.isPeerId(peerId)) {
|
||||
throw errcode(new Error('peerId must be an instance of peer-id'), ERR_INVALID_PARAMETERS)
|
||||
}
|
||||
|
||||
return this.data.get(peerId.toB58String())
|
||||
}
|
||||
|
||||
/**
|
||||
* Get specific metadata value, if it exists
|
||||
* @param {PeerId} peerId
|
||||
* @param {string} key
|
||||
* @returns {Buffer}
|
||||
*/
|
||||
getValue (peerId, key) {
|
||||
if (!PeerId.isPeerId(peerId)) {
|
||||
throw errcode(new Error('peerId must be an instance of peer-id'), ERR_INVALID_PARAMETERS)
|
||||
}
|
||||
|
||||
const rec = this.data.get(peerId.toB58String())
|
||||
return rec && rec.get(key)
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the provided peer from the book.
|
||||
* @param {PeerId} peerId
|
||||
* @returns {boolean}
|
||||
*/
|
||||
delete (peerId) {
|
||||
if (!PeerId.isPeerId(peerId)) {
|
||||
throw errcode(new Error('peerId must be an instance of peer-id'), ERR_INVALID_PARAMETERS)
|
||||
}
|
||||
|
||||
if (!this.data.delete(peerId.toB58String())) {
|
||||
return false
|
||||
}
|
||||
|
||||
this._emit(peerId)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the provided peer metadata key from the book.
|
||||
* @param {PeerId} peerId
|
||||
* @param {string} key
|
||||
* @returns {boolean}
|
||||
*/
|
||||
deleteValue (peerId, key) {
|
||||
if (!PeerId.isPeerId(peerId)) {
|
||||
throw errcode(new Error('peerId must be an instance of peer-id'), ERR_INVALID_PARAMETERS)
|
||||
}
|
||||
|
||||
const rec = this.data.get(peerId.toB58String())
|
||||
|
||||
if (!rec || !rec.delete(key)) {
|
||||
return false
|
||||
}
|
||||
|
||||
this._emit(peerId, key)
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = MetadataBook
|
@@ -8,8 +8,5 @@ module.exports.NAMESPACE_ADDRESS = '/peers/addrs/'
|
||||
// /peers/keys/<b32 peer id no padding>
|
||||
module.exports.NAMESPACE_KEYS = '/peers/keys/'
|
||||
|
||||
// /peers/metadata/<b32 peer id no padding>/<key>
|
||||
module.exports.NAMESPACE_METADATA = '/peers/metadata/'
|
||||
|
||||
// /peers/addrs/<b32 peer id no padding>
|
||||
module.exports.NAMESPACE_PROTOCOL = '/peers/protos/'
|
||||
|
@@ -14,7 +14,6 @@ const {
|
||||
NAMESPACE_ADDRESS,
|
||||
NAMESPACE_COMMON,
|
||||
NAMESPACE_KEYS,
|
||||
NAMESPACE_METADATA,
|
||||
NAMESPACE_PROTOCOL
|
||||
} = require('./consts')
|
||||
|
||||
@@ -44,12 +43,6 @@ class PersistentPeerStore extends PeerStore {
|
||||
*/
|
||||
this._dirtyPeers = new Set()
|
||||
|
||||
/**
|
||||
* Peer metadata changes, mapping peer identifiers to the set of changed metadata keys.
|
||||
* @type {Map<string, Set<string>>}
|
||||
*/
|
||||
this._dirtyMetadata = new Map()
|
||||
|
||||
this.threshold = threshold
|
||||
this._addDirtyPeer = this._addDirtyPeer.bind(this)
|
||||
}
|
||||
@@ -65,7 +58,6 @@ class PersistentPeerStore extends PeerStore {
|
||||
this.on('change:protocols', this._addDirtyPeer)
|
||||
this.on('change:multiaddrs', this._addDirtyPeer)
|
||||
this.on('change:pubkey', this._addDirtyPeer)
|
||||
this.on('change:metadata', this._addDirtyPeerMetadata)
|
||||
|
||||
// Load data
|
||||
for await (const entry of this._datastore.query({ prefix: NAMESPACE_COMMON })) {
|
||||
@@ -96,35 +88,7 @@ class PersistentPeerStore extends PeerStore {
|
||||
|
||||
if (this._dirtyPeers.size >= this.threshold) {
|
||||
// Commit current data
|
||||
this._commitData().catch(err => {
|
||||
log.error('error committing data', err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add modified metadata peer to the set.
|
||||
* @private
|
||||
* @param {Object} params
|
||||
* @param {PeerId} params.peerId
|
||||
* @param {string} params.metadata
|
||||
*/
|
||||
_addDirtyPeerMetadata ({ peerId, metadata }) {
|
||||
const peerIdstr = peerId.toB58String()
|
||||
|
||||
log('add dirty metadata peer', peerIdstr)
|
||||
this._dirtyPeers.add(peerIdstr)
|
||||
|
||||
// Add dirty metadata key
|
||||
const mData = this._dirtyMetadata.get(peerIdstr) || new Set()
|
||||
mData.add(metadata)
|
||||
this._dirtyMetadata.set(peerIdstr, mData)
|
||||
|
||||
if (this._dirtyPeers.size >= this.threshold) {
|
||||
// Commit current data
|
||||
this._commitData().catch(err => {
|
||||
log.error('error committing data', err)
|
||||
})
|
||||
this._commitData()
|
||||
}
|
||||
}
|
||||
|
||||
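The dirty-peer bookkeeping above batches datastore writes and only commits once `threshold` peers have pending changes. As a hedged sketch, persistence is typically enabled through the libp2p configuration (the option names below follow the docs of this era and should be verified against `doc/API.md`):

```js
const Libp2p = require('libp2p')
const { MemoryDatastore } = require('interface-datastore')

async function createPersistentNode () {
  return Libp2p.create({
    modules: { /* transports, stream muxer and connection encryption go here */ },
    datastore: new MemoryDatastore(),
    peerStore: {
      persistence: true,
      threshold: 5 // flush to the datastore once 5 peers have pending changes
    }
  })
}
```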
@@ -156,9 +120,6 @@ class PersistentPeerStore extends PeerStore {
|
||||
// Key Book
|
||||
this._batchKeyBook(peerId, batch)
|
||||
|
||||
// Metadata Book
|
||||
this._batchMetadataBook(peerId, batch)
|
||||
|
||||
// Proto Book
|
||||
this._batchProtoBook(peerId, batch)
|
||||
}
|
||||
@@ -223,32 +184,6 @@ class PersistentPeerStore extends PeerStore {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add metadata book data of the peer to the batch.
|
||||
* @private
|
||||
* @param {PeerId} peerId
|
||||
* @param {Object} batch
|
||||
*/
|
||||
_batchMetadataBook (peerId, batch) {
|
||||
const b32key = peerId.toString()
|
||||
const dirtyMetada = this._dirtyMetadata.get(peerId.toB58String()) || []
|
||||
|
||||
try {
|
||||
dirtyMetada.forEach((dirtyKey) => {
|
||||
const key = new Key(`${NAMESPACE_METADATA}${b32key}/${dirtyKey}`)
|
||||
const dirtyValue = this.metadataBook.getValue(peerId, dirtyKey)
|
||||
|
||||
if (dirtyValue) {
|
||||
batch.put(key, dirtyValue)
|
||||
} else {
|
||||
batch.delete(key)
|
||||
}
|
||||
})
|
||||
} catch (err) {
|
||||
log.error(err)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add proto book data of the peer to the batch.
|
||||
* @private
|
||||
@@ -309,13 +244,6 @@ class PersistentPeerStore extends PeerStore {
|
||||
decoded,
|
||||
{ emit: false })
|
||||
break
|
||||
case 'metadata':
|
||||
this.metadataBook._setValue(
|
||||
peerId,
|
||||
keyParts[4],
|
||||
value,
|
||||
{ emit: false })
|
||||
break
|
||||
case 'protos':
|
||||
decoded = Protocols.decode(value)
|
||||
|
||||
|
@@ -83,6 +83,7 @@ class ProtoBook extends Book {
|
||||
/**
|
||||
* Adds known protocols of a provided peer.
|
||||
* If the peer was not known before, it will be added.
|
||||
* @override
|
||||
* @param {PeerId} peerId
|
||||
* @param {Array<string>} protocols
|
||||
* @returns {ProtoBook}
|
||||
|
@@ -17,7 +17,7 @@ const handshake = require('it-handshake')
|
||||
const { NONCE_LENGTH } = require('./key-generator')
|
||||
const debug = require('debug')
|
||||
const log = debug('libp2p:pnet')
|
||||
log.error = debug('libp2p:pnet:err')
|
||||
log.err = debug('libp2p:pnet:err')
|
||||
|
||||
/**
|
||||
* Takes a Private Shared Key (psk) and provides a `protect` method
|
||||
@@ -69,7 +69,7 @@ class Protector {
|
||||
// Decrypt all inbound traffic
|
||||
createUnboxStream(remoteNonce, this.psk),
|
||||
external
|
||||
).catch(log.error)
|
||||
)
|
||||
|
||||
return internal
|
||||
}
|
||||
|
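As a hedged sketch of wiring this protector into a node, the key generator helper and the `connProtector` module slot below are assumed from the private-network docs of this era:

```js
const Protector = require('libp2p/src/pnet')
const generate = require('libp2p/src/pnet/key-generator')

// Generate a pre-shared swarm key into a 95 byte buffer
const swarmKey = Buffer.alloc(95)
generate(swarmKey)

// Later passed as: Libp2p.create({ modules: { connProtector: new Protector(swarmKey) } })
const protector = new Protector(swarmKey)
```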
@@ -13,14 +13,12 @@ class TransportManager {
|
||||
* @param {object} options
|
||||
* @param {Libp2p} options.libp2p The Libp2p instance. It will be passed to the transports.
|
||||
* @param {Upgrader} options.upgrader The upgrader to provide to the transports
|
||||
* @param {boolean} [options.faultTolerance = FAULT_TOLERANCE.FATAL_ALL] Address listen error tolerance.
|
||||
*/
|
||||
constructor ({ libp2p, upgrader, faultTolerance = FAULT_TOLERANCE.FATAL_ALL }) {
|
||||
constructor ({ libp2p, upgrader }) {
|
||||
this.libp2p = libp2p
|
||||
this.upgrader = upgrader
|
||||
this._transports = new Map()
|
||||
this._listeners = new Map()
|
||||
this.faultTolerance = faultTolerance
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -175,11 +173,7 @@ class TransportManager {
|
||||
// If no transports were able to listen, throw an error. This likely
|
||||
// means we were given addresses we do not have transports for
|
||||
if (couldNotListen.length === this._transports.size) {
|
||||
const message = `no valid addresses were provided for transports [${couldNotListen}]`
|
||||
if (this.faultTolerance === FAULT_TOLERANCE.FATAL_ALL) {
|
||||
throw errCode(new Error(message), codes.ERR_NO_VALID_ADDRESSES)
|
||||
}
|
||||
log(`libp2p in dial mode only: ${message}`)
|
||||
throw errCode(new Error(`no valid addresses were provided for transports [${couldNotListen}]`), codes.ERR_NO_VALID_ADDRESSES)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -218,18 +212,4 @@ class TransportManager {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Enum Transport Manager Fault Tolerance values.
|
||||
* FATAL_ALL should be used to fail when no transport is able to listen.
|
||||
* NO_FATAL should be used to tolerate listen failures and run in dial-only mode.
|
||||
* @readonly
|
||||
* @enum {number}
|
||||
*/
|
||||
const FAULT_TOLERANCE = {
|
||||
FATAL_ALL: 0,
|
||||
NO_FATAL: 1
|
||||
}
|
||||
|
||||
TransportManager.FaultTolerance = FAULT_TOLERANCE
|
||||
|
||||
module.exports = TransportManager
|
||||
|
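The enum above is exposed as `TransportManager.FaultTolerance`. A hedged configuration sketch that keeps a node alive in dial-only mode when none of its listen addresses can be used (the config key follows the js-libp2p docs of this era):

```js
const Libp2p = require('libp2p')
const { FaultTolerance } = require('libp2p/src/transport-manager')

async function createDialOnlyNode () {
  return Libp2p.create({
    modules: { /* transports, stream muxer and connection encryption go here */ },
    transportManager: {
      // Log and continue instead of throwing when no transport can listen
      faultTolerance: FaultTolerance.NO_FATAL
    }
  })
}
```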
@@ -232,7 +232,7 @@ class Upgrader {
|
||||
log('%s: incoming stream opened on %s', direction, protocol)
|
||||
if (this.metrics) this.metrics.trackStream({ stream, remotePeer, protocol })
|
||||
connection.addStream(muxedStream, { protocol })
|
||||
this._onStream({ connection, stream: { ...muxedStream, ...stream }, protocol })
|
||||
this._onStream({ connection, stream, protocol })
|
||||
} catch (err) {
|
||||
log.error(err)
|
||||
}
|
||||
@@ -258,7 +258,7 @@ class Upgrader {
|
||||
}
|
||||
|
||||
// Pipe all data through the muxer
|
||||
pipe(upgradedConn, muxer, upgradedConn).catch(log.error)
|
||||
pipe(upgradedConn, muxer, upgradedConn)
|
||||
}
|
||||
|
||||
const _timeline = maConn.timeline
|
||||
|
@@ -7,9 +7,6 @@ chai.use(require('chai-as-promised'))
|
||||
const { expect } = chai
|
||||
const sinon = require('sinon')
|
||||
|
||||
const delay = require('delay')
|
||||
const pWaitFor = require('p-wait-for')
|
||||
|
||||
const peerUtils = require('../utils/creators/peer')
|
||||
const mockConnection = require('../utils/mockConnection')
|
||||
const baseOptions = require('../utils/base-options.browser')
|
||||
@@ -115,148 +112,4 @@ describe('libp2p.connections', () => {
|
||||
await libp2p.stop()
|
||||
await remoteLibp2p.stop()
|
||||
})
|
||||
|
||||
describe('proactive connections', () => {
|
||||
let nodes = []
|
||||
|
||||
beforeEach(async () => {
|
||||
nodes = await peerUtils.createPeer({
|
||||
number: 2,
|
||||
config: {
|
||||
addresses: {
|
||||
listen: ['/ip4/127.0.0.1/tcp/0/ws']
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(nodes.map((node) => node.stop()))
|
||||
sinon.reset()
|
||||
})
|
||||
|
||||
it('should connect to all the peers stored in the PeerStore, if their number is below minConnections', async () => {
|
||||
const [libp2p] = await peerUtils.createPeer({
|
||||
fixture: false,
|
||||
started: false,
|
||||
config: {
|
||||
addresses: {
|
||||
listen: ['/ip4/127.0.0.1/tcp/0/ws']
|
||||
},
|
||||
connectionManager: {
|
||||
minConnections: 3
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Populate PeerStore before starting
|
||||
libp2p.peerStore.addressBook.set(nodes[0].peerId, nodes[0].multiaddrs)
|
||||
libp2p.peerStore.addressBook.set(nodes[1].peerId, nodes[1].multiaddrs)
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
// Wait for peers to connect
|
||||
await pWaitFor(() => libp2p.connectionManager.size === 2)
|
||||
|
||||
await libp2p.stop()
|
||||
})
|
||||
|
||||
it('should connect to all the peers stored in the PeerStore until reaching the minConnections', async () => {
|
||||
const minConnections = 1
|
||||
const [libp2p] = await peerUtils.createPeer({
|
||||
fixture: false,
|
||||
started: false,
|
||||
config: {
|
||||
addresses: {
|
||||
listen: ['/ip4/127.0.0.1/tcp/0/ws']
|
||||
},
|
||||
connectionManager: {
|
||||
minConnections
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Populate PeerStore before starting
|
||||
libp2p.peerStore.addressBook.set(nodes[0].peerId, nodes[0].multiaddrs)
|
||||
libp2p.peerStore.addressBook.set(nodes[1].peerId, nodes[1].multiaddrs)
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
// Wait for peer to connect
|
||||
await pWaitFor(() => libp2p.connectionManager.size === minConnections)
|
||||
|
||||
// Wait more time to guarantee no other connection happened
|
||||
await delay(200)
|
||||
expect(libp2p.connectionManager.size).to.eql(minConnections)
|
||||
|
||||
await libp2p.stop()
|
||||
})
|
||||
|
||||
it('should connect to all the peers stored in the PeerStore until reaching the minConnections sorted', async () => {
|
||||
const minConnections = 1
|
||||
const [libp2p] = await peerUtils.createPeer({
|
||||
fixture: false,
|
||||
started: false,
|
||||
config: {
|
||||
addresses: {
|
||||
listen: ['/ip4/127.0.0.1/tcp/0/ws']
|
||||
},
|
||||
connectionManager: {
|
||||
minConnections
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Populate PeerStore before starting
|
||||
libp2p.peerStore.addressBook.set(nodes[0].peerId, nodes[0].multiaddrs)
|
||||
libp2p.peerStore.addressBook.set(nodes[1].peerId, nodes[1].multiaddrs)
|
||||
libp2p.peerStore.protoBook.set(nodes[1].peerId, ['/protocol-min-conns'])
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
// Wait for peer to connect
|
||||
await pWaitFor(() => libp2p.connectionManager.size === minConnections)
|
||||
|
||||
// Should have connected to the peer with protocols
|
||||
expect(libp2p.connectionManager.get(nodes[0].peerId)).to.not.exist()
|
||||
expect(libp2p.connectionManager.get(nodes[1].peerId)).to.exist()
|
||||
|
||||
await libp2p.stop()
|
||||
})
|
||||
|
||||
it('should connect to peers in the PeerStore when a peer disconnected', async () => {
|
||||
const minConnections = 1
|
||||
const autoDialInterval = 1000
|
||||
|
||||
const [libp2p] = await peerUtils.createPeer({
|
||||
fixture: false,
|
||||
config: {
|
||||
addresses: {
|
||||
listen: ['/ip4/127.0.0.1/tcp/0/ws']
|
||||
},
|
||||
connectionManager: {
|
||||
minConnections,
|
||||
autoDialInterval
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Populate PeerStore after starting (discovery)
|
||||
libp2p.peerStore.addressBook.set(nodes[0].peerId, nodes[0].multiaddrs)
|
||||
|
||||
// Wait for peer to connect
|
||||
const conn = await libp2p.dial(nodes[0].peerId)
|
||||
expect(libp2p.connectionManager.get(nodes[0].peerId)).to.exist()
|
||||
|
||||
await conn.close()
|
||||
// Closed
|
||||
await pWaitFor(() => libp2p.connectionManager.size === 0)
|
||||
// Connected
|
||||
await pWaitFor(() => libp2p.connectionManager.size === 1)
|
||||
|
||||
expect(libp2p.connectionManager.get(nodes[0].peerId)).to.exist()
|
||||
|
||||
await libp2p.stop()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@@ -58,8 +58,7 @@ describe('Connection Manager', () => {
|
||||
config: {
|
||||
modules: baseOptions.modules,
|
||||
connectionManager: {
|
||||
maxConnections: max,
|
||||
minConnections: 2
|
||||
maxConnections: max
|
||||
}
|
||||
},
|
||||
started: false
|
||||
@@ -97,8 +96,7 @@ describe('Connection Manager', () => {
|
||||
config: {
|
||||
modules: baseOptions.modules,
|
||||
connectionManager: {
|
||||
maxConnections: max,
|
||||
minConnections: 0
|
||||
maxConnections: max
|
||||
}
|
||||
},
|
||||
started: false
|
||||
|
@@ -242,8 +242,8 @@ describe('Identify', () => {
|
||||
expect(connection).to.exist()
|
||||
|
||||
// Wait for peer store to be updated
|
||||
// Dialer._createDialTarget (add), Identify (replace)
|
||||
await pWaitFor(() => peerStoreSpySet.callCount === 1 && peerStoreSpyAdd.callCount === 1)
|
||||
// Dialer._createDialTarget (add), Connected (add), Identify (replace)
|
||||
await pWaitFor(() => peerStoreSpySet.callCount === 1 && peerStoreSpyAdd.callCount === 2)
|
||||
expect(libp2p.identifyService.identify.callCount).to.equal(1)
|
||||
|
||||
// The connection should have no open streams
|
||||
|
@@ -1,74 +0,0 @@
|
||||
/* eslint max-nested-callbacks: ["error", 8] */
|
||||
/* eslint-env mocha */
|
||||
'use strict'
|
||||
|
||||
const chai = require('chai')
|
||||
const dirtyChai = require('dirty-chai')
|
||||
const expect = chai.expect
|
||||
chai.use(dirtyChai)
|
||||
chai.use(require('chai-string'))
|
||||
|
||||
const os = require('os')
|
||||
const path = require('path')
|
||||
const { isNode } = require('ipfs-utils/src/env')
|
||||
const FsStore = require('datastore-fs')
|
||||
const LevelStore = require('datastore-level')
|
||||
|
||||
const Keychain = require('../../src/keychain')
|
||||
|
||||
describe('cms interop', () => {
|
||||
const passPhrase = 'this is not a secure phrase'
|
||||
const aliceKeyName = 'cms-interop-alice'
|
||||
let ks
|
||||
|
||||
before(() => {
|
||||
const datastore = isNode
|
||||
? new FsStore(path.join(os.tmpdir(), 'test-keystore-1-' + Date.now()))
|
||||
: new LevelStore('test-keystore-1', { db: require('level') })
|
||||
ks = new Keychain(datastore, { passPhrase: passPhrase })
|
||||
})
|
||||
|
||||
const plainData = Buffer.from('This is a message from Alice to Bob')
|
||||
|
||||
it('imports openssl key', async function () {
|
||||
this.timeout(10 * 1000)
|
||||
const aliceKid = 'QmNzBqPwp42HZJccsLtc4ok6LjZAspckgs2du5tTmjPfFA'
|
||||
const alice = `-----BEGIN ENCRYPTED PRIVATE KEY-----
|
||||
MIICxjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIMhYqiVoLJMICAggA
|
||||
MBQGCCqGSIb3DQMHBAhU7J9bcJPLDQSCAoDzi0dP6z97wJBs3jK2hDvZYdoScknG
|
||||
QMPOnpG1LO3IZ7nFha1dta5liWX+xRFV04nmVYkkNTJAPS0xjJOG9B5Hm7wm8uTd
|
||||
1rOaYKOW5S9+1sD03N+fAx9DDFtB7OyvSdw9ty6BtHAqlFk3+/APASJS12ak2pg7
|
||||
/Ei6hChSYYRS9WWGw4lmSitOBxTmrPY1HmODXkR3txR17LjikrMTd6wyky9l/u7A
|
||||
CgkMnj1kn49McOBJ4gO14c9524lw9OkPatyZK39evFhx8AET73LrzCnsf74HW9Ri
|
||||
dKq0FiKLVm2wAXBZqdd5ll/TPj3wmFqhhLSj/txCAGg+079gq2XPYxxYC61JNekA
|
||||
ATKev5zh8x1Mf1maarKN72sD28kS/J+aVFoARIOTxbG3g+1UbYs/00iFcuIaM4IY
|
||||
zB1kQUFe13iWBsJ9nfvN7TJNSVnh8NqHNbSg0SdzKlpZHHSWwOUrsKmxmw/XRVy/
|
||||
ufvN0hZQ3BuK5MZLixMWAyKc9zbZSOB7E7VNaK5Fmm85FRz0L1qRjHvoGcEIhrOt
|
||||
0sjbsRvjs33J8fia0FF9nVfOXvt/67IGBKxIMF9eE91pY5wJNwmXcBk8jghTZs83
|
||||
GNmMB+cGH1XFX4cT4kUGzvqTF2zt7IP+P2cQTS1+imKm7r8GJ7ClEZ9COWWdZIcH
|
||||
igg5jozKCW82JsuWSiW9tu0F/6DuvYiZwHS3OLiJP0CuLfbOaRw8Jia1RTvXEH7m
|
||||
3N0/kZ8hJIK4M/t/UAlALjeNtFxYrFgsPgLxxcq7al1ruG7zBq8L/G3RnkSjtHqE
|
||||
cn4oisOvxCprs4aM9UVjtZTCjfyNpX8UWwT1W3rySV+KQNhxuMy3RzmL
|
||||
-----END ENCRYPTED PRIVATE KEY-----
|
||||
`
|
||||
const key = await ks.importKey(aliceKeyName, alice, 'mypassword')
|
||||
expect(key.name).to.equal(aliceKeyName)
|
||||
expect(key.id).to.equal(aliceKid)
|
||||
})
|
||||
|
||||
it('decrypts node-forge example', async () => {
|
||||
const example = `
|
||||
MIIBcwYJKoZIhvcNAQcDoIIBZDCCAWACAQAxgfowgfcCAQAwYDBbMQ0wCwYDVQQK
|
||||
EwRpcGZzMREwDwYDVQQLEwhrZXlzdG9yZTE3MDUGA1UEAxMuUW1OekJxUHdwNDJI
|
||||
WkpjY3NMdGM0b2s2TGpaQXNwY2tnczJkdTV0VG1qUGZGQQIBATANBgkqhkiG9w0B
|
||||
AQEFAASBgLKXCZQYmMLuQ8m0Ex/rr3KNK+Q2+QG1zIbIQ9MFPUNQ7AOgGOHyL40k
|
||||
d1gr188EHuiwd90PafZoQF9VRSX9YtwGNqAE8+LD8VaITxCFbLGRTjAqeOUHR8cO
|
||||
knU1yykWGkdlbclCuu0NaAfmb8o0OX50CbEKZB7xmsv8tnqn0H0jMF4GCSqGSIb3
|
||||
DQEHATAdBglghkgBZQMEASoEEP/PW1JWehQx6/dsLkp/Mf+gMgQwFM9liLTqC56B
|
||||
nHILFmhac/+a/StQOKuf9dx5qXeGvt9LnwKuGGSfNX4g+dTkoa6N
|
||||
`
|
||||
const plain = await ks.cms.decrypt(Buffer.from(example, 'base64'))
|
||||
expect(plain).to.exist()
|
||||
expect(plain.toString()).to.equal(plainData.toString())
|
||||
})
|
||||
})
|
@@ -1,462 +0,0 @@
|
||||
/* eslint max-nested-callbacks: ["error", 8] */
|
||||
/* eslint-env mocha */
|
||||
'use strict'
|
||||
|
||||
const chai = require('chai')
|
||||
const { expect } = chai
|
||||
const fail = expect.fail
|
||||
chai.use(require('dirty-chai'))
|
||||
chai.use(require('chai-string'))
|
||||
|
||||
const peerUtils = require('../utils/creators/peer')
|
||||
|
||||
const os = require('os')
|
||||
const path = require('path')
|
||||
const { isNode } = require('ipfs-utils/src/env')
|
||||
const { MemoryDatastore } = require('interface-datastore')
|
||||
const FsStore = require('datastore-fs')
|
||||
const LevelStore = require('datastore-level')
|
||||
const Keychain = require('../../src/keychain')
|
||||
const PeerId = require('peer-id')
|
||||
|
||||
describe('keychain', () => {
|
||||
const passPhrase = 'this is not a secure phrase'
|
||||
const rsaKeyName = 'tajné jméno'
|
||||
const renamedRsaKeyName = 'ชื่อลับ'
|
||||
let rsaKeyInfo
|
||||
let emptyKeystore
|
||||
let ks
|
||||
let datastore1, datastore2
|
||||
|
||||
before(() => {
|
||||
datastore1 = isNode
|
||||
? new FsStore(path.join(os.tmpdir(), 'test-keystore-1-' + Date.now()))
|
||||
: new LevelStore('test-keystore-1', { db: require('level') })
|
||||
datastore2 = isNode
|
||||
? new FsStore(path.join(os.tmpdir(), 'test-keystore-2-' + Date.now()))
|
||||
: new LevelStore('test-keystore-2', { db: require('level') })
|
||||
|
||||
ks = new Keychain(datastore2, { passPhrase: passPhrase })
|
||||
emptyKeystore = new Keychain(datastore1, { passPhrase: passPhrase })
|
||||
})
|
||||
|
||||
it('needs a pass phrase to encrypt a key', () => {
|
||||
expect(() => new Keychain(datastore2)).to.throw()
|
||||
})
|
||||
|
||||
it('needs a NIST SP 800-132 non-weak pass phrase', () => {
|
||||
expect(() => new Keychain(datastore2, { passPhrase: '< 20 character' })).to.throw()
|
||||
})
|
||||
|
||||
it('needs a store to persist a key', () => {
|
||||
expect(() => new Keychain(null, { passPhrase: passPhrase })).to.throw()
|
||||
})
|
||||
|
||||
it('has default options', () => {
|
||||
expect(Keychain.options).to.exist()
|
||||
})
|
||||
|
||||
it('needs a supported hashing algorithm', () => {
|
||||
const ok = new Keychain(datastore2, { passPhrase: passPhrase, dek: { hash: 'sha2-256' } })
|
||||
expect(ok).to.exist()
|
||||
expect(() => new Keychain(datastore2, { passPhrase: passPhrase, dek: { hash: 'my-hash' } })).to.throw()
|
||||
})
|
||||
|
||||
it('can generate options', () => {
|
||||
const options = Keychain.generateOptions()
|
||||
options.passPhrase = passPhrase
|
||||
const chain = new Keychain(datastore2, options)
|
||||
expect(chain).to.exist()
|
||||
})
|
||||
|
||||
describe('key name', () => {
|
||||
it('is a valid filename and non-ASCII', async () => {
|
||||
const errors = await Promise.all([
|
||||
ks.removeKey('../../nasty').then(fail, err => err),
|
||||
ks.removeKey('').then(fail, err => err),
|
||||
ks.removeKey(' ').then(fail, err => err),
|
||||
ks.removeKey(null).then(fail, err => err),
|
||||
ks.removeKey(undefined).then(fail, err => err)
|
||||
])
|
||||
|
||||
expect(errors).to.have.length(5)
|
||||
errors.forEach(error => {
|
||||
expect(error).to.have.property('code', 'ERR_INVALID_KEY_NAME')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('key', () => {
|
||||
it('can be an RSA key', async () => {
|
||||
rsaKeyInfo = await ks.createKey(rsaKeyName, 'rsa', 2048)
|
||||
expect(rsaKeyInfo).to.exist()
|
||||
expect(rsaKeyInfo).to.have.property('name', rsaKeyName)
|
||||
expect(rsaKeyInfo).to.have.property('id')
|
||||
})
|
||||
|
||||
it('is encrypted PEM encoded PKCS #8', async () => {
|
||||
const pem = await ks._getPrivateKey(rsaKeyName)
|
||||
return expect(pem).to.startsWith('-----BEGIN ENCRYPTED PRIVATE KEY-----')
|
||||
})
|
||||
|
||||
it('throws if an invalid private key name is given', async () => {
|
||||
const err = await ks._getPrivateKey(undefined).then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME')
|
||||
})
|
||||
|
||||
it('throws if a private key cant be found', async () => {
|
||||
const err = await ks._getPrivateKey('not real').then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_KEY_NOT_FOUND')
|
||||
})
|
||||
|
||||
it('does not overwrite existing key', async () => {
|
||||
const err = await ks.createKey(rsaKeyName, 'rsa', 2048).then(fail, err => err)
|
||||
expect(err).to.have.property('code', 'ERR_KEY_ALREADY_EXISTS')
|
||||
})
|
||||
|
||||
it('cannot create the "self" key', async () => {
|
||||
const err = await ks.createKey('self', 'rsa', 2048).then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME')
|
||||
})
|
||||
|
||||
it('should validate name is string', async () => {
|
||||
const err = await ks.createKey(5, 'rsa', 2048).then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME')
|
||||
})
|
||||
|
||||
it('should validate type is string', async () => {
|
||||
const err = await ks.createKey('TEST' + Date.now(), null, 2048).then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_INVALID_KEY_TYPE')
|
||||
})
|
||||
|
||||
it('should validate size is integer', async () => {
|
||||
const err = await ks.createKey('TEST' + Date.now(), 'rsa', 'string').then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_INVALID_KEY_SIZE')
|
||||
})
|
||||
|
||||
describe('implements NIST SP 800-131A', () => {
|
||||
it('disallows RSA length < 2048', async () => {
|
||||
const err = await ks.createKey('bad-nist-rsa', 'rsa', 1024).then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_INVALID_KEY_SIZE')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('query', () => {
|
||||
it('finds all existing keys', async () => {
|
||||
const keys = await ks.listKeys()
|
||||
expect(keys).to.exist()
|
||||
const mykey = keys.find((k) => k.name.normalize() === rsaKeyName.normalize())
|
||||
expect(mykey).to.exist()
|
||||
})
|
||||
|
||||
it('finds a key by name', async () => {
|
||||
const key = await ks.findKeyByName(rsaKeyName)
|
||||
expect(key).to.exist()
|
||||
expect(key).to.deep.equal(rsaKeyInfo)
|
||||
})
|
||||
|
||||
it('finds a key by id', async () => {
|
||||
const key = await ks.findKeyById(rsaKeyInfo.id)
|
||||
expect(key).to.exist()
|
||||
expect(key).to.deep.equal(rsaKeyInfo)
|
||||
})
|
||||
|
||||
it('returns the key\'s name and id', async () => {
|
||||
const keys = await ks.listKeys()
|
||||
expect(keys).to.exist()
|
||||
keys.forEach((key) => {
|
||||
expect(key).to.have.property('name')
|
||||
expect(key).to.have.property('id')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('CMS protected data', () => {
|
||||
const plainData = Buffer.from('This is a message from Alice to Bob')
|
||||
let cms
|
||||
|
||||
it('service is available', () => {
|
||||
expect(ks).to.have.property('cms')
|
||||
})
|
||||
|
||||
it('requires a key', async () => {
|
||||
const err = await ks.cms.encrypt('no-key', plainData).then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_KEY_NOT_FOUND')
|
||||
})
|
||||
|
||||
it('requires plain data as a Buffer', async () => {
|
||||
const err = await ks.cms.encrypt(rsaKeyName, 'plain data').then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_INVALID_PARAMS')
|
||||
})
|
||||
|
||||
it('encrypts', async () => {
|
||||
cms = await ks.cms.encrypt(rsaKeyName, plainData)
|
||||
expect(cms).to.exist()
|
||||
expect(cms).to.be.instanceOf(Buffer)
|
||||
})
|
||||
|
||||
it('is a PKCS #7 message', async () => {
|
||||
const err = await ks.cms.decrypt('not CMS').then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_INVALID_PARAMS')
|
||||
})
|
||||
|
||||
it('is a PKCS #7 binary message', async () => {
|
||||
const err = await ks.cms.decrypt(plainData).then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_INVALID_CMS')
|
||||
})
|
||||
|
||||
it('cannot be read without the key', async () => {
|
||||
const err = await emptyKeystore.cms.decrypt(cms).then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('missingKeys')
|
||||
expect(err.missingKeys).to.eql([rsaKeyInfo.id])
|
||||
expect(err).to.have.property('code', 'ERR_MISSING_KEYS')
|
||||
})
|
||||
|
||||
it('can be read with the key', async () => {
|
||||
const plain = await ks.cms.decrypt(cms)
|
||||
expect(plain).to.exist()
|
||||
expect(plain.toString()).to.equal(plainData.toString())
|
||||
})
|
||||
})
|
||||
|
||||
describe('exported key', () => {
|
||||
let pemKey
|
||||
|
||||
it('requires the password', async () => {
|
||||
const err = await ks.exportKey(rsaKeyName).then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_PASSWORD_REQUIRED')
|
||||
})
|
||||
|
||||
it('requires the key name', async () => {
|
||||
const err = await ks.exportKey(undefined, 'password').then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME')
|
||||
})
|
||||
|
||||
it('is a PKCS #8 encrypted pem', async () => {
|
||||
pemKey = await ks.exportKey(rsaKeyName, 'password')
|
||||
expect(pemKey).to.startsWith('-----BEGIN ENCRYPTED PRIVATE KEY-----')
|
||||
})
|
||||
|
||||
it('can be imported', async () => {
|
||||
const key = await ks.importKey('imported-key', pemKey, 'password')
|
||||
expect(key.name).to.equal('imported-key')
|
||||
expect(key.id).to.equal(rsaKeyInfo.id)
|
||||
})
|
||||
|
||||
it('requires the pem', async () => {
|
||||
const err = await ks.importKey('imported-key', undefined, 'password').then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_PEM_REQUIRED')
|
||||
})
|
||||
|
||||
it('cannot be imported as an existing key name', async () => {
|
||||
const err = await ks.importKey(rsaKeyName, pemKey, 'password').then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_KEY_ALREADY_EXISTS')
|
||||
})
|
||||
|
||||
it('cannot be imported with the wrong password', async () => {
|
||||
const err = await ks.importKey('a-new-name-for-import', pemKey, 'not the password').then(fail, err => err)
|
||||
expect(err).to.exist()
|
||||
expect(err).to.have.property('code', 'ERR_CANNOT_READ_KEY')
|
||||
})
|
||||
})
|
||||
|
||||
describe('peer id', () => {
|
||||
const alicePrivKey = 'CAASpgkwggSiAgEAAoIBAQC2SKo/HMFZeBml1AF3XijzrxrfQXdJzjePBZAbdxqKR1Mc6juRHXij6HXYPjlAk01BhF1S3Ll4Lwi0cAHhggf457sMg55UWyeGKeUv0ucgvCpBwlR5cQ020i0MgzjPWOLWq1rtvSbNcAi2ZEVn6+Q2EcHo3wUvWRtLeKz+DZSZfw2PEDC+DGPJPl7f8g7zl56YymmmzH9liZLNrzg/qidokUv5u1pdGrcpLuPNeTODk0cqKB+OUbuKj9GShYECCEjaybJDl9276oalL9ghBtSeEv20kugatTvYy590wFlJkkvyl+nPxIH0EEYMKK9XRWlu9XYnoSfboiwcv8M3SlsjAgMBAAECggEAZtju/bcKvKFPz0mkHiaJcpycy9STKphorpCT83srBVQi59CdFU6Mj+aL/xt0kCPMVigJw8P3/YCEJ9J+rS8BsoWE+xWUEsJvtXoT7vzPHaAtM3ci1HZd302Mz1+GgS8Epdx+7F5p80XAFLDUnELzOzKftvWGZmWfSeDnslwVONkL/1VAzwKy7Ce6hk4SxRE7l2NE2OklSHOzCGU1f78ZzVYKSnS5Ag9YrGjOAmTOXDbKNKN/qIorAQ1bovzGoCwx3iGIatQKFOxyVCyO1PsJYT7JO+kZbhBWRRE+L7l+ppPER9bdLFxs1t5CrKc078h+wuUr05S1P1JjXk68pk3+kQKBgQDeK8AR11373Mzib6uzpjGzgNRMzdYNuExWjxyxAzz53NAR7zrPHvXvfIqjDScLJ4NcRO2TddhXAfZoOPVH5k4PJHKLBPKuXZpWlookCAyENY7+Pd55S8r+a+MusrMagYNljb5WbVTgN8cgdpim9lbbIFlpN6SZaVjLQL3J8TWH6wKBgQDSChzItkqWX11CNstJ9zJyUE20I7LrpyBJNgG1gtvz3ZMUQCn3PxxHtQzN9n1P0mSSYs+jBKPuoSyYLt1wwe10/lpgL4rkKWU3/m1Myt0tveJ9WcqHh6tzcAbb/fXpUFT/o4SWDimWkPkuCb+8j//2yiXk0a/T2f36zKMuZvujqQKBgC6B7BAQDG2H2B/ijofp12ejJU36nL98gAZyqOfpLJ+FeMz4TlBDQ+phIMhnHXA5UkdDapQ+zA3SrFk+6yGk9Vw4Hf46B+82SvOrSbmnMa+PYqKYIvUzR4gg34rL/7AhwnbEyD5hXq4dHwMNsIDq+l2elPjwm/U9V0gdAl2+r50HAoGALtsKqMvhv8HucAMBPrLikhXP/8um8mMKFMrzfqZ+otxfHzlhI0L08Bo3jQrb0Z7ByNY6M8epOmbCKADsbWcVre/AAY0ZkuSZK/CaOXNX/AhMKmKJh8qAOPRY02LIJRBCpfS4czEdnfUhYV/TYiFNnKRj57PPYZdTzUsxa/yVTmECgYBr7slQEjb5Onn5mZnGDh+72BxLNdgwBkhO0OCdpdISqk0F0Pxby22DFOKXZEpiyI9XYP1C8wPiJsShGm2yEwBPWXnrrZNWczaVuCbXHrZkWQogBDG3HGXNdU4MAWCyiYlyinIBpPpoAJZSzpGLmWbMWh28+RJS6AQX6KHrK1o2uw=='
let alice

before(async function () {
const encoded = Buffer.from(alicePrivKey, 'base64')
alice = await PeerId.createFromPrivKey(encoded)
})

it('private key can be imported', async () => {
const key = await ks.importPeer('alice', alice)
expect(key.name).to.equal('alice')
expect(key.id).to.equal(alice.toB58String())
})

it('private key import requires a valid name', async () => {
const err = await ks.importPeer(undefined, alice).then(fail, err => err)
expect(err).to.exist()
expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME')
})

it('private key import requires the peer', async () => {
const err = await ks.importPeer('alice').then(fail, err => err)
expect(err).to.exist()
expect(err).to.have.property('code', 'ERR_MISSING_PRIVATE_KEY')
})

it('key id exists', async () => {
const key = await ks.findKeyById(alice.toB58String())
expect(key).to.exist()
expect(key).to.have.property('name', 'alice')
expect(key).to.have.property('id', alice.toB58String())
})

it('key name exists', async () => {
const key = await ks.findKeyByName('alice')
expect(key).to.exist()
expect(key).to.have.property('name', 'alice')
expect(key).to.have.property('id', alice.toB58String())
})
})

describe('rename', () => {
it('requires an existing key name', async () => {
const err = await ks.renameKey('not-there', renamedRsaKeyName).then(fail, err => err)
expect(err).to.exist()
expect(err).to.have.property('code', 'ERR_NOT_FOUND')
})

it('requires a valid new key name', async () => {
const err = await ks.renameKey(rsaKeyName, '..\not-valid').then(fail, err => err)
expect(err).to.exist()
expect(err).to.have.property('code', 'ERR_NEW_KEY_NAME_INVALID')
})

it('does not overwrite existing key', async () => {
const err = await ks.renameKey(rsaKeyName, rsaKeyName).then(fail, err => err)
expect(err).to.exist()
expect(err).to.have.property('code', 'ERR_KEY_ALREADY_EXISTS')
})

it('cannot create the "self" key', async () => {
const err = await ks.renameKey(rsaKeyName, 'self').then(fail, err => err)
expect(err).to.exist()
expect(err).to.have.property('code', 'ERR_NEW_KEY_NAME_INVALID')
})

it('removes the existing key name', async () => {
const key = await ks.renameKey(rsaKeyName, renamedRsaKeyName)
expect(key).to.exist()
expect(key).to.have.property('name', renamedRsaKeyName)
expect(key).to.have.property('id', rsaKeyInfo.id)
// Try to find the changed key
const err = await ks.findKeyByName(rsaKeyName).then(fail, err => err)
expect(err).to.exist()
})

it('creates the new key name', async () => {
const key = await ks.findKeyByName(renamedRsaKeyName)
expect(key).to.exist()
expect(key).to.have.property('name', renamedRsaKeyName)
})

it('does not change the key ID', async () => {
const key = await ks.findKeyByName(renamedRsaKeyName)
expect(key).to.exist()
expect(key).to.have.property('name', renamedRsaKeyName)
expect(key).to.have.property('id', rsaKeyInfo.id)
})

it('throws with invalid key names', async () => {
const err = await ks.findKeyByName(undefined).then(fail, err => err)
expect(err).to.exist()
expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME')
})
})

describe('key removal', () => {
it('cannot remove the "self" key', async () => {
const err = await ks.removeKey('self').then(fail, err => err)
expect(err).to.exist()
expect(err).to.have.property('code', 'ERR_INVALID_KEY_NAME')
})

it('cannot remove an unknown key', async () => {
const err = await ks.removeKey('not-there').then(fail, err => err)
expect(err).to.exist()
expect(err).to.have.property('code', 'ERR_KEY_NOT_FOUND')
})

it('can remove a known key', async () => {
const key = await ks.removeKey(renamedRsaKeyName)
expect(key).to.exist()
expect(key).to.have.property('name', renamedRsaKeyName)
expect(key).to.have.property('id', rsaKeyInfo.id)
})
})
})

describe('libp2p.keychain', () => {
it('needs a passphrase to be used, otherwise throws an error', async () => {
const [libp2p] = await peerUtils.createPeer({
started: false
})

try {
await libp2p.keychain.createKey('keyName', 'rsa', 2048)
} catch (err) {
expect(err).to.exist()
return
}
throw new Error('should throw an error using the keychain if no passphrase provided')
})

it('can be used if a passphrase is provided', async () => {
const [libp2p] = await peerUtils.createPeer({
started: false,
config: {
keychain: {
datastore: new MemoryDatastore(),
pass: '12345678901234567890'
}
}
})

await libp2p.loadKeychain()

const kInfo = await libp2p.keychain.createKey('keyName', 'rsa', 2048)
expect(kInfo).to.exist()
})

it('can reload keys', async () => {
const datastore = new MemoryDatastore()
const [libp2p] = await peerUtils.createPeer({
started: false,
config: {
keychain: {
datastore,
pass: '12345678901234567890'
}
}
})
await libp2p.loadKeychain()

const kInfo = await libp2p.keychain.createKey('keyName', 'rsa', 2048)
expect(kInfo).to.exist()

const [libp2p2] = await peerUtils.createPeer({
started: false,
config: {
keychain: {
datastore,
pass: '12345678901234567890'
}
}
})

await libp2p2.loadKeychain()
const key = await libp2p2.keychain.findKeyByName('keyName')

expect(key).to.exist()
})
})
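The keychain tests above map onto a small application-level flow. The sketch below assumes the same libp2p 0.28-era surface they exercise (`loadKeychain`, `keychain.createKey`, `keychain.findKeyByName`, `keychain.removeKey`) and that keychain options are accepted at node construction as in the test fixtures; treat it as illustrative, not canonical.

```js
// Illustrative sketch only; option shape follows the test fixtures above.
const Libp2p = require('libp2p')
const { MemoryDatastore } = require('interface-datastore')

async function keychainExample (baseModules) {
  const libp2p = await Libp2p.create({
    modules: baseModules, // transports/muxers/crypto as configured elsewhere in this repo
    keychain: {
      datastore: new MemoryDatastore(),
      pass: '12345678901234567890' // passphrase must be long enough, as in the tests
    }
  })

  // Without loadKeychain() and a passphrase, keychain calls throw (first test above).
  await libp2p.loadKeychain()

  const info = await libp2p.keychain.createKey('app-key', 'rsa', 2048)
  const found = await libp2p.keychain.findKeyByName('app-key')
  console.log(found.id === info.id) // true
  await libp2p.keychain.removeKey('app-key')
}

module.exports = keychainExample
```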
@@ -1,69 +0,0 @@
/* eslint-env mocha */
'use strict'

const chai = require('chai')
const dirtyChai = require('dirty-chai')
const expect = chai.expect
chai.use(dirtyChai)
const PeerId = require('peer-id')
const multihash = require('multihashes')
const crypto = require('libp2p-crypto')
const rsaUtils = require('libp2p-crypto/src/keys/rsa-utils')
const rsaClass = require('libp2p-crypto/src/keys/rsa-class')

const sample = {
id: '122019318b6e5e0cf93a2314bf01269a2cc23cd3dcd452d742cdb9379d8646f6e4a9',
privKey: 'CAASpgkwggSiAgEAAoIBAQC2SKo/HMFZeBml1AF3XijzrxrfQXdJzjePBZAbdxqKR1Mc6juRHXij6HXYPjlAk01BhF1S3Ll4Lwi0cAHhggf457sMg55UWyeGKeUv0ucgvCpBwlR5cQ020i0MgzjPWOLWq1rtvSbNcAi2ZEVn6+Q2EcHo3wUvWRtLeKz+DZSZfw2PEDC+DGPJPl7f8g7zl56YymmmzH9liZLNrzg/qidokUv5u1pdGrcpLuPNeTODk0cqKB+OUbuKj9GShYECCEjaybJDl9276oalL9ghBtSeEv20kugatTvYy590wFlJkkvyl+nPxIH0EEYMKK9XRWlu9XYnoSfboiwcv8M3SlsjAgMBAAECggEAZtju/bcKvKFPz0mkHiaJcpycy9STKphorpCT83srBVQi59CdFU6Mj+aL/xt0kCPMVigJw8P3/YCEJ9J+rS8BsoWE+xWUEsJvtXoT7vzPHaAtM3ci1HZd302Mz1+GgS8Epdx+7F5p80XAFLDUnELzOzKftvWGZmWfSeDnslwVONkL/1VAzwKy7Ce6hk4SxRE7l2NE2OklSHOzCGU1f78ZzVYKSnS5Ag9YrGjOAmTOXDbKNKN/qIorAQ1bovzGoCwx3iGIatQKFOxyVCyO1PsJYT7JO+kZbhBWRRE+L7l+ppPER9bdLFxs1t5CrKc078h+wuUr05S1P1JjXk68pk3+kQKBgQDeK8AR11373Mzib6uzpjGzgNRMzdYNuExWjxyxAzz53NAR7zrPHvXvfIqjDScLJ4NcRO2TddhXAfZoOPVH5k4PJHKLBPKuXZpWlookCAyENY7+Pd55S8r+a+MusrMagYNljb5WbVTgN8cgdpim9lbbIFlpN6SZaVjLQL3J8TWH6wKBgQDSChzItkqWX11CNstJ9zJyUE20I7LrpyBJNgG1gtvz3ZMUQCn3PxxHtQzN9n1P0mSSYs+jBKPuoSyYLt1wwe10/lpgL4rkKWU3/m1Myt0tveJ9WcqHh6tzcAbb/fXpUFT/o4SWDimWkPkuCb+8j//2yiXk0a/T2f36zKMuZvujqQKBgC6B7BAQDG2H2B/ijofp12ejJU36nL98gAZyqOfpLJ+FeMz4TlBDQ+phIMhnHXA5UkdDapQ+zA3SrFk+6yGk9Vw4Hf46B+82SvOrSbmnMa+PYqKYIvUzR4gg34rL/7AhwnbEyD5hXq4dHwMNsIDq+l2elPjwm/U9V0gdAl2+r50HAoGALtsKqMvhv8HucAMBPrLikhXP/8um8mMKFMrzfqZ+otxfHzlhI0L08Bo3jQrb0Z7ByNY6M8epOmbCKADsbWcVre/AAY0ZkuSZK/CaOXNX/AhMKmKJh8qAOPRY02LIJRBCpfS4czEdnfUhYV/TYiFNnKRj57PPYZdTzUsxa/yVTmECgYBr7slQEjb5Onn5mZnGDh+72BxLNdgwBkhO0OCdpdISqk0F0Pxby22DFOKXZEpiyI9XYP1C8wPiJsShGm2yEwBPWXnrrZNWczaVuCbXHrZkWQogBDG3HGXNdU4MAWCyiYlyinIBpPpoAJZSzpGLmWbMWh28+RJS6AQX6KHrK1o2uw==',
pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2SKo/HMFZeBml1AF3XijzrxrfQXdJzjePBZAbdxqKR1Mc6juRHXij6HXYPjlAk01BhF1S3Ll4Lwi0cAHhggf457sMg55UWyeGKeUv0ucgvCpBwlR5cQ020i0MgzjPWOLWq1rtvSbNcAi2ZEVn6+Q2EcHo3wUvWRtLeKz+DZSZfw2PEDC+DGPJPl7f8g7zl56YymmmzH9liZLNrzg/qidokUv5u1pdGrcpLuPNeTODk0cqKB+OUbuKj9GShYECCEjaybJDl9276oalL9ghBtSeEv20kugatTvYy590wFlJkkvyl+nPxIH0EEYMKK9XRWlu9XYnoSfboiwcv8M3SlsjAgMBAAE='
}

describe('peer ID', () => {
let peer
let publicKeyDer // a buffer

before(async () => {
const encoded = Buffer.from(sample.privKey, 'base64')
peer = await PeerId.createFromPrivKey(encoded)
})

it('decoded public key', async () => {
// get protobuf version of the public key
const publicKeyProtobuf = peer.marshalPubKey()
const publicKey = crypto.keys.unmarshalPublicKey(publicKeyProtobuf)
publicKeyDer = publicKey.marshal()

// get protobuf version of the private key
const privateKeyProtobuf = peer.marshalPrivKey()
const key = await crypto.keys.unmarshalPrivateKey(privateKeyProtobuf)
expect(key).to.exist()
})

it('encoded public key with DER', async () => {
const jwk = rsaUtils.pkixToJwk(publicKeyDer)
const rsa = new rsaClass.RsaPublicKey(jwk)
const keyId = await rsa.hash()
const kids = multihash.toB58String(keyId)
expect(kids).to.equal(peer.toB58String())
})

it('encoded public key with JWT', async () => {
const jwk = {
kty: 'RSA',
n: 'tkiqPxzBWXgZpdQBd14o868a30F3Sc43jwWQG3caikdTHOo7kR14o-h12D45QJNNQYRdUty5eC8ItHAB4YIH-Oe7DIOeVFsnhinlL9LnILwqQcJUeXENNtItDIM4z1ji1qta7b0mzXAItmRFZ-vkNhHB6N8FL1kbS3is_g2UmX8NjxAwvgxjyT5e3_IO85eemMpppsx_ZYmSza84P6onaJFL-btaXRq3KS7jzXkzg5NHKigfjlG7io_RkoWBAghI2smyQ5fdu-qGpS_YIQbUnhL9tJLoGrU72MufdMBZSZJL8pfpz8SB9BBGDCivV0VpbvV2J6En26IsHL_DN0pbIw',
e: 'AQAB',
alg: 'RS256',
kid: '2011-04-29'
}
const rsa = new rsaClass.RsaPublicKey(jwk)
const keyId = await rsa.hash()
const kids = multihash.toB58String(keyId)
expect(kids).to.equal(peer.toB58String())
})

it('decoded private key', async () => {
// get protobuf version of the private key
const privateKeyProtobuf = peer.marshalPrivKey()
const key = await crypto.keys.unmarshalPrivateKey(privateKeyProtobuf)
expect(key).to.exist()
})
})
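The deleted peer ID spec above boils down to one round trip; a minimal sketch of it, using only the `peer-id` and `libp2p-crypto` calls that appear in the file:

```js
const PeerId = require('peer-id')
const crypto = require('libp2p-crypto')

async function roundTrip (privKeyB64) {
  // Rebuild the PeerId from the protobuf-encoded private key (base64, as in the fixture).
  const peer = await PeerId.createFromPrivKey(Buffer.from(privKeyB64, 'base64'))

  // The public and private keys can be unmarshalled from their protobuf forms independently.
  const pubKey = crypto.keys.unmarshalPublicKey(peer.marshalPubKey())
  const privKey = await crypto.keys.unmarshalPrivateKey(peer.marshalPrivKey())

  return { id: peer.toB58String(), pubKeyDer: pubKey.marshal(), hasPriv: Boolean(privKey) }
}
```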
@@ -31,13 +31,10 @@ describe('peer discovery', () => {
sinon.reset()
})

it('should dial know peers on startup below the minConnections watermark', async () => {
it('should dial know peers on startup', async () => {
libp2p = new Libp2p({
...baseOptions,
peerId,
connectionManager: {
minConnections: 2
}
peerId
})

libp2p.peerStore.addressBook.set(remotePeerId, [multiaddr('/ip4/165.1.1.1/tcp/80')])
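For context, the removed lines correspond to a node constructed with an explicit connection manager watermark; a hedged sketch of that shape (`baseOptions`, `peerId` and `remotePeerId` stand in for the test fixtures):

```js
const Libp2p = require('libp2p')
const multiaddr = require('multiaddr')

async function startBelowWatermark (baseOptions, peerId, remotePeerId) {
  const libp2p = new Libp2p({
    ...baseOptions,
    peerId,
    connectionManager: {
      minConnections: 2 // known peers are auto-dialled on startup until this is met
    }
  })

  // Seed a known address so startup has something to dial.
  libp2p.peerStore.addressBook.set(remotePeerId, [multiaddr('/ip4/165.1.1.1/tcp/80')])
  await libp2p.start()
  return libp2p
}
```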
@@ -323,10 +323,10 @@ describe('addressBook', () => {
throw new Error('invalid peerId should throw error')
})

it('returns empty array if no multiaddrs are known for the provided peer', () => {
it('returns undefined if no multiaddrs are known for the provided peer', () => {
const addresses = ab.getMultiaddrsForPeer(peerId)

expect(addresses).to.be.empty()
expect(addresses).to.not.exist()
})

it('returns the multiaddrs stored', () => {
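The assertion change above means `getMultiaddrsForPeer` now reports an unknown peer as `undefined` rather than an empty array; a tiny sketch of the defensive call pattern that implies (`peerStore` being any PeerStore instance as used in these tests):

```js
// Returns a (possibly empty) array regardless of whether the peer is known.
function knownMultiaddrs (peerStore, peerId) {
  const addrs = peerStore.addressBook.getMultiaddrsForPeer(peerId)
  return addrs || [] // undefined when nothing is stored for this peer
}
```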
@@ -1,380 +0,0 @@
|
||||
'use strict'
|
||||
/* eslint-env mocha */
|
||||
|
||||
const chai = require('chai')
|
||||
chai.use(require('dirty-chai'))
|
||||
chai.use(require('chai-bytes'))
|
||||
const { expect } = chai
|
||||
|
||||
const pDefer = require('p-defer')
|
||||
const PeerStore = require('../../src/peer-store')
|
||||
|
||||
const peerUtils = require('../utils/creators/peer')
|
||||
const {
|
||||
codes: { ERR_INVALID_PARAMETERS }
|
||||
} = require('../../src/errors')
|
||||
|
||||
describe('metadataBook', () => {
|
||||
let peerId
|
||||
|
||||
before(async () => {
|
||||
[peerId] = await peerUtils.createPeerId()
|
||||
})
|
||||
|
||||
describe('metadataBook.set', () => {
|
||||
let peerStore, mb
|
||||
|
||||
beforeEach(() => {
|
||||
peerStore = new PeerStore()
|
||||
mb = peerStore.metadataBook
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
peerStore.removeAllListeners()
|
||||
})
|
||||
|
||||
it('throws invalid parameters error if invalid PeerId is provided', () => {
|
||||
try {
|
||||
mb.set('invalid peerId')
|
||||
} catch (err) {
|
||||
expect(err.code).to.equal(ERR_INVALID_PARAMETERS)
|
||||
return
|
||||
}
|
||||
throw new Error('invalid peerId should throw error')
|
||||
})
|
||||
|
||||
it('throws invalid parameters error if no key provided', () => {
|
||||
try {
|
||||
mb.set(peerId)
|
||||
} catch (err) {
|
||||
expect(err.code).to.equal(ERR_INVALID_PARAMETERS)
|
||||
return
|
||||
}
|
||||
throw new Error('no key provided should throw error')
|
||||
})
|
||||
|
||||
it('throws invalid parameters error if no value provided', () => {
|
||||
try {
|
||||
mb.set(peerId, 'location')
|
||||
} catch (err) {
|
||||
expect(err.code).to.equal(ERR_INVALID_PARAMETERS)
|
||||
return
|
||||
}
|
||||
throw new Error('no value provided should throw error')
|
||||
})
|
||||
|
||||
it('throws invalid parameters error if value is not a buffer', () => {
|
||||
try {
|
||||
mb.set(peerId, 'location', 'mars')
|
||||
} catch (err) {
|
||||
expect(err.code).to.equal(ERR_INVALID_PARAMETERS)
|
||||
return
|
||||
}
|
||||
throw new Error('invalid value provided should throw error')
|
||||
})
|
||||
|
||||
it('stores the content and emit change event', () => {
|
||||
const defer = pDefer()
|
||||
const metadataKey = 'location'
|
||||
const metadataValue = Buffer.from('mars')
|
||||
|
||||
peerStore.once('change:metadata', ({ peerId, metadata }) => {
|
||||
expect(peerId).to.exist()
|
||||
expect(metadata).to.equal(metadataKey)
|
||||
defer.resolve()
|
||||
})
|
||||
|
||||
mb.set(peerId, metadataKey, metadataValue)
|
||||
|
||||
const value = mb.getValue(peerId, metadataKey)
|
||||
expect(value).to.equalBytes(metadataValue)
|
||||
|
||||
const peerMetadata = mb.get(peerId)
|
||||
expect(peerMetadata).to.exist()
|
||||
expect(peerMetadata.get(metadataKey)).to.equalBytes(metadataValue)
|
||||
|
||||
return defer.promise
|
||||
})
|
||||
|
||||
it('emits on set if not storing the exact same content', () => {
|
||||
const defer = pDefer()
|
||||
const metadataKey = 'location'
|
||||
const metadataValue1 = Buffer.from('mars')
|
||||
const metadataValue2 = Buffer.from('saturn')
|
||||
|
||||
let changeCounter = 0
|
||||
peerStore.on('change:metadata', () => {
|
||||
changeCounter++
|
||||
if (changeCounter > 1) {
|
||||
defer.resolve()
|
||||
}
|
||||
})
|
||||
|
||||
// set 1
|
||||
mb.set(peerId, metadataKey, metadataValue1)
|
||||
|
||||
// set 2 (same content)
|
||||
mb.set(peerId, metadataKey, metadataValue2)
|
||||
|
||||
const value = mb.getValue(peerId, metadataKey)
|
||||
expect(value).to.equalBytes(metadataValue2)
|
||||
|
||||
const peerMetadata = mb.get(peerId)
|
||||
expect(peerMetadata).to.exist()
|
||||
expect(peerMetadata.get(metadataKey)).to.equalBytes(metadataValue2)
|
||||
|
||||
return defer.promise
|
||||
})
|
||||
|
||||
it('does not emit on set if it is storing the exact same content', () => {
|
||||
const defer = pDefer()
|
||||
const metadataKey = 'location'
|
||||
const metadataValue = Buffer.from('mars')
|
||||
|
||||
let changeCounter = 0
|
||||
peerStore.on('change:metadata', () => {
|
||||
changeCounter++
|
||||
if (changeCounter > 1) {
|
||||
defer.reject()
|
||||
}
|
||||
})
|
||||
|
||||
// set 1
|
||||
mb.set(peerId, metadataKey, metadataValue)
|
||||
|
||||
// set 2 (same content)
|
||||
mb.set(peerId, metadataKey, metadataValue)
|
||||
|
||||
// Wait 50ms for incorrect second event
|
||||
setTimeout(() => {
|
||||
defer.resolve()
|
||||
}, 50)
|
||||
|
||||
return defer.promise
|
||||
})
|
||||
})
|
||||
|
||||
describe('metadataBook.get', () => {
|
||||
let peerStore, mb
|
||||
|
||||
beforeEach(() => {
|
||||
peerStore = new PeerStore()
|
||||
mb = peerStore.metadataBook
|
||||
})
|
||||
|
||||
it('throws invalid parameters error if invalid PeerId is provided', () => {
|
||||
try {
|
||||
mb.get('invalid peerId')
|
||||
} catch (err) {
|
||||
expect(err.code).to.equal(ERR_INVALID_PARAMETERS)
|
||||
return
|
||||
}
|
||||
throw new Error('invalid peerId should throw error')
|
||||
})
|
||||
|
||||
it('returns undefined if no metadata is known for the provided peer', () => {
|
||||
const metadata = mb.get(peerId)
|
||||
|
||||
expect(metadata).to.not.exist()
|
||||
})
|
||||
|
||||
it('returns the metadata stored', () => {
|
||||
const metadataKey = 'location'
|
||||
const metadataValue = Buffer.from('mars')
|
||||
|
||||
mb.set(peerId, metadataKey, metadataValue)
|
||||
|
||||
const peerMetadata = mb.get(peerId)
|
||||
expect(peerMetadata).to.exist()
|
||||
expect(peerMetadata.get(metadataKey)).to.equalBytes(metadataValue)
|
||||
})
|
||||
})
|
||||
|
||||
describe('metadataBook.getValue', () => {
|
||||
let peerStore, mb
|
||||
|
||||
beforeEach(() => {
|
||||
peerStore = new PeerStore()
|
||||
mb = peerStore.metadataBook
|
||||
})
|
||||
|
||||
it('throws invalid parameters error if invalid PeerId is provided', () => {
|
||||
try {
|
||||
mb.getValue('invalid peerId')
|
||||
} catch (err) {
|
||||
expect(err.code).to.equal(ERR_INVALID_PARAMETERS)
|
||||
return
|
||||
}
|
||||
throw new Error('invalid peerId should throw error')
|
||||
})
|
||||
|
||||
it('returns undefined if no metadata is known for the provided peer', () => {
|
||||
const metadataKey = 'location'
|
||||
const metadata = mb.getValue(peerId, metadataKey)
|
||||
|
||||
expect(metadata).to.not.exist()
|
||||
})
|
||||
|
||||
it('returns the metadata value stored for the given key', () => {
|
||||
const metadataKey = 'location'
|
||||
const metadataValue = Buffer.from('mars')
|
||||
|
||||
mb.set(peerId, metadataKey, metadataValue)
|
||||
|
||||
const value = mb.getValue(peerId, metadataKey)
|
||||
expect(value).to.exist()
|
||||
expect(value).to.equalBytes(metadataValue)
|
||||
})
|
||||
|
||||
it('returns undefined if no metadata is known for the provided peer and key', () => {
|
||||
const metadataKey = 'location'
|
||||
const metadataBadKey = 'nickname'
|
||||
const metadataValue = Buffer.from('mars')
|
||||
|
||||
mb.set(peerId, metadataKey, metadataValue)
|
||||
|
||||
const metadata = mb.getValue(peerId, metadataBadKey)
|
||||
|
||||
expect(metadata).to.not.exist()
|
||||
})
|
||||
})
|
||||
|
||||
describe('metadataBook.delete', () => {
|
||||
let peerStore, mb
|
||||
|
||||
beforeEach(() => {
|
||||
peerStore = new PeerStore()
|
||||
mb = peerStore.metadataBook
|
||||
})
|
||||
|
||||
it('throwns invalid parameters error if invalid PeerId is provided', () => {
|
||||
try {
|
||||
mb.delete('invalid peerId')
|
||||
} catch (err) {
|
||||
expect(err.code).to.equal(ERR_INVALID_PARAMETERS)
|
||||
return
|
||||
}
|
||||
throw new Error('invalid peerId should throw error')
|
||||
})
|
||||
|
||||
it('returns false if no records exist for the peer and no event is emitted', () => {
|
||||
const defer = pDefer()
|
||||
|
||||
peerStore.on('change:metadata', () => {
|
||||
defer.reject()
|
||||
})
|
||||
|
||||
const deleted = mb.delete(peerId)
|
||||
|
||||
expect(deleted).to.equal(false)
|
||||
|
||||
// Wait 50ms for incorrect invalid event
|
||||
setTimeout(() => {
|
||||
defer.resolve()
|
||||
}, 50)
|
||||
|
||||
return defer.promise
|
||||
})
|
||||
|
||||
it('returns true if the record exists and an event is emitted', () => {
|
||||
const defer = pDefer()
|
||||
const metadataKey = 'location'
|
||||
const metadataValue = Buffer.from('mars')
|
||||
|
||||
mb.set(peerId, metadataKey, metadataValue)
|
||||
|
||||
// Listen after set
|
||||
peerStore.on('change:metadata', () => {
|
||||
defer.resolve()
|
||||
})
|
||||
|
||||
const deleted = mb.delete(peerId)
|
||||
|
||||
expect(deleted).to.equal(true)
|
||||
|
||||
return defer.promise
|
||||
})
|
||||
})
|
||||
|
||||
describe('metadataBook.deleteValue', () => {
|
||||
let peerStore, mb
|
||||
|
||||
beforeEach(() => {
|
||||
peerStore = new PeerStore()
|
||||
mb = peerStore.metadataBook
|
||||
})
|
||||
|
||||
it('throws invalid parameters error if invalid PeerId is provided', () => {
|
||||
try {
|
||||
mb.deleteValue('invalid peerId')
|
||||
} catch (err) {
|
||||
expect(err.code).to.equal(ERR_INVALID_PARAMETERS)
|
||||
return
|
||||
}
|
||||
throw new Error('invalid peerId should throw error')
|
||||
})
|
||||
|
||||
it('returns false if no records exist for the peer and no event is emitted', () => {
|
||||
const defer = pDefer()
|
||||
const metadataKey = 'location'
|
||||
|
||||
peerStore.on('change:metadata', () => {
|
||||
defer.reject()
|
||||
})
|
||||
|
||||
const deleted = mb.deleteValue(peerId, metadataKey)
|
||||
|
||||
expect(deleted).to.equal(false)
|
||||
|
||||
// Wait 50ms for incorrect invalid event
|
||||
setTimeout(() => {
|
||||
defer.resolve()
|
||||
}, 50)
|
||||
|
||||
return defer.promise
|
||||
})
|
||||
|
||||
it('returns true if the record exists and an event is emitted', () => {
|
||||
const defer = pDefer()
|
||||
const metadataKey = 'location'
|
||||
const metadataValue = Buffer.from('mars')
|
||||
|
||||
mb.set(peerId, metadataKey, metadataValue)
|
||||
|
||||
// Listen after set
|
||||
peerStore.on('change:metadata', () => {
|
||||
defer.resolve()
|
||||
})
|
||||
|
||||
const deleted = mb.deleteValue(peerId, metadataKey)
|
||||
|
||||
expect(deleted).to.equal(true)
|
||||
|
||||
return defer.promise
|
||||
})
|
||||
|
||||
it('returns false if there is a record for the peer but not the given metadata key', () => {
|
||||
const defer = pDefer()
|
||||
const metadataKey = 'location'
|
||||
const metadataBadKey = 'nickname'
|
||||
const metadataValue = Buffer.from('mars')
|
||||
|
||||
mb.set(peerId, metadataKey, metadataValue)
|
||||
|
||||
peerStore.on('change:metadata', () => {
|
||||
defer.reject()
|
||||
})
|
||||
|
||||
const deleted = mb.deleteValue(peerId, metadataBadKey)
|
||||
|
||||
expect(deleted).to.equal(false)
|
||||
|
||||
// Wait 50ms for incorrect invalid event
|
||||
setTimeout(() => {
|
||||
defer.resolve()
|
||||
}, 50)
|
||||
|
||||
return defer.promise
|
||||
})
|
||||
})
|
||||
})
|
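The removed spec documents the metadata book contract: string keys, Buffer values, and a `change:metadata` event that only fires when the stored bytes actually change. A compact sketch (the require path is an assumption; the tests load it relatively):

```js
const PeerStore = require('libp2p/src/peer-store') // assumed path

function metadataExample (peerId) {
  const peerStore = new PeerStore()

  peerStore.on('change:metadata', ({ peerId, metadata }) => {
    // `metadata` is the key that changed, per the events asserted above.
    console.log('metadata changed for', peerId.toB58String(), '->', metadata)
  })

  peerStore.metadataBook.set(peerId, 'location', Buffer.from('mars'))
  const value = peerStore.metadataBook.getValue(peerId, 'location') // Buffer containing 'mars'
  const all = peerStore.metadataBook.get(peerId)                    // Map { 'location' => Buffer }

  peerStore.metadataBook.deleteValue(peerId, 'location') // true, and emits again
  return { value, all }
}
```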
@@ -24,6 +24,7 @@ describe('libp2p.peerStore', () => {
})

it('adds peer address to AddressBook and keys to the keybook when establishing connection', async () => {
const idStr = libp2p.peerId.toB58String()
const remoteIdStr = remoteLibp2p.peerId.toB58String()

const spyAddressBook = sinon.spy(libp2p.peerStore.addressBook, 'add')
@@ -39,10 +40,13 @@ describe('libp2p.peerStore', () => {
const localPeers = libp2p.peerStore.peers
expect(localPeers.size).to.equal(1)

const publicKeyInLocalPeer = localPeers.get(remoteIdStr).id.pubKey
expect(publicKeyInLocalPeer.bytes).to.equalBytes(remoteLibp2p.peerId.pubKey.bytes)
// TODO: needs https://github.com/NodeFactoryIo/js-libp2p-noise/issues/58
// const publicKeyInLocalPeer = localPeers.get(remoteIdStr).id.pubKey
// expect(publicKeyInLocalPeer.bytes).to.equalBytes(remoteLibp2p.peerId.pubKey.bytes)

const publicKeyInRemotePeer = remoteLibp2p.peerStore.keyBook.get(libp2p.peerId)
const remotePeers = remoteLibp2p.peerStore.peers
expect(remotePeers.size).to.equal(1)
const publicKeyInRemotePeer = remotePeers.get(idStr).id.pubKey
expect(publicKeyInRemotePeer).to.exist()
expect(publicKeyInRemotePeer.bytes).to.equalBytes(libp2p.peerId.pubKey.bytes)
})
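The updated assertions read public keys from the key book rather than from the aggregated peer record; a short sketch of that lookup with two connected nodes (names follow the test):

```js
// `libp2p` and `remoteLibp2p` are two started nodes, as set up in this spec.
async function checkKeyBook (libp2p, remoteLibp2p) {
  await libp2p.dial(remoteLibp2p.peerId)

  // After the handshake each side stores the other's public key in its key book.
  const remoteKnowsUs = remoteLibp2p.peerStore.keyBook.get(libp2p.peerId)
  return Boolean(remoteKnowsUs)
}
```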
@@ -158,57 +158,4 @@ describe('peer-store', () => {
expect(peerListenint4[1].id.toB58String()).to.eql(peerIds[3].toB58String())
})
})

describe('peerStore.peers', () => {
let peerStore

beforeEach(() => {
peerStore = new PeerStore()
})

it('returns peers if only addresses are known', () => {
peerStore.addressBook.set(peerIds[0], [addr1])

const peers = peerStore.peers
expect(peers.size).to.equal(1)

const peerData = peers.get(peerIds[0].toB58String())
expect(peerData).to.exist()
expect(peerData.id).to.exist()
expect(peerData.addresses).to.have.lengthOf(1)
expect(peerData.protocols).to.have.lengthOf(0)
expect(peerData.metadata).to.not.exist()
})

it('returns peers if only protocols are known', () => {
peerStore.protoBook.set(peerIds[0], [proto1])

const peers = peerStore.peers
expect(peers.size).to.equal(1)

const peerData = peers.get(peerIds[0].toB58String())
expect(peerData).to.exist()
expect(peerData.id).to.exist()
expect(peerData.addresses).to.have.lengthOf(0)
expect(peerData.protocols).to.have.lengthOf(1)
expect(peerData.metadata).to.not.exist()
})

it('returns peers if only metadata is known', () => {
const metadataKey = 'location'
const metadataValue = Buffer.from('earth')
peerStore.metadataBook.set(peerIds[0], metadataKey, metadataValue)

const peers = peerStore.peers
expect(peers.size).to.equal(1)

const peerData = peers.get(peerIds[0].toB58String())
expect(peerData).to.exist()
expect(peerData.id).to.exist()
expect(peerData.addresses).to.have.lengthOf(0)
expect(peerData.protocols).to.have.lengthOf(0)
expect(peerData.metadata).to.exist()
expect(peerData.metadata.get(metadataKey)).to.equalBytes(metadataValue)
})
})
})
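These cases pin down how `peerStore.peers` aggregates the individual books: a peer shows up as soon as any one book knows something about it, with the other fields empty or undefined. A small sketch:

```js
// `peerStore` is a fresh PeerStore and `peerId`/`addr1` mirror the fixtures above.
function summarise (peerStore, peerId, addr1) {
  peerStore.addressBook.set(peerId, [addr1])

  const record = peerStore.peers.get(peerId.toB58String())
  return {
    addresses: record.addresses.length, // 1
    protocols: record.protocols.length, // 0 (nothing in the proto book yet)
    metadata: record.metadata           // undefined until the metadata book is used
  }
}
```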
@@ -5,6 +5,7 @@ const chai = require('chai')
|
||||
chai.use(require('dirty-chai'))
|
||||
const { expect } = chai
|
||||
const sinon = require('sinon')
|
||||
|
||||
const PeerStore = require('../../src/peer-store/persistent')
|
||||
const multiaddr = require('multiaddr')
|
||||
const { MemoryDatastore } = require('interface-datastore')
|
||||
@@ -61,7 +62,6 @@ describe('Persisted PeerStore', () => {
|
||||
const protocols = ['/ping/1.0.0']
|
||||
const spyDirty = sinon.spy(peerStore, '_addDirtyPeer')
|
||||
const spyDs = sinon.spy(datastore, 'batch')
|
||||
const commitSpy = sinon.spy(peerStore, '_commitData')
|
||||
|
||||
await peerStore.start()
|
||||
|
||||
@@ -71,18 +71,12 @@ describe('Persisted PeerStore', () => {
|
||||
expect(spyDirty).to.have.property('callCount', 1) // Address
|
||||
expect(spyDs).to.have.property('callCount', 1)
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
// ProtoBook
|
||||
peerStore.protoBook.set(peer, protocols)
|
||||
|
||||
expect(spyDirty).to.have.property('callCount', 2) // Protocol
|
||||
expect(spyDs).to.have.property('callCount', 2)
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
// Should have three peer records stored in the datastore
|
||||
const queryParams = {
|
||||
prefix: '/peers/'
|
||||
@@ -104,7 +98,6 @@ describe('Persisted PeerStore', () => {
|
||||
it('should load content to the peerStore when restart but not put in datastore again', async () => {
|
||||
const spyDs = sinon.spy(datastore, 'batch')
|
||||
const peers = await peerUtils.createPeerId({ number: 2 })
|
||||
const commitSpy = sinon.spy(peerStore, '_commitData')
|
||||
const multiaddrs = [
|
||||
multiaddr('/ip4/156.10.1.22/tcp/1000'),
|
||||
multiaddr('/ip4/156.10.1.23/tcp/1000')
|
||||
@@ -117,30 +110,15 @@ describe('Persisted PeerStore', () => {
|
||||
peerStore.addressBook.set(peers[0], [multiaddrs[0]])
|
||||
peerStore.addressBook.set(peers[1], [multiaddrs[1]])
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
// KeyBook
|
||||
peerStore.keyBook.set(peers[0], peers[0].pubKey)
|
||||
peerStore.keyBook.set(peers[1], peers[1].pubKey)
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
// ProtoBook
|
||||
peerStore.protoBook.set(peers[0], protocols)
|
||||
peerStore.protoBook.set(peers[1], protocols)
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
// MetadataBook
|
||||
peerStore.metadataBook.set(peers[0], 'location', Buffer.from('earth'))
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
expect(spyDs).to.have.property('callCount', 7) // 2 Address + 2 Key + 2 Proto + 1 Metadata
|
||||
expect(spyDs).to.have.property('callCount', 6) // 2 AddressBook + 2 KeyBook + 2 ProtoBook
|
||||
expect(peerStore.peers.size).to.equal(2)
|
||||
|
||||
await peerStore.stop()
|
||||
@@ -153,21 +131,19 @@ describe('Persisted PeerStore', () => {
|
||||
|
||||
await peerStore.start()
|
||||
|
||||
expect(spy).to.have.property('callCount', 7)
|
||||
expect(spyDs).to.have.property('callCount', 7)
|
||||
expect(spy).to.have.property('callCount', 6)
|
||||
expect(spyDs).to.have.property('callCount', 6)
|
||||
|
||||
expect(peerStore.peers.size).to.equal(2)
|
||||
expect(peerStore.addressBook.data.size).to.equal(2)
|
||||
expect(peerStore.keyBook.data.size).to.equal(2)
|
||||
expect(peerStore.protoBook.data.size).to.equal(2)
|
||||
expect(peerStore.metadataBook.data.size).to.equal(1)
|
||||
})
|
||||
|
||||
it('should delete content from the datastore on delete', async () => {
|
||||
const [peer] = await peerUtils.createPeerId()
|
||||
const multiaddrs = [multiaddr('/ip4/156.10.1.22/tcp/1000')]
|
||||
const protocols = ['/ping/1.0.0']
|
||||
const commitSpy = sinon.spy(peerStore, '_commitData')
|
||||
|
||||
await peerStore.start()
|
||||
|
||||
@@ -175,31 +151,20 @@ describe('Persisted PeerStore', () => {
|
||||
peerStore.addressBook.set(peer, multiaddrs)
|
||||
// ProtoBook
|
||||
peerStore.protoBook.set(peer, protocols)
|
||||
// MetadataBook
|
||||
peerStore.metadataBook.set(peer, 'location', Buffer.from('earth'))
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
const spyDs = sinon.spy(datastore, 'batch')
|
||||
const spyAddressBook = sinon.spy(peerStore.addressBook, 'delete')
|
||||
const spyKeyBook = sinon.spy(peerStore.keyBook, 'delete')
|
||||
const spyProtoBook = sinon.spy(peerStore.protoBook, 'delete')
|
||||
const spyMetadataBook = sinon.spy(peerStore.metadataBook, 'delete')
|
||||
|
||||
// Delete from PeerStore
|
||||
peerStore.delete(peer)
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
await peerStore.stop()
|
||||
|
||||
expect(spyAddressBook).to.have.property('callCount', 1)
|
||||
expect(spyKeyBook).to.have.property('callCount', 1)
|
||||
expect(spyProtoBook).to.have.property('callCount', 1)
|
||||
expect(spyMetadataBook).to.have.property('callCount', 1)
|
||||
expect(spyDs).to.have.property('callCount', 3)
|
||||
expect(spyDs).to.have.property('callCount', 2)
|
||||
|
||||
// Should have zero peer records stored in the datastore
|
||||
const queryParams = {
|
||||
@@ -222,9 +187,7 @@ describe('Persisted PeerStore', () => {
|
||||
|
||||
it('should not commit until threshold is reached', async () => {
|
||||
const spyDirty = sinon.spy(peerStore, '_addDirtyPeer')
|
||||
const spyDirtyMetadata = sinon.spy(peerStore, '_addDirtyPeerMetadata')
|
||||
const spyDs = sinon.spy(datastore, 'batch')
|
||||
const commitSpy = sinon.spy(peerStore, '_commitData')
|
||||
|
||||
const peers = await peerUtils.createPeerId({ number: 2 })
|
||||
|
||||
@@ -239,19 +202,11 @@ describe('Persisted PeerStore', () => {
|
||||
// Add Peer0 data in multiple books
|
||||
peerStore.addressBook.set(peers[0], multiaddrs)
|
||||
peerStore.protoBook.set(peers[0], protocols)
|
||||
peerStore.metadataBook.set(peers[0], 'location', Buffer.from('earth'))
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
// Remove data from the same Peer
|
||||
peerStore.addressBook.delete(peers[0])
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
expect(spyDirty).to.have.property('callCount', 3) // 2 AddrBook ops, 1 ProtoBook op
|
||||
expect(spyDirtyMetadata).to.have.property('callCount', 1) // 1 MetadataBook op
|
||||
expect(peerStore._dirtyPeers.size).to.equal(1)
|
||||
expect(spyDs).to.have.property('callCount', 0)
|
||||
|
||||
@@ -265,19 +220,15 @@ describe('Persisted PeerStore', () => {
|
||||
// Add data for second book
|
||||
peerStore.addressBook.set(peers[1], multiaddrs)
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
expect(spyDirty).to.have.property('callCount', 4)
|
||||
expect(spyDirtyMetadata).to.have.property('callCount', 1)
|
||||
expect(spyDs).to.have.property('callCount', 1)
|
||||
|
||||
// Should have three peer records stored in the datastore
|
||||
// Should have two peer records stored in the datastore
|
||||
let count = 0
|
||||
for await (const _ of datastore.query(queryParams)) { // eslint-disable-line
|
||||
count++
|
||||
}
|
||||
expect(count).to.equal(3)
|
||||
expect(count).to.equal(2)
|
||||
expect(peerStore.peers.size).to.equal(2)
|
||||
})
|
||||
|
||||
@@ -290,7 +241,7 @@ describe('Persisted PeerStore', () => {
|
||||
|
||||
await peerStore.start()
|
||||
|
||||
// Add Peer data in a book
|
||||
// Add Peer data in a booka
|
||||
peerStore.protoBook.set(peer, protocols)
|
||||
|
||||
expect(spyDs).to.have.property('callCount', 0)
|
||||
@@ -370,7 +321,6 @@ describe('libp2p.peerStore (Persisted)', () => {
|
||||
})
|
||||
|
||||
it('should load content to the peerStore when a new node is started with the same datastore', async () => {
|
||||
const commitSpy = sinon.spy(libp2p.peerStore, '_commitData')
|
||||
const peers = await peerUtils.createPeerId({ number: 2 })
|
||||
const multiaddrs = [
|
||||
multiaddr('/ip4/156.10.1.22/tcp/1000'),
|
||||
@@ -384,16 +334,10 @@ describe('libp2p.peerStore (Persisted)', () => {
|
||||
libp2p.peerStore.addressBook.set(peers[0], [multiaddrs[0]])
|
||||
libp2p.peerStore.addressBook.set(peers[1], [multiaddrs[1]])
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
// ProtoBook
|
||||
libp2p.peerStore.protoBook.set(peers[0], protocols)
|
||||
libp2p.peerStore.protoBook.set(peers[1], protocols)
|
||||
|
||||
// let batch commit complete
|
||||
await Promise.all(commitSpy.returnValues)
|
||||
|
||||
expect(libp2p.peerStore.peers.size).to.equal(2)
|
||||
|
||||
await libp2p.stop()
|
||||
|
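The batching behaviour above comes from the persistent peer store committing dirty peers to a datastore. The sketch below shows the wiring, noting that the constructor options (`datastore`, `threshold`) are inferred from the behaviour exercised here rather than quoted from the source.

```js
const { MemoryDatastore } = require('interface-datastore')
const PersistentPeerStore = require('libp2p/src/peer-store/persistent') // assumed path

async function persistedPeerStore () {
  const datastore = new MemoryDatastore()
  // `threshold`: how many dirty peers accumulate before a datastore batch is committed.
  const peerStore = new PersistentPeerStore({ datastore, threshold: 2 })

  await peerStore.start()
  // Writes to the address/key/proto/metadata books are persisted in batches,
  // and start() on a new instance with the same datastore reloads them.
  await peerStore.stop()
  return datastore
}
```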
@@ -189,135 +189,4 @@ describe('Pubsub subsystem operates correctly', () => {
|
||||
await defer.promise
|
||||
})
|
||||
})
|
||||
|
||||
describe('pubsub with intermittent connections', () => {
|
||||
beforeEach(async () => {
|
||||
libp2p = await create(mergeOptions(subsystemOptions, {
|
||||
peerId,
|
||||
addresses: {
|
||||
listen: [listenAddr]
|
||||
},
|
||||
config: {
|
||||
pubsub: {
|
||||
enabled: true,
|
||||
emitSelf: false
|
||||
}
|
||||
}
|
||||
}))
|
||||
|
||||
remoteLibp2p = await create(mergeOptions(subsystemOptions, {
|
||||
peerId: remotePeerId,
|
||||
addresses: {
|
||||
listen: [remoteListenAddr]
|
||||
},
|
||||
config: {
|
||||
pubsub: {
|
||||
enabled: true,
|
||||
emitSelf: false
|
||||
}
|
||||
}
|
||||
}))
|
||||
|
||||
await libp2p.start()
|
||||
await remoteLibp2p.start()
|
||||
|
||||
libp2p.peerStore.addressBook.set(remotePeerId, remoteLibp2p.multiaddrs)
|
||||
})
|
||||
|
||||
afterEach(() => Promise.all([
|
||||
libp2p && libp2p.stop(),
|
||||
remoteLibp2p && remoteLibp2p.stop()
|
||||
]))
|
||||
|
||||
afterEach(() => {
|
||||
sinon.restore()
|
||||
})
|
||||
|
||||
it('should receive pubsub messages after a node restart', async () => {
|
||||
const topic = 'test-topic'
|
||||
const data = 'hey!'
|
||||
const libp2pId = libp2p.peerId.toB58String()
|
||||
|
||||
let counter = 0
|
||||
const defer1 = pDefer()
|
||||
const defer2 = pDefer()
|
||||
const handler = (msg) => {
|
||||
expect(msg.data.toString()).to.equal(data)
|
||||
counter++
|
||||
counter === 1 ? defer1.resolve() : defer2.resolve()
|
||||
}
|
||||
|
||||
await libp2p.dial(remotePeerId)
|
||||
|
||||
let subscribedTopics = libp2p.pubsub.getTopics()
|
||||
expect(subscribedTopics).to.not.include(topic)
|
||||
|
||||
libp2p.pubsub.subscribe(topic, handler)
|
||||
|
||||
subscribedTopics = libp2p.pubsub.getTopics()
|
||||
expect(subscribedTopics).to.include(topic)
|
||||
|
||||
// wait for remoteLibp2p to know about libp2p subscription
|
||||
await pWaitFor(() => {
|
||||
const subscribedPeers = remoteLibp2p.pubsub.getSubscribers(topic)
|
||||
return subscribedPeers.includes(libp2pId)
|
||||
})
|
||||
remoteLibp2p.pubsub.publish(topic, data)
|
||||
|
||||
await defer1.promise
|
||||
|
||||
await remoteLibp2p.stop()
|
||||
await remoteLibp2p.start()
|
||||
|
||||
libp2p.peerStore.addressBook.set(remotePeerId, remoteLibp2p.multiaddrs)
|
||||
await libp2p.dial(remotePeerId)
|
||||
|
||||
// wait for remoteLibp2p to know about libp2p subscription
|
||||
await pWaitFor(() => {
|
||||
const subscribedPeers = remoteLibp2p.pubsub.getSubscribers(topic)
|
||||
return subscribedPeers.includes(libp2pId)
|
||||
})
|
||||
|
||||
remoteLibp2p.pubsub.publish(topic, data)
|
||||
|
||||
await defer2.promise
|
||||
})
|
||||
|
||||
it('should handle quick reconnects with a delayed disconnect', async () => {
|
||||
// Subscribe on both
|
||||
const handlerSpy = sinon.spy()
|
||||
const topic = 'reconnect-channel'
|
||||
await Promise.all([
|
||||
libp2p.pubsub.subscribe(topic, handlerSpy),
|
||||
remoteLibp2p.pubsub.subscribe(topic, handlerSpy)
|
||||
])
|
||||
// Create two connections to the remote peer
|
||||
const originalConnection = await libp2p.dialer.connectToPeer(remoteLibp2p.peerId)
|
||||
// second connection
|
||||
await libp2p.dialer.connectToPeer(remoteLibp2p.peerId)
|
||||
expect(libp2p.connections.get(remoteLibp2p.peerId.toB58String())).to.have.length(2)
|
||||
|
||||
// Wait for subscriptions to occur
|
||||
await pWaitFor(() => {
|
||||
return libp2p.pubsub.getSubscribers(topic).includes(remoteLibp2p.peerId.toB58String()) &&
|
||||
remoteLibp2p.pubsub.getSubscribers(topic).includes(libp2p.peerId.toB58String())
|
||||
})
|
||||
|
||||
// Verify messages go both ways
|
||||
libp2p.pubsub.publish(topic, 'message1')
|
||||
remoteLibp2p.pubsub.publish(topic, 'message2')
|
||||
await pWaitFor(() => handlerSpy.callCount === 2)
|
||||
expect(handlerSpy.args.map(([message]) => message.data.toString())).to.include.members(['message1', 'message2'])
|
||||
|
||||
// Disconnect the first connection (this acts as a delayed reconnect)
|
||||
await originalConnection.close()
|
||||
|
||||
// Verify messages go both ways after the disconnect
|
||||
handlerSpy.resetHistory()
|
||||
libp2p.pubsub.publish(topic, 'message3')
|
||||
remoteLibp2p.pubsub.publish(topic, 'message4')
|
||||
await pWaitFor(() => handlerSpy.callCount === 2)
|
||||
expect(handlerSpy.args.map(([message]) => message.data.toString())).to.include.members(['message3', 'message4'])
|
||||
})
|
||||
})
|
||||
})
|
||||
|
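The intermittent-connection tests reduce to a simple pattern: subscribe, wait until the remote side has learned the subscription, publish, and repeat after the remote restarts. A condensed sketch using the same calls as the tests:

```js
const pWaitFor = require('p-wait-for')

// `libp2p` and `remoteLibp2p` are started, pubsub-enabled nodes as in the setup above.
async function publishAcrossRestart (libp2p, remoteLibp2p, topic, data) {
  libp2p.pubsub.subscribe(topic, (msg) => console.log('received', msg.data.toString()))
  await libp2p.dial(remoteLibp2p.peerId)

  // Publishing before the remote knows about the subscription would drop the message.
  const remoteSeesUs = () =>
    remoteLibp2p.pubsub.getSubscribers(topic).includes(libp2p.peerId.toB58String())
  await pWaitFor(remoteSeesUs)
  remoteLibp2p.pubsub.publish(topic, data)

  // After a restart the remote has lost its subscription state: redial and wait again.
  await remoteLibp2p.stop()
  await remoteLibp2p.start()
  libp2p.peerStore.addressBook.set(remoteLibp2p.peerId, remoteLibp2p.multiaddrs)
  await libp2p.dial(remoteLibp2p.peerId)
  await pWaitFor(remoteSeesUs)
  remoteLibp2p.pubsub.publish(topic, data)
}
```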
@@ -15,8 +15,6 @@ const mockUpgrader = require('../utils/mockUpgrader')
const { MULTIADDRS_WEBSOCKETS } = require('../fixtures/browser')
const { codes: ErrorCodes } = require('../../src/errors')
const Libp2p = require('../../src')
const { FaultTolerance } = require('../../src/transport-manager')

const Peers = require('../fixtures/peers')
const PeerId = require('peer-id')

@@ -167,55 +165,3 @@ describe('libp2p.transportManager', () => {
expect(libp2p.transportManager.close.callCount).to.equal(1)
})
})

describe('libp2p.transportManager (dial only)', () => {
let peerId
let libp2p

before(async () => {
peerId = await PeerId.createFromJSON(Peers[0])
})

afterEach(async () => {
sinon.restore()
libp2p && await libp2p.stop()
})

it('fails to start if multiaddr fails to listen', async () => {
libp2p = new Libp2p({
peerId,
addresses: {
listen: [multiaddr('/ip4/127.0.0.1/tcp/0')]
},
modules: {
transport: [Transport]
}
})

try {
await libp2p.start()
} catch (err) {
expect(err).to.exist()
expect(err.code).to.equal(ErrorCodes.ERR_NO_VALID_ADDRESSES)
return
}
throw new Error('it should fail to start if multiaddr fails to listen')
})

it('does not fail to start if multiaddr fails to listen when supporting dial only mode', async () => {
libp2p = new Libp2p({
peerId,
addresses: {
listen: [multiaddr('/ip4/127.0.0.1/tcp/0')]
},
transportManager: {
faultTolerance: FaultTolerance.NO_FATAL
},
modules: {
transport: [Transport]
}
})

await libp2p.start()
})
})
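The last two cases contrast the default fail-fast behaviour with dial-only mode; a hedged sketch of the permissive configuration (the websockets transport stands in for the `Transport` used by the test file, and the require path for `FaultTolerance` is an assumption):

```js
const Libp2p = require('libp2p')
const multiaddr = require('multiaddr')
const Websockets = require('libp2p-websockets') // stand-in for the test's Transport
const { FaultTolerance } = require('libp2p/src/transport-manager') // assumed path

async function dialOnlyNode (peerId) {
  const libp2p = new Libp2p({
    peerId,
    addresses: {
      listen: [multiaddr('/ip4/127.0.0.1/tcp/0')] // websockets cannot listen on this addr
    },
    transportManager: {
      faultTolerance: FaultTolerance.NO_FATAL // keep starting even if no listener binds
    },
    modules: {
      transport: [Websockets]
    }
  })

  await libp2p.start() // would throw ERR_NO_VALID_ADDRESSES without NO_FATAL
  return libp2p
}
```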
@@ -4,7 +4,6 @@
const { Buffer } = require('buffer')
const chai = require('chai')
chai.use(require('dirty-chai'))
chai.use(require('chai-as-promised'))
const { expect } = chai
const sinon = require('sinon')
const Muxer = require('libp2p-mplex')
@@ -402,40 +401,6 @@ describe('libp2p.upgrader', () => {
expect(libp2p.upgrader.protocols.get('/echo/1.0.1')).to.equal(echoHandler)
})

it('should return muxed streams', async () => {
const remotePeer = peers[1]
libp2p = new Libp2p({
peerId: peers[0],
modules: {
transport: [Transport],
streamMuxer: [Muxer],
connEncryption: [Crypto]
}
})
const echoHandler = () => {}
libp2p.handle(['/echo/1.0.0'], echoHandler)

const remoteUpgrader = new Upgrader({
localPeer: remotePeer,
muxers: new Map([[Muxer.multicodec, Muxer]]),
cryptos: new Map([[Crypto.protocol, Crypto]])
})
remoteUpgrader.protocols.set('/echo/1.0.0', echoHandler)

const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer })
const [localConnection] = await Promise.all([
libp2p.upgrader.upgradeOutbound(outbound),
remoteUpgrader.upgradeInbound(inbound)
])
sinon.spy(remoteUpgrader, '_onStream')

const { stream } = await localConnection.newStream(['/echo/1.0.0'])
expect(stream).to.include.keys(['id', 'close', 'reset', 'timeline'])

const [arg0] = remoteUpgrader._onStream.getCall(0).args
expect(arg0.stream).to.include.keys(['id', 'close', 'reset', 'timeline'])
})

it('should emit connect and disconnect events', async () => {
const remotePeer = peers[1]
libp2p = new Libp2p({