Compare commits


1 Commit

Author SHA1 Message Date
Vasco Santos  932e10dd9b  chore: bench persisted peerStore  2020-06-02 14:32:45 +02:00
7 changed files with 335 additions and 102 deletions


@@ -1,8 +1,3 @@
<a name="0.28.0"></a>
# [0.28.0](https://github.com/libp2p/js-libp2p/compare/v0.28.0-rc.0...v0.28.0) (2020-06-05)
<a name="0.28.0-rc.0"></a>
# [0.28.0-rc.0](https://github.com/libp2p/js-libp2p/compare/v0.27.8...v0.28.0-rc.0) (2020-05-28)


@@ -1,6 +1,6 @@
{
"name": "libp2p",
"version": "0.28.0",
"version": "0.28.0-rc.0",
"description": "JavaScript implementation of libp2p, a modular peer to peer network stack",
"leadMaintainer": "Jacob Heun <jacobheun@gmail.com>",
"main": "src/index.js",
@@ -50,7 +50,7 @@
"err-code": "^2.0.0",
"events": "^3.1.0",
"hashlru": "^2.3.0",
"interface-datastore": "^1.0.4",
"interface-datastore": "^0.8.3",
"ipfs-utils": "^2.2.0",
"it-all": "^1.0.1",
"it-buffer": "^0.1.2",
@@ -92,7 +92,7 @@
"datastore-level": "^1.1.0",
"delay": "^4.3.0",
"dirty-chai": "^2.0.1",
"interop-libp2p": "^0.1.0",
"interop-libp2p": "libp2p/interop#chore/update-libp2p-daemon-with-peerstore",
"ipfs-http-client": "^44.0.0",
"it-concat": "^1.0.0",
"it-pair": "^1.0.0",


@@ -96,9 +96,7 @@ class PersistentPeerStore extends PeerStore {
    if (this._dirtyPeers.size >= this.threshold) {
      // Commit current data
      this._commitData().catch(err => {
        log.error('error committing data', err)
      })
      this._commitData()
    }
  }
@@ -122,9 +120,7 @@ class PersistentPeerStore extends PeerStore {
    if (this._dirtyPeers.size >= this.threshold) {
      // Commit current data
      this._commitData().catch(err => {
        log.error('error committing data', err)
      })
      this._commitData()
    }
  }
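
This hunk is the code path the new benchmarks exercise: each book write marks a peer as dirty, and once `_dirtyPeers.size` reaches `threshold` a batch commit is kicked off. A minimal, hypothetical sketch of that pattern follows; only `_dirtyPeers`, `_commitData` and `threshold` are taken from the diff above, while the class name, key layout and payload are invented for illustration.

```js
'use strict'
// Hypothetical sketch of a threshold-based batch flush, not the actual libp2p source.
const { Key, MemoryDatastore } = require('interface-datastore')

class ThresholdFlusher {
  constructor (datastore, threshold = 5) {
    this.datastore = datastore
    this.threshold = threshold
    this._dirtyPeers = new Set()
  }

  // Called on every book write; kicks off a batch commit once enough
  // peers have accumulated (fire-and-forget, as in the diff above).
  markDirty (peerIdStr) {
    this._dirtyPeers.add(peerIdStr)
    if (this._dirtyPeers.size >= this.threshold) {
      this._commitData().catch((err) => console.error('error committing data', err))
    }
  }

  // Writes every dirty peer in one datastore batch, then clears the set.
  async _commitData () {
    const batch = this.datastore.batch()
    for (const id of this._dirtyPeers) {
      batch.put(new Key(`/peers/${id}`), Buffer.from(id)) // placeholder key and value
    }
    this._dirtyPeers.clear()
    await batch.commit()
  }
}

// Usage: const flusher = new ThresholdFlusher(new MemoryDatastore(), 5)
```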

test/benchmark.node.js (new file, 167 lines)

@@ -0,0 +1,167 @@
'use strict'
/* eslint-env mocha */
const chai = require('chai')
chai.use(require('dirty-chai'))
const pWaitFor = require('p-wait-for')
const { createPeerId } = require('./utils/creators/peer')
const Libp2p = require('../src')
const Transport = require('libp2p-tcp')
const Muxer = require('libp2p-mplex')
const Crypto = require('libp2p-secio')
const MulticastDNS = require('libp2p-mdns')
const multiaddr = require('multiaddr')
const { MemoryDatastore } = require('interface-datastore')
describe.only('benchmark', function () {
  this.timeout(30e3)

  let nodes

  const create = async (config = {}) => {
    const peerIds = await createPeerId({ number: 5 })

    nodes = await Promise.all(peerIds.map((peerId) => Libp2p.create({
      peerId,
      config: {
        relay: {
          enabled: true,
          hop: {
            enabled: false
          }
        },
        peerDiscovery: {
          autoDial: true, // Auto connect to discovered peers (limited by ConnectionManager minPeers)
          // The `tag` property will be searched when creating the instance of your Peer Discovery service.
          // The associated object will be passed to the service when it is instantiated.
          [MulticastDNS.tag]: {
            interval: 1000,
            enabled: true
          }
          // .. other discovery module options.
        }
      },
      modules: {
        transport: [Transport],
        streamMuxer: [Muxer],
        connEncryption: [Crypto],
        peerDiscovery: [MulticastDNS]
      },
      addresses: {
        listen: [multiaddr('/ip4/127.0.0.1/tcp/0')]
      },
      ...config
    })))

    nodes[1].connectionManager.on('peer:connected', () => {
      console.log('connected')
    })

    await Promise.all(nodes.map((node) => node.start()))
  }
  const clearStore = (node) => {
    node.peerStore.addressBook.data.clear()
    node.peerStore.protoBook.data.clear()
    node.peerStore.keyBook.data.clear()
    node.peerStore.metadataBook.data.clear()
  }

  const test = async () => {
    await nodes[4].start()

    const start = Date.now()
    await pWaitFor(() =>
      nodes[0].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[1].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[2].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[3].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[4].connectionManager.connections.size >= nodes.length - 1
    )

    return Date.now() - start
  }

  afterEach(() => Promise.all(nodes.map((node) => node.stop())))

  it('run without persistence', async () => {
    await create({
      datastore: new MemoryDatastore(),
      peerStore: {
        persistence: false,
        threshold: 5
      }
    })

    // wait for all peers to connect
    await pWaitFor(() =>
      nodes[0].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[1].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[2].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[3].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[4].connectionManager.connections.size >= nodes.length - 1
    )
    console.log('all peers connected')

    await nodes[4].stop()
    await pWaitFor(() =>
      nodes[0].connectionManager.connections.size >= nodes.length - 2 &&
      nodes[1].connectionManager.connections.size >= nodes.length - 2 &&
      nodes[2].connectionManager.connections.size >= nodes.length - 2 &&
      nodes[3].connectionManager.connections.size >= nodes.length - 2
    )
    clearStore(nodes[4])
    console.log('peers disconnected 1')

    const dur = await test()
    console.log('all peers connected', dur)
  })

  it('run with persistence', async () => {
    await create({
      datastore: new MemoryDatastore(),
      peerStore: {
        persistence: true,
        threshold: 5
      }
    })

    // wait for all peers to connect
    await pWaitFor(() =>
      nodes[0].connectionManager.connections.size >= 4 &&
      nodes[1].connectionManager.connections.size >= 4 &&
      nodes[2].connectionManager.connections.size >= 4 &&
      nodes[3].connectionManager.connections.size >= 4 &&
      nodes[4].connectionManager.connections.size >= 4
    )
    console.log('all peers connected')

    await nodes[4].stop()
    await pWaitFor(() =>
      nodes[0].connectionManager.connections.size >= 3 &&
      nodes[1].connectionManager.connections.size >= 3 &&
      nodes[2].connectionManager.connections.size >= 3 &&
      nodes[3].connectionManager.connections.size >= 3
    )
    clearStore(nodes[4])
    console.log('peers disconnected 1')

    const dur = await test()
    console.log('all peers connected', dur)
  })
})
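
The five `connectionManager.connections.size` checks above repeat the same predicate for every node. A hypothetical helper along these lines (not part of this commit; `waitForConnections` is an invented name) could express the same wait once:

```js
'use strict'
// Hypothetical helper, not part of this commit: wait until every node in the
// list reports at least `min` open connections in its ConnectionManager.
const pWaitFor = require('p-wait-for')

const waitForConnections = (nodes, min) =>
  pWaitFor(() => nodes.every((node) => node.connectionManager.connections.size >= min))

// Usage inside the benchmark above:
//   await waitForConnections(nodes, nodes.length - 1)               // full mesh
//   await waitForConnections(nodes.slice(0, 4), nodes.length - 2)   // after nodes[4] stops
module.exports = waitForConnections
```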

test/benchmark.spec.js (new file, 162 lines)

@@ -0,0 +1,162 @@
'use strict'
/* eslint-env mocha */
const chai = require('chai')
chai.use(require('dirty-chai'))
const pWaitFor = require('p-wait-for')
const { isNode } = require('ipfs-utils/src/env')
const { createPeerId } = require('./utils/creators/peer')
const Libp2p = require('../src')
const Transport = require('libp2p-webrtc-star')
const Muxer = require('libp2p-mplex')
const Crypto = require('libp2p-secio')
const multiaddr = require('multiaddr')
const { MemoryDatastore } = require('interface-datastore')
const signallerAddr = '/ip4/127.0.0.1/tcp/15555/ws/p2p-webrtc-star'
describe.only('benchmark', function () {
  this.timeout(30e3)

  let nodes

  if (isNode) {
    return
  }

  const create = async (config = {}) => {
    const peerIds = await createPeerId({ number: 5 })

    nodes = await Promise.all(peerIds.map((peerId) => Libp2p.create({
      peerId,
      config: {
        relay: {
          enabled: true,
          hop: {
            enabled: false
          }
        }
      },
      modules: {
        transport: [Transport],
        streamMuxer: [Muxer],
        connEncryption: [Crypto]
      },
      addresses: {
        listen: [multiaddr(`${signallerAddr}/p2p/${peerId.toB58String()}`)]
      },
      ...config
    })))

    nodes[1].connectionManager.on('peer:connected', () => {
      console.log('connected')
    })

    await Promise.all(nodes.map((node) => node.start()))
  }
  const clearStore = (node) => {
    node.peerStore.addressBook.data.clear()
    node.peerStore.protoBook.data.clear()
    node.peerStore.keyBook.data.clear()
    node.peerStore.metadataBook.data.clear()
  }

  const test = async () => {
    const start = Date.now()
    await nodes[4].start()
    console.log('node start time', Date.now() - start)

    await pWaitFor(() =>
      nodes[0].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[1].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[2].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[3].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[4].connectionManager.connections.size >= nodes.length - 1
    )
  }

  afterEach(() => Promise.all(nodes.map((node) => node.stop())))

  it('run without persistence', async () => {
    const start = Date.now()
    await create({
      datastore: new MemoryDatastore(),
      peerStore: {
        persistence: false,
        threshold: 5
      }
    })

    // wait for all peers to connect
    await pWaitFor(() =>
      nodes[0].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[1].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[2].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[3].connectionManager.connections.size >= nodes.length - 1 &&
      nodes[4].connectionManager.connections.size >= nodes.length - 1
    )
    console.log('all peers connected')

    await nodes[4].stop()
    await pWaitFor(() =>
      nodes[0].connectionManager.connections.size >= nodes.length - 2 &&
      nodes[1].connectionManager.connections.size >= nodes.length - 2 &&
      nodes[2].connectionManager.connections.size >= nodes.length - 2 &&
      nodes[3].connectionManager.connections.size >= nodes.length - 2
    )
    clearStore(nodes[4])
    console.log('peers disconnected 1')

    await test()
    console.log('all peers connected', Date.now() - start)
  })

  it.only('run with persistence', async () => {
    const start = Date.now()
    await create({
      datastore: new MemoryDatastore(),
      peerStore: {
        persistence: true,
        threshold: 5
      }
    })

    // wait for all peers to connect
    await pWaitFor(() =>
      nodes[0].connectionManager.connections.size >= 4 &&
      nodes[1].connectionManager.connections.size >= 4 &&
      nodes[2].connectionManager.connections.size >= 4 &&
      nodes[3].connectionManager.connections.size >= 4 &&
      nodes[4].connectionManager.connections.size >= 4
    )
    console.log('all peers connected')

    await nodes[4].stop()
    await pWaitFor(() =>
      nodes[0].connectionManager.connections.size >= 3 &&
      nodes[1].connectionManager.connections.size >= 3 &&
      nodes[2].connectionManager.connections.size >= 3 &&
      nodes[3].connectionManager.connections.size >= 3
    )
    clearStore(nodes[4])
    console.log('peers disconnected 1')

    await test()
    console.log('all peers connected', Date.now() - start)
  })
})
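
This browser variant assumes a webrtc-star signalling server is already listening at `signallerAddr`; if it is not, every wait above simply runs into mocha's 30 s timeout. One possible tweak, not part of this commit, is to bound the wait with p-wait-for's own options so the failure surfaces sooner:

```js
'use strict'
// Hypothetical variant, not part of this commit: the same full-mesh predicate
// as the tests above, but bounded by p-wait-for's timeout option so it throws
// a TimeoutError after 10 s instead of hitting mocha's 30 s limit.
const pWaitFor = require('p-wait-for')

const waitForFullMesh = (nodes) => pWaitFor(
  () => nodes.every((node) => node.connectionManager.connections.size >= nodes.length - 1),
  { interval: 100, timeout: 10000 }
)

module.exports = waitForFullMesh
```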


@@ -5,6 +5,7 @@ const chai = require('chai')
chai.use(require('dirty-chai'))
const { expect } = chai
const sinon = require('sinon')
const PeerStore = require('../../src/peer-store/persistent')
const multiaddr = require('multiaddr')
const { MemoryDatastore } = require('interface-datastore')
@@ -61,7 +62,6 @@ describe('Persisted PeerStore', () => {
const protocols = ['/ping/1.0.0']
const spyDirty = sinon.spy(peerStore, '_addDirtyPeer')
const spyDs = sinon.spy(datastore, 'batch')
const commitSpy = sinon.spy(peerStore, '_commitData')
await peerStore.start()
@@ -71,18 +71,12 @@ describe('Persisted PeerStore', () => {
expect(spyDirty).to.have.property('callCount', 1) // Address
expect(spyDs).to.have.property('callCount', 1)
// let batch commit complete
await Promise.all(commitSpy.returnValues)
// ProtoBook
peerStore.protoBook.set(peer, protocols)
expect(spyDirty).to.have.property('callCount', 2) // Protocol
expect(spyDs).to.have.property('callCount', 2)
// let batch commit complete
await Promise.all(commitSpy.returnValues)
// Should have three peer records stored in the datastore
const queryParams = {
prefix: '/peers/'
@@ -104,7 +98,6 @@ describe('Persisted PeerStore', () => {
it('should load content to the peerStore when restart but not put in datastore again', async () => {
const spyDs = sinon.spy(datastore, 'batch')
const peers = await peerUtils.createPeerId({ number: 2 })
const commitSpy = sinon.spy(peerStore, '_commitData')
const multiaddrs = [
multiaddr('/ip4/156.10.1.22/tcp/1000'),
multiaddr('/ip4/156.10.1.23/tcp/1000')
@@ -117,29 +110,17 @@ describe('Persisted PeerStore', () => {
peerStore.addressBook.set(peers[0], [multiaddrs[0]])
peerStore.addressBook.set(peers[1], [multiaddrs[1]])
// let batch commit complete
await Promise.all(commitSpy.returnValues)
// KeyBook
peerStore.keyBook.set(peers[0], peers[0].pubKey)
peerStore.keyBook.set(peers[1], peers[1].pubKey)
// let batch commit complete
await Promise.all(commitSpy.returnValues)
// ProtoBook
peerStore.protoBook.set(peers[0], protocols)
peerStore.protoBook.set(peers[1], protocols)
// let batch commit complete
await Promise.all(commitSpy.returnValues)
// MetadataBook
peerStore.metadataBook.set(peers[0], 'location', Buffer.from('earth'))
// let batch commit complete
await Promise.all(commitSpy.returnValues)
expect(spyDs).to.have.property('callCount', 7) // 2 Address + 2 Key + 2 Proto + 1 Metadata
expect(peerStore.peers.size).to.equal(2)
@@ -167,7 +148,6 @@ describe('Persisted PeerStore', () => {
const [peer] = await peerUtils.createPeerId()
const multiaddrs = [multiaddr('/ip4/156.10.1.22/tcp/1000')]
const protocols = ['/ping/1.0.0']
const commitSpy = sinon.spy(peerStore, '_commitData')
await peerStore.start()
@@ -178,9 +158,6 @@ describe('Persisted PeerStore', () => {
// MetadataBook
peerStore.metadataBook.set(peer, 'location', Buffer.from('earth'))
// let batch commit complete
await Promise.all(commitSpy.returnValues)
const spyDs = sinon.spy(datastore, 'batch')
const spyAddressBook = sinon.spy(peerStore.addressBook, 'delete')
const spyKeyBook = sinon.spy(peerStore.keyBook, 'delete')
@@ -189,10 +166,6 @@ describe('Persisted PeerStore', () => {
// Delete from PeerStore
peerStore.delete(peer)
// let batch commit complete
await Promise.all(commitSpy.returnValues)
await peerStore.stop()
expect(spyAddressBook).to.have.property('callCount', 1)
@@ -224,7 +197,6 @@ describe('Persisted PeerStore', () => {
const spyDirty = sinon.spy(peerStore, '_addDirtyPeer')
const spyDirtyMetadata = sinon.spy(peerStore, '_addDirtyPeerMetadata')
const spyDs = sinon.spy(datastore, 'batch')
const commitSpy = sinon.spy(peerStore, '_commitData')
const peers = await peerUtils.createPeerId({ number: 2 })
@@ -241,15 +213,9 @@ describe('Persisted PeerStore', () => {
peerStore.protoBook.set(peers[0], protocols)
peerStore.metadataBook.set(peers[0], 'location', Buffer.from('earth'))
// let batch commit complete
await Promise.all(commitSpy.returnValues)
// Remove data from the same Peer
peerStore.addressBook.delete(peers[0])
// let batch commit complete
await Promise.all(commitSpy.returnValues)
expect(spyDirty).to.have.property('callCount', 3) // 2 AddrBook ops, 1 ProtoBook op
expect(spyDirtyMetadata).to.have.property('callCount', 1) // 1 MetadataBook op
expect(peerStore._dirtyPeers.size).to.equal(1)
@@ -265,9 +231,6 @@ describe('Persisted PeerStore', () => {
// Add data for second book
peerStore.addressBook.set(peers[1], multiaddrs)
// let batch commit complete
await Promise.all(commitSpy.returnValues)
expect(spyDirty).to.have.property('callCount', 4)
expect(spyDirtyMetadata).to.have.property('callCount', 1)
expect(spyDs).to.have.property('callCount', 1)
@@ -370,7 +333,6 @@ describe('libp2p.peerStore (Persisted)', () => {
})
it('should load content to the peerStore when a new node is started with the same datastore', async () => {
const commitSpy = sinon.spy(libp2p.peerStore, '_commitData')
const peers = await peerUtils.createPeerId({ number: 2 })
const multiaddrs = [
multiaddr('/ip4/156.10.1.22/tcp/1000'),
@@ -384,16 +346,10 @@ describe('libp2p.peerStore (Persisted)', () => {
libp2p.peerStore.addressBook.set(peers[0], [multiaddrs[0]])
libp2p.peerStore.addressBook.set(peers[1], [multiaddrs[1]])
// let batch commit complete
await Promise.all(commitSpy.returnValues)
// ProtoBook
libp2p.peerStore.protoBook.set(peers[0], protocols)
libp2p.peerStore.protoBook.set(peers[1], protocols)
// let batch commit complete
await Promise.all(commitSpy.returnValues)
expect(libp2p.peerStore.peers.size).to.equal(2)
await libp2p.stop()
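
The lines removed from this test file all follow one pattern: spy on the peer store's `_commitData` and await the promises the spy has captured so far, so assertions only run after pending batch commits settle. A rough, hypothetical illustration of that pattern follows; it assumes the same mocha setup as the tests above (`peerStore` backed by a `datastore`, plus a `peerId` and `multiaddrs` created via the test utils).

```js
// Hypothetical fragment illustrating the removed wait-for-commit pattern.
it('waits for pending batch commits before asserting', async () => {
  const commitSpy = sinon.spy(peerStore, '_commitData') // _commitData is async
  const spyDs = sinon.spy(datastore, 'batch')

  peerStore.addressBook.set(peerId, multiaddrs) // triggers a commit once the threshold is hit

  // Each call to the spied method pushes its returned promise into
  // commitSpy.returnValues; awaiting them all lets assertions run only
  // after the pending datastore batches have settled.
  await Promise.all(commitSpy.returnValues)
  expect(spyDs).to.have.property('callCount', 1)
})
```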


@@ -188,48 +188,5 @@ describe('Pubsub subsystem operates correctly', () => {
await defer.promise
})
it('should have unidirectional streams closed after messages sent', async function () {
this.timeout(10e3)
const defer = pDefer()
const libp2pId = libp2p.peerId.toB58String()
const topic = 'test-topic'
const data = 'hey!'
const conn = await libp2p.dial(remotePeerId)
remoteLibp2p.pubsub.start()
await Promise.all([
pWaitFor(() => libp2p.pubsub._pubsub.peers.size === 1),
pWaitFor(() => remoteLibp2p.pubsub._pubsub.peers.size === 1)
])
// One inbound stream open from each peer to the other (each peer has 2)
expect(conn.streams.length).to.eql(2)
expect(remoteLibp2p.connectionManager.get(libp2p.peerId).streams.length).to.eql(2)
libp2p.pubsub.subscribe(topic, (msg) => {
expect(msg.data.toString()).to.equal(data)
defer.resolve()
})
// wait for remoteLibp2p to know about libp2p subscription
await pWaitFor(() => {
const subscribedPeers = remoteLibp2p.pubsub.getSubscribers(topic)
return subscribedPeers.includes(libp2pId)
})
// Streams opened to send messages have been closed again, so the count is unchanged
expect(conn.streams.length).to.eql(2)
expect(remoteLibp2p.connectionManager.get(libp2p.peerId).streams.length).to.eql(2)
remoteLibp2p.pubsub.publish(topic, data)
await defer.promise
// Streams opened to send messages have been closed again, so the count is unchanged
expect(conn.streams.length).to.eql(2)
expect(remoteLibp2p.connectionManager.get(libp2p.peerId).streams.length).to.eql(2)
})
})
})