Mirror of https://github.com/fluencelabs/js-libp2p (synced 2025-07-02 18:31:35 +00:00)
fix: time out slow reads (#1227)
There are a few places in the codebase where we send or receive data over the network without timeouts or abort controllers, so the user has to wait for the underlying socket to time out, which can take a long time depending on the platform, if it happens at all. This change ensures we can time out while running identify (both flavours), ping and fetch, and adds tests to ensure there are no regressions.
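The pattern the new tests exercise is to pass an AbortSignal into each protocol call so a slow or silent remote peer cannot stall the caller indefinitely. A minimal sketch of that idea in TypeScript, assuming only the TimeoutController API already imported by these tests; withTimeout and its op parameter are illustrative placeholders, not APIs from this repository:

import { TimeoutController } from 'timeout-abort-controller'

// any libp2p-style operation that accepts an AbortSignal option
type SignalAwareOp<T> = (opts: { signal: AbortSignal }) => Promise<T>

async function withTimeout<T> (op: SignalAwareOp<T>, timeoutMs: number): Promise<T> {
  // aborts the signal if timeoutMs elapses before the operation settles
  const timeoutController = new TimeoutController(timeoutMs)

  try {
    // a slow read now rejects with an error whose code is 'ABORT_ERR'
    // instead of waiting for the underlying socket to time out
    return await op({ signal: timeoutController.signal })
  } finally {
    // stop the timer once the operation has settled either way
    timeoutController.clear()
  }
}

// usage mirroring the tests below, e.g. a fetch that must answer within 10 ms:
// await withTimeout(async ({ signal }) => await localFetch.fetch(peerId, key, { signal }), 10)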
@@ -18,13 +18,21 @@ describe('Protocol prefix is configurable', () => {
it('protocolPrefix is provided', async () => {
const testProtocol = 'test-protocol'
libp2p = await createLibp2pNode(mergeOptions(baseOptions, {
protocolPrefix: testProtocol
identify: {
protocolPrefix: testProtocol
},
ping: {
protocolPrefix: testProtocol
},
fetch: {
protocolPrefix: testProtocol
}
}))
await libp2p.start()

const protocols = await libp2p.peerStore.protoBook.get(libp2p.peerId)
expect(protocols).to.include.members([
'/libp2p/fetch/0.0.1',
`/${testProtocol}/fetch/0.0.1`,
'/libp2p/circuit/relay/0.1.0',
`/${testProtocol}/id/1.0.0`,
`/${testProtocol}/id/push/1.0.0`,
@@ -41,7 +49,8 @@ describe('Protocol prefix is configurable', () => {
'/libp2p/circuit/relay/0.1.0',
'/ipfs/id/1.0.0',
'/ipfs/id/push/1.0.0',
'/ipfs/ping/1.0.0'
'/ipfs/ping/1.0.0',
'/libp2p/fetch/0.0.1'
])
})
})

test/fetch/index.spec.ts (new file, 133 lines)
@@ -0,0 +1,133 @@
|
||||
/* eslint-env mocha */
|
||||
|
||||
import { expect } from 'aegir/chai'
|
||||
import sinon from 'sinon'
|
||||
import { FetchService } from '../../src/fetch/index.js'
|
||||
import Peers from '../fixtures/peers.js'
|
||||
import { mockRegistrar, mockUpgrader, connectionPair } from '@libp2p/interface-compliance-tests/mocks'
|
||||
import { createFromJSON } from '@libp2p/peer-id-factory'
|
||||
import { Components } from '@libp2p/interfaces/components'
|
||||
import { DefaultConnectionManager } from '../../src/connection-manager/index.js'
|
||||
import { start, stop } from '@libp2p/interfaces/startable'
|
||||
import { CustomEvent } from '@libp2p/interfaces/events'
|
||||
import { TimeoutController } from 'timeout-abort-controller'
|
||||
import delay from 'delay'
|
||||
import { pipe } from 'it-pipe'
|
||||
|
||||
const defaultInit = {
|
||||
protocolPrefix: 'ipfs'
|
||||
}
|
||||
|
||||
async function createComponents (index: number) {
|
||||
const peerId = await createFromJSON(Peers[index])
|
||||
|
||||
const components = new Components({
|
||||
peerId,
|
||||
registrar: mockRegistrar(),
|
||||
upgrader: mockUpgrader(),
|
||||
connectionManager: new DefaultConnectionManager({
|
||||
minConnections: 50,
|
||||
maxConnections: 1000,
|
||||
autoDialInterval: 1000
|
||||
})
|
||||
})
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
describe('fetch', () => {
|
||||
let localComponents: Components
|
||||
let remoteComponents: Components
|
||||
|
||||
beforeEach(async () => {
|
||||
localComponents = await createComponents(0)
|
||||
remoteComponents = await createComponents(1)
|
||||
|
||||
await Promise.all([
|
||||
start(localComponents),
|
||||
start(remoteComponents)
|
||||
])
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
sinon.restore()
|
||||
|
||||
await Promise.all([
|
||||
stop(localComponents),
|
||||
stop(remoteComponents)
|
||||
])
|
||||
})
|
||||
|
||||
it('should be able to fetch from another peer', async () => {
|
||||
const key = 'key'
|
||||
const value = Uint8Array.from([0, 1, 2, 3, 4])
|
||||
const localFetch = new FetchService(localComponents, defaultInit)
|
||||
const remoteFetch = new FetchService(remoteComponents, defaultInit)
|
||||
|
||||
remoteFetch.registerLookupFunction(key, async (identifier) => {
|
||||
expect(identifier).to.equal(key)
|
||||
|
||||
return value
|
||||
})
|
||||
|
||||
await start(localFetch)
|
||||
await start(remoteFetch)
|
||||
|
||||
// simulate connection between nodes
|
||||
const [localToRemote, remoteToLocal] = connectionPair(localComponents, remoteComponents)
|
||||
localComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', { detail: localToRemote }))
|
||||
remoteComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', { detail: remoteToLocal }))
|
||||
|
||||
// Run fetch
|
||||
const result = await localFetch.fetch(remoteComponents.getPeerId(), key)
|
||||
|
||||
expect(result).to.equalBytes(value)
|
||||
})
|
||||
|
||||
it('should time out fetching from another peer when waiting for the record', async () => {
|
||||
const key = 'key'
|
||||
const localFetch = new FetchService(localComponents, defaultInit)
|
||||
const remoteFetch = new FetchService(remoteComponents, defaultInit)
|
||||
|
||||
await start(localFetch)
|
||||
await start(remoteFetch)
|
||||
|
||||
// simulate connection between nodes
|
||||
const [localToRemote, remoteToLocal] = connectionPair(localComponents, remoteComponents)
|
||||
localComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', { detail: localToRemote }))
|
||||
remoteComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', { detail: remoteToLocal }))
|
||||
|
||||
// replace existing handler with a really slow one
|
||||
await remoteComponents.getRegistrar().unhandle(remoteFetch.protocol)
|
||||
await remoteComponents.getRegistrar().handle(remoteFetch.protocol, ({ stream }) => {
|
||||
void pipe(
|
||||
stream,
|
||||
async function * (source) {
|
||||
for await (const chunk of source) {
|
||||
// longer than the timeout
|
||||
await delay(1000)
|
||||
|
||||
yield chunk
|
||||
}
|
||||
},
|
||||
stream
|
||||
)
|
||||
})
|
||||
|
||||
const newStreamSpy = sinon.spy(localToRemote, 'newStream')
|
||||
|
||||
// 10 ms timeout
|
||||
const timeoutController = new TimeoutController(10)
|
||||
|
||||
// Run fetch, should time out
|
||||
await expect(localFetch.fetch(remoteComponents.getPeerId(), key, {
|
||||
signal: timeoutController.signal
|
||||
}))
|
||||
.to.eventually.be.rejected.with.property('code', 'ABORT_ERR')
|
||||
|
||||
// should have closed stream
|
||||
expect(newStreamSpy).to.have.property('callCount', 1)
|
||||
const { stream } = await newStreamSpy.getCall(0).returnValue
|
||||
expect(stream).to.have.nested.property('timeline.close')
|
||||
})
|
||||
})
|
@@ -3,17 +3,13 @@
|
||||
import { expect } from 'aegir/chai'
|
||||
import sinon from 'sinon'
|
||||
import { Multiaddr } from '@multiformats/multiaddr'
|
||||
import { toString as uint8ArrayToString } from 'uint8arrays/to-string'
|
||||
import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string'
|
||||
import { codes } from '../../src/errors.js'
|
||||
import { IdentifyService, Message } from '../../src/identify/index.js'
|
||||
import Peers from '../fixtures/peers.js'
|
||||
import { createLibp2pNode } from '../../src/libp2p.js'
|
||||
import { PersistentPeerStore } from '@libp2p/peer-store'
|
||||
import { createBaseOptions } from '../utils/base-options.browser.js'
|
||||
import { DefaultAddressManager } from '../../src/address-manager/index.js'
|
||||
import { MemoryDatastore } from 'datastore-core/memory'
|
||||
import { MULTIADDRS_WEBSOCKETS } from '../fixtures/browser.js'
|
||||
import * as lp from 'it-length-prefixed'
|
||||
import drain from 'it-drain'
|
||||
import { pipe } from 'it-pipe'
|
||||
@@ -27,14 +23,9 @@ import {
|
||||
} from '../../src/identify/consts.js'
|
||||
import { DefaultConnectionManager } from '../../src/connection-manager/index.js'
|
||||
import { DefaultTransportManager } from '../../src/transport-manager.js'
|
||||
import { CustomEvent } from '@libp2p/interfaces/events'
|
||||
import delay from 'delay'
|
||||
import pWaitFor from 'p-wait-for'
|
||||
import { peerIdFromString } from '@libp2p/peer-id'
|
||||
import type { PeerId } from '@libp2p/interfaces/peer-id'
|
||||
import type { Libp2pNode } from '../../src/libp2p.js'
|
||||
import { pEvent } from 'p-event'
|
||||
import { start, stop } from '@libp2p/interfaces/startable'
|
||||
import { TimeoutController } from 'timeout-abort-controller'
|
||||
|
||||
const listenMaddrs = [new Multiaddr('/ip4/127.0.0.1/tcp/15002/ws')]
|
||||
|
||||
@@ -75,18 +66,16 @@ async function createComponents (index: number) {
|
||||
return components
|
||||
}
|
||||
|
||||
describe('Identify', () => {
|
||||
describe('identify', () => {
|
||||
let localComponents: Components
|
||||
let remoteComponents: Components
|
||||
|
||||
let localPeerRecordUpdater: PeerRecordUpdater
|
||||
let remotePeerRecordUpdater: PeerRecordUpdater
|
||||
|
||||
beforeEach(async () => {
|
||||
localComponents = await createComponents(0)
|
||||
remoteComponents = await createComponents(1)
|
||||
|
||||
localPeerRecordUpdater = new PeerRecordUpdater(localComponents)
|
||||
remotePeerRecordUpdater = new PeerRecordUpdater(remoteComponents)
|
||||
|
||||
await Promise.all([
|
||||
@@ -238,355 +227,47 @@ describe('Identify', () => {
|
||||
await stop(localIdentify)
|
||||
})
|
||||
|
||||
describe('push', () => {
|
||||
it('should be able to push identify updates to another peer', async () => {
|
||||
const localIdentify = new IdentifyService(localComponents, defaultInit)
|
||||
const remoteIdentify = new IdentifyService(remoteComponents, defaultInit)
|
||||
it('should time out during identify', async () => {
|
||||
const localIdentify = new IdentifyService(localComponents, defaultInit)
|
||||
const remoteIdentify = new IdentifyService(remoteComponents, defaultInit)
|
||||
|
||||
await start(localIdentify)
|
||||
await start(remoteIdentify)
|
||||
await start(localIdentify)
|
||||
await start(remoteIdentify)
|
||||
|
||||
const [localToRemote, remoteToLocal] = connectionPair(localComponents, remoteComponents)
|
||||
const [localToRemote] = connectionPair(localComponents, remoteComponents)
|
||||
|
||||
// ensure connections are registered by connection manager
|
||||
localComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', {
|
||||
detail: localToRemote
|
||||
}))
|
||||
remoteComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', {
|
||||
detail: remoteToLocal
|
||||
}))
|
||||
// replace existing handler with a really slow one
|
||||
await remoteComponents.getRegistrar().unhandle(MULTICODEC_IDENTIFY)
|
||||
await remoteComponents.getRegistrar().handle(MULTICODEC_IDENTIFY, ({ stream }) => {
|
||||
void pipe(
|
||||
stream,
|
||||
async function * (source) {
|
||||
// we receive no data in the identify protocol, we just send our data
|
||||
await drain(source)
|
||||
|
||||
// identify both ways
|
||||
await localIdentify.identify(localToRemote)
|
||||
await remoteIdentify.identify(remoteToLocal)
|
||||
// longer than the timeout
|
||||
await delay(1000)
|
||||
|
||||
const updatedProtocol = '/special-new-protocol/1.0.0'
|
||||
const updatedAddress = new Multiaddr('/ip4/127.0.0.1/tcp/48322')
|
||||
|
||||
// should have protocols but not our new one
|
||||
const identifiedProtocols = await remoteComponents.getPeerStore().protoBook.get(localComponents.getPeerId())
|
||||
expect(identifiedProtocols).to.not.be.empty()
|
||||
expect(identifiedProtocols).to.not.include(updatedProtocol)
|
||||
|
||||
// should have addresses but not our new one
|
||||
const identifiedAddresses = await remoteComponents.getPeerStore().addressBook.get(localComponents.getPeerId())
|
||||
expect(identifiedAddresses).to.not.be.empty()
|
||||
expect(identifiedAddresses.map(a => a.multiaddr.toString())).to.not.include(updatedAddress.toString())
|
||||
|
||||
// update local data - change event will trigger push
|
||||
await localComponents.getPeerStore().protoBook.add(localComponents.getPeerId(), [updatedProtocol])
|
||||
await localComponents.getPeerStore().addressBook.add(localComponents.getPeerId(), [updatedAddress])
|
||||
|
||||
// needed to update the peer record and send our supported addresses
|
||||
const addressManager = localComponents.getAddressManager()
|
||||
addressManager.getAddresses = () => {
|
||||
return [updatedAddress]
|
||||
}
|
||||
|
||||
// ensure sequence number of peer record we are about to create is different
|
||||
await delay(1000)
|
||||
|
||||
// make sure we have a peer record to send
|
||||
await localPeerRecordUpdater.update()
|
||||
|
||||
// wait for the remote peer store to notice the changes
|
||||
const eventPromise = pEvent(remoteComponents.getPeerStore(), 'change:multiaddrs')
|
||||
|
||||
// push updated peer record to connections
|
||||
await localIdentify.pushToPeerStore()
|
||||
|
||||
await eventPromise
|
||||
|
||||
// should have new protocol
|
||||
const updatedProtocols = await remoteComponents.getPeerStore().protoBook.get(localComponents.getPeerId())
|
||||
expect(updatedProtocols).to.not.be.empty()
|
||||
expect(updatedProtocols).to.include(updatedProtocol)
|
||||
|
||||
// should have new address
|
||||
const updatedAddresses = await remoteComponents.getPeerStore().addressBook.get(localComponents.getPeerId())
|
||||
expect(updatedAddresses.map(a => {
|
||||
return {
|
||||
multiaddr: a.multiaddr.toString(),
|
||||
isCertified: a.isCertified
|
||||
}
|
||||
})).to.deep.equal([{
|
||||
multiaddr: updatedAddress.toString(),
|
||||
isCertified: true
|
||||
}])
|
||||
|
||||
await stop(localIdentify)
|
||||
await stop(remoteIdentify)
|
||||
yield new Uint8Array()
|
||||
},
|
||||
stream
|
||||
)
|
||||
})
|
||||
|
||||
// LEGACY
|
||||
it('should be able to push identify updates to another peer with no certified peer records support', async () => {
|
||||
const localIdentify = new IdentifyService(localComponents, defaultInit)
|
||||
const remoteIdentify = new IdentifyService(remoteComponents, defaultInit)
|
||||
const newStreamSpy = sinon.spy(localToRemote, 'newStream')
|
||||
|
||||
await start(localIdentify)
|
||||
await start(remoteIdentify)
|
||||
// 10 ms timeout
|
||||
const timeoutController = new TimeoutController(10)
|
||||
|
||||
const [localToRemote, remoteToLocal] = connectionPair(localComponents, remoteComponents)
|
||||
// Run identify
|
||||
await expect(localIdentify.identify(localToRemote, {
|
||||
signal: timeoutController.signal
|
||||
}))
|
||||
.to.eventually.be.rejected.with.property('code', 'ABORT_ERR')
|
||||
|
||||
// ensure connections are registered by connection manager
|
||||
localComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', {
|
||||
detail: localToRemote
|
||||
}))
|
||||
remoteComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', {
|
||||
detail: remoteToLocal
|
||||
}))
|
||||
|
||||
// identify both ways
|
||||
await localIdentify.identify(localToRemote)
|
||||
await remoteIdentify.identify(remoteToLocal)
|
||||
|
||||
const updatedProtocol = '/special-new-protocol/1.0.0'
|
||||
const updatedAddress = new Multiaddr('/ip4/127.0.0.1/tcp/48322')
|
||||
|
||||
// should have protocols but not our new one
|
||||
const identifiedProtocols = await remoteComponents.getPeerStore().protoBook.get(localComponents.getPeerId())
|
||||
expect(identifiedProtocols).to.not.be.empty()
|
||||
expect(identifiedProtocols).to.not.include(updatedProtocol)
|
||||
|
||||
// should have addresses but not our new one
|
||||
const identifiedAddresses = await remoteComponents.getPeerStore().addressBook.get(localComponents.getPeerId())
|
||||
expect(identifiedAddresses).to.not.be.empty()
|
||||
expect(identifiedAddresses.map(a => a.multiaddr.toString())).to.not.include(updatedAddress.toString())
|
||||
|
||||
// update local data - change event will trigger push
|
||||
await localComponents.getPeerStore().protoBook.add(localComponents.getPeerId(), [updatedProtocol])
|
||||
await localComponents.getPeerStore().addressBook.add(localComponents.getPeerId(), [updatedAddress])
|
||||
|
||||
// needed to send our supported addresses
|
||||
const addressManager = localComponents.getAddressManager()
|
||||
addressManager.getAddresses = () => {
|
||||
return [updatedAddress]
|
||||
}
|
||||
|
||||
// wait until remote peer store notices protocol list update
|
||||
const waitForUpdate = pEvent(remoteComponents.getPeerStore(), 'change:protocols')
|
||||
|
||||
await localIdentify.pushToPeerStore()
|
||||
|
||||
await waitForUpdate
|
||||
|
||||
// should have new protocol
|
||||
const updatedProtocols = await remoteComponents.getPeerStore().protoBook.get(localComponents.getPeerId())
|
||||
expect(updatedProtocols).to.not.be.empty()
|
||||
expect(updatedProtocols).to.include(updatedProtocol)
|
||||
|
||||
// should have new address
|
||||
const updatedAddresses = await remoteComponents.getPeerStore().addressBook.get(localComponents.getPeerId())
|
||||
expect(updatedAddresses.map(a => {
|
||||
return {
|
||||
multiaddr: a.multiaddr.toString(),
|
||||
isCertified: a.isCertified
|
||||
}
|
||||
})).to.deep.equal([{
|
||||
multiaddr: updatedAddress.toString(),
|
||||
isCertified: false
|
||||
}])
|
||||
|
||||
await stop(localIdentify)
|
||||
await stop(remoteIdentify)
|
||||
})
|
||||
})
|
||||
|
||||
describe('libp2p.dialer.identifyService', () => {
|
||||
let peerId: PeerId
|
||||
let libp2p: Libp2pNode
|
||||
let remoteLibp2p: Libp2pNode
|
||||
const remoteAddr = MULTIADDRS_WEBSOCKETS[0]
|
||||
|
||||
before(async () => {
|
||||
peerId = await createFromJSON(Peers[0])
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
sinon.restore()
|
||||
|
||||
if (libp2p != null) {
|
||||
await libp2p.stop()
|
||||
}
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
if (remoteLibp2p != null) {
|
||||
await remoteLibp2p.stop()
|
||||
}
|
||||
})
|
||||
|
||||
it('should run identify automatically after connecting', async () => {
|
||||
libp2p = await createLibp2pNode(createBaseOptions({
|
||||
peerId
|
||||
}))
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
if (libp2p.identifyService == null) {
|
||||
throw new Error('Identity service was not configured')
|
||||
}
|
||||
|
||||
const identityServiceIdentifySpy = sinon.spy(libp2p.identifyService, 'identify')
|
||||
const peerStoreSpyConsumeRecord = sinon.spy(libp2p.peerStore.addressBook, 'consumePeerRecord')
|
||||
const peerStoreSpyAdd = sinon.spy(libp2p.peerStore.addressBook, 'add')
|
||||
|
||||
const connection = await libp2p.dial(remoteAddr)
|
||||
expect(connection).to.exist()
|
||||
|
||||
// Wait for peer store to be updated
|
||||
// Dialer._createDialTarget (add), Identify (consume)
|
||||
await pWaitFor(() => peerStoreSpyConsumeRecord.callCount === 1 && peerStoreSpyAdd.callCount === 1)
|
||||
expect(identityServiceIdentifySpy.callCount).to.equal(1)
|
||||
|
||||
// The connection should have no open streams
|
||||
await pWaitFor(() => connection.streams.length === 0)
|
||||
await connection.close()
|
||||
})
|
||||
|
||||
it('should store remote agent and protocol versions in metadataBook after connecting', async () => {
|
||||
libp2p = await createLibp2pNode(createBaseOptions({
|
||||
peerId
|
||||
}))
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
if (libp2p.identifyService == null) {
|
||||
throw new Error('Identity service was not configured')
|
||||
}
|
||||
|
||||
const identityServiceIdentifySpy = sinon.spy(libp2p.identifyService, 'identify')
|
||||
const peerStoreSpyConsumeRecord = sinon.spy(libp2p.peerStore.addressBook, 'consumePeerRecord')
|
||||
const peerStoreSpyAdd = sinon.spy(libp2p.peerStore.addressBook, 'add')
|
||||
|
||||
const connection = await libp2p.dial(remoteAddr)
|
||||
expect(connection).to.exist()
|
||||
|
||||
// Wait for peer store to be updated
|
||||
// Dialer._createDialTarget (add), Identify (consume)
|
||||
await pWaitFor(() => peerStoreSpyConsumeRecord.callCount === 1 && peerStoreSpyAdd.callCount === 1)
|
||||
expect(identityServiceIdentifySpy.callCount).to.equal(1)
|
||||
|
||||
// The connection should have no open streams
|
||||
await pWaitFor(() => connection.streams.length === 0)
|
||||
await connection.close()
|
||||
|
||||
const remotePeer = peerIdFromString(remoteAddr.getPeerId() ?? '')
|
||||
|
||||
const storedAgentVersion = await libp2p.peerStore.metadataBook.getValue(remotePeer, 'AgentVersion')
|
||||
const storedProtocolVersion = await libp2p.peerStore.metadataBook.getValue(remotePeer, 'ProtocolVersion')
|
||||
|
||||
expect(storedAgentVersion).to.exist()
|
||||
expect(storedProtocolVersion).to.exist()
|
||||
})
|
||||
|
||||
it('should push protocol updates to an already connected peer', async () => {
|
||||
libp2p = await createLibp2pNode(createBaseOptions({
|
||||
peerId
|
||||
}))
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
if (libp2p.identifyService == null) {
|
||||
throw new Error('Identity service was not configured')
|
||||
}
|
||||
|
||||
const identityServiceIdentifySpy = sinon.spy(libp2p.identifyService, 'identify')
|
||||
const identityServicePushSpy = sinon.spy(libp2p.identifyService, 'push')
|
||||
const connectionPromise = pEvent(libp2p.connectionManager, 'peer:connect')
|
||||
const connection = await libp2p.dial(remoteAddr)
|
||||
|
||||
expect(connection).to.exist()
|
||||
// Wait for connection event to be emitted
|
||||
await connectionPromise
|
||||
|
||||
// Wait for identify to finish
|
||||
await identityServiceIdentifySpy.firstCall.returnValue
|
||||
sinon.stub(libp2p, 'isStarted').returns(true)
|
||||
|
||||
await libp2p.handle('/echo/2.0.0', () => {})
|
||||
await libp2p.unhandle('/echo/2.0.0')
|
||||
|
||||
// the protocol change event listener in the identity service is async
|
||||
await pWaitFor(() => identityServicePushSpy.callCount === 2)
|
||||
|
||||
// Verify the remote peer is notified of both changes
|
||||
expect(identityServicePushSpy.callCount).to.equal(2)
|
||||
|
||||
for (const call of identityServicePushSpy.getCalls()) {
|
||||
const [connections] = call.args
|
||||
expect(connections.length).to.equal(1)
|
||||
expect(connections[0].remotePeer.toString()).to.equal(remoteAddr.getPeerId())
|
||||
await call.returnValue
|
||||
}
|
||||
|
||||
// Verify the streams close
|
||||
await pWaitFor(() => connection.streams.length === 0)
|
||||
})
|
||||
|
||||
it('should store host data and protocol version into metadataBook', async () => {
|
||||
const agentVersion = 'js-project/1.0.0'
|
||||
|
||||
libp2p = await createLibp2pNode(createBaseOptions({
|
||||
peerId,
|
||||
host: {
|
||||
agentVersion
|
||||
}
|
||||
}))
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
if (libp2p.identifyService == null) {
|
||||
throw new Error('Identity service was not configured')
|
||||
}
|
||||
|
||||
const storedAgentVersion = await libp2p.peerStore.metadataBook.getValue(peerId, 'AgentVersion')
|
||||
const storedProtocolVersion = await libp2p.peerStore.metadataBook.getValue(peerId, 'ProtocolVersion')
|
||||
|
||||
expect(agentVersion).to.equal(uint8ArrayToString(storedAgentVersion ?? new Uint8Array()))
|
||||
expect(storedProtocolVersion).to.exist()
|
||||
})
|
||||
|
||||
it('should push multiaddr updates to an already connected peer', async () => {
|
||||
libp2p = await createLibp2pNode(createBaseOptions({
|
||||
peerId
|
||||
}))
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
if (libp2p.identifyService == null) {
|
||||
throw new Error('Identity service was not configured')
|
||||
}
|
||||
|
||||
const identityServiceIdentifySpy = sinon.spy(libp2p.identifyService, 'identify')
|
||||
const identityServicePushSpy = sinon.spy(libp2p.identifyService, 'push')
|
||||
const connectionPromise = pEvent(libp2p.connectionManager, 'peer:connect')
|
||||
const connection = await libp2p.dial(remoteAddr)
|
||||
|
||||
expect(connection).to.exist()
|
||||
// Wait for connection event to be emitted
|
||||
await connectionPromise
|
||||
|
||||
// Wait for identify to finish
|
||||
await identityServiceIdentifySpy.firstCall.returnValue
|
||||
sinon.stub(libp2p, 'isStarted').returns(true)
|
||||
|
||||
await libp2p.peerStore.addressBook.add(libp2p.peerId, [new Multiaddr('/ip4/180.0.0.1/tcp/15001/ws')])
|
||||
|
||||
// the protocol change event listener in the identity service is async
|
||||
await pWaitFor(() => identityServicePushSpy.callCount === 1)
|
||||
|
||||
// Verify the remote peer is notified of change
|
||||
expect(identityServicePushSpy.callCount).to.equal(1)
|
||||
for (const call of identityServicePushSpy.getCalls()) {
|
||||
const [connections] = call.args
|
||||
expect(connections.length).to.equal(1)
|
||||
expect(connections[0].remotePeer.toString()).to.equal(remoteAddr.getPeerId())
|
||||
await call.returnValue
|
||||
}
|
||||
|
||||
// Verify the streams close
|
||||
await pWaitFor(() => connection.streams.length === 0)
|
||||
})
|
||||
// should have closed stream
|
||||
expect(newStreamSpy).to.have.property('callCount', 1)
|
||||
const { stream } = await newStreamSpy.getCall(0).returnValue
|
||||
expect(stream).to.have.nested.property('timeline.close')
|
||||
})
|
||||
})
|
||||

test/identify/push.spec.ts (new file, 296 lines)
@@ -0,0 +1,296 @@
|
||||
/* eslint-env mocha */
|
||||
|
||||
import { expect } from 'aegir/chai'
|
||||
import sinon from 'sinon'
|
||||
import { Multiaddr } from '@multiformats/multiaddr'
|
||||
import { IdentifyService } from '../../src/identify/index.js'
|
||||
import Peers from '../fixtures/peers.js'
|
||||
import { PersistentPeerStore } from '@libp2p/peer-store'
|
||||
import { DefaultAddressManager } from '../../src/address-manager/index.js'
|
||||
import { MemoryDatastore } from 'datastore-core/memory'
|
||||
import drain from 'it-drain'
|
||||
import { pipe } from 'it-pipe'
|
||||
import { mockConnectionGater, mockRegistrar, mockUpgrader, connectionPair } from '@libp2p/interface-compliance-tests/mocks'
|
||||
import { createFromJSON } from '@libp2p/peer-id-factory'
|
||||
import { Components } from '@libp2p/interfaces/components'
|
||||
import { PeerRecordUpdater } from '../../src/peer-record-updater.js'
|
||||
import {
|
||||
MULTICODEC_IDENTIFY,
|
||||
MULTICODEC_IDENTIFY_PUSH
|
||||
} from '../../src/identify/consts.js'
|
||||
import { DefaultConnectionManager } from '../../src/connection-manager/index.js'
|
||||
import { DefaultTransportManager } from '../../src/transport-manager.js'
|
||||
import { CustomEvent } from '@libp2p/interfaces/events'
|
||||
import delay from 'delay'
|
||||
import { pEvent } from 'p-event'
|
||||
import { start, stop } from '@libp2p/interfaces/startable'
|
||||
|
||||
const listenMaddrs = [new Multiaddr('/ip4/127.0.0.1/tcp/15002/ws')]
|
||||
|
||||
const defaultInit = {
|
||||
protocolPrefix: 'ipfs',
|
||||
host: {
|
||||
agentVersion: 'v1.0.0'
|
||||
}
|
||||
}
|
||||
|
||||
const protocols = [MULTICODEC_IDENTIFY, MULTICODEC_IDENTIFY_PUSH]
|
||||
|
||||
async function createComponents (index: number) {
|
||||
const peerId = await createFromJSON(Peers[index])
|
||||
|
||||
const components = new Components({
|
||||
peerId,
|
||||
datastore: new MemoryDatastore(),
|
||||
registrar: mockRegistrar(),
|
||||
upgrader: mockUpgrader(),
|
||||
connectionGater: mockConnectionGater(),
|
||||
peerStore: new PersistentPeerStore(),
|
||||
connectionManager: new DefaultConnectionManager({
|
||||
minConnections: 50,
|
||||
maxConnections: 1000,
|
||||
autoDialInterval: 1000
|
||||
})
|
||||
})
|
||||
components.setAddressManager(new DefaultAddressManager(components, {
|
||||
announce: listenMaddrs.map(ma => ma.toString())
|
||||
}))
|
||||
|
||||
const transportManager = new DefaultTransportManager(components)
|
||||
components.setTransportManager(transportManager)
|
||||
|
||||
await components.getPeerStore().protoBook.set(peerId, protocols)
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
describe('identify (push)', () => {
|
||||
let localComponents: Components
|
||||
let remoteComponents: Components
|
||||
|
||||
let localPeerRecordUpdater: PeerRecordUpdater
|
||||
|
||||
beforeEach(async () => {
|
||||
localComponents = await createComponents(0)
|
||||
remoteComponents = await createComponents(1)
|
||||
|
||||
localPeerRecordUpdater = new PeerRecordUpdater(localComponents)
|
||||
|
||||
await Promise.all([
|
||||
start(localComponents),
|
||||
start(remoteComponents)
|
||||
])
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
sinon.restore()
|
||||
|
||||
await Promise.all([
|
||||
stop(localComponents),
|
||||
stop(remoteComponents)
|
||||
])
|
||||
})
|
||||
|
||||
it('should be able to push identify updates to another peer', async () => {
|
||||
const localIdentify = new IdentifyService(localComponents, defaultInit)
|
||||
const remoteIdentify = new IdentifyService(remoteComponents, defaultInit)
|
||||
|
||||
await start(localIdentify)
|
||||
await start(remoteIdentify)
|
||||
|
||||
const [localToRemote, remoteToLocal] = connectionPair(localComponents, remoteComponents)
|
||||
|
||||
// ensure connections are registered by connection manager
|
||||
localComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', {
|
||||
detail: localToRemote
|
||||
}))
|
||||
remoteComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', {
|
||||
detail: remoteToLocal
|
||||
}))
|
||||
|
||||
// identify both ways
|
||||
await localIdentify.identify(localToRemote)
|
||||
await remoteIdentify.identify(remoteToLocal)
|
||||
|
||||
const updatedProtocol = '/special-new-protocol/1.0.0'
|
||||
const updatedAddress = new Multiaddr('/ip4/127.0.0.1/tcp/48322')
|
||||
|
||||
// should have protocols but not our new one
|
||||
const identifiedProtocols = await remoteComponents.getPeerStore().protoBook.get(localComponents.getPeerId())
|
||||
expect(identifiedProtocols).to.not.be.empty()
|
||||
expect(identifiedProtocols).to.not.include(updatedProtocol)
|
||||
|
||||
// should have addresses but not our new one
|
||||
const identifiedAddresses = await remoteComponents.getPeerStore().addressBook.get(localComponents.getPeerId())
|
||||
expect(identifiedAddresses).to.not.be.empty()
|
||||
expect(identifiedAddresses.map(a => a.multiaddr.toString())).to.not.include(updatedAddress.toString())
|
||||
|
||||
// update local data - change event will trigger push
|
||||
await localComponents.getPeerStore().protoBook.add(localComponents.getPeerId(), [updatedProtocol])
|
||||
await localComponents.getPeerStore().addressBook.add(localComponents.getPeerId(), [updatedAddress])
|
||||
|
||||
// needed to update the peer record and send our supported addresses
|
||||
const addressManager = localComponents.getAddressManager()
|
||||
addressManager.getAddresses = () => {
|
||||
return [updatedAddress]
|
||||
}
|
||||
|
||||
// ensure sequence number of peer record we are about to create is different
|
||||
await delay(1000)
|
||||
|
||||
// make sure we have a peer record to send
|
||||
await localPeerRecordUpdater.update()
|
||||
|
||||
// wait for the remote peer store to notice the changes
|
||||
const eventPromise = pEvent(remoteComponents.getPeerStore(), 'change:multiaddrs')
|
||||
|
||||
// push updated peer record to connections
|
||||
await localIdentify.pushToPeerStore()
|
||||
|
||||
await eventPromise
|
||||
|
||||
// should have new protocol
|
||||
const updatedProtocols = await remoteComponents.getPeerStore().protoBook.get(localComponents.getPeerId())
|
||||
expect(updatedProtocols).to.not.be.empty()
|
||||
expect(updatedProtocols).to.include(updatedProtocol)
|
||||
|
||||
// should have new address
|
||||
const updatedAddresses = await remoteComponents.getPeerStore().addressBook.get(localComponents.getPeerId())
|
||||
expect(updatedAddresses.map(a => {
|
||||
return {
|
||||
multiaddr: a.multiaddr.toString(),
|
||||
isCertified: a.isCertified
|
||||
}
|
||||
})).to.deep.equal([{
|
||||
multiaddr: updatedAddress.toString(),
|
||||
isCertified: true
|
||||
}])
|
||||
|
||||
await stop(localIdentify)
|
||||
await stop(remoteIdentify)
|
||||
})
|
||||
|
||||
it('should time out during push identify', async () => {
|
||||
let streamEnded = false
|
||||
const localIdentify = new IdentifyService(localComponents, {
|
||||
...defaultInit,
|
||||
timeout: 10
|
||||
})
|
||||
const remoteIdentify = new IdentifyService(remoteComponents, defaultInit)
|
||||
|
||||
await start(localIdentify)
|
||||
await start(remoteIdentify)
|
||||
|
||||
// simulate connection between nodes
|
||||
const [localToRemote] = connectionPair(localComponents, remoteComponents)
|
||||
|
||||
// replace existing handler with a really slow one
|
||||
await remoteComponents.getRegistrar().unhandle(MULTICODEC_IDENTIFY_PUSH)
|
||||
await remoteComponents.getRegistrar().handle(MULTICODEC_IDENTIFY_PUSH, ({ stream }) => {
|
||||
void pipe(
|
||||
stream,
|
||||
async function * (source) {
|
||||
// ignore the sent data
|
||||
await drain(source)
|
||||
|
||||
// longer than the timeout
|
||||
await delay(1000)
|
||||
|
||||
// the delay should have caused the local push to time out so this should
|
||||
// occur after the local push method invocation has completed
|
||||
streamEnded = true
|
||||
|
||||
yield new Uint8Array()
|
||||
},
|
||||
stream
|
||||
)
|
||||
})
|
||||
|
||||
const newStreamSpy = sinon.spy(localToRemote, 'newStream')
|
||||
|
||||
// push updated peer record to remote
|
||||
await localIdentify.push([localToRemote])
|
||||
|
||||
// should have closed stream
|
||||
expect(newStreamSpy).to.have.property('callCount', 1)
|
||||
const { stream } = await newStreamSpy.getCall(0).returnValue
|
||||
expect(stream).to.have.nested.property('timeline.close')
|
||||
|
||||
// method should have returned before the remote handler completes as we timed
|
||||
// out so we ignore the return value
|
||||
expect(streamEnded).to.be.false()
|
||||
})
|
||||
|
||||
// LEGACY
|
||||
it('should be able to push identify updates to another peer with no certified peer records support', async () => {
|
||||
const localIdentify = new IdentifyService(localComponents, defaultInit)
|
||||
const remoteIdentify = new IdentifyService(remoteComponents, defaultInit)
|
||||
|
||||
await start(localIdentify)
|
||||
await start(remoteIdentify)
|
||||
|
||||
const [localToRemote, remoteToLocal] = connectionPair(localComponents, remoteComponents)
|
||||
|
||||
// ensure connections are registered by connection manager
|
||||
localComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', {
|
||||
detail: localToRemote
|
||||
}))
|
||||
remoteComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', {
|
||||
detail: remoteToLocal
|
||||
}))
|
||||
|
||||
// identify both ways
|
||||
await localIdentify.identify(localToRemote)
|
||||
await remoteIdentify.identify(remoteToLocal)
|
||||
|
||||
const updatedProtocol = '/special-new-protocol/1.0.0'
|
||||
const updatedAddress = new Multiaddr('/ip4/127.0.0.1/tcp/48322')
|
||||
|
||||
// should have protocols but not our new one
|
||||
const identifiedProtocols = await remoteComponents.getPeerStore().protoBook.get(localComponents.getPeerId())
|
||||
expect(identifiedProtocols).to.not.be.empty()
|
||||
expect(identifiedProtocols).to.not.include(updatedProtocol)
|
||||
|
||||
// should have addresses but not our new one
|
||||
const identifiedAddresses = await remoteComponents.getPeerStore().addressBook.get(localComponents.getPeerId())
|
||||
expect(identifiedAddresses).to.not.be.empty()
|
||||
expect(identifiedAddresses.map(a => a.multiaddr.toString())).to.not.include(updatedAddress.toString())
|
||||
|
||||
// update local data - change event will trigger push
|
||||
await localComponents.getPeerStore().protoBook.add(localComponents.getPeerId(), [updatedProtocol])
|
||||
await localComponents.getPeerStore().addressBook.add(localComponents.getPeerId(), [updatedAddress])
|
||||
|
||||
// needed to send our supported addresses
|
||||
const addressManager = localComponents.getAddressManager()
|
||||
addressManager.getAddresses = () => {
|
||||
return [updatedAddress]
|
||||
}
|
||||
|
||||
// wait until remote peer store notices protocol list update
|
||||
const waitForUpdate = pEvent(remoteComponents.getPeerStore(), 'change:protocols')
|
||||
|
||||
await localIdentify.pushToPeerStore()
|
||||
|
||||
await waitForUpdate
|
||||
|
||||
// should have new protocol
|
||||
const updatedProtocols = await remoteComponents.getPeerStore().protoBook.get(localComponents.getPeerId())
|
||||
expect(updatedProtocols).to.not.be.empty()
|
||||
expect(updatedProtocols).to.include(updatedProtocol)
|
||||
|
||||
// should have new address
|
||||
const updatedAddresses = await remoteComponents.getPeerStore().addressBook.get(localComponents.getPeerId())
|
||||
expect(updatedAddresses.map(a => {
|
||||
return {
|
||||
multiaddr: a.multiaddr.toString(),
|
||||
isCertified: a.isCertified
|
||||
}
|
||||
})).to.deep.equal([{
|
||||
multiaddr: updatedAddress.toString(),
|
||||
isCertified: false
|
||||
}])
|
||||
|
||||
await stop(localIdentify)
|
||||
await stop(remoteIdentify)
|
||||
})
|
||||
})

test/identify/service.spec.ts (new file, 216 lines)
@@ -0,0 +1,216 @@
|
||||
/* eslint-env mocha */
|
||||
|
||||
import { expect } from 'aegir/chai'
|
||||
import sinon from 'sinon'
|
||||
import { Multiaddr } from '@multiformats/multiaddr'
|
||||
import { toString as uint8ArrayToString } from 'uint8arrays/to-string'
|
||||
import Peers from '../fixtures/peers.js'
|
||||
import { createLibp2pNode } from '../../src/libp2p.js'
|
||||
import { createBaseOptions } from '../utils/base-options.browser.js'
|
||||
import { MULTIADDRS_WEBSOCKETS } from '../fixtures/browser.js'
|
||||
import { createFromJSON } from '@libp2p/peer-id-factory'
|
||||
import pWaitFor from 'p-wait-for'
|
||||
import { peerIdFromString } from '@libp2p/peer-id'
|
||||
import type { PeerId } from '@libp2p/interfaces/peer-id'
|
||||
import type { Libp2pNode } from '../../src/libp2p.js'
|
||||
import { pEvent } from 'p-event'
|
||||
|
||||
describe('libp2p.dialer.identifyService', () => {
|
||||
let peerId: PeerId
|
||||
let libp2p: Libp2pNode
|
||||
let remoteLibp2p: Libp2pNode
|
||||
const remoteAddr = MULTIADDRS_WEBSOCKETS[0]
|
||||
|
||||
before(async () => {
|
||||
peerId = await createFromJSON(Peers[0])
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
sinon.restore()
|
||||
|
||||
if (libp2p != null) {
|
||||
await libp2p.stop()
|
||||
}
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
if (remoteLibp2p != null) {
|
||||
await remoteLibp2p.stop()
|
||||
}
|
||||
})
|
||||
|
||||
it('should run identify automatically after connecting', async () => {
|
||||
libp2p = await createLibp2pNode(createBaseOptions({
|
||||
peerId
|
||||
}))
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
if (libp2p.identifyService == null) {
|
||||
throw new Error('Identity service was not configured')
|
||||
}
|
||||
|
||||
const identityServiceIdentifySpy = sinon.spy(libp2p.identifyService, 'identify')
|
||||
const peerStoreSpyConsumeRecord = sinon.spy(libp2p.peerStore.addressBook, 'consumePeerRecord')
|
||||
const peerStoreSpyAdd = sinon.spy(libp2p.peerStore.addressBook, 'add')
|
||||
|
||||
const connection = await libp2p.dial(remoteAddr)
|
||||
expect(connection).to.exist()
|
||||
|
||||
// Wait for peer store to be updated
|
||||
// Dialer._createDialTarget (add), Identify (consume)
|
||||
await pWaitFor(() => peerStoreSpyConsumeRecord.callCount === 1 && peerStoreSpyAdd.callCount === 1)
|
||||
expect(identityServiceIdentifySpy.callCount).to.equal(1)
|
||||
|
||||
// The connection should have no open streams
|
||||
await pWaitFor(() => connection.streams.length === 0)
|
||||
await connection.close()
|
||||
})
|
||||
|
||||
it('should store remote agent and protocol versions in metadataBook after connecting', async () => {
|
||||
libp2p = await createLibp2pNode(createBaseOptions({
|
||||
peerId
|
||||
}))
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
if (libp2p.identifyService == null) {
|
||||
throw new Error('Identity service was not configured')
|
||||
}
|
||||
|
||||
const identityServiceIdentifySpy = sinon.spy(libp2p.identifyService, 'identify')
|
||||
const peerStoreSpyConsumeRecord = sinon.spy(libp2p.peerStore.addressBook, 'consumePeerRecord')
|
||||
const peerStoreSpyAdd = sinon.spy(libp2p.peerStore.addressBook, 'add')
|
||||
|
||||
const connection = await libp2p.dial(remoteAddr)
|
||||
expect(connection).to.exist()
|
||||
|
||||
// Wait for peer store to be updated
|
||||
// Dialer._createDialTarget (add), Identify (consume)
|
||||
await pWaitFor(() => peerStoreSpyConsumeRecord.callCount === 1 && peerStoreSpyAdd.callCount === 1)
|
||||
expect(identityServiceIdentifySpy.callCount).to.equal(1)
|
||||
|
||||
// The connection should have no open streams
|
||||
await pWaitFor(() => connection.streams.length === 0)
|
||||
await connection.close()
|
||||
|
||||
const remotePeer = peerIdFromString(remoteAddr.getPeerId() ?? '')
|
||||
|
||||
const storedAgentVersion = await libp2p.peerStore.metadataBook.getValue(remotePeer, 'AgentVersion')
|
||||
const storedProtocolVersion = await libp2p.peerStore.metadataBook.getValue(remotePeer, 'ProtocolVersion')
|
||||
|
||||
expect(storedAgentVersion).to.exist()
|
||||
expect(storedProtocolVersion).to.exist()
|
||||
})
|
||||
|
||||
it('should push protocol updates to an already connected peer', async () => {
|
||||
libp2p = await createLibp2pNode(createBaseOptions({
|
||||
peerId
|
||||
}))
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
if (libp2p.identifyService == null) {
|
||||
throw new Error('Identity service was not configured')
|
||||
}
|
||||
|
||||
const identityServiceIdentifySpy = sinon.spy(libp2p.identifyService, 'identify')
|
||||
const identityServicePushSpy = sinon.spy(libp2p.identifyService, 'push')
|
||||
const connectionPromise = pEvent(libp2p.connectionManager, 'peer:connect')
|
||||
const connection = await libp2p.dial(remoteAddr)
|
||||
|
||||
expect(connection).to.exist()
|
||||
// Wait for connection event to be emitted
|
||||
await connectionPromise
|
||||
|
||||
// Wait for identify to finish
|
||||
await identityServiceIdentifySpy.firstCall.returnValue
|
||||
sinon.stub(libp2p, 'isStarted').returns(true)
|
||||
|
||||
await libp2p.handle('/echo/2.0.0', () => {})
|
||||
await libp2p.unhandle('/echo/2.0.0')
|
||||
|
||||
// the protocol change event listener in the identity service is async
|
||||
await pWaitFor(() => identityServicePushSpy.callCount === 2)
|
||||
|
||||
// Verify the remote peer is notified of both changes
|
||||
expect(identityServicePushSpy.callCount).to.equal(2)
|
||||
|
||||
for (const call of identityServicePushSpy.getCalls()) {
|
||||
const [connections] = call.args
|
||||
expect(connections.length).to.equal(1)
|
||||
expect(connections[0].remotePeer.toString()).to.equal(remoteAddr.getPeerId())
|
||||
await call.returnValue
|
||||
}
|
||||
|
||||
// Verify the streams close
|
||||
await pWaitFor(() => connection.streams.length === 0)
|
||||
})
|
||||
|
||||
it('should store host data and protocol version into metadataBook', async () => {
|
||||
const agentVersion = 'js-project/1.0.0'
|
||||
|
||||
libp2p = await createLibp2pNode(createBaseOptions({
|
||||
peerId,
|
||||
identify: {
|
||||
host: {
|
||||
agentVersion
|
||||
}
|
||||
}
|
||||
}))
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
if (libp2p.identifyService == null) {
|
||||
throw new Error('Identity service was not configured')
|
||||
}
|
||||
|
||||
const storedAgentVersion = await libp2p.peerStore.metadataBook.getValue(peerId, 'AgentVersion')
|
||||
const storedProtocolVersion = await libp2p.peerStore.metadataBook.getValue(peerId, 'ProtocolVersion')
|
||||
|
||||
expect(agentVersion).to.equal(uint8ArrayToString(storedAgentVersion ?? new Uint8Array()))
|
||||
expect(storedProtocolVersion).to.exist()
|
||||
})
|
||||
|
||||
it('should push multiaddr updates to an already connected peer', async () => {
|
||||
libp2p = await createLibp2pNode(createBaseOptions({
|
||||
peerId
|
||||
}))
|
||||
|
||||
await libp2p.start()
|
||||
|
||||
if (libp2p.identifyService == null) {
|
||||
throw new Error('Identity service was not configured')
|
||||
}
|
||||
|
||||
const identityServiceIdentifySpy = sinon.spy(libp2p.identifyService, 'identify')
|
||||
const identityServicePushSpy = sinon.spy(libp2p.identifyService, 'push')
|
||||
const connectionPromise = pEvent(libp2p.connectionManager, 'peer:connect')
|
||||
const connection = await libp2p.dial(remoteAddr)
|
||||
|
||||
expect(connection).to.exist()
|
||||
// Wait for connection event to be emitted
|
||||
await connectionPromise
|
||||
|
||||
// Wait for identify to finish
|
||||
await identityServiceIdentifySpy.firstCall.returnValue
|
||||
sinon.stub(libp2p, 'isStarted').returns(true)
|
||||
|
||||
await libp2p.peerStore.addressBook.add(libp2p.peerId, [new Multiaddr('/ip4/180.0.0.1/tcp/15001/ws')])
|
||||
|
||||
// the protocol change event listener in the identity service is async
|
||||
await pWaitFor(() => identityServicePushSpy.callCount === 1)
|
||||
|
||||
// Verify the remote peer is notified of change
|
||||
expect(identityServicePushSpy.callCount).to.equal(1)
|
||||
for (const call of identityServicePushSpy.getCalls()) {
|
||||
const [connections] = call.args
|
||||
expect(connections.length).to.equal(1)
|
||||
expect(connections[0].remotePeer.toString()).to.equal(remoteAddr.getPeerId())
|
||||
await call.returnValue
|
||||
}
|
||||
|
||||
// Verify the streams close
|
||||
await pWaitFor(() => connection.streams.length === 0)
|
||||
})
|
||||
})

test/ping/index.spec.ts (new file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
/* eslint-env mocha */
|
||||
|
||||
import { expect } from 'aegir/chai'
|
||||
import sinon from 'sinon'
|
||||
import { PingService } from '../../src/ping/index.js'
|
||||
import Peers from '../fixtures/peers.js'
|
||||
import { mockRegistrar, mockUpgrader, connectionPair } from '@libp2p/interface-compliance-tests/mocks'
|
||||
import { createFromJSON } from '@libp2p/peer-id-factory'
|
||||
import { Components } from '@libp2p/interfaces/components'
|
||||
import { DefaultConnectionManager } from '../../src/connection-manager/index.js'
|
||||
import { start, stop } from '@libp2p/interfaces/startable'
|
||||
import { CustomEvent } from '@libp2p/interfaces/events'
|
||||
import { TimeoutController } from 'timeout-abort-controller'
|
||||
import delay from 'delay'
|
||||
import { pipe } from 'it-pipe'
|
||||
|
||||
const defaultInit = {
|
||||
protocolPrefix: 'ipfs'
|
||||
}
|
||||
|
||||
async function createComponents (index: number) {
|
||||
const peerId = await createFromJSON(Peers[index])
|
||||
|
||||
const components = new Components({
|
||||
peerId,
|
||||
registrar: mockRegistrar(),
|
||||
upgrader: mockUpgrader(),
|
||||
connectionManager: new DefaultConnectionManager({
|
||||
minConnections: 50,
|
||||
maxConnections: 1000,
|
||||
autoDialInterval: 1000
|
||||
})
|
||||
})
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
describe('ping', () => {
|
||||
let localComponents: Components
|
||||
let remoteComponents: Components
|
||||
|
||||
beforeEach(async () => {
|
||||
localComponents = await createComponents(0)
|
||||
remoteComponents = await createComponents(1)
|
||||
|
||||
await Promise.all([
|
||||
start(localComponents),
|
||||
start(remoteComponents)
|
||||
])
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
sinon.restore()
|
||||
|
||||
await Promise.all([
|
||||
stop(localComponents),
|
||||
stop(remoteComponents)
|
||||
])
|
||||
})
|
||||
|
||||
it('should be able to ping another peer', async () => {
|
||||
const localPing = new PingService(localComponents, defaultInit)
|
||||
const remotePing = new PingService(remoteComponents, defaultInit)
|
||||
|
||||
await start(localPing)
|
||||
await start(remotePing)
|
||||
|
||||
// simulate connection between nodes
|
||||
const [localToRemote, remoteToLocal] = connectionPair(localComponents, remoteComponents)
|
||||
localComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', { detail: localToRemote }))
|
||||
remoteComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', { detail: remoteToLocal }))
|
||||
|
||||
// Run ping
|
||||
await expect(localPing.ping(remoteComponents.getPeerId())).to.eventually.be.gte(0)
|
||||
})
|
||||
|
||||
it('should time out pinging another peer when waiting for a pong', async () => {
|
||||
const localPing = new PingService(localComponents, defaultInit)
|
||||
const remotePing = new PingService(remoteComponents, defaultInit)
|
||||
|
||||
await start(localPing)
|
||||
await start(remotePing)
|
||||
|
||||
// simulate connection between nodes
|
||||
const [localToRemote, remoteToLocal] = connectionPair(localComponents, remoteComponents)
|
||||
localComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', { detail: localToRemote }))
|
||||
remoteComponents.getUpgrader().dispatchEvent(new CustomEvent('connection', { detail: remoteToLocal }))
|
||||
|
||||
// replace existing handler with a really slow one
|
||||
await remoteComponents.getRegistrar().unhandle(remotePing.protocol)
|
||||
await remoteComponents.getRegistrar().handle(remotePing.protocol, ({ stream }) => {
|
||||
void pipe(
|
||||
stream,
|
||||
async function * (source) {
|
||||
for await (const chunk of source) {
|
||||
// longer than the timeout
|
||||
await delay(1000)
|
||||
|
||||
yield chunk
|
||||
}
|
||||
},
|
||||
stream
|
||||
)
|
||||
})
|
||||
|
||||
const newStreamSpy = sinon.spy(localToRemote, 'newStream')
|
||||
|
||||
// 10 ms timeout
|
||||
const timeoutController = new TimeoutController(10)
|
||||
|
||||
// Run ping, should time out
|
||||
await expect(localPing.ping(remoteComponents.getPeerId(), {
|
||||
signal: timeoutController.signal
|
||||
}))
|
||||
.to.eventually.be.rejected.with.property('code', 'ABORT_ERR')
|
||||
|
||||
// should have closed stream
|
||||
expect(newStreamSpy).to.have.property('callCount', 1)
|
||||
const { stream } = await newStreamSpy.getCall(0).returnValue
|
||||
expect(stream).to.have.nested.property('timeline.close')
|
||||
})
|
||||
})
|
@@ -14,7 +14,7 @@ import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string'
import swarmKey from '../fixtures/swarm.key.js'
import { DefaultUpgrader } from '../../src/upgrader.js'
import { codes } from '../../src/errors.js'
import { mockConnectionGater, mockMultiaddrConnPair, mockRegistrar } from '@libp2p/interface-compliance-tests/mocks'
import { mockConnectionGater, mockMultiaddrConnPair, mockRegistrar, mockStream } from '@libp2p/interface-compliance-tests/mocks'
import Peers from '../fixtures/peers.js'
import type { Upgrader } from '@libp2p/interfaces/transport'
import type { PeerId } from '@libp2p/interfaces/peer-id'
@@ -27,6 +27,9 @@ import type { Stream } from '@libp2p/interfaces/connection'
import pDefer from 'p-defer'
import { createLibp2pNode, Libp2pNode } from '../../src/libp2p.js'
import { pEvent } from 'p-event'
import { TimeoutController } from 'timeout-abort-controller'
import delay from 'delay'
import drain from 'it-drain'

const addrs = [
new Multiaddr('/ip4/127.0.0.1/tcp/0'),
@@ -35,6 +38,7 @@ const addrs = [

describe('Upgrader', () => {
let localUpgrader: Upgrader
let localMuxerFactory: StreamMuxerFactory
let remoteUpgrader: Upgrader
let localPeer: PeerId
let remotePeer: PeerId
@@ -55,12 +59,13 @@ describe('Upgrader', () => {
connectionGater: mockConnectionGater(),
registrar: mockRegistrar()
})
localMuxerFactory = new Mplex()
localUpgrader = new DefaultUpgrader(localComponents, {
connectionEncryption: [
new Plaintext()
],
muxers: [
new Mplex()
localMuxerFactory
]
})

@@ -366,6 +371,40 @@ describe('Upgrader', () => {
expect(result).to.have.nested.property('reason.code', codes.ERR_UNSUPPORTED_PROTOCOL)
})
})

it('should abort protocol selection for slow streams', async () => {
const createStreamMuxerSpy = sinon.spy(localMuxerFactory, 'createStreamMuxer')
const { inbound, outbound } = mockMultiaddrConnPair({ addrs, remotePeer })

const connections = await Promise.all([
localUpgrader.upgradeOutbound(outbound),
remoteUpgrader.upgradeInbound(inbound)
])

// 10 ms timeout
const timeoutController = new TimeoutController(10)

// should have created muxer for connection
expect(createStreamMuxerSpy).to.have.property('callCount', 1)

// create mock muxed stream that never sends data
const muxer = createStreamMuxerSpy.getCall(0).returnValue
muxer.newStream = () => {
return mockStream({
source: (async function * () {
// longer than the timeout
await delay(1000)
yield new Uint8Array()
}()),
sink: drain
})
}

await expect(connections[0].newStream('/echo/1.0.0', {
signal: timeoutController.signal
}))
.to.eventually.be.rejected.with.property('code', 'ABORT_ERR')
})
})

describe('libp2p.upgrader', () => {