Store information about the local node in the topology (#772)

* Store information about the local node in the topology

* Fix build

* Store the external addresses in the topology
Pierre Krieger
2018-12-13 19:06:13 +01:00
committed by GitHub
parent e2ff74994c
commit 40a503fd63
9 changed files with 87 additions and 54 deletions
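The user-facing change is that the local public key now lives in the topology rather than in the `Swarm`. A minimal sketch of the new construction flow, based on the example diffs below; `transport` and `behaviour` are assumed to be built the same way as before, and the `secio` key helper is the one the examples in this repository use:

```rust
// Generate the local identity (helper used by the examples; treat as an assumption).
let local_key = libp2p::secio::SecioKeyPair::ed25519_generated().unwrap();
let local_pub_key = local_key.to_public_key();

// The topology now owns the local public key and derives the local `PeerId` from it.
let topology = libp2p::core::topology::MemoryTopology::empty(local_pub_key);

// `Swarm::new` no longer takes the public key as a separate argument.
// Before this commit: Swarm::new(transport, behaviour, topology, local_pub_key)
let swarm = libp2p::Swarm::new(transport, behaviour, topology);
```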

View File

@@ -56,17 +56,11 @@ where TTransport: Transport,
/// if we're not connected to them.
topology: TTopology,
/// Public key of the local node.
local_public_key: PublicKey,
/// List of protocols that the behaviour says it supports.
supported_protocols: SmallVec<[Vec<u8>; 16]>,
/// List of multiaddresses we're listening on.
listened_addrs: SmallVec<[Multiaddr; 8]>,
/// List of multiaddresses we're listening on after NAT traversal.
external_addresses: SmallVec<[Multiaddr; 8]>,
}
impl<TTransport, TBehaviour, TTopology> Deref for Swarm<TTransport, TBehaviour, TTopology>
@@ -121,7 +115,7 @@ where TBehaviour: NetworkBehaviour<TTopology>,
{
/// Builds a new `Swarm`.
#[inline]
pub fn new(transport: TTransport, mut behaviour: TBehaviour, topology: TTopology, local_public_key: PublicKey) -> Self {
pub fn new(transport: TTransport, mut behaviour: TBehaviour, topology: TTopology) -> Self {
let supported_protocols = behaviour
.new_handler()
.listen_protocol()
@@ -130,17 +124,14 @@ where TBehaviour: NetworkBehaviour<TTopology>,
.map(|info| info.protocol_name().to_vec())
.collect();
let local_peer_id = local_public_key.clone().into_peer_id();
let raw_swarm = RawSwarm::new(transport, local_peer_id.clone());
let raw_swarm = RawSwarm::new(transport, topology.local_peer_id().clone());
Swarm {
raw_swarm,
behaviour,
topology,
local_public_key,
supported_protocols,
listened_addrs: SmallVec::new(),
external_addresses: SmallVec::new(),
}
}
@@ -278,10 +269,7 @@ where TBehaviour: NetworkBehaviour<TTopology>,
topology: &mut self.topology,
supported_protocols: &self.supported_protocols,
listened_addrs: &self.listened_addrs,
external_addresses: &self.external_addresses,
nat_traversal: &move |a, b| transport.nat_traversal(a, b),
local_public_key: &self.local_public_key,
local_peer_id: &self.raw_swarm.local_peer_id(),
};
self.behaviour.poll(&mut parameters)
};
@@ -304,11 +292,7 @@ where TBehaviour: NetworkBehaviour<TTopology>,
}
},
Async::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => {
for addr in self.raw_swarm.nat_traversal(&address) {
// TODO: is it a good idea to add these addresses permanently? what about
// a TTL instead?
self.external_addresses.push(addr);
}
self.topology.add_local_external_addrs(self.raw_swarm.nat_traversal(&address));
},
}
}
@@ -358,10 +342,7 @@ pub struct PollParameters<'a, TTopology: 'a> {
topology: &'a mut TTopology,
supported_protocols: &'a [Vec<u8>],
listened_addrs: &'a [Multiaddr],
external_addresses: &'a [Multiaddr],
nat_traversal: &'a dyn Fn(&Multiaddr, &Multiaddr) -> Option<Multiaddr>,
local_public_key: &'a PublicKey,
local_peer_id: &'a PeerId,
}
impl<'a, TTopology> PollParameters<'a, TTopology> {
@@ -382,30 +363,35 @@ impl<'a, TTopology> PollParameters<'a, TTopology> {
self.supported_protocols.iter().map(AsRef::as_ref)
}
/// Returns the list of the addresses we're listening on
/// Returns the list of the addresses we're listening on.
#[inline]
pub fn listened_addresses(&self) -> impl ExactSizeIterator<Item = &Multiaddr> {
self.listened_addrs.iter()
}
/// Returns the list of the addresses we're listening on, after accounting for NAT traversal.
///
/// This corresponds to the elements produced with `ReportObservedAddr`.
/// Returns the list of the addresses nodes can use to reach us.
#[inline]
pub fn external_addresses(&self) -> impl ExactSizeIterator<Item = &Multiaddr> {
self.external_addresses.iter()
pub fn external_addresses<'b>(&'b mut self) -> impl ExactSizeIterator<Item = Multiaddr> + 'b
where TTopology: Topology
{
let local_peer_id = self.topology.local_peer_id().clone();
self.topology.addresses_of_peer(&local_peer_id).into_iter()
}
/// Returns the public key of the local node.
#[inline]
pub fn local_public_key(&self) -> &PublicKey {
self.local_public_key
pub fn local_public_key(&self) -> &PublicKey
where TTopology: Topology
{
self.topology.local_public_key()
}
/// Returns the peer id of the local node.
#[inline]
pub fn local_peer_id(&self) -> &PeerId {
self.local_peer_id
pub fn local_peer_id(&self) -> &PeerId
where TTopology: Topology
{
self.topology.local_peer_id()
}
/// Calls the `nat_traversal` method on the underlying transport of the `Swarm`.
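With the fields above removed, `PollParameters` answers local-node queries by delegating to the topology. A small sketch of how a `NetworkBehaviour::poll` implementation might read them; `params` stands for the `&mut PollParameters<TTopology>` argument (with `TTopology: Topology`), the variable names are illustrative, and the usual `libp2p_core` imports are assumed:

```rust
// Local identity now comes from the topology behind `PollParameters`.
let local_id: PeerId = params.local_peer_id().clone();
let local_key: PublicKey = params.local_public_key().clone();

// `external_addresses` now takes `&mut self` and yields owned `Multiaddr`s:
// it looks up the addresses stored under our own peer ID in the topology.
let reachable: Vec<Multiaddr> = params.external_addresses().collect();
```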

View File

@@ -19,25 +19,49 @@
// DEALINGS IN THE SOFTWARE.
use std::collections::HashMap;
use {Multiaddr, PeerId};
use {Multiaddr, PeerId, PublicKey};
/// Storage for the network topology.
///
/// The topology should also store information about the local node, including its public key, its
/// `PeerId`, and the addresses it's advertising.
pub trait Topology {
/// Returns the addresses to try use to reach the given peer.
///
/// > **Note**: Keep in mind that `peer` can be the local node.
fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec<Multiaddr>;
/// Adds an address that other nodes can use to connect to our local node.
///
/// > **Note**: Should later be returned when calling `addresses_of_peer()` with the `PeerId`
/// > of the local node.
fn add_local_external_addrs<TIter>(&mut self, addrs: TIter)
where TIter: Iterator<Item = Multiaddr>;
/// Returns the `PeerId` of the local node.
fn local_peer_id(&self) -> &PeerId;
/// Returns the public key of the local node.
fn local_public_key(&self) -> &PublicKey;
}
/// Topology of the network stored in memory.
pub struct MemoryTopology {
list: HashMap<PeerId, Vec<Multiaddr>>,
local_peer_id: PeerId,
local_public_key: PublicKey,
}
impl MemoryTopology {
/// Creates an empty topology.
#[inline]
pub fn empty() -> MemoryTopology {
pub fn empty(pubkey: PublicKey) -> MemoryTopology {
let local_peer_id = pubkey.clone().into_peer_id();
MemoryTopology {
list: Default::default()
list: Default::default(),
local_peer_id,
local_public_key: pubkey,
}
}
@@ -69,15 +93,27 @@ impl MemoryTopology {
}
}
impl Default for MemoryTopology {
#[inline]
fn default() -> MemoryTopology {
MemoryTopology::empty()
}
}
impl Topology for MemoryTopology {
fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec<Multiaddr> {
self.list.get(peer).map(|v| v.clone()).unwrap_or(Vec::new())
}
fn add_local_external_addrs<TIter>(&mut self, addrs: TIter)
where TIter: Iterator<Item = Multiaddr>
{
for addr in addrs {
let id = self.local_peer_id.clone();
self.add_address(id, addr);
}
}
#[inline]
fn local_peer_id(&self) -> &PeerId {
&self.local_peer_id
}
#[inline]
fn local_public_key(&self) -> &PublicKey {
&self.local_public_key
}
}
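Since `MemoryTopology` now stores the local identity, the round trip between `ReportObservedAddr` (fed in through `add_local_external_addrs`) and `addresses_of_peer` can be exercised directly. A hedged sketch; the key helper and the address literal are illustrative:

```rust
use libp2p::core::topology::{MemoryTopology, Topology};

// Illustrative local identity (same helper as in the examples; an assumption).
let pubkey = libp2p::secio::SecioKeyPair::ed25519_generated().unwrap().to_public_key();
let mut topology = MemoryTopology::empty(pubkey);

// Record an externally observed address for the local node.
let observed: libp2p::Multiaddr = "/ip4/203.0.113.5/tcp/30333".parse().unwrap();
topology.add_local_external_addrs(std::iter::once(observed.clone()));

// It is now returned when asking for the local peer's addresses.
let local = topology.local_peer_id().clone();
assert_eq!(topology.addresses_of_peer(&local), vec![observed]);
```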

View File

@@ -109,7 +109,7 @@ fn main() {
};
behaviour.floodsub.subscribe(floodsub_topic.clone());
libp2p::Swarm::new(transport, behaviour, libp2p::core::topology::MemoryTopology::empty(), local_pub_key)
libp2p::Swarm::new(transport, behaviour, libp2p::core::topology::MemoryTopology::empty(local_pub_key))
};
// Listen on all interfaces and whatever port the OS assigns

View File

@@ -52,7 +52,7 @@ fn main() {
});
// Create the topology of the network with the IPFS bootstrap nodes.
let mut topology = libp2p::core::topology::MemoryTopology::empty();
let mut topology = libp2p::core::topology::MemoryTopology::empty(local_pub_key.clone());
topology.add_address("QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ".parse().unwrap(), "/ip4/104.131.131.82/tcp/4001".parse().unwrap());
topology.add_address("QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM".parse().unwrap(), "/ip4/104.236.179.241/tcp/4001".parse().unwrap());
topology.add_address("QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64".parse().unwrap(), "/ip4/104.236.76.40/tcp/4001".parse().unwrap());
@@ -70,8 +70,8 @@ fn main() {
// to insert our local node in the DHT. However here we use `without_init` because this
// example is very ephemeral and we don't want to pollute the DHT. In a real world
// application, you want to use `new` instead.
let mut behaviour = libp2p::kad::Kademlia::without_init(local_pub_key.clone().into_peer_id());
libp2p::core::Swarm::new(transport, behaviour, topology, local_pub_key)
let mut behaviour = libp2p::kad::Kademlia::without_init(local_pub_key.into_peer_id());
libp2p::core::Swarm::new(transport, behaviour, topology)
};
// Order Kademlia to search for a peer.

View File

@@ -22,7 +22,7 @@ use crate::service::{MdnsService, MdnsPacket};
use futures::prelude::*;
use libp2p_core::protocols_handler::{DummyProtocolsHandler, ProtocolsHandler};
use libp2p_core::swarm::{ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters};
use libp2p_core::{Multiaddr, PeerId, multiaddr::Protocol, topology::MemoryTopology};
use libp2p_core::{Multiaddr, PeerId, multiaddr::Protocol, topology::MemoryTopology, topology::Topology};
use smallvec::SmallVec;
use std::{fmt, io, iter, marker::PhantomData, time::Duration};
use tokio_io::{AsyncRead, AsyncWrite};
@@ -55,7 +55,7 @@ impl<TSubstream> Mdns<TSubstream> {
}
/// Trait that must be implemented on the network topology for it to be usable with `Mdns`.
pub trait MdnsTopology {
pub trait MdnsTopology: Topology {
/// Adds an address discovered by mDNS.
///
/// Will never be called with the local peer ID.
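Because `MdnsTopology` (and, further down, `IdentifyTopology` and `KademliaTopology`) now require the base `Topology` trait, a custom topology has to expose the local-node accessors as well. A minimal sketch of such an implementation, modeled on the `MemoryTopology` changes above; the struct and field names are illustrative:

```rust
use std::collections::HashMap;
use libp2p_core::{Multiaddr, PeerId, PublicKey};
use libp2p_core::topology::Topology;

/// Illustrative custom topology backed by a plain map.
struct MyTopology {
    addrs: HashMap<PeerId, Vec<Multiaddr>>,
    local_peer_id: PeerId,
    local_public_key: PublicKey,
}

impl Topology for MyTopology {
    fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec<Multiaddr> {
        self.addrs.get(peer).cloned().unwrap_or_default()
    }

    fn add_local_external_addrs<TIter>(&mut self, addrs: TIter)
    where TIter: Iterator<Item = Multiaddr>
    {
        // File observed addresses under our own peer ID so that
        // `addresses_of_peer` returns them for the local node.
        let id = self.local_peer_id.clone();
        self.addrs.entry(id).or_insert_with(Vec::new).extend(addrs);
    }

    fn local_peer_id(&self) -> &PeerId {
        &self.local_peer_id
    }

    fn local_public_key(&self) -> &PublicKey {
        &self.local_public_key
    }
}
```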

View File

@@ -25,7 +25,7 @@ use crate::topology::IdentifyTopology;
use futures::prelude::*;
use libp2p_core::protocols_handler::{ProtocolsHandler, ProtocolsHandlerSelect};
use libp2p_core::swarm::{ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters};
use libp2p_core::{Multiaddr, PeerId, either::EitherOutput};
use libp2p_core::{Multiaddr, PeerId, either::EitherOutput, topology::Topology};
use smallvec::SmallVec;
use std::{collections::HashMap, collections::VecDeque, io};
use tokio_io::{AsyncRead, AsyncWrite};

View File

@@ -18,11 +18,14 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use libp2p_core::{topology::MemoryTopology, Multiaddr, PeerId};
use libp2p_core::{Multiaddr, PeerId};
use libp2p_core::topology::{MemoryTopology, Topology};
/// Trait required on the topology for the identify system to store addresses.
pub trait IdentifyTopology {
pub trait IdentifyTopology: Topology {
/// Adds to the topology an address discovered through identification.
///
/// > **Note**: Will never be called with the local peer ID.
fn add_identify_discovered_addrs<TIter>(&mut self, peer: &PeerId, addr: TIter)
where
TIter: Iterator<Item = Multiaddr>;

View File

@@ -161,10 +161,10 @@ impl<TSubstream> Kademlia<TSubstream> {
}
/// Builds a `KadPeer` structure corresponding to the local node.
fn build_local_kad_peer<'a>(&self, local_addrs: impl IntoIterator<Item = &'a Multiaddr>) -> KadPeer {
fn build_local_kad_peer(&self, local_addrs: impl IntoIterator<Item = Multiaddr>) -> KadPeer {
KadPeer {
node_id: self.local_peer_id.clone(),
multiaddrs: local_addrs.into_iter().cloned().collect(),
multiaddrs: local_addrs.into_iter().collect(),
connection_ty: KadConnectionType::Connected,
}
}

View File

@@ -33,6 +33,8 @@ pub trait KademliaTopology: Topology {
type GetProvidersIter: Iterator<Item = PeerId>;
/// Adds an address discovered through Kademlia to the topology.
///
/// > **Note**: Keep in mind that `peer` can be the local peer.
fn add_kad_discovered_address(&mut self, peer: PeerId, addr: Multiaddr,
connection_ty: KadConnectionType);
@@ -40,6 +42,8 @@ pub trait KademliaTopology: Topology {
///
/// The `max` parameter is the maximum number of results that we are going to use. If more
/// than `max` elements are returned, they will be ignored.
///
/// > **Note**: The results should include the local node.
fn closest_peers(&mut self, target: &Multihash, max: usize) -> Self::ClosestPeersIter;
/// Registers the given peer as provider of the resource with the given ID.
@@ -51,6 +55,8 @@ pub trait KademliaTopology: Topology {
fn add_provider(&mut self, key: Multihash, peer_id: PeerId);
/// Returns the list of providers that have been registered with `add_provider`.
///
/// If the local node is a provider for `key`, our local peer ID should also be returned.
fn get_providers(&mut self, key: &Multihash) -> Self::GetProvidersIter;
}
@@ -60,7 +66,9 @@ impl KademliaTopology for MemoryTopology {
type GetProvidersIter = vec::IntoIter<PeerId>;
fn add_kad_discovered_address(&mut self, peer: PeerId, addr: Multiaddr, _: KadConnectionType) {
self.add_address(peer, addr)
if &peer != self.local_peer_id() {
self.add_address(peer, addr)
}
}
fn closest_peers(&mut self, target: &Multihash, _: usize) -> Self::ClosestPeersIter {