diff --git a/core/src/swarm.rs b/core/src/swarm.rs index d00ad79d..145fd056 100644 --- a/core/src/swarm.rs +++ b/core/src/swarm.rs @@ -56,17 +56,11 @@ where TTransport: Transport, /// if we're not connected to them. topology: TTopology, - /// Public key of the local node. - local_public_key: PublicKey, - /// List of protocols that the behaviour says it supports. supported_protocols: SmallVec<[Vec; 16]>, /// List of multiaddresses we're listening on. listened_addrs: SmallVec<[Multiaddr; 8]>, - - /// List of multiaddresses we're listening on after NAT traversal. - external_addresses: SmallVec<[Multiaddr; 8]>, } impl Deref for Swarm @@ -121,7 +115,7 @@ where TBehaviour: NetworkBehaviour, { /// Builds a new `Swarm`. #[inline] - pub fn new(transport: TTransport, mut behaviour: TBehaviour, topology: TTopology, local_public_key: PublicKey) -> Self { + pub fn new(transport: TTransport, mut behaviour: TBehaviour, topology: TTopology) -> Self { let supported_protocols = behaviour .new_handler() .listen_protocol() @@ -130,17 +124,14 @@ where TBehaviour: NetworkBehaviour, .map(|info| info.protocol_name().to_vec()) .collect(); - let local_peer_id = local_public_key.clone().into_peer_id(); - let raw_swarm = RawSwarm::new(transport, local_peer_id.clone()); + let raw_swarm = RawSwarm::new(transport, topology.local_peer_id().clone()); Swarm { raw_swarm, behaviour, topology, - local_public_key, supported_protocols, listened_addrs: SmallVec::new(), - external_addresses: SmallVec::new(), } } @@ -278,10 +269,7 @@ where TBehaviour: NetworkBehaviour, topology: &mut self.topology, supported_protocols: &self.supported_protocols, listened_addrs: &self.listened_addrs, - external_addresses: &self.external_addresses, nat_traversal: &move |a, b| transport.nat_traversal(a, b), - local_public_key: &self.local_public_key, - local_peer_id: &self.raw_swarm.local_peer_id(), }; self.behaviour.poll(&mut parameters) }; @@ -304,11 +292,7 @@ where TBehaviour: NetworkBehaviour, } }, 
Async::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => { - for addr in self.raw_swarm.nat_traversal(&address) { - // TODO: is it a good idea to add these addresses permanently? what about - // a TTL instead? - self.external_addresses.push(addr); - } + self.topology.add_local_external_addrs(self.raw_swarm.nat_traversal(&address)); }, } } @@ -358,10 +342,7 @@ pub struct PollParameters<'a, TTopology: 'a> { topology: &'a mut TTopology, supported_protocols: &'a [Vec], listened_addrs: &'a [Multiaddr], - external_addresses: &'a [Multiaddr], nat_traversal: &'a dyn Fn(&Multiaddr, &Multiaddr) -> Option, - local_public_key: &'a PublicKey, - local_peer_id: &'a PeerId, } impl<'a, TTopology> PollParameters<'a, TTopology> { @@ -382,30 +363,35 @@ impl<'a, TTopology> PollParameters<'a, TTopology> { self.supported_protocols.iter().map(AsRef::as_ref) } - /// Returns the list of the addresses we're listening on + /// Returns the list of the addresses we're listening on. #[inline] pub fn listened_addresses(&self) -> impl ExactSizeIterator { self.listened_addrs.iter() } - /// Returns the list of the addresses we're listening on, after accounting for NAT traversal. - /// - /// This corresponds to the elements produced with `ReportObservedAddr`. + /// Returns the list of the addresses nodes can use to reach us. #[inline] - pub fn external_addresses(&self) -> impl ExactSizeIterator { - self.external_addresses.iter() + pub fn external_addresses<'b>(&'b mut self) -> impl ExactSizeIterator + 'b + where TTopology: Topology + { + let local_peer_id = self.topology.local_peer_id().clone(); + self.topology.addresses_of_peer(&local_peer_id).into_iter() } /// Returns the public key of the local node. #[inline] - pub fn local_public_key(&self) -> &PublicKey { - self.local_public_key + pub fn local_public_key(&self) -> &PublicKey + where TTopology: Topology + { + self.topology.local_public_key() } /// Returns the peer id of the local node. 
#[inline] - pub fn local_peer_id(&self) -> &PeerId { - self.local_peer_id + pub fn local_peer_id(&self) -> &PeerId + where TTopology: Topology + { + self.topology.local_peer_id() } /// Calls the `nat_traversal` method on the underlying transport of the `Swarm`. diff --git a/core/src/topology/mod.rs b/core/src/topology/mod.rs index a360a5ef..2d8a3415 100644 --- a/core/src/topology/mod.rs +++ b/core/src/topology/mod.rs @@ -19,25 +19,49 @@ // DEALINGS IN THE SOFTWARE. use std::collections::HashMap; -use {Multiaddr, PeerId}; +use {Multiaddr, PeerId, PublicKey}; /// Storage for the network topology. +/// +/// The topology should also store information about the local node, including its public key, its +/// `PeerId`, and the addresses it's advertising. pub trait Topology { /// Returns the addresses to try use to reach the given peer. + /// + /// > **Note**: Keep in mind that `peer` can be the local node. fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec; + + /// Adds an address that other nodes can use to connect to our local node. + /// + /// > **Note**: Should later be returned when calling `addresses_of_peer()` with the `PeerId` + /// > of the local node. + fn add_local_external_addrs(&mut self, addrs: TIter) + where TIter: Iterator; + + /// Returns the `PeerId` of the local node. + fn local_peer_id(&self) -> &PeerId; + + /// Returns the public key of the local node. + fn local_public_key(&self) -> &PublicKey; } /// Topology of the network stored in memory. pub struct MemoryTopology { list: HashMap>, + local_peer_id: PeerId, + local_public_key: PublicKey, } impl MemoryTopology { /// Creates an empty topology. 
#[inline] - pub fn empty() -> MemoryTopology { + pub fn empty(pubkey: PublicKey) -> MemoryTopology { + let local_peer_id = pubkey.clone().into_peer_id(); + MemoryTopology { - list: Default::default() + list: Default::default(), + local_peer_id, + local_public_key: pubkey, } } @@ -69,15 +93,27 @@ impl MemoryTopology { } } -impl Default for MemoryTopology { - #[inline] - fn default() -> MemoryTopology { - MemoryTopology::empty() - } -} - impl Topology for MemoryTopology { fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec { self.list.get(peer).map(|v| v.clone()).unwrap_or(Vec::new()) } + + fn add_local_external_addrs(&mut self, addrs: TIter) + where TIter: Iterator + { + for addr in addrs { + let id = self.local_peer_id.clone(); + self.add_address(id, addr); + } + } + + #[inline] + fn local_peer_id(&self) -> &PeerId { + &self.local_peer_id + } + + #[inline] + fn local_public_key(&self) -> &PublicKey { + &self.local_public_key + } } diff --git a/examples/chat.rs b/examples/chat.rs index 5c2cfae2..340b76bb 100644 --- a/examples/chat.rs +++ b/examples/chat.rs @@ -109,7 +109,7 @@ fn main() { }; behaviour.floodsub.subscribe(floodsub_topic.clone()); - libp2p::Swarm::new(transport, behaviour, libp2p::core::topology::MemoryTopology::empty(), local_pub_key) + libp2p::Swarm::new(transport, behaviour, libp2p::core::topology::MemoryTopology::empty(local_pub_key)) }; // Listen on all interfaces and whatever port the OS assigns diff --git a/examples/ipfs-kad.rs b/examples/ipfs-kad.rs index c370f007..055e52f7 100644 --- a/examples/ipfs-kad.rs +++ b/examples/ipfs-kad.rs @@ -52,7 +52,7 @@ fn main() { }); // Create the topology of the network with the IPFS bootstrap nodes. 
- let mut topology = libp2p::core::topology::MemoryTopology::empty(); + let mut topology = libp2p::core::topology::MemoryTopology::empty(local_pub_key.clone()); topology.add_address("QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ".parse().unwrap(), "/ip4/104.131.131.82/tcp/4001".parse().unwrap()); topology.add_address("QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM".parse().unwrap(), "/ip4/104.236.179.241/tcp/4001".parse().unwrap()); topology.add_address("QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64".parse().unwrap(), "/ip4/104.236.76.40/tcp/4001".parse().unwrap()); @@ -70,8 +70,8 @@ fn main() { // to insert our local node in the DHT. However here we use `without_init` because this // example is very ephemeral and we don't want to pollute the DHT. In a real world // application, you want to use `new` instead. - let mut behaviour = libp2p::kad::Kademlia::without_init(local_pub_key.clone().into_peer_id()); - libp2p::core::Swarm::new(transport, behaviour, topology, local_pub_key) + let mut behaviour = libp2p::kad::Kademlia::without_init(local_pub_key.into_peer_id()); + libp2p::core::Swarm::new(transport, behaviour, topology) }; // Order Kademlia to search for a peer. 
diff --git a/misc/mdns/src/behaviour.rs b/misc/mdns/src/behaviour.rs index 5fee8652..3098d79b 100644 --- a/misc/mdns/src/behaviour.rs +++ b/misc/mdns/src/behaviour.rs @@ -22,7 +22,7 @@ use crate::service::{MdnsService, MdnsPacket}; use futures::prelude::*; use libp2p_core::protocols_handler::{DummyProtocolsHandler, ProtocolsHandler}; use libp2p_core::swarm::{ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use libp2p_core::{Multiaddr, PeerId, multiaddr::Protocol, topology::MemoryTopology}; +use libp2p_core::{Multiaddr, PeerId, multiaddr::Protocol, topology::MemoryTopology, topology::Topology}; use smallvec::SmallVec; use std::{fmt, io, iter, marker::PhantomData, time::Duration}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -55,7 +55,7 @@ impl Mdns { } /// Trait that must be implemented on the network topology for it to be usable with `Mdns`. -pub trait MdnsTopology { +pub trait MdnsTopology: Topology { /// Adds an address discovered by mDNS. /// /// Will never be called with the local peer ID. 
diff --git a/protocols/identify/src/identify.rs b/protocols/identify/src/identify.rs index 17176ae5..b1737c0c 100644 --- a/protocols/identify/src/identify.rs +++ b/protocols/identify/src/identify.rs @@ -25,7 +25,7 @@ use crate::topology::IdentifyTopology; use futures::prelude::*; use libp2p_core::protocols_handler::{ProtocolsHandler, ProtocolsHandlerSelect}; use libp2p_core::swarm::{ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use libp2p_core::{Multiaddr, PeerId, either::EitherOutput}; +use libp2p_core::{Multiaddr, PeerId, either::EitherOutput, topology::Topology}; use smallvec::SmallVec; use std::{collections::HashMap, collections::VecDeque, io}; use tokio_io::{AsyncRead, AsyncWrite}; diff --git a/protocols/identify/src/topology.rs b/protocols/identify/src/topology.rs index 69baf87b..6215dab0 100644 --- a/protocols/identify/src/topology.rs +++ b/protocols/identify/src/topology.rs @@ -18,11 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use libp2p_core::{topology::MemoryTopology, Multiaddr, PeerId}; +use libp2p_core::{Multiaddr, PeerId}; +use libp2p_core::topology::{MemoryTopology, Topology}; /// Trait required on the topology for the identify system to store addresses. -pub trait IdentifyTopology { +pub trait IdentifyTopology: Topology { /// Adds to the topology an address discovered through identification. + /// + /// > **Note**: Will never be called with the local peer ID. fn add_identify_discovered_addrs(&mut self, peer: &PeerId, addr: TIter) where TIter: Iterator; diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 9972ce69..1609a330 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -161,10 +161,10 @@ impl Kademlia { } /// Builds a `KadPeer` structure corresponding to the local node. 
- fn build_local_kad_peer<'a>(&self, local_addrs: impl IntoIterator) -> KadPeer { + fn build_local_kad_peer(&self, local_addrs: impl IntoIterator) -> KadPeer { KadPeer { node_id: self.local_peer_id.clone(), - multiaddrs: local_addrs.into_iter().cloned().collect(), + multiaddrs: local_addrs.into_iter().collect(), connection_ty: KadConnectionType::Connected, } } diff --git a/protocols/kad/src/topology.rs b/protocols/kad/src/topology.rs index ad6ce7d3..9298b9e3 100644 --- a/protocols/kad/src/topology.rs +++ b/protocols/kad/src/topology.rs @@ -33,6 +33,8 @@ pub trait KademliaTopology: Topology { type GetProvidersIter: Iterator; /// Adds an address discovered through Kademlia to the topology. + /// + /// > **Note**: Keep in mind that `peer` can be the local peer. fn add_kad_discovered_address(&mut self, peer: PeerId, addr: Multiaddr, connection_ty: KadConnectionType); @@ -40,6 +42,8 @@ pub trait KademliaTopology: Topology { /// /// The `max` parameter is the maximum number of results that we are going to use. If more /// than `max` elements are returned, they will be ignored. + /// + /// > **Note**: The results should include the local node. fn closest_peers(&mut self, target: &Multihash, max: usize) -> Self::ClosestPeersIter; /// Registers the given peer as provider of the resource with the given ID. @@ -51,6 +55,8 @@ pub trait KademliaTopology: Topology { fn add_provider(&mut self, key: Multihash, peer_id: PeerId); /// Returns the list of providers that have been registered with `add_provider`. + /// + /// If the local node is a provider for `key`, our local peer ID should also be returned.
fn get_providers(&mut self, key: &Multihash) -> Self::GetProvidersIter; } @@ -60,7 +66,9 @@ impl KademliaTopology for MemoryTopology { type GetProvidersIter = vec::IntoIter; fn add_kad_discovered_address(&mut self, peer: PeerId, addr: Multiaddr, _: KadConnectionType) { - self.add_address(peer, addr) + if &peer != self.local_peer_id() { + self.add_address(peer, addr) + } } fn closest_peers(&mut self, target: &Multihash, _: usize) -> Self::ClosestPeersIter {