// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

// collection of tests for the gossipsub network behaviour

#[cfg(test)]
mod tests {
    use super::super::*;

    // helper functions for testing

    // This function generates `peer_no` random `PeerId`s, subscribes the local node to `topics`,
    // and subscribes the injected peers to all topics if `to_subscribe` is set. All nodes are
    // considered gossipsub nodes. See the usage sketch after this function.
    fn build_and_inject_nodes(
        peer_no: usize,
        topics: Vec<String>,
        to_subscribe: bool,
    ) -> (Gossipsub, Vec<PeerId>, Vec<TopicHash>) {
        let keypair = libp2p_core::identity::Keypair::generate_secp256k1();
        // generate a default GossipsubConfig with signing
        let gs_config = GossipsubConfig::default();
        // create a gossipsub struct
        let mut gs: Gossipsub = Gossipsub::new(MessageAuthenticity::Signed(keypair), gs_config);

        let mut topic_hashes = vec![];

        // subscribe to the topics
        for t in topics {
            let topic = Topic::new(t);
            gs.subscribe(topic.clone());
            topic_hashes.push(topic.no_hash().clone());
        }

        // build and connect peer_no random peers
        let mut peers = vec![];

        for _ in 0..peer_no {
            let peer = PeerId::random();
            peers.push(peer.clone());
            <Gossipsub as NetworkBehaviour>::inject_connected(&mut gs, &peer);
            if to_subscribe {
                gs.handle_received_subscriptions(
                    &topic_hashes
                        .iter()
                        .cloned()
                        .map(|t| GossipsubSubscription {
                            action: GossipsubSubscriptionAction::Subscribe,
                            topic_hash: t,
                        })
                        .collect::<Vec<_>>(),
                    &peer,
                );
            }
        }

        (gs, peers, topic_hashes)
    }
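
    // Usage sketch (the topic name is illustrative): build a behaviour with ten connected
    // peers that are all subscribed to a single topic:
    //
    //     let (gs, peers, topic_hashes) =
    //         build_and_inject_nodes(10, vec![String::from("example")], true);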

    #[test]
    /// Test local node subscribing to a topic
    fn test_subscribe() {
        // The node should:
        // - Create an empty vector in mesh[topic]
        // - Send subscription request to all peers
        // - run JOIN(topic)

        let subscribe_topic = vec![String::from("test_subscribe")];
        let (gs, _, topic_hashes) = build_and_inject_nodes(20, subscribe_topic, true);

        assert!(
            gs.mesh.get(&topic_hashes[0]).is_some(),
            "Subscribe should add a new entry to the mesh[topic] hashmap"
        );

        // collect all the subscriptions
        let subscriptions =
            gs.events
                .iter()
                .fold(vec![], |mut collected_subscriptions, e| match e {
                    NetworkBehaviourAction::NotifyHandler { event, .. } => {
                        for s in &event.subscriptions {
                            match s.action {
                                GossipsubSubscriptionAction::Subscribe => {
                                    collected_subscriptions.push(s.clone())
                                }
                                _ => {}
                            };
                        }
                        collected_subscriptions
                    }
                    _ => collected_subscriptions,
                });

        // we sent a subscribe to all known peers
        assert!(
            subscriptions.len() == 20,
            "Should send a subscription to all known peers"
        );
    }

    #[test]
    /// Test unsubscribe.
    fn test_unsubscribe() {
        // Unsubscribe should:
        // - Remove the mesh entry for topic
        // - Send UNSUBSCRIBE to all known peers
        // - Call Leave

        let topic_strings = vec![String::from("topic1"), String::from("topic2")];
        let topics = topic_strings
            .iter()
            .map(|t| Topic::new(t.clone()))
            .collect::<Vec<Topic>>();

        // subscribe to topic_strings
        let (mut gs, _, topic_hashes) = build_and_inject_nodes(20, topic_strings, true);

        for topic_hash in &topic_hashes {
            assert!(
                gs.topic_peers.get(&topic_hash).is_some(),
                "topic_peers should contain a topic entry"
            );
            assert!(
                gs.mesh.get(&topic_hash).is_some(),
                "mesh should contain a topic entry"
            );
        }

        // unsubscribe from both topics
        assert!(
            gs.unsubscribe(topics[0].clone()),
            "should be able to unsubscribe successfully from each topic",
        );
        assert!(
            gs.unsubscribe(topics[1].clone()),
            "should be able to unsubscribe successfully from each topic",
        );

        let subscriptions =
            gs.events
                .iter()
                .fold(vec![], |mut collected_subscriptions, e| match e {
                    NetworkBehaviourAction::NotifyHandler { event, .. } => {
                        for s in &event.subscriptions {
                            match s.action {
                                GossipsubSubscriptionAction::Unsubscribe => {
                                    collected_subscriptions.push(s.clone())
                                }
                                _ => {}
                            };
                        }
                        collected_subscriptions
                    }
                    _ => collected_subscriptions,
                });

        // we sent an unsubscribe to all known peers, for two topics
        assert!(
            subscriptions.len() == 40,
            "Should send an unsubscribe event to all known peers"
        );

        // check we clean up internal structures
        for topic_hash in &topic_hashes {
            assert!(
                gs.mesh.get(&topic_hash).is_none(),
                "All topics should have been removed from the mesh"
            );
        }
    }

    #[test]
    /// Test JOIN(topic) functionality.
    fn test_join() {
        // The Join function should:
        // - Remove peers from fanout[topic]
        // - Add any fanout[topic] peers to the mesh (up to mesh_n)
        // - Fill up to mesh_n peers from known gossipsub peers in the topic
        // - Send GRAFT messages to all nodes added to the mesh

        // This test is not an isolated unit test; rather it uses the higher-level
        // subscribe/unsubscribe calls to exercise JOIN.
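
        // A rough sketch of JOIN in terms of the structures exercised below. This is
        // pseudocode reconstructed from the behaviour the test asserts, not a copy of
        // the implementation:
        //
        //     fn join(topic) {
        //         let mut mesh_peers = fanout.remove(topic).unwrap_or_default();
        //         if mesh_peers.len() < mesh_n {
        //             // top up from known peers subscribed to the topic
        //             mesh_peers.extend(get_random_peers(topic, mesh_n - mesh_peers.len()));
        //         }
        //         mesh.insert(topic, mesh_peers.clone());
        //         for peer in mesh_peers {
        //             control_pool.push(peer, Graft { topic });
        //         }
        //     }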

        let topic_strings = vec![String::from("topic1"), String::from("topic2")];
        let topics = topic_strings
            .iter()
            .map(|t| Topic::new(t.clone()))
            .collect::<Vec<Topic>>();

        let (mut gs, _, topic_hashes) = build_and_inject_nodes(20, topic_strings, true);

        // unsubscribe, then call join to invoke functionality
        assert!(
            gs.unsubscribe(topics[0].clone()),
            "should be able to unsubscribe successfully"
        );
        assert!(
            gs.unsubscribe(topics[1].clone()),
            "should be able to unsubscribe successfully"
        );

        // re-subscribe - there should be peers associated with the topic
        assert!(
            gs.subscribe(topics[0].clone()),
            "should be able to subscribe successfully"
        );

        // should have added mesh_n nodes to the mesh
        assert!(
            gs.mesh.get(&topic_hashes[0]).unwrap().len() == 6,
            "Should have added 6 nodes to the mesh"
        );

        fn collect_grafts(
            mut collected_grafts: Vec<GossipsubControlAction>,
            (_, controls): (&PeerId, &Vec<GossipsubControlAction>),
        ) -> Vec<GossipsubControlAction> {
            for c in controls.iter() {
                match c {
                    GossipsubControlAction::Graft { topic_hash: _ } => {
                        collected_grafts.push(c.clone())
                    }
                    _ => {}
                }
            }
            collected_grafts
        }

        // there should be mesh_n GRAFT messages.
        let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts);

        assert_eq!(
            graft_messages.len(),
            6,
            "There should be 6 GRAFT messages sent to peers"
        );

        // verify fanout nodes
        // add 3 random peers to the fanout[topic1]
        gs.fanout
            .insert(topic_hashes[1].clone(), Default::default());
        let mut new_peers: Vec<PeerId> = vec![];
        for _ in 0..3 {
            let fanout_peers = gs.fanout.get_mut(&topic_hashes[1]).unwrap();
            let new_peer = PeerId::random();
            fanout_peers.insert(new_peer.clone());
            new_peers.push(new_peer);
        }

        // subscribe to topic1
        gs.subscribe(topics[1].clone());

        // the three new peers should have been added, along with 3 more from the pool.
        assert!(
            gs.mesh.get(&topic_hashes[1]).unwrap().len() == 6,
            "Should have added 6 nodes to the mesh"
        );
        let mesh_peers = gs.mesh.get(&topic_hashes[1]).unwrap();
        for new_peer in new_peers {
            assert!(
                mesh_peers.contains(&new_peer),
                "Fanout peer should be included in the mesh"
            );
        }

        // there should now be 12 graft messages to be sent
        let graft_messages = gs.control_pool.iter().fold(vec![], collect_grafts);

        assert!(
            graft_messages.len() == 12,
            "There should be 12 GRAFT messages sent to peers"
        );
    }

    /// Test local node publish to subscribed topic
    #[test]
    fn test_publish() {
        // node should:
        // - Send publish message to all peers
        // - Insert message into gs.mcache and gs.received
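
        // Publish path sketch (reconstructed from the assertions below; the names are the
        // fields this test inspects, not the implementation's exact code): the message is
        // built and signed, cached so that later IWANT requests can be served, then
        // forwarded to the mesh peers for the topic:
        //
        //     let msg = build_signed_message(topic, data);
        //     let msg_id = (config.message_id_fn)(&msg);
        //     mcache.put(msg.clone());
        //     for peer in mesh[&topic_hash] {
        //         events.push(/* NotifyHandler carrying msg to peer */);
        //     }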

        let publish_topic = String::from("test_publish");
        let (mut gs, _, topic_hashes) =
            build_and_inject_nodes(20, vec![publish_topic.clone()], true);

        assert!(
            gs.mesh.get(&topic_hashes[0]).is_some(),
            "Subscribe should add a new entry to the mesh[topic] hashmap"
        );

        // all peers should be subscribed to the topic
        assert_eq!(
            gs.topic_peers.get(&topic_hashes[0]).map(|p| p.len()),
            Some(20),
            "Peers should be subscribed to the topic"
        );

        // publish on topic
        let publish_data = vec![0; 42];
        gs.publish(&Topic::new(publish_topic), publish_data)
            .unwrap();

        // Collect all publish messages
        let publishes = gs
            .events
            .iter()
            .fold(vec![], |mut collected_publish, e| match e {
                NetworkBehaviourAction::NotifyHandler { event, .. } => {
                    for s in &event.messages {
                        collected_publish.push(s.clone());
                    }
                    collected_publish
                }
                _ => collected_publish,
            });

        let msg_id =
            (gs.config.message_id_fn)(publishes.first().expect("Should contain > 0 entries"));

        let config = GossipsubConfig::default();
        assert_eq!(
            publishes.len(),
            config.mesh_n_low,
            "Should send a publish message to `mesh_n_low` peers"
        );

        assert!(
            gs.mcache.get(&msg_id).is_some(),
            "Message cache should contain published message"
        );
    }

    /// Test local node publish to unsubscribed topic
    #[test]
    fn test_fanout() {
        // node should:
        // - Populate fanout peers
        // - Send publish message to fanout peers
        // - Insert message into gs.mcache and gs.received
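
        // Background for this test: the fanout is gossipsub's peer set for topics we
        // publish to without being subscribed. On publish to such a topic, up to `mesh_n`
        // peers that are subscribed to it are placed in fanout[topic] and the message is
        // sent to them; unlike mesh peers they receive no GRAFT. (Summary of the behaviour
        // asserted below.)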

        let fanout_topic = String::from("test_fanout");
        let (mut gs, _, topic_hashes) =
            build_and_inject_nodes(20, vec![fanout_topic.clone()], true);

        assert!(
            gs.mesh.get(&topic_hashes[0]).is_some(),
            "Subscribe should add a new entry to the mesh[topic] hashmap"
        );
        // Unsubscribe from topic
        assert!(
            gs.unsubscribe(Topic::new(fanout_topic.clone())),
            "should be able to unsubscribe successfully from topic"
        );

        // Publish on unsubscribed topic
        let publish_data = vec![0; 42];
        gs.publish(&Topic::new(fanout_topic.clone()), publish_data)
            .unwrap();

        assert_eq!(
            gs.fanout
                .get(&TopicHash::from_raw(fanout_topic.clone()))
                .unwrap()
                .len(),
            gs.config.mesh_n,
            "Fanout should contain `mesh_n` peers for fanout topic"
        );

        // Collect all publish messages
        let publishes = gs
            .events
            .iter()
            .fold(vec![], |mut collected_publish, e| match e {
                NetworkBehaviourAction::NotifyHandler { event, .. } => {
                    for s in &event.messages {
                        collected_publish.push(s.clone());
                    }
                    collected_publish
                }
                _ => collected_publish,
            });

        let msg_id =
            (gs.config.message_id_fn)(publishes.first().expect("Should contain > 0 entries"));

        assert_eq!(
            publishes.len(),
            gs.config.mesh_n,
            "Should send a publish message to `mesh_n` fanout peers"
        );

        assert!(
            gs.mcache.get(&msg_id).is_some(),
            "Message cache should contain published message"
        );
    }

    #[test]
    /// Test the gossipsub NetworkBehaviour peer connection logic.
    fn test_inject_connected() {
        let (gs, peers, topic_hashes) = build_and_inject_nodes(
            20,
            vec![String::from("topic1"), String::from("topic2")],
            true,
        );

        // check that our subscriptions are sent to each of the peers
        // collect all the SendEvents
        let send_events: Vec<&NetworkBehaviourAction<Arc<GossipsubRpc>, GossipsubEvent>> = gs
            .events
            .iter()
            .filter(|e| match e {
                NetworkBehaviourAction::NotifyHandler { event, .. } => {
                    !event.subscriptions.is_empty()
                }
                _ => false,
            })
            .collect();

        // check that there are two subscriptions sent to each peer
        for sevent in send_events.clone() {
            match sevent {
                NetworkBehaviourAction::NotifyHandler { event, .. } => {
                    assert!(
                        event.subscriptions.len() == 2,
                        "There should be two subscriptions sent to each peer (1 for each topic)."
                    );
                }
                _ => {}
            };
        }

        // check that there are 20 send events created
        assert!(
            send_events.len() == 20,
            "There should be a subscription event sent to each peer."
        );

        // `peer_topics` should contain an entry for every peer, listing the topics they
        // subscribed to
        for peer in peers {
            let known_topics = gs.peer_topics.get(&peer).unwrap();
            assert!(
                known_topics == &topic_hashes.iter().cloned().collect(),
                "The topics for each node should be all topics"
            );
        }
    }

    #[test]
    /// Test subscription handling
    fn test_handle_received_subscriptions() {
        // For every subscription:
        // SUBSCRIBE:   - Add subscribed topic to peer_topics for peer.
        //              - Add peer to topic_peers.
        // UNSUBSCRIBE: - Remove topic from peer_topics for peer.
        //              - Remove peer from topic_peers.
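
        // In other words, `peer_topics` and `topic_peers` are two indexes over the same
        // relation and must stay in sync. A subscription from peer `p` to topic `t`
        // should establish (sketch of the invariant checked below):
        //
        //     peer_topics[p].contains(t) && topic_peers[t].contains(p)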

        let topics = vec!["topic1", "topic2", "topic3", "topic4"]
            .iter()
            .map(|&t| String::from(t))
            .collect();
        let (mut gs, peers, topic_hashes) = build_and_inject_nodes(20, topics, false);

        // The first peer sends 3 subscriptions and 1 unsubscription
        let mut subscriptions = topic_hashes[..3]
            .iter()
            .map(|topic_hash| GossipsubSubscription {
                action: GossipsubSubscriptionAction::Subscribe,
                topic_hash: topic_hash.clone(),
            })
            .collect::<Vec<GossipsubSubscription>>();

        subscriptions.push(GossipsubSubscription {
            action: GossipsubSubscriptionAction::Unsubscribe,
            topic_hash: topic_hashes[topic_hashes.len() - 1].clone(),
        });

        let unknown_peer = PeerId::random();
        // process the subscriptions
        // first and second peers send subscriptions
        gs.handle_received_subscriptions(&subscriptions, &peers[0]);
        gs.handle_received_subscriptions(&subscriptions, &peers[1]);
        // unknown peer sends the same subscriptions
        gs.handle_received_subscriptions(&subscriptions, &unknown_peer);

        // verify the result

        let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone();
        assert!(
            peer_topics == topic_hashes.iter().take(3).cloned().collect(),
            "First peer should be subscribed to three topics"
        );
        let peer_topics = gs.peer_topics.get(&peers[1]).unwrap().clone();
        assert!(
            peer_topics == topic_hashes.iter().take(3).cloned().collect(),
            "Second peer should be subscribed to three topics"
        );

        assert!(
            gs.peer_topics.get(&unknown_peer).is_none(),
            "Unknown peer should not have been added"
        );

        for topic_hash in topic_hashes[..3].iter() {
            let topic_peers = gs.topic_peers.get(topic_hash).unwrap().clone();
            assert!(
                topic_peers == peers[..2].into_iter().cloned().collect(),
                "Two peers should be added to the first three topics"
            );
        }

        // Peer 0 unsubscribes from the first topic

        gs.handle_received_subscriptions(
            &vec![GossipsubSubscription {
                action: GossipsubSubscriptionAction::Unsubscribe,
                topic_hash: topic_hashes[0].clone(),
            }],
            &peers[0],
        );

        let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone();
        assert!(
            peer_topics == topic_hashes[1..3].into_iter().cloned().collect(),
            "Peer should be subscribed to two topics"
        );

        let topic_peers = gs.topic_peers.get(&topic_hashes[0]).unwrap().clone(); // only gossipsub at the moment
        assert!(
            topic_peers == peers[1..2].into_iter().cloned().collect(),
            "Only the second peer should remain in the first topic"
        );
    }

    #[test]
    /// Test Gossipsub.get_random_peers() function
    fn test_get_random_peers() {
        // generate a default GossipsubConfig
        let mut gs_config = GossipsubConfig::default();
        gs_config.validation_mode = ValidationMode::Anonymous;
        // create a gossipsub struct
        let mut gs: Gossipsub = Gossipsub::new(MessageAuthenticity::Anonymous, gs_config);

        // create a topic and fill it with some peers
        let topic_hash = Topic::new("Test".into()).no_hash().clone();
        let mut peers = vec![];
        for _ in 0..20 {
            peers.push(PeerId::random())
        }

        gs.topic_peers
            .insert(topic_hash.clone(), peers.iter().cloned().collect());

        let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 5, |_| true);
        assert_eq!(random_peers.len(), 5, "Expected 5 peers to be returned");
        let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 30, |_| true);
        assert!(random_peers.len() == 20, "Expected 20 peers to be returned");
        assert!(
            random_peers == peers.iter().cloned().collect(),
            "Expected no shuffling"
        );
        let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 20, |_| true);
        assert!(random_peers.len() == 20, "Expected 20 peers to be returned");
        assert!(
            random_peers == peers.iter().cloned().collect(),
            "Expected no shuffling"
        );
        let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 0, |_| true);
        assert!(random_peers.len() == 0, "Expected 0 peers to be returned");
        // test the filter
        let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 5, |_| false);
        assert!(random_peers.len() == 0, "Expected 0 peers to be returned");
        let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 10, {
            |peer| peers.contains(peer)
        });
        assert!(random_peers.len() == 10, "Expected 10 peers to be returned");
    }
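
    // The next few tests exercise the IHAVE/IWANT gossip exchange. In outline (a protocol
    // summary for orientation, not code from this module):
    //
    //     peer --IHAVE(topic, [msg_id])--> us    // "I have these messages"
    //     us   --IWANT([msg_id])---------> peer  // only for ids we lack, on topics we follow
    //     peer --message-----------------> us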

    /// Tests that the correct message is sent when a peer asks for a message in our cache.
    #[test]
    fn test_handle_iwant_msg_cached() {
        let (mut gs, peers, _) = build_and_inject_nodes(20, Vec::new(), true);

        let id = gs.config.message_id_fn;

        let message = GossipsubMessage {
            source: Some(peers[11].clone()),
            data: vec![1, 2, 3, 4],
            sequence_number: Some(1u64),
            topics: Vec::new(),
            signature: None,
            key: None,
            validated: true,
        };
        let msg_id = id(&message);
        gs.mcache.put(message.clone());

        gs.handle_iwant(&peers[7], vec![msg_id.clone()]);

        // the messages we are sending
        let sent_messages = gs
            .events
            .iter()
            .fold(vec![], |mut collected_messages, e| match e {
                NetworkBehaviourAction::NotifyHandler { event, .. } => {
                    for c in &event.messages {
                        collected_messages.push(c.clone())
                    }
                    collected_messages
                }
                _ => collected_messages,
            });

        assert!(
            sent_messages.iter().any(|msg| id(msg) == msg_id),
            "Expected the cached message to be sent to an IWANT peer"
        );
    }

    /// Tests that messages are sent correctly depending on the shifting of the message cache.
    #[test]
    fn test_handle_iwant_msg_cached_shifted() {
        let (mut gs, peers, _) = build_and_inject_nodes(20, Vec::new(), true);

        let id = gs.config.message_id_fn;
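
        // mcache sketch (based on the default `history_length` of 5 this test relies on):
        // the cache keeps `history_length` generations of messages; each `shift()` ages
        // every generation by one and drops the oldest, so a cached message is gone after
        // 5 shifts:
        //
        //     gs.mcache.put(message);
        //     for _ in 0..5 { gs.mcache.shift(); }
        //     assert!(gs.mcache.get(&msg_id).is_none());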

        // shift the message cache up to 9 times and check when the message leaves it
        for shift in 1..10 {
            let message = GossipsubMessage {
                source: Some(peers[11].clone()),
                data: vec![1, 2, 3, 4],
                sequence_number: Some(shift),
                topics: Vec::new(),
                signature: None,
                key: None,
                validated: true,
            };
            let msg_id = id(&message);
            gs.mcache.put(message.clone());
            for _ in 0..shift {
                gs.mcache.shift();
            }

            gs.handle_iwant(&peers[7], vec![msg_id.clone()]);

            // was the message sent?
            let message_exists = gs.events.iter().any(|e| match e {
                NetworkBehaviourAction::NotifyHandler { event, .. } => {
                    event.messages.iter().any(|msg| id(msg) == msg_id)
                }
                _ => false,
            });
            // default history_length is 5, expect no messages after shift > 5
            if shift < 5 {
                assert!(
                    message_exists,
                    "Expected the cached message to be sent to an IWANT peer before 5 shifts"
                );
            } else {
                assert!(
                    !message_exists,
                    "Expected the cached message to not be sent to an IWANT peer after 5 shifts"
                );
            }
        }
    }

    #[test]
    // tests that an event is not created when a peer asks for a message not in our cache
    fn test_handle_iwant_msg_not_cached() {
        let (mut gs, peers, _) = build_and_inject_nodes(20, Vec::new(), true);

        let events_before = gs.events.len();
        gs.handle_iwant(&peers[7], vec![MessageId::new(b"unknown id")]);
        let events_after = gs.events.len();

        assert_eq!(
            events_before, events_after,
            "Expected event count to stay the same"
        );
    }

    #[test]
    // tests that an event is created when a peer shares that it has a message we want
    fn test_handle_ihave_subscribed_and_msg_not_cached() {
        let (mut gs, peers, topic_hashes) =
            build_and_inject_nodes(20, vec![String::from("topic1")], true);

        gs.handle_ihave(
            &peers[7],
            vec![(topic_hashes[0].clone(), vec![MessageId::new(b"unknown id")])],
        );

        // check that we sent an IWANT request for `unknown id`
        let iwant_exists = match gs.control_pool.get(&peers[7]) {
            Some(controls) => controls.iter().any(|c| match c {
                GossipsubControlAction::IWant { message_ids } => message_ids
                    .iter()
                    .any(|m| *m == MessageId::new(b"unknown id")),
                _ => false,
            }),
            _ => false,
        };

        assert!(
            iwant_exists,
            "Expected to send an IWANT control message for unknown message id"
        );
    }

    #[test]
    // tests that an event is not created when a peer shares that it has a message that
    // we already have
    fn test_handle_ihave_subscribed_and_msg_cached() {
        let (mut gs, peers, topic_hashes) =
            build_and_inject_nodes(20, vec![String::from("topic1")], true);

        let msg_id = MessageId::new(b"known id");

        let events_before = gs.events.len();
        gs.handle_ihave(&peers[7], vec![(topic_hashes[0].clone(), vec![msg_id])]);
        let events_after = gs.events.len();

        assert_eq!(
            events_before, events_after,
            "Expected event count to stay the same"
        )
    }

    #[test]
    // test that an event is not created when a peer shares that it has a message in
    // a topic that we are not subscribed to
    fn test_handle_ihave_not_subscribed() {
        let (mut gs, peers, _) = build_and_inject_nodes(20, vec![], true);

        let events_before = gs.events.len();
        gs.handle_ihave(
            &peers[7],
            vec![(
                TopicHash::from_raw(String::from("unsubscribed topic")),
                vec![MessageId::new(b"irrelevant id")],
            )],
        );
        let events_after = gs.events.len();

        assert_eq!(
            events_before, events_after,
            "Expected event count to stay the same"
        )
    }
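
    // The GRAFT/PRUNE tests below exercise mesh membership control. Summary of the rules
    // these tests assert: GRAFT(topic) from a peer asks us to add it to our mesh for that
    // topic, which we honour only if we are subscribed to the topic; PRUNE(topic) asks us
    // to remove it from the mesh.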

    #[test]
    // tests that a peer is added to our mesh when we are both subscribed
    // to the same topic
    fn test_handle_graft_is_subscribed() {
        let (mut gs, peers, topic_hashes) =
            build_and_inject_nodes(20, vec![String::from("topic1")], true);

        gs.handle_graft(&peers[7], topic_hashes.clone());

        assert!(
            gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]),
            "Expected peer to have been added to mesh"
        );
    }

    #[test]
    // tests that a peer is not added to our mesh when they are subscribed to
    // a topic that we are not
    fn test_handle_graft_is_not_subscribed() {
        let (mut gs, peers, topic_hashes) =
            build_and_inject_nodes(20, vec![String::from("topic1")], true);

        gs.handle_graft(
            &peers[7],
            vec![TopicHash::from_raw(String::from("unsubscribed topic"))],
        );

        assert!(
            !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]),
            "Expected peer to not have been added to the mesh"
        );
    }

    #[test]
    // tests multiple topics in a single graft message
    fn test_handle_graft_multiple_topics() {
        let topics: Vec<String> = vec!["topic1", "topic2", "topic3", "topic4"]
            .iter()
            .map(|&t| String::from(t))
            .collect();

        let (mut gs, peers, topic_hashes) = build_and_inject_nodes(20, topics.clone(), true);

        let mut their_topics = topic_hashes.clone();
        // their_topics = [topic1, topic2, topic3]
        // our_topics = [topic1, topic2, topic4]
        their_topics.pop();
        gs.leave(&their_topics[2]);

        gs.handle_graft(&peers[7], their_topics.clone());

        for i in 0..2 {
            assert!(
                gs.mesh.get(&topic_hashes[i]).unwrap().contains(&peers[7]),
                "Expected peer to be in the mesh for the first 2 topics"
            );
        }

        assert!(
            gs.mesh.get(&topic_hashes[2]).is_none(),
            "Expected the third topic (which we left) to not be in the mesh"
        );
    }

    #[test]
    // tests that a peer is removed from our mesh
    fn test_handle_prune_peer_in_mesh() {
        let (mut gs, peers, topic_hashes) =
            build_and_inject_nodes(20, vec![String::from("topic1")], true);

        // insert peer into our mesh for 'topic1'
        gs.mesh
            .insert(topic_hashes[0].clone(), peers.iter().cloned().collect());
        assert!(
            gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]),
            "Expected peer to be in mesh"
        );

        gs.handle_prune(&peers[7], topic_hashes.clone());
        assert!(
            !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]),
            "Expected peer to be removed from mesh"
        );
    }

    #[test]
    // Tests the mesh maintenance addition
    fn test_mesh_addition() {
        let config = GossipsubConfig::default();

        // Build `mesh_n + 1` peers, then PRUNE enough of them to leave the mesh one peer
        // below `mesh_n_low`, giving us a deficit.
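
        // Maintenance rule exercised here and in `test_mesh_subtraction` (a summary of the
        // asserted heartbeat behaviour, not implementation code): for each mesh topic,
        //
        //     if mesh_peers.len() < mesh_n_low  { graft random peers up to mesh_n }
        //     if mesh_peers.len() > mesh_n_high { prune random peers down to mesh_n }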

        let (mut gs, peers, topics) =
            build_and_inject_nodes(config.mesh_n + 1, vec!["test".into()], true);

        let to_remove_peers = config.mesh_n + 1 - config.mesh_n_low - 1;

        for index in 0..to_remove_peers {
            gs.handle_prune(&peers[index], topics.clone());
        }

        // Verify the pruned peers are removed from the mesh.
        assert_eq!(
            gs.mesh.get(&topics[0]).unwrap().len(),
            config.mesh_n_low - 1
        );

        // run a heartbeat
        gs.heartbeat();

        // Peers should be added to reach mesh_n
        assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n);
    }

    #[test]
    // Tests the mesh maintenance subtraction
    fn test_mesh_subtraction() {
        let config = GossipsubConfig::default();

        // Add `mesh_n_high + 10` peers and GRAFT them all, giving us a surplus.
        let (mut gs, peers, topics) =
            build_and_inject_nodes(config.mesh_n_high + 10, vec!["test".into()], true);

        // graft all the peers
        for peer in peers {
            gs.handle_graft(&peer, topics.clone());
        }

        // run a heartbeat
        gs.heartbeat();

        // Peers should be removed to reach mesh_n
        assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), config.mesh_n);
    }
}