.cargo: Run clippy on ALL the source files (#2949)

This commit is contained in:
Thomas Eizinger
2022-10-04 18:24:38 +11:00
committed by GitHub
parent e6da99e4f8
commit 1b793242e6
50 changed files with 5497 additions and 5657 deletions

View File

@ -1,3 +1,3 @@
[alias]
# Temporary solution to have clippy config in a single place until https://github.com/rust-lang/rust-clippy/blob/master/doc/roadmap-2021.md#lintstoml-configuration is shipped.
custom-clippy = "clippy --all-features --all-targets -- -A clippy::type_complexity -A clippy::pedantic -D warnings"
custom-clippy = "clippy --workspace --all-features --all-targets -- -A clippy::type_complexity -A clippy::pedantic -D warnings"

View File

@ -39,7 +39,7 @@ fn clone(c: &mut Criterion) {
c.bench_function("clone", |b| {
b.iter(|| {
black_box(peer_id.clone());
black_box(peer_id);
})
});
}

View File

@ -307,9 +307,9 @@ mod tests {
use super::*;
use quickcheck::*;
const KEY1: &'static [u8] = include_bytes!("test/rsa-2048.pk8");
const KEY2: &'static [u8] = include_bytes!("test/rsa-3072.pk8");
const KEY3: &'static [u8] = include_bytes!("test/rsa-4096.pk8");
const KEY1: &[u8] = include_bytes!("test/rsa-2048.pk8");
const KEY2: &[u8] = include_bytes!("test/rsa-3072.pk8");
const KEY3: &[u8] = include_bytes!("test/rsa-4096.pk8");
#[derive(Clone, Debug)]
struct SomeKeypair(Keypair);

View File

@ -286,10 +286,10 @@ mod tests {
#[test]
fn extract_peer_id_from_multi_address() {
let address =
format!("/memory/1234/p2p/12D3KooWGQmdpzHXCqLno4mMxWXKNFQHASBeF99gTm2JR8Vu5Bdc")
.parse()
.unwrap();
let address = "/memory/1234/p2p/12D3KooWGQmdpzHXCqLno4mMxWXKNFQHASBeF99gTm2JR8Vu5Bdc"
.to_string()
.parse()
.unwrap();
let peer_id = PeerId::try_from_multiaddr(&address).unwrap();
@ -303,7 +303,7 @@ mod tests {
#[test]
fn no_panic_on_extract_peer_id_from_multi_address_if_not_present() {
let address = format!("/memory/1234").parse().unwrap();
let address = "/memory/1234".to_string().parse().unwrap();
let maybe_empty = PeerId::try_from_multiaddr(&address);

View File

@ -487,7 +487,7 @@ mod tests {
fn prop(msg: Message) {
let mut buf = BytesMut::new();
msg.encode(&mut buf)
.expect(&format!("Encoding message failed: {:?}", msg));
.unwrap_or_else(|_| panic!("Encoding message failed: {:?}", msg));
match Message::decode(buf.freeze()) {
Ok(m) => assert_eq!(m, msg),
Err(e) => panic!("Decoding failed: {:?}", e),

View File

@ -71,9 +71,10 @@ fn transport_upgrade() {
let addr = addr_receiver.await.unwrap();
dialer.dial(addr).unwrap();
futures::future::poll_fn(move |cx| loop {
match ready!(dialer.poll_next_unpin(cx)).unwrap() {
SwarmEvent::ConnectionEstablished { .. } => return Poll::Ready(()),
_ => {}
if let SwarmEvent::ConnectionEstablished { .. } =
ready!(dialer.poll_next_unpin(cx)).unwrap()
{
return Poll::Ready(());
}
})
.await

View File

@ -53,7 +53,7 @@ const BENCH_SIZES: [usize; 8] = [
fn prepare(c: &mut Criterion) {
let _ = env_logger::try_init();
let payload: Vec<u8> = vec![1; 1024 * 1024 * 1];
let payload: Vec<u8> = vec![1; 1024 * 1024];
let mut tcp = c.benchmark_group("tcp");
let tcp_addr = multiaddr![Ip4(std::net::Ipv4Addr::new(127, 0, 0, 1)), Tcp(0u16)];

View File

@ -1326,7 +1326,7 @@ mod tests {
w_buf: BytesMut::new(),
eof: false,
};
let mut m = Multiplexed::new(conn, cfg.clone());
let mut m = Multiplexed::new(conn, cfg);
// Run the test.
let mut opened = HashSet::new();

View File

@ -188,11 +188,10 @@ fn protocol_not_match() {
let mut transport = TcpTransport::default()
.and_then(move |c, e| upgrade::apply(c, mplex, e, upgrade::Version::V1))
.boxed();
match transport.dial(rx.await.unwrap()).unwrap().await {
Ok(_) => {
assert!(false, "Dialing should fail here as protocols do not match")
}
_ => {}
}
assert!(
transport.dial(rx.await.unwrap()).unwrap().await.is_err(),
"Dialing should fail here as protocols do not match"
);
});
}

View File

@ -117,6 +117,7 @@ impl Behaviour {
}
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
enum Event {
AutoNat(autonat::Event),
Identify(identify::Event),

View File

@ -96,6 +96,7 @@ impl Behaviour {
}
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
enum Event {
AutoNat(autonat::Event),
Identify(identify::Event),

View File

@ -58,9 +58,8 @@ async fn spawn_server(kill: oneshot::Receiver<()>) -> (PeerId, Multiaddr) {
.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap())
.unwrap();
let addr = loop {
match server.select_next_some().await {
SwarmEvent::NewListenAddr { address, .. } => break address,
_ => {}
if let SwarmEvent::NewListenAddr { address, .. } = server.select_next_some().await {
break address;
};
};
tx.send((peer_id, addr)).unwrap();
@ -78,11 +77,8 @@ async fn spawn_server(kill: oneshot::Receiver<()>) -> (PeerId, Multiaddr) {
async fn next_event(swarm: &mut Swarm<Behaviour>) -> Event {
loop {
match swarm.select_next_some().await {
SwarmEvent::Behaviour(event) => {
break event;
}
_ => {}
if let SwarmEvent::Behaviour(event) = swarm.select_next_some().await {
break event;
}
}
}
@ -177,9 +173,8 @@ async fn test_auto_probe() {
.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap())
.unwrap();
loop {
match client.select_next_some().await {
SwarmEvent::NewListenAddr { .. } => break,
_ => {}
if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await {
break;
}
}
@ -269,14 +264,13 @@ async fn test_confidence() {
.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap())
.unwrap();
loop {
match client.select_next_some().await {
SwarmEvent::NewListenAddr { .. } => break,
_ => {}
if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await {
break;
}
}
} else {
let unreachable_addr: Multiaddr = "/ip4/127.0.0.1/tcp/42".parse().unwrap();
client.add_external_address(unreachable_addr.clone(), AddressScore::Infinite);
client.add_external_address(unreachable_addr, AddressScore::Infinite);
}
for i in 0..MAX_CONFIDENCE + 1 {
@ -357,9 +351,8 @@ async fn test_throttle_server_period() {
.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap())
.unwrap();
loop {
match client.select_next_some().await {
SwarmEvent::NewListenAddr { .. } => break,
_ => {}
if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await {
break;
}
}
@ -477,9 +470,8 @@ async fn test_outbound_failure() {
.unwrap();
loop {
match client.select_next_some().await {
SwarmEvent::NewListenAddr { .. } => break,
_ => {}
if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await {
break;
}
}
// First probe should be successful and flip status to public.
@ -497,7 +489,8 @@ async fn test_outbound_failure() {
}
let inactive = servers.split_off(1);
// Drop the handles of the inactive servers to kill them.
#[allow(clippy::needless_collect)] // Drop the handles of the inactive servers to kill them.
let inactive_ids: Vec<_> = inactive.into_iter().map(|(id, _handle)| id).collect();
// Expect to retry on outbound failure
@ -541,9 +534,8 @@ async fn test_global_ips_config() {
.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap())
.unwrap();
loop {
match client.select_next_some().await {
SwarmEvent::NewListenAddr { .. } => break,
_ => {}
if let SwarmEvent::NewListenAddr { .. } = client.select_next_some().await {
break;
}
}

View File

@ -56,9 +56,8 @@ async fn init_server(config: Option<Config>) -> (Swarm<Behaviour>, PeerId, Multi
.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap())
.unwrap();
let addr = loop {
match server.select_next_some().await {
SwarmEvent::NewListenAddr { address, .. } => break address,
_ => {}
if let SwarmEvent::NewListenAddr { address, .. } = server.select_next_some().await {
break address;
};
};
(server, peer_id, addr)
@ -91,12 +90,9 @@ async fn spawn_client(
.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap())
.unwrap();
loop {
match client.select_next_some().await {
SwarmEvent::NewListenAddr { address, .. } => {
addr = Some(address);
break;
}
_ => {}
if let SwarmEvent::NewListenAddr { address, .. } = client.select_next_some().await {
addr = Some(address);
break;
};
}
}
@ -119,11 +115,8 @@ async fn spawn_client(
async fn next_event(swarm: &mut Swarm<Behaviour>) -> Event {
loop {
match swarm.select_next_some().await {
SwarmEvent::Behaviour(event) => {
break event;
}
_ => {}
if let SwarmEvent::Behaviour(event) = swarm.select_next_some().await {
break event;
}
}
}
@ -161,9 +154,8 @@ async fn test_dial_back() {
} => {
assert_eq!(peer_id, client_id);
let observed_client_ip = loop {
match send_back_addr.pop().unwrap() {
Protocol::Ip4(ip4_addr) => break ip4_addr,
_ => {}
if let Protocol::Ip4(ip4_addr) = send_back_addr.pop().unwrap() {
break ip4_addr;
}
};
break observed_client_ip;

View File

@ -113,6 +113,7 @@ fn main() -> Result<(), Box<dyn Error>> {
}
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
enum Event {
Ping(ping::Event),
Identify(identify::Event),

View File

@ -53,7 +53,6 @@ fn connect() {
let mut dst = build_client();
let dst_peer_id = *dst.local_peer_id();
let dst_relayed_addr = relay_addr
.clone()
.with(Protocol::P2p(relay_peer_id.into()))
.with(Protocol::P2pCircuit)
.with(Protocol::P2p(dst_peer_id.into()));
@ -96,7 +95,7 @@ fn connect() {
fn build_relay() -> Swarm<relay::Relay> {
let local_key = identity::Keypair::generate_ed25519();
let local_public_key = local_key.public();
let local_peer_id = local_public_key.clone().to_peer_id();
let local_peer_id = local_public_key.to_peer_id();
let transport = build_transport(MemoryTransport::default().boxed(), local_public_key);
@ -116,7 +115,7 @@ fn build_relay() -> Swarm<relay::Relay> {
fn build_client() -> Swarm<Client> {
let local_key = identity::Keypair::generate_ed25519();
let local_public_key = local_key.public();
let local_peer_id = local_public_key.clone().to_peer_id();
let local_peer_id = local_public_key.to_peer_id();
let (relay_transport, behaviour) = client::Client::new_transport_and_behaviour(local_peer_id);
let transport = build_transport(
@ -141,13 +140,11 @@ fn build_transport<StreamSink>(
where
StreamSink: AsyncRead + AsyncWrite + Send + Unpin + 'static,
{
let transport = transport
transport
.upgrade(Version::V1)
.authenticate(PlainText2Config { local_public_key })
.multiplex(libp2p::yamux::YamuxConfig::default())
.boxed();
transport
.boxed()
}
#[derive(NetworkBehaviour)]

File diff suppressed because it is too large Load Diff

View File

@ -270,23 +270,16 @@ mod tests {
fn test_put_get_one() {
let mut mc = new_cache(10, 15);
let topic1_hash = Topic::new("topic1").hash().clone();
let topic1_hash = Topic::new("topic1").hash();
let (id, m) = gen_testm(10, topic1_hash);
mc.put(&id, m.clone());
assert!(mc.history[0].len() == 1);
assert_eq!(mc.history[0].len(), 1);
let fetched = mc.get(&id);
assert_eq!(fetched.is_none(), false);
assert_eq!(fetched.is_some(), true);
// Make sure it is the same fetched message
match fetched {
Some(x) => assert_eq!(*x, m),
_ => assert!(false),
}
assert_eq!(fetched.unwrap(), &m);
}
#[test]
@ -294,15 +287,15 @@ mod tests {
fn test_get_wrong() {
let mut mc = new_cache(10, 15);
let topic1_hash = Topic::new("topic1").hash().clone();
let topic1_hash = Topic::new("topic1").hash();
let (id, m) = gen_testm(10, topic1_hash);
mc.put(&id, m.clone());
mc.put(&id, m);
// Try to get an incorrect ID
let wrong_id = MessageId::new(b"wrongid");
let fetched = mc.get(&wrong_id);
assert_eq!(fetched.is_none(), true);
assert!(fetched.is_none());
}
#[test]
@ -313,7 +306,7 @@ mod tests {
// Try to get an incorrect ID
let wrong_string = MessageId::new(b"imempty");
let fetched = mc.get(&wrong_string);
assert_eq!(fetched.is_none(), true);
assert!(fetched.is_none());
}
#[test]
@ -321,7 +314,7 @@ mod tests {
fn test_shift() {
let mut mc = new_cache(1, 5);
let topic1_hash = Topic::new("topic1").hash().clone();
let topic1_hash = Topic::new("topic1").hash();
// Build the message
for i in 0..10 {
@ -332,7 +325,7 @@ mod tests {
mc.shift();
// Ensure the shift occurred
assert!(mc.history[0].len() == 0);
assert!(mc.history[0].is_empty());
assert!(mc.history[1].len() == 10);
// Make sure no messages deleted
@ -344,7 +337,7 @@ mod tests {
fn test_empty_shift() {
let mut mc = new_cache(1, 5);
let topic1_hash = Topic::new("topic1").hash().clone();
let topic1_hash = Topic::new("topic1").hash();
// Build the message
for i in 0..10 {
@ -355,14 +348,14 @@ mod tests {
mc.shift();
// Ensure the shift occurred
assert!(mc.history[0].len() == 0);
assert!(mc.history[0].is_empty());
assert!(mc.history[1].len() == 10);
mc.shift();
assert!(mc.history[2].len() == 10);
assert!(mc.history[1].len() == 0);
assert!(mc.history[0].len() == 0);
assert!(mc.history[1].is_empty());
assert!(mc.history[0].is_empty());
}
#[test]
@ -370,7 +363,7 @@ mod tests {
fn test_remove_last_from_shift() {
let mut mc = new_cache(4, 5);
let topic1_hash = Topic::new("topic1").hash().clone();
let topic1_hash = Topic::new("topic1").hash();
// Build the message
for i in 0..10 {

View File

@ -29,7 +29,7 @@ fn within_variance(value: f64, expected: f64, variance: f64) -> bool {
if expected >= 0.0 {
return value > expected * (1.0 - variance) && value < expected * (1.0 + variance);
}
return value > expected * (1.0 + variance) && value < expected * (1.0 - variance);
value > expected * (1.0 + variance) && value < expected * (1.0 - variance)
}
// generates a random gossipsub message with sequence number i
@ -45,7 +45,7 @@ fn make_test_message(seq: u64) -> (MessageId, RawGossipsubMessage) {
};
let message = GossipsubMessage {
source: raw_message.source.clone(),
source: raw_message.source,
data: raw_message.data.clone(),
sequence_number: raw_message.sequence_number,
topic: raw_message.topic.clone(),
@ -62,7 +62,7 @@ fn default_message_id() -> fn(&GossipsubMessage) -> MessageId {
let mut source_string = if let Some(peer_id) = message.source.as_ref() {
peer_id.to_base58()
} else {
PeerId::from_bytes(&vec![0, 1, 0])
PeerId::from_bytes(&[0, 1, 0])
.expect("Valid peer id")
.to_base58()
};
@ -76,14 +76,18 @@ fn test_score_time_in_mesh() {
// Create parameters with reasonable default values
let topic = Topic::new("test");
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
params.topic_score_cap = 1000.0;
let mut params = PeerScoreParams {
topic_score_cap: 1000.0,
..Default::default()
};
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 0.5;
topic_params.time_in_mesh_weight = 1.0;
topic_params.time_in_mesh_quantum = Duration::from_millis(1);
topic_params.time_in_mesh_cap = 3600.0;
let topic_params = TopicScoreParams {
topic_weight: 0.5,
time_in_mesh_weight: 1.0,
time_in_mesh_quantum: Duration::from_millis(1),
time_in_mesh_cap: 3600.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
@ -91,7 +95,7 @@ fn test_score_time_in_mesh() {
let mut peer_score = PeerScore::new(params);
// Peer score should start at 0
peer_score.add_peer(peer_id.clone());
peer_score.add_peer(peer_id);
let score = peer_score.score(&peer_id);
assert!(
@ -125,11 +129,13 @@ fn test_score_time_in_mesh_cap() {
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 0.5;
topic_params.time_in_mesh_weight = 1.0;
topic_params.time_in_mesh_quantum = Duration::from_millis(1);
topic_params.time_in_mesh_cap = 10.0;
let topic_params = TopicScoreParams {
topic_weight: 0.5,
time_in_mesh_weight: 1.0,
time_in_mesh_quantum: Duration::from_millis(1),
time_in_mesh_cap: 10.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
@ -137,7 +143,7 @@ fn test_score_time_in_mesh_cap() {
let mut peer_score = PeerScore::new(params);
// Peer score should start at 0
peer_score.add_peer(peer_id.clone());
peer_score.add_peer(peer_id);
let score = peer_score.score(&peer_id);
assert!(
@ -173,12 +179,14 @@ fn test_score_first_message_deliveries() {
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 1.0;
topic_params.first_message_deliveries_weight = 1.0;
topic_params.first_message_deliveries_decay = 1.0;
topic_params.first_message_deliveries_cap = 2000.0;
topic_params.time_in_mesh_weight = 0.0;
let topic_params = TopicScoreParams {
topic_weight: 1.0,
first_message_deliveries_weight: 1.0,
first_message_deliveries_decay: 1.0,
first_message_deliveries_cap: 2000.0,
time_in_mesh_weight: 0.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
@ -186,7 +194,7 @@ fn test_score_first_message_deliveries() {
let mut peer_score = PeerScore::new(params);
// Peer score should start at 0
peer_score.add_peer(peer_id.clone());
peer_score.add_peer(peer_id);
peer_score.graft(&peer_id, topic);
// deliver a bunch of messages from the peer
@ -217,12 +225,14 @@ fn test_score_first_message_deliveries_cap() {
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 1.0;
topic_params.first_message_deliveries_weight = 1.0;
topic_params.first_message_deliveries_decay = 1.0; // test without decay
topic_params.first_message_deliveries_cap = 50.0;
topic_params.time_in_mesh_weight = 0.0;
let topic_params = TopicScoreParams {
topic_weight: 1.0,
first_message_deliveries_weight: 1.0,
first_message_deliveries_decay: 1.0, // test without decay
first_message_deliveries_cap: 50.0,
time_in_mesh_weight: 0.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
@ -230,7 +240,7 @@ fn test_score_first_message_deliveries_cap() {
let mut peer_score = PeerScore::new(params);
// Peer score should start at 0
peer_score.add_peer(peer_id.clone());
peer_score.add_peer(peer_id);
peer_score.graft(&peer_id, topic);
// deliver a bunch of messages from the peer
@ -261,17 +271,19 @@ fn test_score_first_message_deliveries_decay() {
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 1.0;
topic_params.first_message_deliveries_weight = 1.0;
topic_params.first_message_deliveries_decay = 0.9; // decay 10% per decay interval
topic_params.first_message_deliveries_cap = 2000.0;
topic_params.time_in_mesh_weight = 0.0;
let topic_params = TopicScoreParams {
topic_weight: 1.0,
first_message_deliveries_weight: 1.0,
first_message_deliveries_decay: 0.9, // decay 10% per decay interval
first_message_deliveries_cap: 2000.0,
time_in_mesh_weight: 0.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
let peer_id = PeerId::random();
let mut peer_score = PeerScore::new(params);
peer_score.add_peer(peer_id.clone());
peer_score.add_peer(peer_id);
peer_score.graft(&peer_id, topic);
// deliver a bunch of messages from the peer
@ -317,17 +329,19 @@ fn test_score_mesh_message_deliveries() {
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 1.0;
topic_params.mesh_message_deliveries_weight = -1.0;
topic_params.mesh_message_deliveries_activation = Duration::from_secs(1);
topic_params.mesh_message_deliveries_window = Duration::from_millis(10);
topic_params.mesh_message_deliveries_threshold = 20.0;
topic_params.mesh_message_deliveries_cap = 100.0;
topic_params.mesh_message_deliveries_decay = 1.0;
topic_params.first_message_deliveries_weight = 0.0;
topic_params.time_in_mesh_weight = 0.0;
topic_params.mesh_failure_penalty_weight = 0.0;
let topic_params = TopicScoreParams {
topic_weight: 1.0,
mesh_message_deliveries_weight: -1.0,
mesh_message_deliveries_activation: Duration::from_secs(1),
mesh_message_deliveries_window: Duration::from_millis(10),
mesh_message_deliveries_threshold: 20.0,
mesh_message_deliveries_cap: 100.0,
mesh_message_deliveries_decay: 1.0,
first_message_deliveries_weight: 0.0,
time_in_mesh_weight: 0.0,
mesh_failure_penalty_weight: 0.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
let mut peer_score = PeerScore::new(params);
@ -341,11 +355,11 @@ fn test_score_mesh_message_deliveries() {
let peer_id_b = PeerId::random();
let peer_id_c = PeerId::random();
let peers = vec![peer_id_a.clone(), peer_id_b.clone(), peer_id_c.clone()];
let peers = vec![peer_id_a, peer_id_b, peer_id_c];
for peer_id in &peers {
peer_score.add_peer(peer_id.clone());
peer_score.graft(&peer_id, topic.clone());
peer_score.add_peer(*peer_id);
peer_score.graft(peer_id, topic.clone());
}
// assert that nobody has been penalized yet for not delivering messages before activation time
@ -419,25 +433,27 @@ fn test_score_mesh_message_deliveries_decay() {
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 1.0;
topic_params.mesh_message_deliveries_weight = -1.0;
topic_params.mesh_message_deliveries_activation = Duration::from_secs(0);
topic_params.mesh_message_deliveries_window = Duration::from_millis(10);
topic_params.mesh_message_deliveries_threshold = 20.0;
topic_params.mesh_message_deliveries_cap = 100.0;
topic_params.mesh_message_deliveries_decay = 0.9;
topic_params.first_message_deliveries_weight = 0.0;
topic_params.time_in_mesh_weight = 0.0;
topic_params.time_in_mesh_quantum = Duration::from_secs(1);
topic_params.mesh_failure_penalty_weight = 0.0;
let topic_params = TopicScoreParams {
topic_weight: 1.0,
mesh_message_deliveries_weight: -1.0,
mesh_message_deliveries_activation: Duration::from_secs(0),
mesh_message_deliveries_window: Duration::from_millis(10),
mesh_message_deliveries_threshold: 20.0,
mesh_message_deliveries_cap: 100.0,
mesh_message_deliveries_decay: 0.9,
first_message_deliveries_weight: 0.0,
time_in_mesh_weight: 0.0,
time_in_mesh_quantum: Duration::from_secs(1),
mesh_failure_penalty_weight: 0.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
let mut peer_score = PeerScore::new(params);
let peer_id_a = PeerId::random();
peer_score.add_peer(peer_id_a.clone());
peer_score.graft(&peer_id_a, topic.clone());
peer_score.add_peer(peer_id_a);
peer_score.graft(&peer_id_a, topic);
// deliver a bunch of messages from peer A
let messages = 100;
@ -480,24 +496,26 @@ fn test_score_mesh_failure_penalty() {
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
let mut topic_params = TopicScoreParams::default();
// the mesh failure penalty is applied when a peer is pruned while their
// mesh deliveries are under the threshold.
// for this test, we set the mesh delivery threshold, but set
// mesh_message_deliveries to zero, so the only effect on the score
// is from the mesh failure penalty
topic_params.topic_weight = 1.0;
topic_params.mesh_message_deliveries_weight = 0.0;
topic_params.mesh_message_deliveries_activation = Duration::from_secs(0);
topic_params.mesh_message_deliveries_window = Duration::from_millis(10);
topic_params.mesh_message_deliveries_threshold = 20.0;
topic_params.mesh_message_deliveries_cap = 100.0;
topic_params.mesh_message_deliveries_decay = 1.0;
topic_params.first_message_deliveries_weight = 0.0;
topic_params.time_in_mesh_weight = 0.0;
topic_params.time_in_mesh_quantum = Duration::from_secs(1);
topic_params.mesh_failure_penalty_weight = -1.0;
topic_params.mesh_failure_penalty_decay = 1.0;
let topic_params = TopicScoreParams {
// the mesh failure penalty is applied when a peer is pruned while their
// mesh deliveries are under the threshold.
// for this test, we set the mesh delivery threshold, but set
// mesh_message_deliveries to zero, so the only effect on the score
// is from the mesh failure penalty
topic_weight: 1.0,
mesh_message_deliveries_weight: 0.0,
mesh_message_deliveries_activation: Duration::from_secs(0),
mesh_message_deliveries_window: Duration::from_millis(10),
mesh_message_deliveries_threshold: 20.0,
mesh_message_deliveries_cap: 100.0,
mesh_message_deliveries_decay: 1.0,
first_message_deliveries_weight: 0.0,
time_in_mesh_weight: 0.0,
time_in_mesh_quantum: Duration::from_secs(1),
mesh_failure_penalty_weight: -1.0,
mesh_failure_penalty_decay: 1.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
let mut peer_score = PeerScore::new(params);
@ -505,11 +523,11 @@ fn test_score_mesh_failure_penalty() {
let peer_id_a = PeerId::random();
let peer_id_b = PeerId::random();
let peers = vec![peer_id_a.clone(), peer_id_b.clone()];
let peers = vec![peer_id_a, peer_id_b];
for peer_id in &peers {
peer_score.add_peer(peer_id.clone());
peer_score.graft(&peer_id, topic.clone());
peer_score.add_peer(*peer_id);
peer_score.graft(peer_id, topic.clone());
}
// deliver a bunch of messages from peer A
@ -562,27 +580,28 @@ fn test_score_invalid_message_deliveries() {
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 1.0;
topic_params.mesh_message_deliveries_weight = 0.0;
topic_params.mesh_message_deliveries_activation = Duration::from_secs(1);
topic_params.mesh_message_deliveries_window = Duration::from_millis(10);
topic_params.mesh_message_deliveries_threshold = 20.0;
topic_params.mesh_message_deliveries_cap = 100.0;
topic_params.mesh_message_deliveries_decay = 1.0;
topic_params.first_message_deliveries_weight = 0.0;
topic_params.time_in_mesh_weight = 0.0;
topic_params.mesh_failure_penalty_weight = 0.0;
topic_params.invalid_message_deliveries_weight = -1.0;
topic_params.invalid_message_deliveries_decay = 1.0;
let topic_params = TopicScoreParams {
topic_weight: 1.0,
mesh_message_deliveries_weight: 0.0,
mesh_message_deliveries_activation: Duration::from_secs(1),
mesh_message_deliveries_window: Duration::from_millis(10),
mesh_message_deliveries_threshold: 20.0,
mesh_message_deliveries_cap: 100.0,
mesh_message_deliveries_decay: 1.0,
first_message_deliveries_weight: 0.0,
time_in_mesh_weight: 0.0,
mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -1.0,
invalid_message_deliveries_decay: 1.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
let mut peer_score = PeerScore::new(params);
let peer_id_a = PeerId::random();
peer_score.add_peer(peer_id_a.clone());
peer_score.graft(&peer_id_a, topic.clone());
peer_score.add_peer(peer_id_a);
peer_score.graft(&peer_id_a, topic);
// reject a bunch of messages from peer A
let messages = 100;
@ -608,27 +627,28 @@ fn test_score_invalid_message_deliveris_decay() {
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 1.0;
topic_params.mesh_message_deliveries_weight = 0.0;
topic_params.mesh_message_deliveries_activation = Duration::from_secs(1);
topic_params.mesh_message_deliveries_window = Duration::from_millis(10);
topic_params.mesh_message_deliveries_threshold = 20.0;
topic_params.mesh_message_deliveries_cap = 100.0;
topic_params.mesh_message_deliveries_decay = 1.0;
topic_params.first_message_deliveries_weight = 0.0;
topic_params.time_in_mesh_weight = 0.0;
topic_params.mesh_failure_penalty_weight = 0.0;
topic_params.invalid_message_deliveries_weight = -1.0;
topic_params.invalid_message_deliveries_decay = 0.9;
let topic_params = TopicScoreParams {
topic_weight: 1.0,
mesh_message_deliveries_weight: 0.0,
mesh_message_deliveries_activation: Duration::from_secs(1),
mesh_message_deliveries_window: Duration::from_millis(10),
mesh_message_deliveries_threshold: 20.0,
mesh_message_deliveries_cap: 100.0,
mesh_message_deliveries_decay: 1.0,
first_message_deliveries_weight: 0.0,
time_in_mesh_weight: 0.0,
mesh_failure_penalty_weight: 0.0,
invalid_message_deliveries_weight: -1.0,
invalid_message_deliveries_decay: 0.9,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
let mut peer_score = PeerScore::new(params);
let peer_id_a = PeerId::random();
peer_score.add_peer(peer_id_a.clone());
peer_score.graft(&peer_id_a, topic.clone());
peer_score.add_peer(peer_id_a);
peer_score.graft(&peer_id_a, topic);
// reject a bunch of messages from peer A
let messages = 100;
@ -667,26 +687,28 @@ fn test_score_reject_message_deliveries() {
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 1.0;
topic_params.mesh_message_deliveries_weight = 0.0;
topic_params.first_message_deliveries_weight = 0.0;
topic_params.mesh_failure_penalty_weight = 0.0;
topic_params.time_in_mesh_weight = 0.0;
topic_params.time_in_mesh_quantum = Duration::from_secs(1);
topic_params.invalid_message_deliveries_weight = -1.0;
topic_params.invalid_message_deliveries_decay = 1.0;
let topic_params = TopicScoreParams {
topic_weight: 1.0,
mesh_message_deliveries_weight: 0.0,
first_message_deliveries_weight: 0.0,
mesh_failure_penalty_weight: 0.0,
time_in_mesh_weight: 0.0,
time_in_mesh_quantum: Duration::from_secs(1),
invalid_message_deliveries_weight: -1.0,
invalid_message_deliveries_decay: 1.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
params.topics.insert(topic_hash, topic_params);
let mut peer_score = PeerScore::new(params);
let peer_id_a = PeerId::random();
let peer_id_b = PeerId::random();
let peers = vec![peer_id_a.clone(), peer_id_b.clone()];
let peers = vec![peer_id_a, peer_id_b];
for peer_id in &peers {
peer_score.add_peer(peer_id.clone());
peer_score.add_peer(*peer_id);
}
let (id, msg) = make_test_message(1);
@ -777,25 +799,29 @@ fn test_application_score() {
let app_specific_weight = 0.5;
let topic = Topic::new("test");
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
params.app_specific_weight = app_specific_weight;
let mut params = PeerScoreParams {
app_specific_weight,
..Default::default()
};
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 1.0;
topic_params.mesh_message_deliveries_weight = 0.0;
topic_params.first_message_deliveries_weight = 0.0;
topic_params.mesh_failure_penalty_weight = 0.0;
topic_params.time_in_mesh_weight = 0.0;
topic_params.time_in_mesh_quantum = Duration::from_secs(1);
topic_params.invalid_message_deliveries_weight = 0.0;
topic_params.invalid_message_deliveries_decay = 1.0;
let topic_params = TopicScoreParams {
topic_weight: 1.0,
mesh_message_deliveries_weight: 0.0,
first_message_deliveries_weight: 0.0,
mesh_failure_penalty_weight: 0.0,
time_in_mesh_weight: 0.0,
time_in_mesh_quantum: Duration::from_secs(1),
invalid_message_deliveries_weight: 0.0,
invalid_message_deliveries_decay: 1.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
params.topics.insert(topic_hash, topic_params);
let mut peer_score = PeerScore::new(params);
let peer_id_a = PeerId::random();
peer_score.add_peer(peer_id_a.clone());
peer_score.graft(&peer_id_a, topic.clone());
peer_score.add_peer(peer_id_a);
peer_score.graft(&peer_id_a, topic);
let messages = 100;
for i in -100..messages {
@ -815,20 +841,24 @@ fn test_score_ip_colocation() {
let ip_colocation_factor_threshold = 1.0;
let topic = Topic::new("test");
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
params.ip_colocation_factor_weight = ip_colocation_factor_weight;
params.ip_colocation_factor_threshold = ip_colocation_factor_threshold;
let mut params = PeerScoreParams {
ip_colocation_factor_weight,
ip_colocation_factor_threshold,
..Default::default()
};
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 1.0;
topic_params.mesh_message_deliveries_weight = 0.0;
topic_params.first_message_deliveries_weight = 0.0;
topic_params.mesh_failure_penalty_weight = 0.0;
topic_params.time_in_mesh_weight = 0.0;
topic_params.time_in_mesh_quantum = Duration::from_secs(1);
topic_params.invalid_message_deliveries_weight = 0.0;
let topic_params = TopicScoreParams {
topic_weight: 1.0,
mesh_message_deliveries_weight: 0.0,
first_message_deliveries_weight: 0.0,
mesh_failure_penalty_weight: 0.0,
time_in_mesh_weight: 0.0,
time_in_mesh_quantum: Duration::from_secs(1),
invalid_message_deliveries_weight: 0.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
params.topics.insert(topic_hash, topic_params);
let mut peer_score = PeerScore::new(params);
let peer_id_a = PeerId::random();
@ -836,15 +866,10 @@ fn test_score_ip_colocation() {
let peer_id_c = PeerId::random();
let peer_id_d = PeerId::random();
let peers = vec![
peer_id_a.clone(),
peer_id_b.clone(),
peer_id_c.clone(),
peer_id_d.clone(),
];
let peers = vec![peer_id_a, peer_id_b, peer_id_c, peer_id_d];
for peer_id in &peers {
peer_score.add_peer(peer_id.clone());
peer_score.graft(&peer_id, topic.clone());
peer_score.add_peer(*peer_id);
peer_score.graft(peer_id, topic.clone());
}
// peerA should have no penalty, but B, C, and D should be penalized for sharing an IP
@ -880,20 +905,24 @@ fn test_score_behaviour_penality() {
let topic = Topic::new("test");
let topic_hash = topic.hash();
let mut params = PeerScoreParams::default();
params.behaviour_penalty_decay = behaviour_penalty_decay;
params.behaviour_penalty_weight = behaviour_penalty_weight;
let mut params = PeerScoreParams {
behaviour_penalty_decay,
behaviour_penalty_weight,
..Default::default()
};
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 1.0;
topic_params.mesh_message_deliveries_weight = 0.0;
topic_params.first_message_deliveries_weight = 0.0;
topic_params.mesh_failure_penalty_weight = 0.0;
topic_params.time_in_mesh_weight = 0.0;
topic_params.time_in_mesh_quantum = Duration::from_secs(1);
topic_params.invalid_message_deliveries_weight = 0.0;
let topic_params = TopicScoreParams {
topic_weight: 1.0,
mesh_message_deliveries_weight: 0.0,
first_message_deliveries_weight: 0.0,
mesh_failure_penalty_weight: 0.0,
time_in_mesh_weight: 0.0,
time_in_mesh_quantum: Duration::from_secs(1),
invalid_message_deliveries_weight: 0.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
params.topics.insert(topic_hash, topic_params);
let mut peer_score = PeerScore::new(params);
let peer_id_a = PeerId::random();
@ -905,7 +934,7 @@ fn test_score_behaviour_penality() {
assert_eq!(score_a, 0.0, "Peer A should be unaffected");
// add the peer and test penalties
peer_score.add_peer(peer_id_a.clone());
peer_score.add_peer(peer_id_a);
assert_eq!(score_a, 0.0, "Peer A should be unaffected");
peer_score.add_penalty(&peer_id_a, 1);
@ -931,23 +960,27 @@ fn test_score_retention() {
let app_specific_weight = 1.0;
let app_score_value = -1000.0;
let retain_score = Duration::from_secs(1);
let mut params = PeerScoreParams::default();
params.app_specific_weight = app_specific_weight;
params.retain_score = retain_score;
let mut params = PeerScoreParams {
app_specific_weight,
retain_score,
..Default::default()
};
let mut topic_params = TopicScoreParams::default();
topic_params.topic_weight = 0.0;
topic_params.mesh_message_deliveries_weight = 0.0;
topic_params.mesh_message_deliveries_activation = Duration::from_secs(0);
topic_params.first_message_deliveries_weight = 0.0;
topic_params.time_in_mesh_weight = 0.0;
let topic_params = TopicScoreParams {
topic_weight: 0.0,
mesh_message_deliveries_weight: 0.0,
mesh_message_deliveries_activation: Duration::from_secs(0),
first_message_deliveries_weight: 0.0,
time_in_mesh_weight: 0.0,
..Default::default()
};
params.topics.insert(topic_hash, topic_params.clone());
params.topics.insert(topic_hash, topic_params);
let mut peer_score = PeerScore::new(params);
let peer_id_a = PeerId::random();
peer_score.add_peer(peer_id_a.clone());
peer_score.graft(&peer_id_a, topic.clone());
peer_score.add_peer(peer_id_a);
peer_score.graft(&peer_id_a, topic);
peer_score.set_application_score(&peer_id_a, app_score_value);

View File

@ -582,11 +582,8 @@ mod tests {
// generate an arbitrary GossipsubMessage using the behaviour signing functionality
let config = GossipsubConfig::default();
let gs: Gossipsub = Gossipsub::new(
crate::MessageAuthenticity::Signed(keypair.0.clone()),
config,
)
.unwrap();
let gs: Gossipsub =
Gossipsub::new(crate::MessageAuthenticity::Signed(keypair.0), config).unwrap();
let data = (0..g.gen_range(10..10024u32))
.map(|_| u8::arbitrary(g))
.collect::<Vec<_>>();
@ -602,8 +599,7 @@ mod tests {
fn arbitrary(g: &mut Gen) -> Self {
let topic_string: String = (0..g.gen_range(20..1024u32))
.map(|_| char::arbitrary(g))
.collect::<String>()
.into();
.collect::<String>();
TopicId(Topic::new(topic_string).into())
}
}
@ -654,7 +650,7 @@ mod tests {
let mut codec = GossipsubCodec::new(codec::UviBytes::default(), ValidationMode::Strict);
let mut buf = BytesMut::new();
codec.encode(rpc.clone().into_protobuf(), &mut buf).unwrap();
codec.encode(rpc.into_protobuf(), &mut buf).unwrap();
let decoded_rpc = codec.decode(&mut buf).unwrap().unwrap();
// mark as validated as its a published message
match decoded_rpc {

View File

@ -75,7 +75,7 @@ mod test {
assert_eq!(new_message.topic, topic1.clone().into_string());
let new_message = super::Message::decode(&old_message2b[..]).unwrap();
assert_eq!(new_message.topic, topic2.clone().into_string());
assert_eq!(new_message.topic, topic2.into_string());
let old_message = compat_proto::Message::decode(&new_message1b[..]).unwrap();
assert_eq!(old_message.topic_ids, vec![topic1.into_string()]);

View File

@ -232,15 +232,15 @@ pub mod regex {
let subscriptions = vec![
GossipsubSubscription {
action: Subscribe,
topic_hash: t1.clone(),
topic_hash: t1,
},
GossipsubSubscription {
action: Subscribe,
topic_hash: t2.clone(),
topic_hash: t2,
},
GossipsubSubscription {
action: Subscribe,
topic_hash: t3.clone(),
topic_hash: t3,
},
];
@ -277,7 +277,7 @@ mod test {
},
GossipsubSubscription {
action: Subscribe,
topic_hash: t2.clone(),
topic_hash: t2,
},
GossipsubSubscription {
action: Subscribe,
@ -285,7 +285,7 @@ mod test {
},
GossipsubSubscription {
action: Unsubscribe,
topic_hash: t1.clone(),
topic_hash: t1,
},
];
@ -306,11 +306,11 @@ mod test {
let subscriptions = vec![
GossipsubSubscription {
action: Subscribe,
topic_hash: t1.clone(),
topic_hash: t1,
},
GossipsubSubscription {
action: Subscribe,
topic_hash: t2.clone(),
topic_hash: t2,
},
];
@ -343,7 +343,7 @@ mod test {
},
GossipsubSubscription {
action: Subscribe,
topic_hash: t1.clone(),
topic_hash: t1,
},
];
@ -434,11 +434,11 @@ mod test {
let subscriptions = vec![
GossipsubSubscription {
action: Subscribe,
topic_hash: t1.clone(),
topic_hash: t1,
},
GossipsubSubscription {
action: Subscribe,
topic_hash: t2.clone(),
topic_hash: t2,
},
];

View File

@ -170,7 +170,7 @@ fn build_node() -> (Multiaddr, Swarm<Gossipsub>) {
.validation_mode(ValidationMode::Permissive)
.build()
.unwrap();
let behaviour = Gossipsub::new(MessageAuthenticity::Author(peer_id.clone()), config).unwrap();
let behaviour = Gossipsub::new(MessageAuthenticity::Author(peer_id), config).unwrap();
let mut swarm = Swarm::new(transport, behaviour, peer_id);
let port = 1 + random::<u64>();
@ -187,7 +187,7 @@ fn multi_hop_propagation() {
let _ = env_logger::try_init();
fn prop(num_nodes: u8, seed: u64) -> TestResult {
if num_nodes < 2 || num_nodes > 50 {
if !(2..=50).contains(&num_nodes) {
return TestResult::discard();
}

View File

@ -605,9 +605,8 @@ mod tests {
loop {
let swarm1_fut = swarm1.select_next_some();
pin_mut!(swarm1_fut);
match swarm1_fut.await {
SwarmEvent::NewListenAddr { address, .. } => return address,
_ => {}
if let SwarmEvent::NewListenAddr { address, .. } = swarm1_fut.await {
return address;
}
}
});
@ -681,9 +680,8 @@ mod tests {
loop {
let swarm1_fut = swarm1.select_next_some();
pin_mut!(swarm1_fut);
match swarm1_fut.await {
SwarmEvent::NewListenAddr { address, .. } => return address,
_ => {}
if let SwarmEvent::NewListenAddr { address, .. } = swarm1_fut.await {
return address;
}
}
});
@ -835,7 +833,7 @@ mod tests {
let addr_without_peer_id: Multiaddr = addr.clone();
let mut addr_with_other_peer_id = addr.clone();
addr.push(Protocol::P2p(peer_id.clone().into()));
addr.push(Protocol::P2p(peer_id.into()));
addr_with_other_peer_id.push(Protocol::P2p(other_peer_id.into()));
assert!(multiaddr_matches_peer_id(&addr, &peer_id));

View File

@ -63,8 +63,8 @@ fn build_node_with_config(cfg: KademliaConfig) -> (Multiaddr, TestSwarm) {
.boxed();
let local_id = local_public_key.to_peer_id();
let store = MemoryStore::new(local_id.clone());
let behaviour = Kademlia::with_config(local_id.clone(), store, cfg.clone());
let store = MemoryStore::new(local_id);
let behaviour = Kademlia::with_config(local_id, store, cfg);
let mut swarm = Swarm::new(transport, behaviour, local_id);
@ -129,7 +129,7 @@ fn build_fully_connected_nodes_with_config(
for (_addr, swarm) in swarms.iter_mut() {
for (addr, peer) in &swarm_addr_and_peer_id {
swarm.behaviour_mut().add_address(&peer, addr.clone());
swarm.behaviour_mut().add_address(peer, addr.clone());
}
}
@ -210,7 +210,7 @@ fn bootstrap() {
let mut known = HashSet::new();
for b in swarm.behaviour_mut().kbuckets.iter() {
for e in b.iter() {
known.insert(e.node.key.preimage().clone());
known.insert(*e.node.key.preimage());
}
}
assert_eq!(expected_known, known);
@ -266,7 +266,7 @@ fn query_iter() {
}
// Set up expectations.
let expected_swarm_id = swarm_ids[0].clone();
let expected_swarm_id = swarm_ids[0];
let expected_peer_ids: Vec<_> = swarm_ids.iter().skip(1).cloned().collect();
let mut expected_distances = distances(&search_target_key, expected_peer_ids.clone());
expected_distances.sort();
@ -510,11 +510,11 @@ fn put_record() {
let mut single_swarm = build_node_with_config(config);
// Connect `single_swarm` to three bootnodes.
for i in 0..3 {
single_swarm.1.behaviour_mut().add_address(
fully_connected_swarms[i].1.local_peer_id(),
fully_connected_swarms[i].0.clone(),
);
for swarm in fully_connected_swarms.iter().take(3) {
single_swarm
.1
.behaviour_mut()
.add_address(swarm.1.local_peer_id(), swarm.0.clone());
}
let mut swarms = vec![single_swarm];
@ -527,6 +527,7 @@ fn put_record() {
.collect::<Vec<_>>()
};
#[allow(clippy::mutable_key_type)] // False positive, we never modify `Bytes`.
let records = records
.into_iter()
.take(num_total)
@ -710,7 +711,7 @@ fn put_record() {
);
assert_eq!(swarms[0].behaviour_mut().queries.size(), 0);
for k in records.keys() {
swarms[0].behaviour_mut().store.remove(&k);
swarms[0].behaviour_mut().store.remove(k);
}
assert_eq!(swarms[0].behaviour_mut().store.records().count(), 0);
// All records have been republished, thus the test is complete.
@ -740,7 +741,7 @@ fn get_record() {
// Let first peer know of second peer and second peer know of third peer.
for i in 0..2 {
let (peer_id, address) = (
Swarm::local_peer_id(&swarms[i + 1].1).clone(),
*Swarm::local_peer_id(&swarms[i + 1].1),
swarms[i + 1].0.clone(),
);
swarms[i].1.behaviour_mut().add_address(&peer_id, address);
@ -810,8 +811,8 @@ fn get_record_many() {
let record = Record::new(random_multihash(), vec![4, 5, 6]);
for i in 0..num_nodes {
swarms[i].behaviour_mut().store.put(record.clone()).unwrap();
for swarm in swarms.iter_mut().take(num_nodes) {
swarm.behaviour_mut().store.put(record.clone()).unwrap();
}
let quorum = Quorum::N(NonZeroUsize::new(num_results).unwrap());
@ -870,11 +871,11 @@ fn add_provider() {
let mut single_swarm = build_node_with_config(config);
// Connect `single_swarm` to three bootnodes.
for i in 0..3 {
single_swarm.1.behaviour_mut().add_address(
fully_connected_swarms[i].1.local_peer_id(),
fully_connected_swarms[i].0.clone(),
);
for swarm in fully_connected_swarms.iter().take(3) {
single_swarm
.1
.behaviour_mut()
.add_address(swarm.1.local_peer_id(), swarm.0.clone());
}
let mut swarms = vec![single_swarm];
@ -887,6 +888,7 @@ fn add_provider() {
.collect::<Vec<_>>()
};
#[allow(clippy::mutable_key_type)] // False positive, we never modify `Bytes`.
let keys: HashSet<_> = keys.into_iter().take(num_total).collect();
// Each test run publishes all records twice.
@ -961,7 +963,7 @@ fn add_provider() {
.skip(1)
.filter_map(|swarm| {
if swarm.behaviour().store.providers(&key).len() == 1 {
Some(Swarm::local_peer_id(&swarm).clone())
Some(*Swarm::local_peer_id(swarm))
} else {
None
}
@ -1007,7 +1009,7 @@ fn add_provider() {
keys.len()
);
for k in &keys {
swarms[0].behaviour_mut().stop_providing(&k);
swarms[0].behaviour_mut().stop_providing(k);
}
assert_eq!(swarms[0].behaviour_mut().store.provided().count(), 0);
// All records have been republished, thus the test is complete.
@ -1106,11 +1108,11 @@ fn disjoint_query_does_not_finish_before_all_paths_did() {
alice
.1
.behaviour_mut()
.add_address(&trudy.1.local_peer_id(), trudy.0.clone());
.add_address(trudy.1.local_peer_id(), trudy.0.clone());
alice
.1
.behaviour_mut()
.add_address(&bob.1.local_peer_id(), bob.0.clone());
.add_address(bob.1.local_peer_id(), bob.0.clone());
// Drop the swarm addresses.
let (mut alice, mut bob, mut trudy) = (alice.1, bob.1, trudy.1);
@ -1169,12 +1171,12 @@ fn disjoint_query_does_not_finish_before_all_paths_did() {
assert_eq!(
*records,
vec![PeerRecord {
peer: Some(Swarm::local_peer_id(&trudy).clone()),
peer: Some(*Swarm::local_peer_id(&trudy)),
record: record_trudy.clone(),
}],
);
}
i @ _ => panic!("Unexpected query info: {:?}", i),
i => panic!("Unexpected query info: {:?}", i),
});
// Poll `alice` and `bob` expecting `alice` to return a successful query
@ -1211,11 +1213,11 @@ fn disjoint_query_does_not_finish_before_all_paths_did() {
assert_eq!(2, records.len());
assert!(records.contains(&PeerRecord {
peer: Some(Swarm::local_peer_id(&bob).clone()),
peer: Some(*Swarm::local_peer_id(&bob)),
record: record_bob,
}));
assert!(records.contains(&PeerRecord {
peer: Some(Swarm::local_peer_id(&trudy).clone()),
peer: Some(*Swarm::local_peer_id(&trudy)),
record: record_trudy,
}));
}
@ -1283,7 +1285,7 @@ fn network_behaviour_inject_address_change() {
let old_address: Multiaddr = Protocol::Memory(1).into();
let new_address: Multiaddr = Protocol::Memory(2).into();
let mut kademlia = Kademlia::new(local_peer_id.clone(), MemoryStore::new(local_peer_id));
let mut kademlia = Kademlia::new(local_peer_id, MemoryStore::new(local_peer_id));
let endpoint = ConnectedPoint::Dialer {
address: old_address.clone(),
@ -1301,8 +1303,8 @@ fn network_behaviour_inject_address_change() {
// Mimick the connection handler confirming the protocol for
// the test connection, so that the peer is added to the routing table.
kademlia.inject_event(
remote_peer_id.clone(),
connection_id.clone(),
remote_peer_id,
connection_id,
KademliaHandlerEvent::ProtocolConfirmed { endpoint },
);
@ -1315,7 +1317,7 @@ fn network_behaviour_inject_address_change() {
&remote_peer_id,
&connection_id,
&ConnectedPoint::Dialer {
address: old_address.clone(),
address: old_address,
role_override: Endpoint::Dialer,
},
&ConnectedPoint::Dialer {
@ -1325,7 +1327,7 @@ fn network_behaviour_inject_address_change() {
);
assert_eq!(
vec![new_address.clone()],
vec![new_address],
kademlia.addresses_of_peer(&remote_peer_id),
);
}

View File

@ -338,7 +338,7 @@ mod tests {
let replicate_interval = Duration::from_secs(rng.gen_range(1..60));
let publish_interval = Some(replicate_interval * rng.gen_range(1..10));
let record_ttl = Some(Duration::from_secs(rng.gen_range(1..600)));
PutRecordJob::new(id.clone(), replicate_interval, publish_interval, record_ttl)
PutRecordJob::new(id, replicate_interval, publish_interval, record_ttl)
}
fn rand_add_provider_job() -> AddProviderJob {
@ -360,7 +360,7 @@ mod tests {
fn prop(records: Vec<Record>) {
let mut job = rand_put_record_job();
// Fill a record store.
let mut store = MemoryStore::new(job.local_id.clone());
let mut store = MemoryStore::new(job.local_id);
for r in records {
let _ = store.put(r);
}
@ -389,9 +389,9 @@ mod tests {
let mut job = rand_add_provider_job();
let id = PeerId::random();
// Fill a record store.
let mut store = MemoryStore::new(id.clone());
let mut store = MemoryStore::new(id);
for mut r in records {
r.provider = id.clone();
r.provider = id;
let _ = store.add_provider(r);
}

View File

@ -504,18 +504,15 @@ mod tests {
value: (),
};
let full = bucket.num_entries() == K_VALUE.get();
match bucket.insert(node, status) {
InsertResult::Inserted => {
let vec = match status {
NodeStatus::Connected => &mut connected,
NodeStatus::Disconnected => &mut disconnected,
};
if full {
vec.pop_front();
}
vec.push_back((status, key.clone()));
if let InsertResult::Inserted = bucket.insert(node, status) {
let vec = match status {
NodeStatus::Connected => &mut connected,
NodeStatus::Disconnected => &mut disconnected,
};
if full {
vec.pop_front();
}
_ => {}
vec.push_back((status, key.clone()));
}
}
@ -533,7 +530,7 @@ mod tests {
// All nodes before the first connected node must be disconnected and
// in insertion order. Similarly, all remaining nodes must be connected
// and in insertion order.
nodes == Vec::from(disconnected) && tail == Vec::from(connected)
disconnected == nodes && connected == tail
}
quickcheck(prop as fn(_) -> _);
@ -635,7 +632,7 @@ mod tests {
// The pending node has been discarded.
assert!(bucket.pending().is_none());
assert!(bucket.iter().all(|(n, _)| &n.key != &key));
assert!(bucket.iter().all(|(n, _)| n.key != key));
// The initially disconnected node is now the most-recently connected.
assert_eq!(

View File

@ -494,7 +494,7 @@ mod tests {
.collect()
}
fn sorted<T: AsRef<KeyBytes>>(target: &T, peers: &Vec<Key<PeerId>>) -> bool {
fn sorted<T: AsRef<KeyBytes>>(target: &T, peers: &[Key<PeerId>]) -> bool {
peers
.windows(2)
.all(|w| w[0].distance(&target) < w[1].distance(&target))
@ -549,10 +549,7 @@ mod tests {
.map(|e| (e.key.clone(), &e.state))
.unzip();
let none_contacted = states.iter().all(|s| match s {
PeerState::NotContacted => true,
_ => false,
});
let none_contacted = states.iter().all(|s| matches!(s, PeerState::NotContacted));
assert!(none_contacted, "Unexpected peer state in new iterator.");
assert!(
@ -593,7 +590,7 @@ mod tests {
let mut num_failures = 0;
'finished: loop {
if expected.len() == 0 {
if expected.is_empty() {
break;
}
// Split off the next up to `parallelism` expected peers.
@ -650,10 +647,10 @@ mod tests {
// Determine if all peers have been contacted by the iterator. This _must_ be
// the case if the iterator finished with fewer than the requested number
// of results.
let all_contacted = iter.closest_peers.values().all(|e| match e.state {
PeerState::NotContacted | PeerState::Waiting { .. } => false,
_ => true,
});
let all_contacted = iter
.closest_peers
.values()
.all(|e| !matches!(e.state, PeerState::NotContacted | PeerState::Waiting { .. }));
let target = iter.target.clone();
let num_results = iter.config.num_results;
@ -742,7 +739,7 @@ mod tests {
}
// Artificially advance the clock.
now = now + iter.config.peer_timeout;
now += iter.config.peer_timeout;
// Advancing the iterator again should mark the first peer as unresponsive.
let _ = iter.next(now);

View File

@ -456,22 +456,20 @@ mod tests {
let num_closest_iters = g.gen_range(0..20 + 1);
let peers = random_peers(g.gen_range(0..20 * num_closest_iters + 1), g);
let iters: Vec<_> = (0..num_closest_iters)
.map(|_| {
let num_peers = g.gen_range(0..20 + 1);
let mut peers = g
.choose_multiple(&peers, num_peers)
.cloned()
.map(Key::from)
.collect::<Vec<_>>();
let iters = (0..num_closest_iters).map(|_| {
let num_peers = g.gen_range(0..20 + 1);
let mut peers = g
.choose_multiple(&peers, num_peers)
.cloned()
.map(Key::from)
.collect::<Vec<_>>();
peers.sort_unstable_by(|a, b| target.distance(a).cmp(&target.distance(b)));
peers.sort_unstable_by_key(|a| target.distance(a));
peers.into_iter()
})
.collect();
peers.into_iter()
});
ResultIter::new(target, iters.into_iter())
ResultIter::new(target.clone(), iters)
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
@ -514,20 +512,15 @@ mod tests {
// The peer that should not be included.
let peer = self.peers.pop()?;
let iters = self
.iters
.clone()
.into_iter()
.filter_map(|mut iter| {
iter.retain(|p| p != &peer);
if iter.is_empty() {
return None;
}
Some(iter.into_iter())
})
.collect::<Vec<_>>();
let iters = self.iters.clone().into_iter().filter_map(|mut iter| {
iter.retain(|p| p != &peer);
if iter.is_empty() {
return None;
}
Some(iter.into_iter())
});
Some(ResultIter::new(self.target.clone(), iters.into_iter()))
Some(ResultIter::new(self.target.clone(), iters))
}
}
@ -640,7 +633,7 @@ mod tests {
.map(|_| Key::from(PeerId::random()))
.collect::<Vec<_>>();
pool.sort_unstable_by(|a, b| target.distance(a).cmp(&target.distance(b)));
pool.sort_unstable_by_key(|a| target.distance(a));
let known_closest_peers = pool.split_off(pool.len() - 3);
@ -650,11 +643,8 @@ mod tests {
..ClosestPeersIterConfig::default()
};
let mut peers_iter = ClosestDisjointPeersIter::with_config(
config.clone(),
target,
known_closest_peers.clone(),
);
let mut peers_iter =
ClosestDisjointPeersIter::with_config(config, target, known_closest_peers.clone());
////////////////////////////////////////////////////////////////////////
// First round.
@ -681,19 +671,19 @@ mod tests {
malicious_response_1
.clone()
.into_iter()
.map(|k| k.preimage().clone()),
.map(|k| *k.preimage()),
);
// Response from peer 2.
peers_iter.on_success(
known_closest_peers[1].preimage(),
response_2.clone().into_iter().map(|k| k.preimage().clone()),
response_2.clone().into_iter().map(|k| *k.preimage()),
);
// Response from peer 3.
peers_iter.on_success(
known_closest_peers[2].preimage(),
response_3.clone().into_iter().map(|k| k.preimage().clone()),
response_3.clone().into_iter().map(|k| *k.preimage()),
);
////////////////////////////////////////////////////////////////////////
@ -752,7 +742,7 @@ mod tests {
fn arbitrary(g: &mut Gen) -> Self {
let mut peer_ids = random_peers(g.gen_range(K_VALUE.get()..200), g)
.into_iter()
.map(|peer_id| (peer_id.clone(), Key::from(peer_id)))
.map(|peer_id| (peer_id, Key::from(peer_id)))
.collect::<Vec<_>>();
// Make each peer aware of its direct neighborhood.
@ -790,7 +780,7 @@ mod tests {
.collect::<Vec<_>>();
peer.known_peers.append(&mut random_peer_ids);
peer.known_peers = std::mem::replace(&mut peer.known_peers, vec![])
peer.known_peers = std::mem::take(&mut peer.known_peers)
// Deduplicate peer ids.
.into_iter()
.collect::<HashSet<_>>()
@ -804,7 +794,8 @@ mod tests {
impl Graph {
fn get_closest_peer(&self, target: &KeyBytes) -> PeerId {
self.0
*self
.0
.iter()
.map(|(peer_id, _)| (target.distance(&Key::from(*peer_id)), peer_id))
.fold(None, |acc, (distance_b, peer_id_b)| match acc {
@ -819,7 +810,6 @@ mod tests {
})
.expect("Graph to have at least one peer.")
.1
.clone()
}
}
@ -892,8 +882,7 @@ mod tests {
.take(K_VALUE.get())
.map(|(key, _peers)| Key::from(*key))
.collect::<Vec<_>>();
known_closest_peers
.sort_unstable_by(|a, b| target.distance(a).cmp(&target.distance(b)));
known_closest_peers.sort_unstable_by_key(|a| target.distance(a));
let cfg = ClosestPeersIterConfig {
parallelism: parallelism.0,
@ -917,7 +906,7 @@ mod tests {
target.clone(),
known_closest_peers.clone(),
)),
graph.clone(),
graph,
&target,
);
@ -964,11 +953,8 @@ mod tests {
match iter.next(now) {
PeersIterState::Waiting(Some(peer_id)) => {
let peer_id = peer_id.clone().into_owned();
let closest_peers = graph
.0
.get_mut(&peer_id)
.unwrap()
.get_closest_peers(&target);
let closest_peers =
graph.0.get_mut(&peer_id).unwrap().get_closest_peers(target);
iter.on_success(&peer_id, closest_peers);
}
PeersIterState::WaitingAtCapacity | PeersIterState::Waiting(None) => {
@ -983,7 +969,7 @@ mod tests {
.into_iter()
.map(Key::from)
.collect::<Vec<_>>();
result.sort_unstable_by(|a, b| target.distance(a).cmp(&target.distance(b)));
result.sort_unstable_by_key(|a| target.distance(a));
result.into_iter().map(|k| k.into_preimage()).collect()
}
@ -998,7 +984,7 @@ mod tests {
let peer = PeerId::random();
let mut iter = ClosestDisjointPeersIter::new(
Key::from(PeerId::random()).into(),
iter::once(Key::from(peer.clone())),
iter::once(Key::from(peer)),
);
assert!(matches!(iter.next(now), PeersIterState::Waiting(Some(_))));

View File

@ -267,7 +267,7 @@ mod tests {
assert!(store.add_provider(r.clone()).is_ok());
}
records.sort_by(|r1, r2| distance(r1).cmp(&distance(r2)));
records.sort_by_key(distance);
records.truncate(store.config.max_providers_per_key);
records == store.providers(&key).to_vec()
@ -279,9 +279,9 @@ mod tests {
#[test]
fn provided() {
let id = PeerId::random();
let mut store = MemoryStore::new(id.clone());
let mut store = MemoryStore::new(id);
let key = random_multihash();
let rec = ProviderRecord::new(key, id.clone(), Vec::new());
let rec = ProviderRecord::new(key, id, Vec::new());
assert!(store.add_provider(rec.clone()).is_ok());
assert_eq!(
vec![Cow::Borrowed(&rec)],

View File

@ -301,8 +301,8 @@ mod tests {
let mut addr1: Multiaddr = "/ip4/1.2.3.4/tcp/5000".parse().expect("bad multiaddress");
let mut addr2: Multiaddr = "/ip6/::1/udp/10000".parse().expect("bad multiaddress");
addr1.push(Protocol::P2p(peer_id.clone().into()));
addr2.push(Protocol::P2p(peer_id.clone().into()));
addr1.push(Protocol::P2p(peer_id.into()));
addr2.push(Protocol::P2p(peer_id.into()));
let packets = build_query_response(
0xf8f8,
@ -324,7 +324,7 @@ mod tests {
RData::PTR(record) => record.0.to_string(),
_ => return None,
};
return Some(record_value);
Some(record_value)
})
.next()
.expect("empty record value");

View File

@ -75,33 +75,27 @@ async fn run_discovery_test(config: MdnsConfig) -> Result<(), Box<dyn Error>> {
let mut discovered_b = false;
loop {
futures::select! {
ev = a.select_next_some() => match ev {
SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => {
for (peer, _addr) in peers {
if peer == *b.local_peer_id() {
if discovered_a {
return Ok(());
} else {
discovered_b = true;
}
ev = a.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) = ev {
for (peer, _addr) in peers {
if peer == *b.local_peer_id() {
if discovered_a {
return Ok(());
} else {
discovered_b = true;
}
}
}
_ => {}
},
ev = b.select_next_some() => match ev {
SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => {
for (peer, _addr) in peers {
if peer == *a.local_peer_id() {
if discovered_b {
return Ok(());
} else {
discovered_a = true;
}
ev = b.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) = ev {
for (peer, _addr) in peers {
if peer == *a.local_peer_id() {
if discovered_b {
return Ok(());
} else {
discovered_a = true;
}
}
}
_ => {}
}
}
}
@ -113,27 +107,20 @@ async fn run_peer_expiration_test(config: MdnsConfig) -> Result<(), Box<dyn Erro
loop {
futures::select! {
ev = a.select_next_some() => match ev {
SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) => {
for (peer, _addr) in peers {
if peer == *b.local_peer_id() {
return Ok(());
}
ev = a.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) = ev {
for (peer, _addr) in peers {
if peer == *b.local_peer_id() {
return Ok(());
}
}
_ => {}
},
ev = b.select_next_some() => match ev {
SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) => {
for (peer, _addr) in peers {
if peer == *a.local_peer_id() {
return Ok(());
}
ev = b.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Expired(peers)) = ev {
for (peer, _addr) in peers {
if peer == *a.local_peer_id() {
return Ok(());
}
}
_ => {}
}
}
}
}

View File

@ -71,33 +71,27 @@ async fn run_discovery_test(config: MdnsConfig) -> Result<(), Box<dyn Error>> {
let mut discovered_b = false;
loop {
futures::select! {
ev = a.select_next_some() => match ev {
SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => {
for (peer, _addr) in peers {
if peer == *b.local_peer_id() {
if discovered_a {
return Ok(());
} else {
discovered_b = true;
}
ev = a.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) = ev {
for (peer, _addr) in peers {
if peer == *b.local_peer_id() {
if discovered_a {
return Ok(());
} else {
discovered_b = true;
}
}
}
_ => {}
},
ev = b.select_next_some() => match ev {
SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) => {
for (peer, _addr) in peers {
if peer == *a.local_peer_id() {
if discovered_b {
return Ok(());
} else {
discovered_a = true;
}
ev = b.select_next_some() => if let SwarmEvent::Behaviour(MdnsEvent::Discovered(peers)) = ev {
for (peer, _addr) in peers {
if peer == *a.local_peer_id() {
if discovered_b {
return Ok(());
} else {
discovered_a = true;
}
}
}
_ => {}
}
}
}

View File

@ -44,14 +44,14 @@ fn ping_pong() {
.with_interval(Duration::from_millis(10));
let (peer1_id, trans) = mk_transport(muxer);
let mut swarm1 = Swarm::new(trans, ping::Behaviour::new(cfg.clone()), peer1_id.clone());
let mut swarm1 = Swarm::new(trans, ping::Behaviour::new(cfg.clone()), peer1_id);
let (peer2_id, trans) = mk_transport(muxer);
let mut swarm2 = Swarm::new(trans, ping::Behaviour::new(cfg), peer2_id.clone());
let mut swarm2 = Swarm::new(trans, ping::Behaviour::new(cfg), peer2_id);
let (mut tx, mut rx) = mpsc::channel::<Multiaddr>(1);
let pid1 = peer1_id.clone();
let pid1 = peer1_id;
let addr = "/ip4/127.0.0.1/tcp/0".parse().unwrap();
swarm1.listen_on(addr).unwrap();
@ -68,7 +68,7 @@ fn ping_pong() {
}) => {
count1 -= 1;
if count1 == 0 {
return (pid1.clone(), peer, rtt);
return (pid1, peer, rtt);
}
}
SwarmEvent::Behaviour(ping::Event { result: Err(e), .. }) => {
@ -79,7 +79,7 @@ fn ping_pong() {
}
};
let pid2 = peer2_id.clone();
let pid2 = peer2_id;
let peer2 = async move {
swarm2.dial(rx.next().await.unwrap()).unwrap();
@ -91,7 +91,7 @@ fn ping_pong() {
}) => {
count2 -= 1;
if count2 == 0 {
return (pid2.clone(), peer, rtt);
return (pid2, peer, rtt);
}
}
SwarmEvent::Behaviour(ping::Event { result: Err(e), .. }) => {
@ -123,10 +123,10 @@ fn max_failures() {
.with_max_failures(max_failures.into());
let (peer1_id, trans) = mk_transport(muxer);
let mut swarm1 = Swarm::new(trans, ping::Behaviour::new(cfg.clone()), peer1_id.clone());
let mut swarm1 = Swarm::new(trans, ping::Behaviour::new(cfg.clone()), peer1_id);
let (peer2_id, trans) = mk_transport(muxer);
let mut swarm2 = Swarm::new(trans, ping::Behaviour::new(cfg), peer2_id.clone());
let mut swarm2 = Swarm::new(trans, ping::Behaviour::new(cfg), peer2_id);
let (mut tx, mut rx) = mpsc::channel::<Multiaddr>(1);
@ -190,14 +190,14 @@ fn unsupported_doesnt_fail() {
let mut swarm1 = Swarm::new(
trans,
DummyBehaviour::with_keep_alive(KeepAlive::Yes),
peer1_id.clone(),
peer1_id,
);
let (peer2_id, trans) = mk_transport(MuxerChoice::Mplex);
let mut swarm2 = Swarm::new(
trans,
ping::Behaviour::new(ping::Config::new().with_keep_alive(true)),
peer2_id.clone(),
peer2_id,
);
let (mut tx, mut rx) = mpsc::channel::<Multiaddr>(1);
@ -207,9 +207,8 @@ fn unsupported_doesnt_fail() {
async_std::task::spawn(async move {
loop {
match swarm1.select_next_some().await {
SwarmEvent::NewListenAddr { address, .. } => tx.send(address).await.unwrap(),
_ => {}
if let SwarmEvent::NewListenAddr { address, .. } = swarm1.select_next_some().await {
tx.send(address).await.unwrap()
}
}
});

View File

@ -197,7 +197,7 @@ mod tests {
let n = std::cmp::min(self.read.len(), buf.len());
buf[0..n].copy_from_slice(&self.read[0..n]);
self.read = self.read.split_off(n);
return Poll::Ready(Ok(n));
Poll::Ready(Ok(n))
}
}

View File

@ -279,10 +279,7 @@ mod generic {
}
let mut now = Instant::now();
let mut l = RateLimiter::new(RateLimiterConfig {
limit: limit.try_into().unwrap(),
interval,
});
let mut l = RateLimiter::new(RateLimiterConfig { limit, interval });
for (id, d) in events {
now = if let Some(now) = now.checked_add(d) {

View File

@ -50,7 +50,6 @@ fn reservation() {
spawn_swarm_on_pool(&pool, relay);
let client_addr = relay_addr
.clone()
.with(Protocol::P2p(relay_peer_id.into()))
.with(Protocol::P2pCircuit);
let mut client = build_client();
@ -96,7 +95,6 @@ fn new_reservation_to_same_relay_replaces_old() {
let mut client = build_client();
let client_peer_id = *client.local_peer_id();
let client_addr = relay_addr
.clone()
.with(Protocol::P2p(relay_peer_id.into()))
.with(Protocol::P2pCircuit);
let client_addr_with_peer_id = client_addr
@ -117,7 +115,7 @@ fn new_reservation_to_same_relay_replaces_old() {
));
// Trigger new reservation.
let new_listener = client.listen_on(client_addr.clone()).unwrap();
let new_listener = client.listen_on(client_addr).unwrap();
// Wait for
// - listener of old reservation to close
@ -190,7 +188,6 @@ fn connect() {
let mut dst = build_client();
let dst_peer_id = *dst.local_peer_id();
let dst_addr = relay_addr
.clone()
.with(Protocol::P2p(relay_peer_id.into()))
.with(Protocol::P2pCircuit)
.with(Protocol::P2p(dst_peer_id.into()));
@ -246,12 +243,11 @@ fn handle_dial_failure() {
let mut client = build_client();
let client_peer_id = *client.local_peer_id();
let client_addr = relay_addr
.clone()
.with(Protocol::P2p(relay_peer_id.into()))
.with(Protocol::P2pCircuit)
.with(Protocol::P2p(client_peer_id.into()));
client.listen_on(client_addr.clone()).unwrap();
client.listen_on(client_addr).unwrap();
assert!(!pool.run_until(wait_for_dial(&mut client, relay_peer_id)));
}
@ -291,7 +287,7 @@ fn reuse_connection() {
fn build_relay() -> Swarm<Relay> {
let local_key = identity::Keypair::generate_ed25519();
let local_public_key = local_key.public();
let local_peer_id = local_public_key.clone().to_peer_id();
let local_peer_id = local_public_key.to_peer_id();
let transport = upgrade_transport(MemoryTransport::default().boxed(), local_public_key);
@ -314,7 +310,7 @@ fn build_relay() -> Swarm<Relay> {
fn build_client() -> Swarm<Client> {
let local_key = identity::Keypair::generate_ed25519();
let local_public_key = local_key.public();
let local_peer_id = local_public_key.clone().to_peer_id();
let local_peer_id = local_public_key.to_peer_id();
let (relay_transport, behaviour) = client::Client::new_transport_and_behaviour(local_peer_id);
let transport = upgrade_transport(

View File

@ -55,7 +55,7 @@ async fn main() {
log::info!("Local peer id: {}", swarm.local_peer_id());
let _ = swarm.dial(rendezvous_point_address.clone()).unwrap();
swarm.dial(rendezvous_point_address.clone()).unwrap();
let mut discover_tick = tokio::time::interval(Duration::from_secs(30));
let mut cookie = None;

View File

@ -114,6 +114,7 @@ async fn main() {
}
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
enum MyEvent {
Rendezvous(rendezvous::client::Event),
Identify(identify::Event),

View File

@ -62,11 +62,10 @@ where
fn get_rand_memory_address() -> Multiaddr {
let address_port = rand::random::<u64>();
let addr = format!("/memory/{}", address_port)
.parse::<Multiaddr>()
.unwrap();
addr
format!("/memory/{}", address_port)
.parse::<Multiaddr>()
.unwrap()
}
pub async fn await_event_or_timeout<Event, Error>(

View File

@ -37,7 +37,7 @@ async fn given_successful_registration_then_successful_discovery() {
let ([mut alice, mut bob], mut robert) =
new_server_with_connected_clients(rendezvous::server::Config::default()).await;
let _ = alice
alice
.behaviour_mut()
.register(namespace.clone(), *robert.local_peer_id(), None);
@ -86,7 +86,7 @@ async fn given_successful_registration_then_refresh_ttl() {
let roberts_peer_id = *robert.local_peer_id();
let refresh_ttl = 10_000;
let _ = alice
alice
.behaviour_mut()
.register(namespace.clone(), roberts_peer_id, None);
@ -374,6 +374,7 @@ struct CombinedBehaviour {
}
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
enum CombinedEvent {
Client(rendezvous::client::Event),
Server(rendezvous::server::Event),

View File

@ -44,8 +44,7 @@ fn one_field() {
ping: ping::Behaviour,
}
#[allow(dead_code)]
#[allow(unreachable_code)]
#[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)]
fn foo() {
let _out_event: <Foo as NetworkBehaviour>::OutEvent = unimplemented!();
match _out_event {
@ -63,8 +62,7 @@ fn two_fields() {
identify: identify::Behaviour,
}
#[allow(dead_code)]
#[allow(unreachable_code)]
#[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)]
fn foo() {
let _out_event: <Foo as NetworkBehaviour>::OutEvent = unimplemented!();
match _out_event {
@ -86,8 +84,7 @@ fn three_fields() {
kad: libp2p::kad::Kademlia<libp2p::kad::record::store::MemoryStore>,
}
#[allow(dead_code)]
#[allow(unreachable_code)]
#[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)]
fn foo() {
let _out_event: <Foo as NetworkBehaviour>::OutEvent = unimplemented!();
match _out_event {
@ -112,6 +109,7 @@ fn custom_event() {
identify: identify::Behaviour,
}
#[allow(clippy::large_enum_variant)]
enum MyEvent {
Ping(ping::Event),
Identify(identify::Event),
@ -145,6 +143,7 @@ fn custom_event_mismatching_field_names() {
b: libp2p::identify::Behaviour,
}
#[allow(clippy::large_enum_variant)]
enum MyEvent {
Ping(ping::Event),
Identify(libp2p::identify::Event),
@ -209,8 +208,7 @@ fn nested_derives_with_import() {
foo: Foo,
}
#[allow(dead_code)]
#[allow(unreachable_code)]
#[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)]
fn foo() {
let _out_event: <Bar as NetworkBehaviour>::OutEvent = unimplemented!();
match _out_event {
@ -221,6 +219,7 @@ fn nested_derives_with_import() {
#[test]
fn custom_event_emit_event_through_poll() {
#[allow(clippy::large_enum_variant)]
enum BehaviourOutEvent {
Ping(ping::Event),
Identify(identify::Event),
@ -238,7 +237,7 @@ fn custom_event_emit_event_through_poll() {
}
}
#[allow(dead_code)]
#[allow(dead_code, clippy::large_enum_variant)]
#[derive(NetworkBehaviour)]
#[behaviour(out_event = "BehaviourOutEvent")]
struct Foo {
@ -246,7 +245,7 @@ fn custom_event_emit_event_through_poll() {
identify: identify::Behaviour,
}
#[allow(dead_code, unreachable_code)]
#[allow(dead_code, unreachable_code, clippy::diverging_sub_expression)]
fn bar() {
require_net_behaviour::<Foo>();

View File

@ -598,7 +598,7 @@ mod tests {
let upgrade_timeout = Duration::from_secs(1);
let mut connection = Connection::new(
StreamMuxerBox::new(PendingStreamMuxer),
MockConnectionHandler::new(upgrade_timeout.clone()),
MockConnectionHandler::new(upgrade_timeout),
None,
2,
);

View File

@ -252,7 +252,7 @@ mod tests {
);
block_on(poll_fn(|cx| loop {
if let Poll::Pending = handler.poll(cx) {
if handler.poll(cx).is_pending() {
return Poll::Ready(());
}
}));

View File

@ -1724,7 +1724,7 @@ mod tests {
let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::<u64>()).into();
let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::<u64>()).into();
swarm1.listen_on(addr1.clone()).unwrap();
swarm1.listen_on(addr1).unwrap();
swarm2.listen_on(addr2.clone()).unwrap();
let swarm1_id = *swarm1.local_peer_id();
@ -1982,7 +1982,7 @@ mod tests {
let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::<u64>()).into();
let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::<u64>()).into();
swarm1.listen_on(addr1.clone()).unwrap();
swarm1.listen_on(addr1).unwrap();
swarm2.listen_on(addr2.clone()).unwrap();
let swarm1_id = *swarm1.local_peer_id();
@ -2090,7 +2090,7 @@ mod tests {
swarm
.dial(
DialOpts::peer_id(PeerId::random())
.addresses(listen_addresses.into())
.addresses(listen_addresses)
.build(),
)
.unwrap();
@ -2145,16 +2145,11 @@ mod tests {
.addresses(vec![addr.clone()])
.build(),
)
.ok()
.expect("Unexpected connection limit.");
}
match network
.dial(
DialOpts::peer_id(target)
.addresses(vec![addr.clone()])
.build(),
)
.dial(DialOpts::peer_id(target).addresses(vec![addr]).build())
.expect_err("Unexpected dialing success.")
{
DialError::ConnectionLimit(limit) => {
@ -2212,7 +2207,7 @@ mod tests {
// Spawn and block on the dialer.
async_std::task::block_on({
let mut n = 0;
let _ = network2.dial(listen_addr.clone()).unwrap();
network2.dial(listen_addr.clone()).unwrap();
let mut expected_closed = false;
let mut network_1_established = false;

View File

@ -388,7 +388,7 @@ mod tests {
// Add the first address.
addresses.add(first.addr.clone(), first.score);
assert!(addresses.iter().any(|a| &a.addr == &first.addr));
assert!(addresses.iter().any(|a| a.addr == first.addr));
// Add another address so often that the initial report of
// the first address may be purged and, since it was the
@ -397,7 +397,7 @@ mod tests {
addresses.add(other.addr.clone(), other.score);
}
let exists = addresses.iter().any(|a| &a.addr == &first.addr);
let exists = addresses.iter().any(|a| a.addr == first.addr);
match (first.score, other.score) {
// Only finite scores push out other finite scores.
@ -428,14 +428,14 @@ mod tests {
// Add the first address.
addresses.add(first.addr.clone(), first.score);
assert!(addresses.iter().any(|a| &a.addr == &first.addr));
assert!(addresses.iter().any(|a| a.addr == first.addr));
// Add another address so the first will address be purged,
// because its score is finite(0)
addresses.add(other.addr.clone(), other.score);
assert!(addresses.iter().any(|a| &a.addr == &other.addr));
assert!(!addresses.iter().any(|a| &a.addr == &first.addr));
assert!(addresses.iter().any(|a| a.addr == other.addr));
assert!(!addresses.iter().any(|a| a.addr == first.addr));
}
#[test]
@ -451,12 +451,14 @@ mod tests {
// Count the finitely scored addresses.
let num_finite = addresses
.iter()
.filter(|r| match r {
AddressRecord {
score: AddressScore::Finite(_),
..
} => true,
_ => false,
.filter(|r| {
matches!(
r,
AddressRecord {
score: AddressScore::Finite(_),
..
}
)
})
.count();
@ -476,13 +478,13 @@ mod tests {
// Add all address reports to the collection.
for r in records.iter() {
addresses.add(r.addr.clone(), r.score.clone());
addresses.add(r.addr.clone(), r.score);
}
// Check that each address in the registry has the expected score.
for r in &addresses.registry {
let expected_score = records.iter().fold(None::<AddressScore>, |sum, rec| {
if &rec.addr == &r.addr {
if rec.addr == r.addr {
sum.map_or(Some(rec.score), |s| Some(s + rec.score))
} else {
sum

View File

@ -232,7 +232,7 @@ where
}
fn addresses_of_peer(&mut self, p: &PeerId) -> Vec<Multiaddr> {
self.addresses_of_peer.push(p.clone());
self.addresses_of_peer.push(*p);
self.inner.addresses_of_peer(p)
}
@ -271,12 +271,8 @@ where
} else {
assert_eq!(other_established, 0)
}
self.inject_connection_established.push((
p.clone(),
c.clone(),
e.clone(),
other_established,
));
self.inject_connection_established
.push((*p, *c, e.clone(), other_established));
self.inner
.inject_connection_established(p, c, e, errors, other_established);
}
@ -349,7 +345,7 @@ where
"`inject_event` is never called for closed connections."
);
self.inject_event.push((p.clone(), c.clone(), e.clone()));
self.inject_event.push((p, c, e.clone()));
self.inner.inject_event(p, c, e);
}
@ -389,7 +385,7 @@ where
}
fn inject_listener_error(&mut self, l: ListenerId, e: &(dyn std::error::Error + 'static)) {
self.inject_listener_error.push(l.clone());
self.inject_listener_error.push(l);
self.inner.inject_listener_error(l, e);
}

View File

@ -607,13 +607,10 @@ mod tests {
fn dial(&mut self, addr: Multiaddr) -> Result<Self::Dial, TransportError<Self::Error>> {
// Check that all DNS components have been resolved, i.e. replaced.
assert!(!addr.iter().any(|p| match p {
Protocol::Dns(_)
| Protocol::Dns4(_)
| Protocol::Dns6(_)
| Protocol::Dnsaddr(_) => true,
_ => false,
}));
assert!(!addr.iter().any(|p| matches!(
p,
Protocol::Dns(_) | Protocol::Dns4(_) | Protocol::Dns6(_) | Protocol::Dnsaddr(_)
)));
Ok(Box::pin(future::ready(Ok(()))))
}

View File

@ -300,7 +300,7 @@ mod tests {
let sodium_sec =
ed25519_sk_to_curve25519(&ed25519_compact::SecretKey::new(ed25519.encode()));
let sodium_pub = ed25519_pk_to_curve25519(&ed25519_compact::PublicKey::new(
ed25519.public().encode().clone(),
ed25519.public().encode(),
));
let our_pub = x25519.public.0;

View File

@ -32,7 +32,7 @@ fn variable_msg_length() {
let _ = env_logger::try_init();
fn prop(msg: Vec<u8>) {
let mut msg_to_send = msg.clone();
let msg_to_send = msg.clone();
let msg_to_receive = msg;
let server_id = identity::Keypair::generate_ed25519();
@ -91,7 +91,7 @@ fn variable_msg_length() {
debug!("Client: writing message.");
client_channel
.write_all(&mut msg_to_send)
.write_all(&msg_to_send)
.await
.expect("no error");
debug!("Client: flushing channel.");

View File

@ -264,7 +264,6 @@ mod tests {
Transport,
};
use std::{self, borrow::Cow, path::Path};
use tempfile;
#[test]
fn multiaddr_to_path_conversion() {
@ -320,7 +319,7 @@ mod tests {
let mut uds = UdsConfig::new();
let addr = rx.await.unwrap();
let mut socket = uds.dial(addr).unwrap().await.unwrap();
socket.write(&[1, 2, 3]).await.unwrap();
let _ = socket.write(&[1, 2, 3]).await.unwrap();
});
}