Mirror of https://github.com/fluencelabs/rust-libp2p, synced 2025-06-30 10:11:33 +00:00
Remove spaces before semicolons (#591)
Committed by: Pierre Krieger
Parent: 585c90a33c
Commit: 45cd7db6e9
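The change is purely mechanical: every log, panic, and comment string in the touched crates drops the stray space that preceded a semicolon; arguments and control flow are untouched. A minimal sketch of the pattern, using the `log` crate's `debug!` macro as in the floodsub hunk below (the function and variable names `log_sub_unsub`, `subs`, and `unsubs` are illustrative placeholders, not part of the commit):

    use log::debug;

    fn log_sub_unsub(subs: &[String], unsubs: &[String]) {
        // Old style: a space separates the text from each semicolon.
        debug!("Queuing sub/unsub message ; sub = {:?} ; unsub = {:?}", subs, unsubs);
        // New style after this commit: the semicolon directly follows the preceding word.
        debug!("Queuing sub/unsub message; sub = {:?}; unsub = {:?}", subs, unsubs);
    }

Only the format strings change; each hunk below removes one line and adds its corrected counterpart.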
@@ -345,7 +345,7 @@ impl FloodSubController {
         let topics = topics.into_iter();
 
         if log_enabled!(Level::Debug) {
-            debug!("Queuing sub/unsub message ; sub = {:?} ; unsub = {:?}",
+            debug!("Queuing sub/unsub message; sub = {:?}; unsub = {:?}",
                    topics.clone().filter(|t| t.1)
                        .map(|t| t.0.hash().clone().into_string())
                        .collect::<Vec<_>>(),
@@ -389,7 +389,7 @@ impl FloodSubController {
     {
         let topics = topics.into_iter().collect::<Vec<_>>();
 
-        debug!("Queueing publish message ; topics = {:?} ; data_len = {:?}",
+        debug!("Queueing publish message; topics = {:?}; data_len = {:?}",
               topics.iter().map(|t| t.hash().clone().into_string()).collect::<Vec<_>>(),
               data.len());
 
@@ -554,7 +554,7 @@ fn handle_packet_received(
     let mut input = match protobuf::parse_from_bytes::<rpc_proto::RPC>(&bytes) {
         Ok(msg) => msg,
         Err(err) => {
-            debug!("Failed to parse protobuf message ; err = {:?}", err);
+            debug!("Failed to parse protobuf message; err = {:?}", err);
             return Err(err.into());
         }
     };
@@ -588,7 +588,7 @@ fn handle_packet_received(
             .lock()
             .insert(hash((from.clone(), publish.take_seqno())))
         {
-            trace!("Skipping message because we had already received it ; payload = {} bytes",
+            trace!("Skipping message because we had already received it; payload = {} bytes",
                    publish.get_data().len());
             continue;
         }
@@ -609,7 +609,7 @@ fn handle_packet_received(
             .map(|h| TopicHash::from_raw(h))
             .collect::<Vec<_>>();
 
-        trace!("Processing message for topics {:?} ; payload = {} bytes",
+        trace!("Processing message for topics {:?}; payload = {} bytes",
               topics,
               publish.get_data().len());
 
@@ -87,7 +87,7 @@ where
 
         let bytes = message
             .write_to_bytes()
-            .expect("writing protobuf failed ; should never happen");
+            .expect("writing protobuf failed; should never happen");
 
         let future = self.inner.send(bytes).map(|_| ());
         Box::new(future) as Box<_>
@@ -142,7 +142,7 @@ where
         let (info, observed_addr) = match parse_proto_msg(msg) {
             Ok(v) => v,
             Err(err) => {
-                debug!("Failed to parse protobuf message ; error = {:?}", err);
+                debug!("Failed to parse protobuf message; error = {:?}", err);
                 return Err(err.into());
             }
         };
@@ -192,7 +192,7 @@ where F: FnMut(&PeerId) -> Fut + Send + 'a,
 fn gen_random_id(my_id: &PeerId, bucket_num: usize) -> Result<PeerId, ()> {
     let my_id_len = my_id.as_bytes().len();
 
-    // TODO: this 2 is magic here ; it is the length of the hash of the multihash
+    // TODO: this 2 is magic here; it is the length of the hash of the multihash
     let bits_diff = bucket_num + 1;
     if bits_diff > 8 * (my_id_len - 2) {
         return Err(());
@@ -232,7 +232,7 @@ where F: FnMut(&PeerId) -> Fut + 'a,
       Fut: IntoFuture<Item = KadConnecController, Error = IoError> + 'a,
       Fut::Future: Send,
 {
-    debug!("Start query for {:?} ; num results = {}", searched_key, num_results);
+    debug!("Start query for {:?}; num results = {}", searched_key, num_results);
 
     // State of the current iterative process.
     struct State<'a, F> {
@@ -322,7 +322,7 @@ where F: FnMut(&PeerId) -> Fut + 'a,
             to_contact
         };
 
-        debug!("New query round ; {} queries in progress ; contacting {} new peers",
+        debug!("New query round; {} queries in progress; contacting {} new peers",
               state.current_attempts_fut.len(),
               to_contact.len());
 
@@ -449,7 +449,7 @@ where F: FnMut(&PeerId) -> Fut + 'a,
 
         } else {
             if !local_nearest_node_updated {
-                trace!("Loop didn't update closer node ; jumping to step 2");
+                trace!("Loop didn't update closer node; jumping to step 2");
                 state.stage = Stage::SecondStep;
             }
         }
@@ -88,7 +88,7 @@ impl KadPeer {
     // Builds a `KadPeer` from its raw protobuf equivalent.
     // TODO: use TryFrom once stable
     fn from_peer(peer: &mut protobuf_structs::dht::Message_Peer) -> Result<KadPeer, IoError> {
-        // TODO: this is in fact a CID ; not sure if this should be handled in `from_bytes` or
+        // TODO: this is in fact a CID; not sure if this should be handled in `from_bytes` or
         // as a special case here
         let node_id = PeerId::from_bytes(peer.get_id().to_vec())
             .map_err(|_| IoError::new(IoErrorKind::InvalidData, "invalid peer id"))?;
@@ -339,7 +339,7 @@ fn proto_to_msg(mut message: protobuf_structs::dht::Message) -> Result<KadMsg, I
 
         } else {
             // TODO: for now we don't parse the peer properly, so it is possible that we get
-            // parsing errors for peers even when they are valid ; we ignore these
+            // parsing errors for peers even when they are valid; we ignore these
             // errors for now, but ultimately we should just error altogether
             let closer_peers = message.mut_closerPeers()
                 .iter_mut()
@@ -362,7 +362,7 @@ fn proto_to_msg(mut message: protobuf_structs::dht::Message) -> Result<KadMsg, I
 
         } else {
             // TODO: for now we don't parse the peer properly, so it is possible that we get
-            // parsing errors for peers even when they are valid ; we ignore these
+            // parsing errors for peers even when they are valid; we ignore these
             // errors for now, but ultimately we should just error altogether
             let closer_peers = message.mut_closerPeers()
                 .iter_mut()
@@ -382,7 +382,7 @@ fn proto_to_msg(mut message: protobuf_structs::dht::Message) -> Result<KadMsg, I
 
         protobuf_structs::dht::Message_MessageType::ADD_PROVIDER => {
             // TODO: for now we don't parse the peer properly, so it is possible that we get
-            // parsing errors for peers even when they are valid ; we ignore these
+            // parsing errors for peers even when they are valid; we ignore these
             // errors for now, but ultimately we should just error altogether
             let provider_peer = message.mut_providerPeers()
                 .iter_mut()
@@ -100,7 +100,7 @@ where
 fn gen_random_id(my_id: &PeerId, bucket_num: usize) -> Result<PeerId, ()> {
     let my_id_len = my_id.as_bytes().len();
 
-    // TODO: this 2 is magic here ; it is the length of the hash of the multihash
+    // TODO: this 2 is magic here; it is the length of the hash of the multihash
     let bits_diff = bucket_num + 1;
    if bits_diff > 8 * (my_id_len - 2) {
         return Err(());
@@ -137,7 +137,7 @@ where
       FBuckets: Fn(PeerId) -> Vec<PeerId> + 'a + Clone,
       FFindNode: Fn(Multiaddr, PeerId) -> Box<Future<Item = Vec<protocol::Peer>, Error = IoError> + Send> + 'a + Clone,
 {
-    debug!("Start query for {:?} ; num results = {}", searched_key, num_results);
+    debug!("Start query for {:?}; num results = {}", searched_key, num_results);
 
     // State of the current iterative process.
     struct State<'a> {
@@ -230,7 +230,7 @@ where
             to_contact
         };
 
-        debug!("New query round ; {} queries in progress ; contacting {} new peers",
+        debug!("New query round; {} queries in progress; contacting {} new peers",
               state.current_attempts_fut.len(),
               to_contact.len());
 
@@ -350,7 +350,7 @@ where
 
         } else {
             if !local_nearest_node_updated {
-                trace!("Loop didn't update closer node ; jumping to step 2");
+                trace!("Loop didn't update closer node; jumping to step 2");
                 state.stage = Stage::SecondStep;
             }
         }
@@ -311,7 +311,7 @@ where TSocket: AsyncRead + AsyncWrite
             PingListenerState::Listening => {
                 match self.inner.poll() {
                     Ok(Async::Ready(Some(payload))) => {
-                        debug!("Received ping (payload={:?}) ; sending back", payload);
+                        debug!("Received ping (payload={:?}); sending back", payload);
                         self.state = PingListenerState::Sending(payload.freeze())
                     },
                     Ok(Async::Ready(None)) => self.state = PingListenerState::Closing,
@@ -324,7 +324,7 @@ where
         .and_then(|context| {
             // Generate our nonce.
             let context = context.with_local()?;
-            trace!("starting handshake ; local nonce = {:?}", context.state.nonce);
+            trace!("starting handshake; local nonce = {:?}", context.state.nonce);
             Ok(context)
         })
         .and_then(|context| {
@@ -346,7 +346,7 @@ where
                     return Err(err.into())
                 },
             };
-            trace!("received proposition from remote ; pubkey = {:?} ; nonce = {:?}",
+            trace!("received proposition from remote; pubkey = {:?}; nonce = {:?}",
                    context.state.public_key, context.state.nonce);
             Ok((socket, context))
         })
@@ -436,7 +436,7 @@ where
             let remote_exch = match protobuf_parse_from_bytes::<Exchange>(&raw) {
                 Ok(e) => e,
                 Err(err) => {
-                    debug!("failed to parse remote's exchange protobuf ; {:?}", err);
+                    debug!("failed to parse remote's exchange protobuf; {:?}", err);
                     return Err(SecioError::HandshakeParsingFailure);
                 }
             };
@@ -306,7 +306,7 @@ impl SecioKeyPair {
             SecioKeyPairInner::Secp256k1 { ref private } => {
                 let secp = secp256k1::Secp256k1::with_caps(secp256k1::ContextFlag::SignOnly);
                 let pubkey = secp256k1::key::PublicKey::from_secret_key(&secp, private)
-                    .expect("wrong secp256k1 private key ; type safety violated");
+                    .expect("wrong secp256k1 private key; type safety violated");
                 PublicKey::Secp256k1(pubkey.serialize_vec(&secp, true).to_vec())
             }
         }