*: Fix clippy warnings (#2227)

Author: Roman
Date: 2021-09-14 16:00:05 +03:00
Committed by: GitHub
Parent: 5f68c74177
Commit: b79fd02f0b

25 changed files with 93 additions and 124 deletions

View File

@ -348,7 +348,7 @@ where
Poll::Ready(Ok(())) => {
let event = Event::Closed {
id: this.id,
error: error.map(|limit| ConnectionError::ConnectionLimit(limit)),
error: error.map(ConnectionError::ConnectionLimit),
handler,
};
this.state = State::Terminating(event);
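
The hunk above fixes clippy's redundant_closure lint: a closure that only forwards its argument to a constructor can be replaced by the constructor's path. A minimal sketch, with an illustrative ConnError type that is not part of this codebase:

// Hypothetical minimal reproduction of clippy::redundant_closure;
// ConnError is illustrative, not a type from this crate.
#[derive(Debug)]
enum ConnError {
    Limit(u32),
}

fn main() {
    let limit: Option<u32> = Some(5);
    // Before: limit.map(|l| ConnError::Limit(l)) — the closure only
    // forwards its argument to the tuple-variant constructor.
    // After: pass the constructor path directly.
    let err: Option<ConnError> = limit.map(ConnError::Limit);
    println!("{:?}", err);
}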

View File

@ -434,7 +434,7 @@ impl<THandler: IntoConnectionHandler, TTransErr> Pool<THandler, TTransErr> {
/// Returns an iterator for information on all pending outgoing connections.
pub fn iter_pending_outgoing(&self) -> impl Iterator<Item = OutgoingInfo<'_>> {
self.iter_pending_info()
.filter_map(|(_, ref endpoint, ref peer_id)| match endpoint {
.filter_map(|(_, ref endpoint, peer_id)| match endpoint {
ConnectedPoint::Listener { .. } => None,
ConnectedPoint::Dialer { address } => Some(OutgoingInfo {
address,

View File

@ -39,7 +39,7 @@ impl Keypair {
///
/// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5
pub fn from_pkcs8(der: &mut [u8]) -> Result<Keypair, DecodingError> {
let kp = RsaKeyPair::from_pkcs8(&der)
let kp = RsaKeyPair::from_pkcs8(der)
.map_err(|e| DecodingError::new("RSA PKCS#8 PrivateKeyInfo").source(e))?;
der.zeroize();
Ok(Keypair(Arc::new(kp)))
@ -54,7 +54,7 @@ impl Keypair {
pub fn sign(&self, data: &[u8]) -> Result<Vec<u8>, SigningError> {
let mut signature = vec![0; self.0.public_modulus_len()];
let rng = SystemRandom::new();
match self.0.sign(&RSA_PKCS1_SHA256, &rng, &data, &mut signature) {
match self.0.sign(&RSA_PKCS1_SHA256, &rng, data, &mut signature) {
Ok(()) => Ok(signature),
Err(e) => Err(SigningError::new("RSA").source(e)),
}
@ -94,11 +94,9 @@ impl PublicKey {
subjectPublicKey: Asn1SubjectPublicKey(self.clone()),
};
let mut buf = Vec::new();
let buf = spki
.encode(&mut buf)
spki.encode(&mut buf)
.map(|_| buf)
.expect("RSA X.509 public key encoding failed.");
buf
.expect("RSA X.509 public key encoding failed.")
}
/// Decode an RSA public key from a DER-encoded X.509 SubjectPublicKeyInfo
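
The changes in this file address clippy::needless_borrow (the arguments are already references, so the extra `&` is dropped) and a let_and_return-style cleanup (the encoded buffer is returned directly instead of being bound first). A minimal needless_borrow sketch, with illustrative parse/load functions that are not from this crate:

// Hypothetical sketch of clippy::needless_borrow.
fn parse(bytes: &[u8]) -> usize {
    bytes.len()
}

fn load(der: &mut [u8]) -> usize {
    // Before: parse(&der) adds an extra borrow that the compiler
    // immediately re-borrows away; the `&mut [u8]` already coerces
    // to the `&[u8]` the callee expects.
    parse(der)
}

fn main() {
    let mut der = vec![0x30u8, 0x82, 0x01];
    println!("{}", load(&mut der));
}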

View File

@ -66,7 +66,7 @@ impl PeerId {
/// Parses a `PeerId` from bytes.
pub fn from_bytes(data: &[u8]) -> Result<PeerId, Error> {
PeerId::from_multihash(Multihash::from_bytes(&data)?)
PeerId::from_multihash(Multihash::from_bytes(data)?)
.map_err(|mh| Error::UnsupportedCode(mh.code()))
}

View File

@ -53,7 +53,7 @@ impl SignedEnvelope {
domain_separation: String,
expected_payload_type: &[u8],
) -> Result<&[u8], ReadPayloadError> {
if &self.payload_type != expected_payload_type {
if self.payload_type != expected_payload_type {
return Err(ReadPayloadError::UnexpectedPayloadType {
expected: expected_payload_type.to_vec(),
got: self.payload_type.clone(),

View File

@ -125,7 +125,7 @@ impl<R> LengthDelimited<R> {
let mut this = self.project();
while !this.write_buffer.is_empty() {
match this.inner.as_mut().poll_write(cx, &this.write_buffer) {
match this.inner.as_mut().poll_write(cx, this.write_buffer) {
Poll::Pending => return Poll::Pending,
Poll::Ready(Ok(0)) => {
return Poll::Ready(Err(io::Error::new(

View File

@ -157,8 +157,8 @@ enum PublishConfig {
impl PublishConfig {
pub fn get_own_id(&self) -> Option<&PeerId> {
match self {
Self::Signing { author, .. } => Some(&author),
Self::Author(author) => Some(&author),
Self::Signing { author, .. } => Some(author),
Self::Author(author) => Some(author),
_ => None,
}
}
@ -381,7 +381,7 @@ where
// We do not allow configurations where a published message would also be rejected if it
// were received locally.
validate_config(&privacy, &config.validation_mode())?;
validate_config(&privacy, config.validation_mode())?;
// Set up message publishing parameters.
@ -990,7 +990,7 @@ where
get_random_peers(
&self.topic_peers,
&self.connected_peers,
&topic_hash,
topic_hash,
self.config.prune_peers(),
|p| p != peer && !self.score_below_threshold(p, |_| 0.0).0,
)
@ -1337,7 +1337,7 @@ where
*peer_id,
vec![&topic_hash],
&self.mesh,
self.peer_topics.get(&peer_id),
self.peer_topics.get(peer_id),
&mut self.events,
&self.connected_peers,
);
@ -1396,7 +1396,7 @@ where
always_update_backoff: bool,
) {
let mut update_backoff = always_update_backoff;
if let Some(peers) = self.mesh.get_mut(&topic_hash) {
if let Some(peers) = self.mesh.get_mut(topic_hash) {
// remove the peer if it exists in the mesh
if peers.remove(peer_id) {
debug!(
@ -1416,7 +1416,7 @@ where
*peer_id,
topic_hash,
&self.mesh,
self.peer_topics.get(&peer_id),
self.peer_topics.get(peer_id),
&mut self.events,
&self.connected_peers,
);
@ -1429,7 +1429,7 @@ where
self.config.prune_backoff()
};
// is there a backoff specified by the peer? if so obey it.
self.backoffs.update_backoff(&topic_hash, peer_id, time);
self.backoffs.update_backoff(topic_hash, peer_id, time);
}
}
@ -1570,7 +1570,7 @@ where
own_id != propagation_source
&& raw_message.source.as_ref().map_or(false, |s| s == own_id)
} else {
self.published_message_ids.contains(&msg_id)
self.published_message_ids.contains(msg_id)
};
if self_published {
@ -2176,7 +2176,7 @@ where
"HEARTBEAT: Fanout topic removed due to timeout. Topic: {:?}",
topic_hash
);
fanout.remove(&topic_hash);
fanout.remove(topic_hash);
return false;
}
true
@ -2195,7 +2195,7 @@ where
// is the peer still subscribed to the topic?
match self.peer_topics.get(peer) {
Some(topics) => {
if !topics.contains(&topic_hash) || score(peer) < publish_threshold {
if !topics.contains(topic_hash) || score(peer) < publish_threshold {
debug!(
"HEARTBEAT: Peer removed from fanout for topic: {:?}",
topic_hash
@ -2291,7 +2291,7 @@ where
fn emit_gossip(&mut self) {
let mut rng = thread_rng();
for (topic_hash, peers) in self.mesh.iter().chain(self.fanout.iter()) {
let mut message_ids = self.mcache.get_gossip_message_ids(&topic_hash);
let mut message_ids = self.mcache.get_gossip_message_ids(topic_hash);
if message_ids.is_empty() {
return;
}
@ -2319,7 +2319,7 @@ where
let to_msg_peers = get_random_peers_dynamic(
&self.topic_peers,
&self.connected_peers,
&topic_hash,
topic_hash,
n_map,
|peer| {
!peers.contains(peer)
@ -2438,7 +2438,7 @@ where
*peer,
topic_hash,
&self.mesh,
self.peer_topics.get(&peer),
self.peer_topics.get(peer),
&mut self.events,
&self.connected_peers,
);
@ -2483,7 +2483,7 @@ where
// add mesh peers
let topic = &message.topic;
// mesh
if let Some(mesh_peers) = self.mesh.get(&topic) {
if let Some(mesh_peers) = self.mesh.get(topic) {
for peer_id in mesh_peers {
if Some(peer_id) != propagation_source && Some(peer_id) != message.source.as_ref() {
recipient_peers.insert(*peer_id);
@ -2877,13 +2877,13 @@ where
// remove peer from all mappings
for topic in topics {
// check the mesh for the topic
if let Some(mesh_peers) = self.mesh.get_mut(&topic) {
if let Some(mesh_peers) = self.mesh.get_mut(topic) {
// check if the peer is in the mesh and remove it
mesh_peers.remove(peer_id);
}
// remove from topic_peers
if let Some(peer_list) = self.topic_peers.get_mut(&topic) {
if let Some(peer_list) = self.topic_peers.get_mut(topic) {
if !peer_list.remove(peer_id) {
// debugging purposes
warn!(
@ -2900,7 +2900,7 @@ where
// remove from fanout
self.fanout
.get_mut(&topic)
.get_mut(topic)
.map(|peers| peers.remove(peer_id));
}
}
@ -2943,7 +2943,7 @@ where
// Add the IP to the peer scoring system
if let Some((peer_score, ..)) = &mut self.peer_score {
if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) {
peer_score.add_ip(&peer_id, ip);
peer_score.add_ip(peer_id, ip);
} else {
trace!(
"Couldn't extract ip from endpoint of peer {} with endpoint {:?}",
@ -3041,7 +3041,7 @@ where
)
}
if let Some(ip) = get_ip_addr(endpoint_new.get_remote_address()) {
peer_score.add_ip(&peer, ip);
peer_score.add_ip(peer, ip);
} else {
trace!(
"Couldn't extract ip from endpoint of peer {} with endpoint {:?}",

View File

@ -194,7 +194,7 @@ impl GossipsubCodec {
}
};
let source = match PeerId::from_bytes(&from) {
let source = match PeerId::from_bytes(from) {
Ok(v) => v,
Err(_) => {
debug!("Signature verification failed: Invalid Peer Id");
@ -214,8 +214,8 @@ impl GossipsubCodec {
// obtained from the inlined source peer_id.
let public_key = match message
.key
.as_ref()
.map(|key| PublicKey::from_protobuf_encoding(&key))
.as_deref()
.map(PublicKey::from_protobuf_encoding)
{
Some(Ok(key)) => key,
_ => match PublicKey::from_protobuf_encoding(&source.to_bytes()[2..]) {
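
Switching from as_ref plus a borrowing closure to as_deref lets the bare function be passed to map, since Option<Vec<u8>>::as_deref yields Option<&[u8]>. A hedged sketch with an illustrative decode function:

// Hypothetical sketch of Option::as_deref replacing as_ref plus a
// borrowing closure; decode is an illustrative stand-in.
fn decode(bytes: &[u8]) -> Result<usize, ()> {
    if bytes.is_empty() { Err(()) } else { Ok(bytes.len()) }
}

fn main() {
    let key: Option<Vec<u8>> = Some(vec![1, 2, 3]);
    // Before: key.as_ref().map(|key| decode(&key)) needs a closure just
    // to turn &Vec<u8> into &[u8].
    // After: as_deref() yields Option<&[u8]>, so the bare function fits.
    let decoded = key.as_deref().map(decode);
    println!("{:?}", decoded);
}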

View File

@ -803,16 +803,12 @@ fn advance_substream<TUserData>(
true,
),
Err(error) => {
let event = if let Some(user_data) = user_data {
Some(ProtocolsHandlerEvent::Custom(
KademliaHandlerEvent::QueryError {
let event = user_data.map(|user_data| {
ProtocolsHandlerEvent::Custom(KademliaHandlerEvent::QueryError {
error: KademliaHandlerQueryErr::Io(error),
user_data,
},
))
} else {
None
};
})
});
(None, event, false)
}
@ -823,16 +819,12 @@ fn advance_substream<TUserData>(
false,
),
Poll::Ready(Err(error)) => {
let event = if let Some(user_data) = user_data {
Some(ProtocolsHandlerEvent::Custom(
KademliaHandlerEvent::QueryError {
let event = user_data.map(|user_data| {
ProtocolsHandlerEvent::Custom(KademliaHandlerEvent::QueryError {
error: KademliaHandlerQueryErr::Io(error),
user_data,
},
))
} else {
None
};
})
});
(None, event, false)
}
@ -857,16 +849,12 @@ fn advance_substream<TUserData>(
false,
),
Poll::Ready(Err(error)) => {
let event = if let Some(user_data) = user_data {
Some(ProtocolsHandlerEvent::Custom(
KademliaHandlerEvent::QueryError {
let event = user_data.map(|user_data| {
ProtocolsHandlerEvent::Custom(KademliaHandlerEvent::QueryError {
error: KademliaHandlerQueryErr::Io(error),
user_data,
},
))
} else {
None
};
})
});
(None, event, false)
}
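
The three identical rewrites in this handler are clippy::manual_map: an if-let that wraps one branch in Some and yields None otherwise is exactly Option::map. A minimal sketch with an illustrative Event type in place of the real handler event:

// Hypothetical sketch of clippy::manual_map; Event is illustrative.
#[derive(Debug)]
enum Event {
    QueryError { user_data: u32 },
}

fn main() {
    let user_data: Option<u32> = Some(7);
    // Before: an if-let that produces Some(..) in one arm and None in
    // the other is exactly what Option::map expresses.
    // let event = if let Some(user_data) = user_data {
    //     Some(Event::QueryError { user_data })
    // } else {
    //     None
    // };
    let event = user_data.map(|user_data| Event::QueryError { user_data });
    println!("{:?}", event);
}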

View File

@ -201,7 +201,7 @@ where
let (node, status, _pos) = self
.0
.bucket
.remove(&self.0.key)
.remove(self.0.key)
.expect("We can only build a PresentEntry if the entry is in the bucket; QED");
EntryView { node, status }
}

View File

@ -58,6 +58,7 @@ enum PeerState {
}
impl FixedPeersIter {
#[allow(clippy::needless_collect)]
pub fn new<I>(peers: I, parallelism: NonZeroUsize) -> Self
where
I: IntoIterator<Item = PeerId>,
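
Here the lint is silenced rather than fixed: #[allow(clippy::needless_collect)] scopes the exception to this constructor, presumably because the intermediate collection is intentional. A hypothetical sketch of the pattern (keep_positive is illustrative, not from this crate):

// Hypothetical sketch: scoping an allow to one function when the
// collect is deliberate.
#[allow(clippy::needless_collect)]
fn keep_positive(peers: impl IntoIterator<Item = i32>) -> Vec<i32> {
    // clippy would suggest chaining the iterators directly, but the
    // intermediate Vec is kept on purpose here (e.g. to detach from a
    // borrowed source before building the result).
    let collected: Vec<i32> = peers.into_iter().collect();
    collected.into_iter().filter(|p| *p > 0).collect()
}

fn main() {
    println!("{:?}", keep_positive(vec![-1, 0, 1, 2]));
}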

View File

@ -226,7 +226,7 @@ impl Mdns {
let mut addrs: Vec<Multiaddr> = Vec::new();
for addr in peer.addresses() {
if let Some(new_addr) = address_translation(&addr, &observed) {
if let Some(new_addr) = address_translation(addr, &observed) {
addrs.push(new_addr.clone())
}
addrs.push(addr.clone())

View File

@ -228,11 +228,11 @@ fn query_response_packet(id: u16, peer_id: &[u8], records: &[Vec<u8>], ttl: u32)
// Peer Id.
append_u16(&mut out, peer_id.len() as u16);
out.extend_from_slice(&peer_id);
out.extend_from_slice(peer_id);
// The TXT records.
for record in records {
out.extend_from_slice(&record);
out.extend_from_slice(record);
}
out

View File

@ -296,7 +296,7 @@ impl NetworkBehaviour for Relay {
.push_back(NetworkBehaviourAction::NotifyHandler {
peer_id: *peer_id,
handler: NotifyHandler::Any,
event: event,
event,
});
}
}
@ -640,7 +640,7 @@ impl NetworkBehaviour for Relay {
dst_peer_id,
send_back,
})) => {
if let Some(_) = self.connected_peers.get(&relay_peer_id) {
if self.connected_peers.get(&relay_peer_id).is_some() {
// In case we are already listening via the relay,
// prefer the primary connection.
let handler = self
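
Two lints appear in this file: clippy::redundant_field_names (`event: event` becomes the shorthand `event`) and clippy::redundant_pattern_matching (`if let Some(_) = ...` becomes `.is_some()`). A combined sketch with an illustrative Notify struct and peer map:

// Hypothetical sketch of clippy::redundant_field_names and
// clippy::redundant_pattern_matching; Notify and the map are illustrative.
use std::collections::HashMap;

struct Notify {
    event: &'static str,
}

fn main() {
    let event = "circuit-open";
    // Before: Notify { event: event } — the field init shorthand says
    // the same thing without repeating the name.
    let msg = Notify { event };

    let connected_peers: HashMap<u32, ()> = HashMap::new();
    // Before: if let Some(_) = connected_peers.get(&1) { ... }
    // After: is_some() states the intent without a throwaway pattern.
    if connected_peers.get(&1).is_some() {
        println!("{}", msg.event);
    } else {
        println!("not connected: {}", msg.event);
    }
}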

View File

@ -50,10 +50,7 @@ pub struct IncomingDstReq {
impl IncomingDstReq {
/// Creates a `IncomingDstReq`.
pub(crate) fn new(stream: Framed<NegotiatedSubstream, UviBytes>, src: Peer) -> Self {
IncomingDstReq {
stream: stream,
src,
}
IncomingDstReq { stream, src }
}
/// Returns the peer id of the source that is being relayed.

View File

@ -151,7 +151,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.iter()
.enumerate()
.filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
@ -169,7 +169,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.iter()
.enumerate()
.filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
Some(match field.ident {
@ -186,7 +186,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.iter()
.enumerate()
.filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
Some(match field.ident {
@ -199,7 +199,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
// Build the list of statements to put in the body of `inject_connection_established()`.
let inject_connection_established_stmts = {
data_struct.fields.iter().enumerate().filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
Some(match field.ident {
@ -212,7 +212,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
// Build the list of statements to put in the body of `inject_address_change()`.
let inject_address_change_stmts = {
data_struct.fields.iter().enumerate().filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
Some(match field.ident {
@ -230,7 +230,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.enumerate()
// The outmost handler belongs to the last behaviour.
.rev()
.filter(|f| !is_ignored(&f.1))
.filter(|f| !is_ignored(f.1))
.enumerate()
.map(move |(enum_n, (field_n, field))| {
let handler = if field_n == 0 {
@ -257,7 +257,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
let inject_addr_reach_failure_stmts =
{
data_struct.fields.iter().enumerate().filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
@ -276,7 +276,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.enumerate()
// The outmost handler belongs to the last behaviour.
.rev()
.filter(|f| !is_ignored(&f.1))
.filter(|f| !is_ignored(f.1))
.enumerate()
.map(move |(enum_n, (field_n, field))| {
let handler = if field_n == 0 {
@ -311,7 +311,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.iter()
.enumerate()
.rev()
.filter(|f| !is_ignored(&f.1))
.filter(|f| !is_ignored(f.1))
.enumerate()
.map(move |(enum_n, (field_n, field))| {
let handler = if field_n == 0 {
@ -341,7 +341,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.iter()
.enumerate()
.filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
@ -359,7 +359,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.iter()
.enumerate()
.filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
@ -377,7 +377,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.iter()
.enumerate()
.filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
@ -395,7 +395,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.iter()
.enumerate()
.filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
@ -413,7 +413,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.iter()
.enumerate()
.filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
@ -431,7 +431,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.iter()
.enumerate()
.filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
Some(match field.ident {
@ -448,7 +448,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
.iter()
.enumerate()
.filter_map(move |(field_n, field)| {
if is_ignored(&field) {
if is_ignored(field) {
return None;
}
Some(match field.ident {
@ -462,7 +462,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
//
// The event type is a construction of nested `#either_ident`s of the events of the children.
// We call `inject_event` on the corresponding child.
let inject_node_event_stmts = data_struct.fields.iter().enumerate().filter(|f| !is_ignored(&f.1)).enumerate().map(|(enum_n, (field_n, field))| {
let inject_node_event_stmts = data_struct.fields.iter().enumerate().filter(|f| !is_ignored(f.1)).enumerate().map(|(enum_n, (field_n, field))| {
let mut elem = if enum_n != 0 {
quote!{ #either_ident::Second(ev) }
} else {
@ -483,7 +483,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
let protocols_handler_ty = {
let mut ph_ty = None;
for field in data_struct.fields.iter() {
if is_ignored(&field) {
if is_ignored(field) {
continue;
}
let ty = &field.ty;
@ -503,7 +503,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
let mut out_handler = None;
for (field_n, field) in data_struct.fields.iter().enumerate() {
if is_ignored(&field) {
if is_ignored(field) {
continue;
}
@ -553,7 +553,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
// List of statements to put in `poll()`.
//
// We poll each child one by one and wrap around the output.
let poll_stmts = data_struct.fields.iter().enumerate().filter(|f| !is_ignored(&f.1)).enumerate().map(|(enum_n, (field_n, field))| {
let poll_stmts = data_struct.fields.iter().enumerate().filter(|f| !is_ignored(f.1)).enumerate().map(|(enum_n, (field_n, field))| {
let field_name = match field.ident {
Some(ref i) => quote!{ self.#i },
None => quote!{ self.#field_n },
@ -576,7 +576,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream {
let mut out_handler = None;
for (f_n, f) in data_struct.fields.iter().enumerate() {
if is_ignored(&f) {
if is_ignored(f) {
continue;
}
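
Every change in this derive macro drops a second borrow inside iterator closures: `.iter()` already yields references, so `is_ignored(&field)` borrowed a reference to a reference. A small sketch with illustrative Field/is_ignored stand-ins:

// Hypothetical sketch: items from .iter() are already references, so
// borrowing them again is what clippy::needless_borrow reports.
struct Field {
    ignored: bool,
}

fn is_ignored(field: &Field) -> bool {
    field.ignored
}

fn main() {
    let fields = vec![Field { ignored: false }, Field { ignored: true }];
    let kept = fields
        .iter()
        .enumerate()
        // `field` is already `&Field` here; the old `is_ignored(&field)`
        // added a second borrow.
        .filter(|(_, field)| !is_ignored(field))
        .count();
    println!("{} field(s) kept", kept);
}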

View File

@ -675,7 +675,7 @@ where
} => NetworkBehaviourAction::NotifyHandler {
peer_id,
handler,
event: event,
event,
},
NetworkBehaviourAction::ReportObservedAddr { address, score } => {
NetworkBehaviourAction::ReportObservedAddr { address, score }

View File

@ -333,7 +333,7 @@ where
pub fn dial_addr(&mut self, addr: Multiaddr) -> Result<(), DialError> {
let handler = self.behaviour.new_handler();
self.dial_addr_with_handler(addr, handler)
.map_err(|e| DialError::from_network_dial_error(e))
.map_err(DialError::from_network_dial_error)
.map_err(|(e, _)| e)
}
@ -392,7 +392,7 @@ where
Err(error) => {
let (error, handler) = DialError::from_network_dial_error(error);
self.behaviour.inject_dial_failure(
&peer_id,
peer_id,
handler.into_protocols_handler(),
error.clone(),
);
@ -1051,7 +1051,7 @@ impl<'a> PollParameters for SwarmPollParameters<'a> {
}
fn local_peer_id(&self) -> &PeerId {
&self.local_peer_id
self.local_peer_id
}
}
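
Besides replacing `|e| DialError::from_network_dial_error(e)` with the bare function path, this file stops re-borrowing a field that already holds a reference. A sketch of the latter, with an illustrative Params struct in place of SwarmPollParameters:

// Hypothetical sketch: the field already holds a reference, so taking
// its address again is redundant.
struct Params<'a> {
    local_peer_id: &'a str,
}

impl<'a> Params<'a> {
    fn local_peer_id(&self) -> &str {
        // Before: &self.local_peer_id produced a `&&str` that was then
        // coerced back down; clippy::needless_borrow flags the extra `&`.
        self.local_peer_id
    }
}

fn main() {
    let id = String::from("local-peer");
    let params = Params { local_peer_id: &id };
    println!("{}", params.local_peer_id());
}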

View File

@ -523,10 +523,7 @@ pub enum KeepAlive {
impl KeepAlive {
/// Returns true for `Yes`, false otherwise.
pub fn is_yes(&self) -> bool {
match *self {
KeepAlive::Yes => true,
_ => false,
}
matches!(*self, KeepAlive::Yes)
}
}
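
A two-arm boolean match collapses to the matches! macro (clippy::match_like_matches_macro). A minimal sketch with a trimmed stand-in for KeepAlive:

// Hypothetical sketch of clippy::match_like_matches_macro; the enum is
// a trimmed stand-in for the real KeepAlive.
enum KeepAlive {
    Yes,
    No,
}

fn is_yes(k: &KeepAlive) -> bool {
    // Before: match *k { KeepAlive::Yes => true, _ => false }
    // After: matches! collapses the two-arm boolean match.
    matches!(*k, KeepAlive::Yes)
}

fn main() {
    println!("{}", is_yes(&KeepAlive::Yes));
    println!("{}", is_yes(&KeepAlive::No));
}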

View File

@ -285,7 +285,7 @@ impl<'a> Iterator for AddressIter<'a> {
}
let item = &self.items[self.offset];
self.offset += 1;
Some(&item)
Some(item)
}
fn size_hint(&self) -> (usize, Option<usize>) {

View File

@ -76,7 +76,7 @@ pub use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
pub use trust_dns_resolver::error::{ResolveError, ResolveErrorKind};
/// The prefix for `dnsaddr` protocol TXT record lookups.
const DNSADDR_PREFIX: &'static str = "_dnsaddr.";
const DNSADDR_PREFIX: &str = "_dnsaddr.";
/// The maximum number of dialing attempts to resolved addresses.
const MAX_DIAL_ATTEMPTS: usize = 16;
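
A const of reference type is implicitly 'static, so the explicit lifetime is redundant (clippy::redundant_static_lifetimes). A minimal reproduction:

// Hypothetical minimal reproduction of clippy::redundant_static_lifetimes.
// Before: const DNSADDR_PREFIX: &'static str = "_dnsaddr.";
const DNSADDR_PREFIX: &str = "_dnsaddr.";

fn main() {
    println!("{}", DNSADDR_PREFIX);
}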

View File

@ -114,7 +114,7 @@ impl<T: AsyncWrite + Unpin> AsyncWrite for NoiseOutput<T> {
if this.send_offset == MAX_FRAME_LEN {
trace!("write: sending {} bytes", MAX_FRAME_LEN);
ready!(io.as_mut().poll_ready(cx))?;
io.as_mut().start_send(&frame_buf)?;
io.as_mut().start_send(frame_buf)?;
this.send_offset = 0;
}
@ -138,7 +138,7 @@ impl<T: AsyncWrite + Unpin> AsyncWrite for NoiseOutput<T> {
if this.send_offset > 0 {
ready!(io.as_mut().poll_ready(cx))?;
trace!("flush: sending {} bytes", this.send_offset);
io.as_mut().start_send(&frame_buf)?;
io.as_mut().start_send(frame_buf)?;
this.send_offset = 0;
}

View File

@ -246,7 +246,7 @@ impl snow::types::Dh for Keypair<X25519> {
fn set(&mut self, sk: &[u8]) {
let mut secret = [0u8; 32];
secret.copy_from_slice(&sk);
secret.copy_from_slice(sk);
self.secret = SecretKey(X25519(secret)); // Copy
self.public = PublicKey(X25519(x25519(secret, X25519_BASEPOINT_BYTES)));
secret.zeroize();

View File

@ -162,7 +162,7 @@ impl snow::types::Dh for Keypair<X25519Spec> {
fn set(&mut self, sk: &[u8]) {
let mut secret = [0u8; 32];
secret.copy_from_slice(&sk);
secret.copy_from_slice(sk);
self.secret = SecretKey(X25519Spec(secret)); // Copy
self.public = PublicKey(X25519Spec(x25519(secret, X25519_BASEPOINT_BYTES)));
secret.zeroize();

View File

@ -494,27 +494,15 @@ impl IncomingData {
}
pub fn is_binary(&self) -> bool {
if let IncomingData::Binary(_) = self {
true
} else {
false
}
matches!(self, IncomingData::Binary(_))
}
pub fn is_text(&self) -> bool {
if let IncomingData::Text(_) = self {
true
} else {
false
}
matches!(self, IncomingData::Text(_))
}
pub fn is_pong(&self) -> bool {
if let IncomingData::Pong(_) = self {
true
} else {
false
}
matches!(self, IncomingData::Pong(_))
}
pub fn into_bytes(self) -> Vec<u8> {