*: Activate clippy::style lint group (#2620)
```diff
@@ -1,3 +1,3 @@
 [alias]
 # Temporary solution to have clippy config in a single place until https://github.com/rust-lang/rust-clippy/blob/master/doc/roadmap-2021.md#lintstoml-configuration is shipped.
-custom-clippy = "clippy -- -A clippy::type_complexity -A clippy::pedantic -A clippy::style -D warnings"
+custom-clippy = "clippy -- -A clippy::type_complexity -A clippy::pedantic -D warnings"
```
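Dropping `-A clippy::style` from the `custom-clippy` alias means the `style` lint group is no longer allowed wholesale; with `-D warnings` still in place, every `clippy::style` finding now fails `cargo custom-clippy`, which is what the code changes below address.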
```diff
@@ -150,9 +150,12 @@ impl Future for DialFuture {
             .take()
             .expect("Future should not be polled again once complete");
         let dial_port = self.dial_port;
-        match self.sender.start_send((channel_to_send, dial_port)) {
-            Err(_) => return Poll::Ready(Err(MemoryTransportError::Unreachable)),
-            Ok(()) => {}
+        if self
+            .sender
+            .start_send((channel_to_send, dial_port))
+            .is_err()
+        {
+            return Poll::Ready(Err(MemoryTransportError::Unreachable));
         }

         Poll::Ready(Ok(self
```
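The memory-transport change swaps a two-arm `match` on the send result for a single boolean check. A minimal, self-contained sketch of the same rewrite, using a hypothetical fallible `send` in place of the real channel sender:

```rust
// Illustrative only: `send` and `forward` are hypothetical stand-ins for the
// real sender; the point is the `match` -> `.is_err()` + early-return rewrite.
fn send(v: u32) -> Result<(), ()> {
    if v == 0 {
        Err(())
    } else {
        Ok(())
    }
}

fn forward(v: u32) -> Result<(), &'static str> {
    // Before: match send(v) { Err(_) => return Err("unreachable"), Ok(()) => {} }
    if send(v).is_err() {
        return Err("unreachable");
    }
    Ok(())
}

fn main() {
    assert!(forward(0).is_err());
    assert!(forward(1).is_ok());
}
```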
```diff
@@ -42,11 +42,8 @@ impl Metrics {

 impl super::Recorder<libp2p_gossipsub::GossipsubEvent> for super::Metrics {
     fn record(&self, event: &libp2p_gossipsub::GossipsubEvent) {
-        match event {
-            libp2p_gossipsub::GossipsubEvent::Message { .. } => {
-                self.gossipsub.messages.inc();
-            }
-            _ => {}
+        if let libp2p_gossipsub::GossipsubEvent::Message { .. } = event {
+            self.gossipsub.messages.inc();
         }
     }
 }
```
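This is the classic `clippy::single_match` fix: a `match` with one meaningful arm and a `_ => {}` catch-all reads better as `if let`. A runnable sketch with a hypothetical `Event` enum:

```rust
// Hypothetical event type, standing in for GossipsubEvent.
enum Event {
    Message,
    Other,
}

fn record(event: &Event, messages: &mut u64) {
    // Before: match event { Event::Message => *messages += 1, _ => {} }
    if let Event::Message = event {
        *messages += 1;
    }
}

fn main() {
    let mut messages = 0;
    record(&Event::Message, &mut messages);
    record(&Event::Other, &mut messages);
    assert_eq!(messages, 1);
}
```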
```diff
@@ -4,6 +4,10 @@

 - Update to `libp2p-swarm` `v0.36.0`.

+- changed `TimeCache::contains_key` and `DuplicateCache::contains` to immutable methods. See [PR 2620].
+
+[PR 2620]: https://github.com/libp2p/rust-libp2p/pull/2620
+
 # 0.37.0

 - Update to `libp2p-swarm` `v0.35.0`.
```
```diff
@@ -1215,6 +1215,21 @@ where

         let mut iwant_ids = HashSet::new();

+        let want_message = |id: &MessageId| {
+            if self.duplicate_cache.contains(id) {
+                return false;
+            }
+
+            if self.pending_iwant_msgs.contains(id) {
+                return false;
+            }
+
+            self.peer_score
+                .as_ref()
+                .map(|(_, _, _, promises)| !promises.contains(id))
+                .unwrap_or(true)
+        };
+
         for (topic, ids) in ihave_msgs {
             // only process the message if we are subscribed
             if !self.mesh.contains_key(&topic) {
```
```diff
@@ -1225,21 +1240,12 @@ where
                 continue;
             }

-            for id in ids {
-                if !self.duplicate_cache.contains(&id) && !self.pending_iwant_msgs.contains(&id) {
-                    if self
-                        .peer_score
-                        .as_ref()
-                        .map(|(_, _, _, promises)| !promises.contains(&id))
-                        .unwrap_or(true)
-                    {
-                        // have not seen this message and are not currently requesting it
-                        if iwant_ids.insert(id) {
-                            // Register the IWANT metric
-                            if let Some(metrics) = self.metrics.as_mut() {
-                                metrics.register_iwant(&topic);
-                            }
-                        }
-                    }
-                }
+            for id in ids.into_iter().filter(want_message) {
+                // have not seen this message and are not currently requesting it
+                if iwant_ids.insert(id) {
+                    // Register the IWANT metric
+                    if let Some(metrics) = self.metrics.as_mut() {
+                        metrics.register_iwant(&topic);
+                    }
+                }
             }
```
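Taken together with the previous hunk, the per-id checks are hoisted into the `want_message` closure and applied with `Iterator::filter`, flattening the nested `if`s inside the loop. A toy sketch of the shape of that refactor, with illustrative data in place of the real caches:

```rust
// Illustrative only: `duplicates` stands in for the duplicate/pending caches.
fn main() {
    let duplicates = [1, 2];
    let want_message = |id: &i32| !duplicates.contains(id);

    // Before: for id in ids { if !duplicates.contains(&id) { ... } }
    for id in [1, 2, 3, 4].into_iter().filter(want_message) {
        println!("requesting {id}");
    }
}
```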
```diff
@@ -1353,7 +1359,7 @@ where
         } else if let Some(m) = self.metrics.as_mut() {
             // Sending of messages succeeded, register them on the internal metrics.
             for topic in topics.iter() {
-                m.msg_sent(&topic, msg_bytes);
+                m.msg_sent(topic, msg_bytes);
             }
         }
     }
```
```diff
@@ -2136,7 +2142,7 @@ where
         for peer_id in self.connected_peers.keys() {
             scores
                 .entry(peer_id)
-                .or_insert_with(|| peer_score.metric_score(&peer_id, self.metrics.as_mut()));
+                .or_insert_with(|| peer_score.metric_score(peer_id, self.metrics.as_mut()));
         }
     }

```
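Both hunks above remove a needless `&`: the loop variable is already a reference. A sketch of the `clippy::needless_borrow` fix, with a hypothetical `record` function:

```rust
// Hypothetical consumer; `&String` deref-coerces to `&str`.
fn record(topic: &str) {
    println!("sent on {topic}");
}

fn main() {
    let topics = vec![String::from("blocks"), String::from("txs")];
    for topic in topics.iter() {
        // Before: record(&topic); -- `topic` is already a `&String`,
        // so `&topic` borrows a borrow only to dereference it again.
        record(topic);
    }
}
```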
```diff
@@ -148,7 +148,7 @@ impl MessageCache {
             message.validated = true;
             // Clear the known peers list (after a message is validated, it is forwarded and we no
             // longer need to store the originating peers).
-            let originating_peers = std::mem::replace(known_peers, HashSet::new());
+            let originating_peers = std::mem::take(known_peers);
             (&*message, originating_peers)
         })
     }
```
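A runnable sketch of the `mem::replace` to `mem::take` equivalence used above; `take` swaps in the `Default` value and returns the old one, which is exactly `mem::replace(x, HashSet::new())` but clearer:

```rust
use std::collections::HashSet;

fn main() {
    let mut known_peers: HashSet<u32> = [1, 2, 3].into_iter().collect();
    // Equivalent to: std::mem::replace(&mut known_peers, HashSet::new())
    let originating_peers = std::mem::take(&mut known_peers);
    assert!(known_peers.is_empty());
    assert_eq!(originating_peers.len(), 3);
}
```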
```diff
@@ -517,9 +517,7 @@ impl Metrics {
         let metric = self
             .peers_per_protocol
             .get_or_create(&ProtocolLabel { protocol: kind });
-        if metric.get() == 0 {
-            return;
-        } else {
+        if metric.get() != 0 {
             // decrement the counter
             metric.set(metric.get() - 1);
         }
```
```diff
@@ -860,7 +860,7 @@ impl PeerScore {
                 //should always be true
                 let window_time = validated_time
                     .checked_add(topic_params.mesh_message_deliveries_window)
-                    .unwrap_or_else(|| *now);
+                    .unwrap_or(*now);
                 if now > &window_time {
                     falls_in_mesh_deliver_window = false;
                 }
```
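The closure in `unwrap_or_else` buys nothing when the fallback is an already-computed `Copy` value such as `*now`, so plain `unwrap_or` is used (presumably clippy's `unnecessary_lazy_evaluations` lint). A sketch with illustrative numbers:

```rust
fn main() {
    let now: u64 = 1_000;
    let validated_time: u64 = 900;
    // Before: validated_time.checked_add(50).unwrap_or_else(|| now)
    let window_time = validated_time.checked_add(50).unwrap_or(now);
    assert_eq!(window_time, 950);
}
```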
```diff
@@ -172,7 +172,7 @@ where
         self.list.clear();
     }

-    pub fn contains_key(&mut self, key: &Key) -> bool {
+    pub fn contains_key(&self, key: &Key) -> bool {
         self.map.contains_key(key)
     }

```
```diff
@@ -208,7 +208,7 @@ where
         }
     }

-    pub fn contains(&mut self, key: &Key) -> bool {
+    pub fn contains(&self, key: &Key) -> bool {
         self.0.contains_key(key)
     }
 }
```
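These two hunks relax the receivers from `&mut self` to `&self` (the changelog entry above documents this), which is what lets the `want_message` closure earlier borrow `self` immutably. A simplified sketch of the shape of the API, with a hypothetical single-field cache:

```rust
use std::collections::HashMap;

// Illustrative stand-in for DuplicateCache/TimeCache: a pure lookup only
// needs `&self`, since `HashMap::contains_key` itself takes `&self`.
struct DuplicateCache(HashMap<u64, ()>);

impl DuplicateCache {
    pub fn contains(&self, key: &u64) -> bool {
        self.0.contains_key(key)
    }
}

fn main() {
    let cache = DuplicateCache(HashMap::from([(7, ())]));
    assert!(cache.contains(&7));
}
```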
```diff
@@ -196,16 +196,14 @@ impl Identify {
         I: IntoIterator<Item = PeerId>,
     {
         for p in peers {
-            if self.pending_push.insert(p) {
-                if !self.connected.contains_key(&p) {
-                    let handler = self.new_handler();
-                    self.events.push_back(NetworkBehaviourAction::Dial {
-                        opts: DialOpts::peer_id(p)
-                            .condition(dial_opts::PeerCondition::Disconnected)
-                            .build(),
-                        handler,
-                    });
-                }
+            if self.pending_push.insert(p) && !self.connected.contains_key(&p) {
+                let handler = self.new_handler();
+                self.events.push_back(NetworkBehaviourAction::Dial {
+                    opts: DialOpts::peer_id(p)
+                        .condition(dial_opts::PeerCondition::Disconnected)
+                        .build(),
+                    handler,
+                });
             }
         }
     }
```
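This is the `clippy::collapsible_if` fix: two nested `if`s with no `else` branches merge into one condition joined by `&&`, dropping a level of indentation. The kbucket hunk further below applies the same lint. A sketch with hypothetical flags:

```rust
fn main() {
    let (newly_pushed, connected) = (true, false);
    // Before: if newly_pushed { if !connected { ... } }
    if newly_pushed && !connected {
        println!("dialing peer");
    }
}
```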
```diff
@@ -240,7 +238,7 @@ impl NetworkBehaviour for Identify {
         if let Some(entry) = self.discovered_peers.get_mut(peer_id) {
             for addr in failed_addresses
                 .into_iter()
-                .flat_map(|addresses| addresses.into_iter())
+                .flat_map(|addresses| addresses.iter())
             {
                 entry.remove(addr);
             }
```
```diff
@@ -451,7 +449,7 @@ impl NetworkBehaviour for Identify {
         self.discovered_peers
             .get(peer)
             .cloned()
-            .map(|addr| Vec::from_iter(addr))
+            .map(Vec::from_iter)
             .unwrap_or_default()
     }
 }
```
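Two related identify fixes sit side by side here: `.flat_map(|addresses| addresses.iter())` avoids calling `into_iter` on a reference, and `.map(Vec::from_iter)` drops a closure that only forwards its argument (`clippy::redundant_closure`). A sketch of the latter:

```rust
use std::collections::HashSet;

fn main() {
    let addrs: Option<HashSet<u32>> = Some(HashSet::from([1, 2]));
    // Before: addrs.map(|a| Vec::from_iter(a))
    let as_vec: Vec<u32> = addrs.map(Vec::from_iter).unwrap_or_default();
    assert_eq!(as_vec.len(), 2);
}
```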
```diff
@@ -510,7 +508,7 @@ fn multiaddr_matches_peer_id(addr: &Multiaddr, peer_id: &PeerId) -> bool {
     if let Some(Protocol::P2p(multi_addr_peer_id)) = last_component {
         return multi_addr_peer_id == *peer_id.as_ref();
     }
-    return true;
+    true
 }

 #[cfg(test)]
```
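The `return true;` to `true` change is `clippy::needless_return`; the same fix appears in the relay client hunks further below. The last expression of a block is its value, so a trailing `return` is redundant, while explicit `return` stays for early exits. A sketch with a hypothetical, simplified signature:

```rust
// Illustrative stand-in for multiaddr_matches_peer_id.
fn multiaddr_matches(last_id: Option<u64>, peer_id: u64) -> bool {
    if let Some(id) = last_id {
        return id == peer_id; // early exit keeps `return`
    }
    true // tail expression, no `return`
}

fn main() {
    assert!(multiaddr_matches(None, 1));
    assert!(!multiaddr_matches(Some(2), 1));
}
```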
```diff
@@ -6,7 +6,7 @@

 - Derive `Error` for `GetRecordError` (see [PR 2614]).

-[pr 2614]: https://github.com/libp2p/rust-libp2p/pull/2614
+[PR 2614]: https://github.com/libp2p/rust-libp2p/pull/2614

 # 0.36.0

```
```diff
@@ -65,6 +65,7 @@ impl Addresses {
     ///
     /// An address should only be removed if is determined to be invalid or
     /// otherwise unreachable.
+    #[allow(clippy::result_unit_err)]
     pub fn remove(&mut self, addr: &Multiaddr) -> Result<(), ()> {
         if self.addrs.len() == 1 {
             return Err(());
```
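`clippy::result_unit_err` asks for a descriptive error type instead of `Result<_, ()>`. Here (and in the swarm's `disconnect_peer_id` below) the existing public signature is kept and the lint is allowed locally, presumably to avoid a breaking API change. A sketch of the pattern with a hypothetical `remove_addr`:

```rust
// Illustrative only: hypothetical function mirroring the allow-annotated API.
#[allow(clippy::result_unit_err)]
pub fn remove_addr(addrs: &mut Vec<u32>, addr: u32) -> Result<(), ()> {
    if addrs.len() == 1 {
        return Err(()); // keep at least one address
    }
    addrs.retain(|a| *a != addr);
    Ok(())
}

fn main() {
    let mut addrs = vec![1, 2];
    assert!(remove_addr(&mut addrs, 2).is_ok());
    assert!(remove_addr(&mut addrs, 1).is_err());
}
```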
```diff
@@ -1825,7 +1825,7 @@ where
         errors: Option<&Vec<Multiaddr>>,
         other_established: usize,
     ) {
-        for addr in errors.map(|a| a.into_iter()).into_iter().flatten() {
+        for addr in errors.map(|a| a.iter()).into_iter().flatten() {
             self.address_failed(*peer_id, addr);
         }

```
```diff
@@ -108,7 +108,7 @@ impl<T> PeriodicJob<T> {

     /// Returns `true` if the job is currently not running but ready
     /// to be run, `false` otherwise.
-    fn is_ready(&mut self, cx: &mut Context<'_>, now: Instant) -> bool {
+    fn check_ready(&mut self, cx: &mut Context<'_>, now: Instant) -> bool {
         if let PeriodicJobState::Waiting(delay, deadline) = &mut self.state {
             if now >= *deadline || !Future::poll(Pin::new(delay), cx).is_pending() {
                 return true;
```
```diff
@@ -195,7 +195,7 @@ impl PutRecordJob {
     where
         for<'a> T: RecordStore<'a>,
     {
-        if self.inner.is_ready(cx, now) {
+        if self.inner.check_ready(cx, now) {
             let publish = self.next_publish.map_or(false, |t_pub| now >= t_pub);
             let records = store
                 .records()
```
```diff
@@ -239,7 +239,7 @@ impl PutRecordJob {
             let deadline = now + self.inner.interval;
             let delay = Delay::new(self.inner.interval);
             self.inner.state = PeriodicJobState::Waiting(delay, deadline);
-            assert!(!self.inner.is_ready(cx, now));
+            assert!(!self.inner.check_ready(cx, now));
         }

         Poll::Pending
```
```diff
@@ -296,7 +296,7 @@ impl AddProviderJob {
     where
         for<'a> T: RecordStore<'a>,
     {
-        if self.inner.is_ready(cx, now) {
+        if self.inner.check_ready(cx, now) {
             let records = store
                 .provided()
                 .map(|r| r.into_owned())
```
```diff
@@ -317,7 +317,7 @@ impl AddProviderJob {
             let deadline = now + self.inner.interval;
             let delay = Delay::new(self.inner.interval);
             self.inner.state = PeriodicJobState::Waiting(delay, deadline);
-            assert!(!self.inner.is_ready(cx, now));
+            assert!(!self.inner.check_ready(cx, now));
         }

         Poll::Pending
```
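The `is_ready` to `check_ready` renames in this file appear to address clippy's self-convention naming rules: an `is_*` name suggests a cheap, side-effect-free `&self` predicate, while this method polls a timer and can mutate internal state. Only the name changes; the signature and call sites stay otherwise identical. A simplified sketch with illustrative fields, not the real `PeriodicJob`:

```rust
struct PeriodicJob {
    due: bool,
}

impl PeriodicJob {
    // Takes `&mut self` and has a side effect: why `is_ready` was misleading.
    fn check_ready(&mut self, now: u64, deadline: u64) -> bool {
        if now >= deadline {
            self.due = false;
            return true;
        }
        false
    }
}

fn main() {
    let mut job = PeriodicJob { due: true };
    assert!(job.check_ready(10, 5));
    assert!(!job.due);
}
```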
```diff
@@ -365,11 +365,11 @@ where
         // Adjust `first_connected_pos` accordingly.
         match status {
             NodeStatus::Connected => {
-                if self.first_connected_pos.map_or(false, |p| p == pos.0) {
-                    if pos.0 == self.nodes.len() {
-                        // It was the last connected node.
-                        self.first_connected_pos = None
-                    }
+                if self.first_connected_pos.map_or(false, |p| p == pos.0)
+                    && pos.0 == self.nodes.len()
+                {
+                    // It was the last connected node.
+                    self.first_connected_pos = None
                 }
             }
             NodeStatus::Disconnected => {
```
```diff
@@ -86,9 +86,9 @@ impl<T> Key<T> {
     }
 }

-impl<T> Into<KeyBytes> for Key<T> {
-    fn into(self) -> KeyBytes {
-        self.bytes
+impl<T> From<Key<T>> for KeyBytes {
+    fn from(key: Key<T>) -> KeyBytes {
+        key.bytes
     }
 }

```
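This is the `clippy::from_over_into` fix: implementing `From` is preferred because the standard library's blanket `impl<T, U: From<T>> Into<U> for T` then provides `Into` for free, so no caller changes. A self-contained sketch with simplified, non-generic types:

```rust
struct KeyBytes(Vec<u8>);
struct Key {
    bytes: KeyBytes,
}

// Implement `From` once...
impl From<Key> for KeyBytes {
    fn from(key: Key) -> KeyBytes {
        key.bytes
    }
}

fn main() {
    let key = Key { bytes: KeyBytes(vec![1, 2]) };
    // ...and `.into()` still works, via the blanket impl in `core`.
    let bytes: KeyBytes = key.into();
    assert_eq!(bytes.0, vec![1, 2]);
}
```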
```diff
@@ -122,7 +122,7 @@ impl NetworkBehaviour for Mdns {

     fn inject_new_listen_addr(&mut self, _id: ListenerId, _addr: &Multiaddr) {
         log::trace!("waking interface state because listening address changed");
-        for (_, iface) in &mut self.iface_states {
+        for iface in self.iface_states.values_mut() {
             iface.fire_timer();
         }
     }
```
```diff
@@ -178,7 +178,7 @@ impl NetworkBehaviour for Mdns {
         }
         // Emit discovered event.
         let mut discovered = SmallVec::<[(PeerId, Multiaddr); 4]>::new();
-        for (_, iface_state) in &mut self.iface_states {
+        for iface_state in self.iface_states.values_mut() {
             while let Some((peer, addr, expiration)) = iface_state.poll(cx, params) {
                 if let Some((_, _, cur_expires)) = self
                     .discovered_nodes
```
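Both mDNS hunks are the `clippy::for_kv_map` fix: when the key is ignored, iterating `values_mut()` states the intent directly instead of destructuring `(_, v)` pairs. A sketch:

```rust
use std::collections::HashMap;

fn main() {
    let mut iface_states: HashMap<u32, u32> = HashMap::from([(1, 0), (2, 0)]);
    // Before: for (_, state) in &mut iface_states { ... }
    for state in iface_states.values_mut() {
        *state += 1;
    }
    assert_eq!(iface_states.values().sum::<u32>(), 2);
}
```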
```diff
@@ -116,6 +116,12 @@ impl Config {
     }
 }

+impl Default for Config {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 /// The successful result of processing an inbound or outbound ping.
 #[derive(Debug)]
 pub enum Success {
```
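Adding a `Default` impl next to a zero-argument `new()` is the standard `clippy::new_without_default` fix; it also makes the type usable with `Default`-based APIs such as `unwrap_or_default` and `mem::take`. A sketch (the field below is illustrative, not the real ping `Config`):

```rust
pub struct Config {
    interval_secs: u64, // hypothetical field for illustration
}

impl Config {
    pub fn new() -> Self {
        Config { interval_secs: 15 }
    }
}

impl Default for Config {
    fn default() -> Self {
        Self::new()
    }
}

fn main() {
    assert_eq!(Config::default().interval_secs, 15);
}
```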
```diff
@@ -313,7 +313,7 @@ impl NetworkBehaviour for Client {
             ),
         };

-        return Poll::Ready(action);
+        Poll::Ready(action)
     }
 }

```
```diff
@@ -211,7 +211,7 @@ impl Transport for ClientTransport {
         // traversal. One would coordinate such traversal via a previously
         // established relayed connection, but never using a relayed connection
         // itself.
-        return Err(TransportError::MultiaddrNotSupported(addr));
+        Err(TransportError::MultiaddrNotSupported(addr))
     }

     fn address_translation(&self, _server: &Multiaddr, _observed: &Multiaddr) -> Option<Multiaddr> {
```
```diff
@@ -244,7 +244,7 @@ fn parse_relayed_multiaddr(
                 if before_circuit {
                     before_circuit = false;
                 } else {
-                    Err(RelayError::MultipleCircuitRelayProtocolsUnsupported)?;
+                    return Err(RelayError::MultipleCircuitRelayProtocolsUnsupported.into());
                 }
             }
             Protocol::P2p(hash) => {
```
```diff
@@ -252,12 +252,12 @@ fn parse_relayed_multiaddr(

                 if before_circuit {
                     if relayed_multiaddr.relay_peer_id.is_some() {
-                        Err(RelayError::MalformedMultiaddr)?;
+                        return Err(RelayError::MalformedMultiaddr.into());
                     }
                     relayed_multiaddr.relay_peer_id = Some(peer_id)
                 } else {
                     if relayed_multiaddr.dst_peer_id.is_some() {
-                        Err(RelayError::MalformedMultiaddr)?;
+                        return Err(RelayError::MalformedMultiaddr.into());
                     }
                     relayed_multiaddr.dst_peer_id = Some(peer_id)
                 }
```
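These relay hunks, and the hop/stop protocol-upgrade hunks below, all replace `Err(e)?` with an explicit `return Err(e.into())`, the `clippy::try_err` fix: applying `?` to a freshly constructed `Err` hides an early return behind the operator, while the explicit form shows both the `From` conversion and the control flow. A self-contained sketch with hypothetical error types:

```rust
// Illustrative error types only, not the real RelayError/TransportError.
#[derive(Debug)]
struct RelayError;

#[derive(Debug)]
struct TransportError(RelayError);

impl From<RelayError> for TransportError {
    fn from(e: RelayError) -> Self {
        TransportError(e)
    }
}

fn parse(malformed: bool) -> Result<(), TransportError> {
    if malformed {
        // Before: Err(RelayError)?;
        return Err(RelayError.into());
    }
    Ok(())
}

fn main() {
    assert!(parse(true).is_err());
    assert!(parse(false).is_ok());
}
```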
```diff
@@ -86,7 +86,9 @@ impl upgrade::InboundUpgrade<NegotiatedSubstream> for Upgrade {
                     .map_err(|_| FatalUpgradeError::ParsePeerId)?;
                 Req::Connect(CircuitReq { dst, substream })
             }
-            hop_message::Type::Status => Err(FatalUpgradeError::UnexpectedTypeStatus)?,
+            hop_message::Type::Status => {
+                return Err(FatalUpgradeError::UnexpectedTypeStatus.into())
+            }
         };

         Ok(req)
```
```diff
@@ -78,7 +78,7 @@ impl upgrade::InboundUpgrade<NegotiatedSubstream> for Upgrade {
                     limit: limit.map(Into::into),
                 })
             }
-            stop_message::Type::Status => Err(FatalUpgradeError::UnexpectedTypeStatus)?,
+            stop_message::Type::Status => Err(FatalUpgradeError::UnexpectedTypeStatus.into()),
         }
     }
     .boxed()
```
```diff
@@ -100,8 +100,12 @@ impl upgrade::OutboundUpgrade<NegotiatedSubstream> for Upgrade {
             let r#type =
                 hop_message::Type::from_i32(r#type).ok_or(FatalUpgradeError::ParseTypeField)?;
             match r#type {
-                hop_message::Type::Connect => Err(FatalUpgradeError::UnexpectedTypeConnect)?,
-                hop_message::Type::Reserve => Err(FatalUpgradeError::UnexpectedTypeReserve)?,
+                hop_message::Type::Connect => {
+                    return Err(FatalUpgradeError::UnexpectedTypeConnect.into())
+                }
+                hop_message::Type::Reserve => {
+                    return Err(FatalUpgradeError::UnexpectedTypeReserve.into())
+                }
                 hop_message::Type::Status => {}
             }

```
```diff
@@ -114,18 +118,20 @@ impl upgrade::OutboundUpgrade<NegotiatedSubstream> for Upgrade {
             Upgrade::Reserve => {
                 match status {
                     Status::Ok => {}
-                    Status::ReservationRefused => Err(ReservationFailedReason::Refused)?,
-                    Status::ResourceLimitExceeded => {
-                        Err(ReservationFailedReason::ResourceLimitExceeded)?
+                    Status::ReservationRefused => {
+                        return Err(ReservationFailedReason::Refused.into())
                     }
-                    s => Err(FatalUpgradeError::UnexpectedStatus(s))?,
+                    Status::ResourceLimitExceeded => {
+                        return Err(ReservationFailedReason::ResourceLimitExceeded.into())
+                    }
+                    s => return Err(FatalUpgradeError::UnexpectedStatus(s).into()),
                 }

                 let reservation =
                     reservation.ok_or(FatalUpgradeError::MissingReservationField)?;

                 if reservation.addrs.is_empty() {
-                    Err(FatalUpgradeError::NoAddressesInReservation)?;
+                    return Err(FatalUpgradeError::NoAddressesInReservation.into());
                 }

                 let addrs = reservation
```
```diff
@@ -161,12 +167,18 @@ impl upgrade::OutboundUpgrade<NegotiatedSubstream> for Upgrade {
                 match status {
                     Status::Ok => {}
                     Status::ResourceLimitExceeded => {
-                        Err(CircuitFailedReason::ResourceLimitExceeded)?
+                        return Err(CircuitFailedReason::ResourceLimitExceeded.into())
                     }
-                    Status::ConnectionFailed => Err(CircuitFailedReason::ConnectionFailed)?,
-                    Status::NoReservation => Err(CircuitFailedReason::NoReservation)?,
-                    Status::PermissionDenied => Err(CircuitFailedReason::PermissionDenied)?,
-                    s => Err(FatalUpgradeError::UnexpectedStatus(s))?,
+                    Status::ConnectionFailed => {
+                        return Err(CircuitFailedReason::ConnectionFailed.into())
+                    }
+                    Status::NoReservation => {
+                        return Err(CircuitFailedReason::NoReservation.into())
+                    }
+                    Status::PermissionDenied => {
+                        return Err(CircuitFailedReason::PermissionDenied.into())
+                    }
+                    s => return Err(FatalUpgradeError::UnexpectedStatus(s).into()),
                 }

                 let FramedParts {
```
```diff
@@ -97,7 +97,9 @@ impl upgrade::OutboundUpgrade<NegotiatedSubstream> for Upgrade {
             let r#type =
                 stop_message::Type::from_i32(r#type).ok_or(FatalUpgradeError::ParseTypeField)?;
             match r#type {
-                stop_message::Type::Connect => Err(FatalUpgradeError::UnexpectedTypeConnect)?,
+                stop_message::Type::Connect => {
+                    return Err(FatalUpgradeError::UnexpectedTypeConnect.into())
+                }
                 stop_message::Type::Status => {}
             }

```
```diff
@@ -105,9 +107,13 @@ impl upgrade::OutboundUpgrade<NegotiatedSubstream> for Upgrade {
                 .ok_or(FatalUpgradeError::ParseStatusField)?;
             match status {
                 Status::Ok => {}
-                Status::ResourceLimitExceeded => Err(CircuitFailedReason::ResourceLimitExceeded)?,
-                Status::PermissionDenied => Err(CircuitFailedReason::PermissionDenied)?,
-                s => Err(FatalUpgradeError::UnexpectedStatus(s))?,
+                Status::ResourceLimitExceeded => {
+                    return Err(CircuitFailedReason::ResourceLimitExceeded.into())
+                }
+                Status::PermissionDenied => {
+                    return Err(CircuitFailedReason::PermissionDenied.into())
+                }
+                s => return Err(FatalUpgradeError::UnexpectedStatus(s).into()),
             }

             let FramedParts {
```
```diff
@@ -238,7 +238,7 @@ impl NetworkBehaviour for Relay {
         _remaining_established: usize,
     ) {
         if let hash_map::Entry::Occupied(mut peer) = self.reservations.entry(*peer) {
-            peer.get_mut().remove(&connection);
+            peer.get_mut().remove(connection);
             if peer.get().is_empty() {
                 peer.remove();
             }
```
```diff
@@ -4,6 +4,10 @@

 - Update to `libp2p-swarm` `v0.36.0`.

+- Renamed `Error::ConversionError` to `Error::Conversion` in the `codec` module. See [PR 2620].
+
+[PR 2620]: https://github.com/libp2p/rust-libp2p/pull/2620
+
 # 0.5.0

 - Update to `libp2p-swarm` `v0.35.0`.
```
```diff
@@ -263,7 +263,7 @@ pub enum Error {
     #[error("Failed to read/write")]
     Io(#[from] std::io::Error),
     #[error("Failed to convert wire message to internal data model")]
-    ConversionError(#[from] ConversionError),
+    Conversion(#[from] ConversionError),
 }

 impl From<Message> for wire::Message {
```
```diff
@@ -32,6 +32,7 @@ use std::task::{Context, Poll};

 /// The state of an inbound substream (i.e. the remote node opened it).
 #[allow(clippy::large_enum_variant)]
+#[allow(clippy::enum_variant_names)]
 pub enum Stream {
     /// We are in the process of reading a message from the substream.
     PendingRead(Framed<NegotiatedSubstream, RendezvousCodec>),
```
```diff
@@ -55,6 +56,7 @@ impl fmt::Debug for Stream {
 }

 #[allow(clippy::large_enum_variant)]
+#[allow(clippy::enum_variant_names)]
 #[derive(Debug, Clone)]
 pub enum OutEvent {
     RegistrationRequested(NewRegistration),
```
```diff
@@ -121,6 +121,7 @@ pub enum OutEvent {
 }

 #[allow(clippy::large_enum_variant)]
+#[allow(clippy::enum_variant_names)]
 #[derive(Debug)]
 pub enum OpenInfo {
     RegisterRequest(NewRegistration),
```
```diff
@@ -274,6 +274,7 @@ where
 }

 /// Event sent from the [`libp2p_swarm::NetworkBehaviour`] to the [`SubstreamConnectionHandler`].
+#[allow(clippy::enum_variant_names)]
 #[derive(Debug)]
 pub enum InEvent<I, TInboundEvent, TOutboundEvent> {
     /// Open a new substream using the provided `open_info`.
```
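The rendezvous hunks above all add `#[allow(clippy::enum_variant_names)]`: that lint fires when every variant of an enum shares a prefix or suffix. Here the names mirror the rendezvous protocol's message vocabulary, so the lint is silenced rather than the variants renamed. A sketch with illustrative variants (not the crate's real ones):

```rust
// All variants share the "Requested" suffix, which would trigger the lint.
#[allow(clippy::enum_variant_names)]
enum OutEvent {
    RegistrationRequested,
    UnregistrationRequested,
    DiscoverRequested,
}

fn main() {
    let _ = OutEvent::RegistrationRequested;
    let _ = OutEvent::UnregistrationRequested;
    let _ = OutEvent::DiscoverRequested;
}
```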
```diff
@@ -706,7 +706,7 @@ where
                 self.pending_events
                     .push_back(NetworkBehaviourAction::GenerateEvent(
                         RequestResponseEvent::OutboundFailure {
-                            peer: peer,
+                            peer,
                             request_id: request.request_id,
                             error: OutboundFailure::DialFailure,
                         },
```
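This is the `clippy::redundant_field_names` fix: when a local binding has the same name as the struct field, field-init shorthand applies. A sketch with simplified types:

```rust
// Illustrative struct, standing in for RequestResponseEvent::OutboundFailure.
struct OutboundFailure {
    peer: u32,
    error: &'static str,
}

fn main() {
    let peer = 7;
    // Before: OutboundFailure { peer: peer, .. }
    let failure = OutboundFailure {
        peer,
        error: "dial failure",
    };
    assert_eq!(failure.peer, 7);
}
```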
```diff
@@ -6,8 +6,11 @@

 - Make `behaviour::either` module private. See [PR 2610]

+- Rename `IncomingInfo::to_connected_point` to `IncomingInfo::create_connected_point`. See [PR 2620].
+
 [PR 2529]: https://github.com/libp2p/rust-libp2p/pull/2529
-[PR 2610]: https://github.com/libp2p/rust-libp2p/pull/2610/
+[PR 2610]: https://github.com/libp2p/rust-libp2p/pull/2610
+[PR 2620]: https://github.com/libp2p/rust-libp2p/pull/2620

 # 0.35.0

```
```diff
@@ -176,7 +176,7 @@ pub struct IncomingInfo<'a> {

 impl<'a> IncomingInfo<'a> {
     /// Builds the [`ConnectedPoint`] corresponding to the incoming connection.
-    pub fn to_connected_point(&self) -> ConnectedPoint {
+    pub fn create_connected_point(&self) -> ConnectedPoint {
         ConnectedPoint::Listener {
             local_addr: self.local_addr.clone(),
             send_back_addr: self.send_back_addr.clone(),
```
```diff
@@ -550,7 +550,7 @@ where
     where
         TFut: Future<Output = Result<TTrans::Output, TTrans::Error>> + Send + 'static,
     {
-        let endpoint = info.to_connected_point();
+        let endpoint = info.create_connected_point();

         if let Err(limit) = self.counters.check_max_pending_incoming() {
             return Err((limit, handler));
```
```diff
@@ -633,6 +633,7 @@ where
     /// collaborative manner across [`ConnectionHandler`]s
     /// with [`ConnectionHandler::connection_keep_alive`] or directly with
     /// [`ConnectionHandlerEvent::Close`].
+    #[allow(clippy::result_unit_err)]
     pub fn disconnect_peer_id(&mut self, peer_id: PeerId) -> Result<(), ()> {
         let was_connected = self.pool.is_connected(peer_id);
         self.pool.disconnect(peer_id);
```