Mirror of https://github.com/fluencelabs/rust-libp2p (synced 2025-05-29 02:31:20 +00:00)
Restore RequestResponse::throttled (#1726)

* Restore `RequestResponse::throttled`. In contrast to the existing "throttled" approach, this PR adds back-pressure to the protocol without requiring pre-existing knowledge of all nodes about their limits. It adds small, CBOR-encoded headers to the actual payload data. Extra credit messages communicate back to the sender how many more requests it is allowed to send.
* Remove some noise.
* Resend credit grant after connection closed. Should an error in some lower layer cause a connection to be closed, our previously sent credit grant may not have reached the remote peer. Therefore, pessimistically, a credit grant is resent whenever a connection is closed. The remote ignores duplicate grants.
* Remove inbound/outbound tracking per peer.
* Send ACK as response to duplicate credit grants.
* Simplify.
* Fix grammar.
* Incorporate review feedback.
  - Remove `ResponseSent`, which was a leftover from previous attempts, and issue a credit grant immediately in `send_response`.
  - Only resend credit grants after a connection is closed if we are still connected to this peer.
* Move codec/header.rs to throttled/codec.rs.
* More review suggestions.
* Generalise `ProtocolWrapper` and use a shorter prefix.
* Update protocols/request-response/src/lib.rs
* Update protocols/request-response/src/throttled.rs
* Minor comment changes.
* Limit max. header size to 8 KiB.
* Always construct the initial limit with 1. Since honest senders always assume a send budget of 1 and wait for credit afterwards, setting the default limit to a higher value can only become effective after informing the peer about it, which means leaving `max_recv` at 1 and setting `next_max` to the desired value.

Co-authored-by: Roman Borschel <romanb@users.noreply.github.com>
Commit: d988b05f85 (parent: ed5aec14f3)
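For orientation before the diff itself, here is a minimal sketch of how the restored API is meant to be used, based only on functions this commit adds (`Throttled::set_receive_limit`, `Throttled::override_receive_limit`, and the `Result`-returning `Throttled::send_request`). The import paths and the two helper functions are illustrative assumptions, not code from this commit.

```rust
use libp2p_core::PeerId;
use libp2p_request_response::{RequestId, RequestResponseCodec, Throttled};
use std::num::NonZeroU16;

// Raise the receive limits: a global default plus a larger override for one
// trusted peer. A new limit only becomes effective on the remote side once it
// has been informed via a credit grant.
fn configure<C>(behaviour: &mut Throttled<C>, trusted: &PeerId)
where
    C: RequestResponseCodec + Send + Clone,
    C::Protocol: Sync,
{
    behaviour.set_receive_limit(NonZeroU16::new(4).expect("4 > 0"));
    behaviour.override_receive_limit(trusted, NonZeroU16::new(16).expect("16 > 0"));
}

// Sending is subject to the remote's credit grants: when the send budget is
// used up, the request is handed back and sending should only resume after an
// `Event::ResumeSending` for this peer has been observed.
fn try_send<C>(behaviour: &mut Throttled<C>, peer: &PeerId, request: C::Request) -> Option<RequestId>
where
    C: RequestResponseCodec + Send + Clone,
    C::Protocol: Sync,
{
    behaviour.send_request(peer, request).ok()
}
```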
@@ -11,13 +11,15 @@ categories = ["network-programming", "asynchronous"]
 
 [dependencies]
 async-trait = "0.1"
+bytes = "0.5.6"
 futures = "0.3.1"
 libp2p-core = { version = "0.22.0", path = "../../core" }
 libp2p-swarm = { version = "0.22.0", path = "../../swarm" }
 log = "0.4.11"
-lru = "0.6"
+minicbor = { version = "0.5", features = ["std", "derive"] }
 rand = "0.7"
 smallvec = "1.4"
+unsigned-varint = { version = "0.5", features = ["std", "futures"] }
 wasm-timer = "0.2"
 
 [dev-dependencies]
@@ -64,3 +64,4 @@ pub trait RequestResponseCodec {
     where
         T: AsyncWrite + Unpin + Send;
 }
+
@@ -47,6 +47,7 @@ use smallvec::SmallVec;
 use std::{
     collections::VecDeque,
     io,
+    sync::{atomic::{AtomicU64, Ordering}, Arc},
     time::Duration,
     task::{Context, Poll}
 };
@@ -79,9 +80,10 @@ where
     /// Inbound upgrades waiting for the incoming request.
     inbound: FuturesUnordered<BoxFuture<'static,
         Result<
-            (TCodec::Request, oneshot::Sender<TCodec::Response>),
+            ((RequestId, TCodec::Request), oneshot::Sender<TCodec::Response>),
             oneshot::Canceled
         >>>,
+    inbound_request_id: Arc<AtomicU64>
 }
 
 impl<TCodec> RequestResponseHandler<TCodec>
@@ -93,6 +95,7 @@ where
         codec: TCodec,
         keep_alive_timeout: Duration,
         substream_timeout: Duration,
+        inbound_request_id: Arc<AtomicU64>
     ) -> Self {
         Self {
             inbound_protocols,
@@ -104,6 +107,7 @@ where
             inbound: FuturesUnordered::new(),
             pending_events: VecDeque::new(),
             pending_error: None,
+            inbound_request_id
         }
     }
 }
@@ -117,6 +121,7 @@ where
 {
     /// An inbound request.
     Request {
+        request_id: RequestId,
         request: TCodec::Request,
         sender: oneshot::Sender<TCodec::Response>
     },
@@ -130,9 +135,9 @@ where
     /// An outbound request failed to negotiate a mutually supported protocol.
     OutboundUnsupportedProtocols(RequestId),
     /// An inbound request timed out.
-    InboundTimeout,
+    InboundTimeout(RequestId),
     /// An inbound request failed to negotiate a mutually supported protocol.
-    InboundUnsupportedProtocols,
+    InboundUnsupportedProtocols(RequestId),
 }
 
 impl<TCodec> ProtocolsHandler for RequestResponseHandler<TCodec>
@@ -145,7 +150,7 @@ where
     type InboundProtocol = ResponseProtocol<TCodec>;
     type OutboundProtocol = RequestProtocol<TCodec>;
     type OutboundOpenInfo = RequestId;
-    type InboundOpenInfo = ();
+    type InboundOpenInfo = RequestId;
 
     fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, Self::InboundOpenInfo> {
         // A channel for notifying the handler when the inbound
@@ -156,6 +161,8 @@ where
         // response is sent.
         let (rs_send, rs_recv) = oneshot::channel();
 
+        let request_id = RequestId(self.inbound_request_id.fetch_add(1, Ordering::Relaxed));
+
         // By keeping all I/O inside the `ResponseProtocol` and thus the
         // inbound substream upgrade via above channels, we ensure that it
         // is all subject to the configured timeout without extra bookkeeping
@@ -167,6 +174,7 @@ where
             codec: self.codec.clone(),
             request_sender: rq_send,
             response_receiver: rs_recv,
+            request_id
         };
 
         // The handler waits for the request to come in. It then emits
@@ -174,16 +182,14 @@ where
         // `ResponseChannel`.
         self.inbound.push(rq_recv.map_ok(move |rq| (rq, rs_send)).boxed());
 
-        SubstreamProtocol::new(proto, ()).with_timeout(self.substream_timeout)
+        SubstreamProtocol::new(proto, request_id).with_timeout(self.substream_timeout)
     }
 
     fn inject_fully_negotiated_inbound(
         &mut self,
         (): (),
-        (): ()
+        _: RequestId
     ) {
-        // Nothing to do, as the response has already been sent
-        // as part of the upgrade.
     }
 
     fn inject_fully_negotiated_outbound(
@@ -231,13 +237,12 @@ where
 
     fn inject_listen_upgrade_error(
         &mut self,
-        (): Self::InboundOpenInfo,
+        info: RequestId,
         error: ProtocolsHandlerUpgrErr<io::Error>
     ) {
         match error {
             ProtocolsHandlerUpgrErr::Timeout => {
-                self.pending_events.push_back(
-                    RequestResponseHandlerEvent::InboundTimeout);
+                self.pending_events.push_back(RequestResponseHandlerEvent::InboundTimeout(info))
             }
             ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => {
                 // The local peer merely doesn't support the protocol(s) requested.
@@ -246,7 +251,7 @@ where
                 // An event is reported to permit user code to react to the fact that
                 // the local peer does not support the requested protocol(s).
                 self.pending_events.push_back(
-                    RequestResponseHandlerEvent::InboundUnsupportedProtocols);
+                    RequestResponseHandlerEvent::InboundUnsupportedProtocols(info));
             }
             _ => {
                 // Anything else is considered a fatal error or misbehaviour of
@@ -282,12 +287,12 @@ where
         // Check for inbound requests.
         while let Poll::Ready(Some(result)) = self.inbound.poll_next_unpin(cx) {
             match result {
-                Ok((rq, rs_sender)) => {
+                Ok(((id, rq), rs_sender)) => {
                     // We received an inbound request.
                     self.keep_alive = KeepAlive::Yes;
                     return Poll::Ready(ProtocolsHandlerEvent::Custom(
                         RequestResponseHandlerEvent::Request {
-                            request: rq, sender: rs_sender
+                            request_id: id, request: rq, sender: rs_sender
                         }))
                 }
                 Err(oneshot::Canceled) => {
@@ -71,8 +71,10 @@
 {
     pub(crate) codec: TCodec,
     pub(crate) protocols: SmallVec<[TCodec::Protocol; 2]>,
-    pub(crate) request_sender: oneshot::Sender<TCodec::Request>,
-    pub(crate) response_receiver: oneshot::Receiver<TCodec::Response>
+    pub(crate) request_sender: oneshot::Sender<(RequestId, TCodec::Request)>,
+    pub(crate) response_receiver: oneshot::Receiver<TCodec::Response>,
+    pub(crate) request_id: RequestId
+
 }
 
 impl<TCodec> UpgradeInfo for ResponseProtocol<TCodec>
@@ -99,7 +101,7 @@ where
         async move {
             let read = self.codec.read_request(&protocol, &mut io);
             let request = read.await?;
-            if let Ok(()) = self.request_sender.send(request) {
+            if let Ok(()) = self.request_sender.send((self.request_id, request)) {
                 if let Ok(response) = self.response_receiver.await {
                     let write = self.codec.write_response(&protocol, &mut io, response);
                     write.await?;
@@ -70,13 +70,11 @@
 
 pub mod codec;
 pub mod handler;
-
-// Disabled until #1706 is fixed:
-// pub mod throttled;
-// pub use throttled::Throttled;
+pub mod throttled;
 
 pub use codec::{RequestResponseCodec, ProtocolName};
 pub use handler::ProtocolSupport;
+pub use throttled::Throttled;
 
 use futures::{
     channel::oneshot,
@@ -102,21 +100,25 @@ use libp2p_swarm::{
 use smallvec::SmallVec;
 use std::{
     collections::{VecDeque, HashMap},
+    fmt,
     time::Duration,
+    sync::{atomic::AtomicU64, Arc},
     task::{Context, Poll}
 };
 
 /// An inbound request or response.
 #[derive(Debug)]
-pub enum RequestResponseMessage<TRequest, TResponse> {
+pub enum RequestResponseMessage<TRequest, TResponse, TChannelResponse = TResponse> {
     /// A request message.
     Request {
+        /// The ID of this request.
+        request_id: RequestId,
         /// The request message.
         request: TRequest,
         /// The sender of the request who is awaiting a response.
         ///
         /// See [`RequestResponse::send_response`].
-        channel: ResponseChannel<TResponse>,
+        channel: ResponseChannel<TChannelResponse>,
     },
     /// A response message.
     Response {
@@ -131,13 +133,13 @@ pub enum RequestResponseMessage<TRequest, TResponse> {
 
 /// The events emitted by a [`RequestResponse`] protocol.
 #[derive(Debug)]
-pub enum RequestResponseEvent<TRequest, TResponse> {
+pub enum RequestResponseEvent<TRequest, TResponse, TChannelResponse = TResponse> {
     /// An incoming message (request or response).
     Message {
         /// The peer who sent the message.
         peer: PeerId,
         /// The incoming message.
-        message: RequestResponseMessage<TRequest, TResponse>
+        message: RequestResponseMessage<TRequest, TResponse, TChannelResponse>
     },
     /// An outbound request failed.
     OutboundFailure {
@@ -152,6 +154,8 @@ pub enum RequestResponseEvent<TRequest, TResponse> {
     InboundFailure {
         /// The peer from whom the request was received.
         peer: PeerId,
+        /// The ID of the failed inbound request.
+        request_id: RequestId,
         /// The error that occurred.
         error: InboundFailure,
     },
@@ -188,6 +192,8 @@ pub enum InboundFailure {
     Timeout,
     /// The local peer supports none of the requested protocols.
     UnsupportedProtocols,
+    /// The connection closed before a response was delivered.
+    ConnectionClosed,
 }
 
 /// A channel for sending a response to an inbound request.
@@ -195,6 +201,7 @@ pub enum InboundFailure {
 /// See [`RequestResponse::send_response`].
 #[derive(Debug)]
 pub struct ResponseChannel<TResponse> {
+    request_id: RequestId,
     peer: PeerId,
     sender: oneshot::Sender<TResponse>,
 }
@@ -210,14 +217,23 @@ impl<TResponse> ResponseChannel<TResponse> {
     pub fn is_open(&self) -> bool {
         !self.sender.is_canceled()
     }
+
+    /// Get the ID of the inbound request waiting for a response.
+    pub(crate) fn request_id(&self) -> RequestId {
+        self.request_id
+    }
 }
 
-/// The (local) ID of an outgoing request.
-///
-/// See [`RequestResponse::send_request`].
+/// The ID of an inbound or outbound request.
 #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
 pub struct RequestId(u64);
 
+impl fmt::Display for RequestId {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
 /// The configuration for a `RequestResponse` protocol.
 #[derive(Debug, Clone)]
 pub struct RequestResponseConfig {
@@ -259,6 +275,8 @@ where
     outbound_protocols: SmallVec<[TCodec::Protocol; 2]>,
     /// The next (local) request ID.
     next_request_id: RequestId,
+    /// The next (inbound) request ID.
+    next_inbound_id: Arc<AtomicU64>,
     /// The protocol configuration.
     config: RequestResponseConfig,
     /// The protocol codec for reading and writing requests and responses.
@@ -276,7 +294,7 @@ where
     /// to be established.
     pending_requests: HashMap<PeerId, SmallVec<[RequestProtocol<TCodec>; 10]>>,
     /// Responses that have not yet been received.
-    pending_responses: HashMap<RequestId, (PeerId, ConnectionId)>,
+    pending_responses: HashMap<RequestId, (PeerId, ConnectionId)>
 }
 
 impl<TCodec> RequestResponse<TCodec>
@@ -303,6 +321,7 @@ where
             inbound_protocols,
             outbound_protocols,
             next_request_id: RequestId(1),
+            next_inbound_id: Arc::new(AtomicU64::new(1)),
             config: cfg,
             codec,
             pending_events: VecDeque::new(),
@@ -313,11 +332,18 @@ where
         }
     }
 
-    // Disabled until #1706 is fixed.
-    // /// Wrap this behaviour in [`Throttled`] to limit the number of concurrent requests per peer.
-    // pub fn throttled(self) -> Throttled<TCodec> {
-    //     Throttled::new(self)
-    // }
+    /// Creates a `RequestResponse` which limits requests per peer.
+    ///
+    /// The behaviour is wrapped in [`Throttled`] and detects the limits
+    /// per peer at runtime which are then enforced.
+    pub fn throttled<I>(c: TCodec, protos: I, cfg: RequestResponseConfig) -> Throttled<TCodec>
+    where
+        I: IntoIterator<Item = (TCodec::Protocol, ProtocolSupport)>,
+        TCodec: Send,
+        TCodec::Protocol: Sync
+    {
+        Throttled::new(c, protos, cfg)
+    }
 
     /// Initiates sending a request.
     ///
@@ -389,13 +415,17 @@ where
 
     /// Checks whether a peer is currently connected.
     pub fn is_connected(&self, peer: &PeerId) -> bool {
-        self.connected.contains_key(peer)
+        if let Some(connections) = self.connected.get(peer) {
+            !connections.is_empty()
+        } else {
+            false
+        }
     }
 
     /// Checks whether an outbound request initiated by
     /// [`RequestResponse::send_request`] is still pending, i.e. waiting
     /// for a response.
-    pub fn is_pending(&self, req_id: &RequestId) -> bool {
+    pub fn is_pending_outbound(&self, req_id: &RequestId) -> bool {
         self.pending_responses.contains_key(req_id)
     }
 
@@ -413,6 +443,9 @@ where
         -> Option<RequestProtocol<TCodec>>
     {
         if let Some(connections) = self.connected.get(peer) {
+            if connections.is_empty() {
+                return Some(request)
+            }
             let ix = (request.request_id.0 as usize) % connections.len();
             let conn = connections[ix].id;
             self.pending_responses.insert(request.request_id, (peer.clone(), conn));
@@ -441,6 +474,7 @@ where
             self.codec.clone(),
             self.config.connection_keep_alive,
             self.config.request_timeout,
+            self.next_inbound_id.clone()
         )
     }
 
@@ -480,27 +514,22 @@ where
             }
         }
 
-        // Any pending responses of requests sent over this connection
-        // must be considered failed.
-        let failed = self.pending_responses.iter()
-            .filter_map(|(r, (p, c))|
-                if conn == c {
-                    Some((p.clone(), *r))
-                } else {
-                    None
-                })
-            .collect::<Vec<_>>();
+        let pending_events = &mut self.pending_events;
 
-        for (peer, request_id) in failed {
-            self.pending_responses.remove(&request_id);
-            self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(
+        // Any pending responses of requests sent over this connection must be considered failed.
+        self.pending_responses.retain(|rid, (peer, cid)| {
+            if conn != cid {
+                return true
+            }
+            pending_events.push_back(NetworkBehaviourAction::GenerateEvent(
                 RequestResponseEvent::OutboundFailure {
-                    peer,
-                    request_id,
+                    peer: peer.clone(),
+                    request_id: *rid,
                     error: OutboundFailure::ConnectionClosed
                 }
             ));
-        }
+            false
+        });
     }
 
     fn inject_disconnected(&mut self, peer: &PeerId) {
@@ -541,12 +570,12 @@ where
                     NetworkBehaviourAction::GenerateEvent(
                         RequestResponseEvent::Message { peer, message }));
             }
-            RequestResponseHandlerEvent::Request { request, sender } => {
-                let channel = ResponseChannel { peer: peer.clone(), sender };
-                let message = RequestResponseMessage::Request { request, channel };
-                self.pending_events.push_back(
-                    NetworkBehaviourAction::GenerateEvent(
-                        RequestResponseEvent::Message { peer, message }));
+            RequestResponseHandlerEvent::Request { request_id, request, sender } => {
+                let channel = ResponseChannel { request_id, peer: peer.clone(), sender };
+                let message = RequestResponseMessage::Request { request_id, request, channel };
+                self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(
+                    RequestResponseEvent::Message { peer, message }
+                ));
             }
             RequestResponseHandlerEvent::OutboundTimeout(request_id) => {
                 if let Some((peer, _conn)) = self.pending_responses.remove(&request_id) {
@@ -559,13 +588,14 @@ where
                         }));
                 }
             }
-            RequestResponseHandlerEvent::InboundTimeout => {
+            RequestResponseHandlerEvent::InboundTimeout(request_id) => {
                 self.pending_events.push_back(
                     NetworkBehaviourAction::GenerateEvent(
                         RequestResponseEvent::InboundFailure {
                             peer,
+                            request_id,
                             error: InboundFailure::Timeout,
                         }));
             }
             RequestResponseHandlerEvent::OutboundUnsupportedProtocols(request_id) => {
                 self.pending_events.push_back(
@@ -576,11 +606,12 @@ where
                             error: OutboundFailure::UnsupportedProtocols,
                         }));
             }
-            RequestResponseHandlerEvent::InboundUnsupportedProtocols => {
+            RequestResponseHandlerEvent::InboundUnsupportedProtocols(request_id) => {
                 self.pending_events.push_back(
                     NetworkBehaviourAction::GenerateEvent(
                         RequestResponseEvent::InboundFailure {
                             peer,
+                            request_id,
                             error: InboundFailure::UnsupportedProtocols,
                         }));
             }
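The throttled.rs changes that follow implement the credit scheme summarised in the commit message: every sender starts with a send budget of 1, the receiver grants further credit once its receive budget is used up, grants are resent after a connection closes, and duplicate grants are answered with an ACK and otherwise ignored. As a reading aid, here is a small self-contained model of the sender-side bookkeeping; the names are hypothetical and this is not the library code.

```rust
/// Illustrative model of a sender's budget for one remote peer.
struct SendBudget {
    /// ID of the last credit grant that was applied; used to ignore
    /// retransmitted duplicate grants.
    last_grant: Option<u64>,
    /// Number of requests that may still be sent before waiting for credit.
    remaining: u16,
}

impl SendBudget {
    fn new() -> Self {
        // An honest sender assumes it may send exactly one request and then
        // waits for a credit grant from the receiver.
        SendBudget { last_grant: None, remaining: 1 }
    }

    /// Returns true if a request may be sent now, consuming one unit of budget.
    fn try_send(&mut self) -> bool {
        if self.remaining == 0 {
            return false; // must wait for the next credit grant (ResumeSending)
        }
        self.remaining -= 1;
        true
    }

    /// Apply a credit grant. Grants carry an increasing ID so that
    /// retransmitted duplicates can be detected and ignored.
    fn on_credit(&mut self, grant_id: u64, amount: u16) {
        if self.last_grant.map_or(true, |last| grant_id > last) {
            self.remaining += amount;
            self.last_grant = Some(grant_id);
        }
    }
}
```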
@ -18,72 +18,288 @@
|
|||||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||||
// DEALINGS IN THE SOFTWARE.
|
// DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
//! Limit the number of requests peers can send to each other.
|
||||||
|
//!
|
||||||
|
//! Each peer is assigned a budget for sending and a budget for receiving
|
||||||
|
//! requests. Initially a peer assumes it has a send budget of 1. When its
|
||||||
|
//! budget has been used up its remote peer will send a credit message which
|
||||||
|
//! informs it how many more requests it can send before it needs to wait for
|
||||||
|
//! the next credit message. Credit messages which error or time out are
|
||||||
|
//! retried until they have reached the peer which is assumed once a
|
||||||
|
//! corresponding ack or a new request has been received from the peer.
|
||||||
|
//!
|
||||||
|
//! The `Throttled` behaviour wraps an existing `RequestResponse` behaviour
|
||||||
|
//! and uses a codec implementation that sends ordinary requests and responses
|
||||||
|
//! as well as a special credit message to which an ack message is expected
|
||||||
|
//! as a response. It does so by putting a small CBOR encoded header in front
|
||||||
|
//! of each message the inner codec produces.
|
||||||
|
|
||||||
|
mod codec;
|
||||||
|
|
||||||
|
use codec::{Codec, Message, ProtocolWrapper, Type};
|
||||||
use crate::handler::{RequestProtocol, RequestResponseHandler, RequestResponseHandlerEvent};
|
use crate::handler::{RequestProtocol, RequestResponseHandler, RequestResponseHandlerEvent};
|
||||||
|
use futures::ready;
|
||||||
use libp2p_core::{ConnectedPoint, connection::ConnectionId, Multiaddr, PeerId};
|
use libp2p_core::{ConnectedPoint, connection::ConnectionId, Multiaddr, PeerId};
|
||||||
use libp2p_swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters};
|
use libp2p_swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters};
|
||||||
use lru::LruCache;
|
|
||||||
use std::{collections::{HashMap, VecDeque}, task::{Context, Poll}};
|
use std::{collections::{HashMap, VecDeque}, task::{Context, Poll}};
|
||||||
use std::{cmp::min, num::NonZeroU16};
|
use std::num::NonZeroU16;
|
||||||
use super::{
|
use super::{
|
||||||
|
ProtocolSupport,
|
||||||
RequestId,
|
RequestId,
|
||||||
RequestResponse,
|
RequestResponse,
|
||||||
RequestResponseCodec,
|
RequestResponseCodec,
|
||||||
|
RequestResponseConfig,
|
||||||
RequestResponseEvent,
|
RequestResponseEvent,
|
||||||
|
RequestResponseMessage,
|
||||||
ResponseChannel
|
ResponseChannel
|
||||||
};
|
};
|
||||||
|
|
||||||
/// A wrapper around [`RequestResponse`] which adds request limits per peer.
|
/// A wrapper around [`RequestResponse`] which adds request limits per peer.
|
||||||
///
|
pub struct Throttled<C>
|
||||||
/// Each peer is assigned a default limit of concurrent requests and
|
where
|
||||||
/// responses, which can be overriden per peer.
|
C: RequestResponseCodec + Send,
|
||||||
///
|
C::Protocol: Sync
|
||||||
/// It is not possible to send more requests than configured and receiving
|
{
|
||||||
/// more is reported as an error event. Since `libp2p-request-response` is
|
|
||||||
/// not its own protocol, there is no way to communicate limits to peers,
|
|
||||||
/// hence nodes must have pre-established knowledge about each other's limits.
|
|
||||||
pub struct Throttled<C: RequestResponseCodec> {
|
|
||||||
/// A random id used for logging.
|
/// A random id used for logging.
|
||||||
id: u32,
|
id: u32,
|
||||||
/// The wrapped behaviour.
|
/// The wrapped behaviour.
|
||||||
behaviour: RequestResponse<C>,
|
behaviour: RequestResponse<Codec<C>>,
|
||||||
/// Limits per peer.
|
/// Information per peer.
|
||||||
limits: HashMap<PeerId, Limit>,
|
peer_info: HashMap<PeerId, PeerInfo>,
|
||||||
/// After disconnects we keep limits around to prevent circumventing
|
/// The default limit applies to all peers unless overriden.
|
||||||
/// them by successive reconnects.
|
default_limit: Limit,
|
||||||
previous: LruCache<PeerId, Limit>,
|
/// Permanent limit overrides per peer.
|
||||||
/// The default limit applied to all peers unless overriden.
|
limit_overrides: HashMap<PeerId, Limit>,
|
||||||
default: Limit,
|
|
||||||
/// Pending events to report in `Throttled::poll`.
|
/// Pending events to report in `Throttled::poll`.
|
||||||
events: VecDeque<Event<C::Request, C::Response>>
|
events: VecDeque<Event<C::Request, C::Response, Message<C::Response>>>,
|
||||||
|
/// Current outbound credit grants in flight.
|
||||||
|
credit_messages: HashMap<PeerId, Credit>,
|
||||||
|
/// The current credit ID.
|
||||||
|
credit_id: u64
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A `Limit` of inbound and outbound requests.
|
/// Credit information that is sent to remote peers.
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Copy, Debug)]
|
||||||
|
struct Credit {
|
||||||
|
/// A credit ID. Used to deduplicate retransmitted credit messages.
|
||||||
|
id: u64,
|
||||||
|
/// The ID of the outbound credit grant message.
|
||||||
|
request: RequestId,
|
||||||
|
/// The number of requests the remote is allowed to send.
|
||||||
|
amount: u16
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Max. number of inbound requests that can be received.
|
||||||
|
#[derive(Clone, Copy, Debug)]
|
||||||
struct Limit {
|
struct Limit {
|
||||||
/// The remaining number of outbound requests that can be send.
|
/// The current receive limit.
|
||||||
send_budget: u16,
|
max_recv: NonZeroU16,
|
||||||
/// The remaining number of inbound requests that can be received.
|
/// The next receive limit which becomes active after
|
||||||
recv_budget: u16,
|
/// the current limit has been reached.
|
||||||
/// The original limit which applies to inbound and outbound requests.
|
next_max: NonZeroU16
|
||||||
maximum: NonZeroU16
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for Limit {
|
impl Limit {
|
||||||
fn default() -> Self {
|
/// Create a new limit.
|
||||||
let maximum = NonZeroU16::new(1).expect("1 > 0");
|
fn new(max: NonZeroU16) -> Self {
|
||||||
|
// The max. limit provided will be effective after the initial request
|
||||||
|
// from a peer which is always allowed has been answered. Values greater
|
||||||
|
// than 1 would prevent sending the credit grant, leading to a stalling
|
||||||
|
// sender so we must not use `max` right away.
|
||||||
Limit {
|
Limit {
|
||||||
send_budget: maximum.get(),
|
max_recv: NonZeroU16::new(1).expect("1 > 0"),
|
||||||
recv_budget: maximum.get(),
|
next_max: max
|
||||||
maximum
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Set a new limit.
|
||||||
|
///
|
||||||
|
/// The new limit becomes effective when all current inbound
|
||||||
|
/// requests have been processed and replied to.
|
||||||
|
fn set(&mut self, next: NonZeroU16) {
|
||||||
|
self.next_max = next
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Activate the new limit.
|
||||||
|
fn switch(&mut self) -> u16 {
|
||||||
|
self.max_recv = self.next_max;
|
||||||
|
self.max_recv.get()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Budget information about a peer.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
struct PeerInfo {
|
||||||
|
/// Limit that applies to this peer.
|
||||||
|
limit: Limit,
|
||||||
|
/// Remaining number of outbound requests that can be sent.
|
||||||
|
send_budget: u16,
|
||||||
|
/// Remaining number of inbound requests that can be received.
|
||||||
|
recv_budget: u16,
|
||||||
|
/// The ID of the credit message that granted the current `send_budget`.
|
||||||
|
send_budget_id: Option<u64>
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PeerInfo {
|
||||||
|
fn new(limit: Limit) -> Self {
|
||||||
|
PeerInfo {
|
||||||
|
limit,
|
||||||
|
send_budget: 1,
|
||||||
|
recv_budget: 1,
|
||||||
|
send_budget_id: None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<C> Throttled<C>
|
||||||
|
where
|
||||||
|
C: RequestResponseCodec + Send + Clone,
|
||||||
|
C::Protocol: Sync
|
||||||
|
{
|
||||||
|
/// Create a new throttled request-response behaviour.
|
||||||
|
pub fn new<I>(c: C, protos: I, cfg: RequestResponseConfig) -> Self
|
||||||
|
where
|
||||||
|
I: IntoIterator<Item = (C::Protocol, ProtocolSupport)>,
|
||||||
|
C: Send,
|
||||||
|
C::Protocol: Sync
|
||||||
|
{
|
||||||
|
let protos = protos.into_iter().map(|(p, ps)| (ProtocolWrapper::new(b"/t/1", p), ps));
|
||||||
|
Throttled::from(RequestResponse::new(Codec::new(c, 8192), protos, cfg))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Wrap an existing `RequestResponse` behaviour and apply send/recv limits.
|
||||||
|
pub fn from(behaviour: RequestResponse<Codec<C>>) -> Self {
|
||||||
|
Throttled {
|
||||||
|
id: rand::random(),
|
||||||
|
behaviour,
|
||||||
|
peer_info: HashMap::new(),
|
||||||
|
default_limit: Limit::new(NonZeroU16::new(1).expect("1 > 0")),
|
||||||
|
limit_overrides: HashMap::new(),
|
||||||
|
events: VecDeque::new(),
|
||||||
|
credit_messages: HashMap::new(),
|
||||||
|
credit_id: 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set the global default receive limit per peer.
|
||||||
|
pub fn set_receive_limit(&mut self, limit: NonZeroU16) {
|
||||||
|
log::trace!("{:08x}: new default limit: {:?}", self.id, limit);
|
||||||
|
self.default_limit = Limit::new(limit)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Override the receive limit of a single peer.
|
||||||
|
pub fn override_receive_limit(&mut self, p: &PeerId, limit: NonZeroU16) {
|
||||||
|
log::debug!("{:08x}: override limit for {}: {:?}", self.id, p, limit);
|
||||||
|
if let Some(info) = self.peer_info.get_mut(p) {
|
||||||
|
info.limit.set(limit)
|
||||||
|
}
|
||||||
|
self.limit_overrides.insert(p.clone(), Limit::new(limit));
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Remove any limit overrides for the given peer.
|
||||||
|
pub fn remove_override(&mut self, p: &PeerId) {
|
||||||
|
log::trace!("{:08x}: removing limit override for {}", self.id, p);
|
||||||
|
self.limit_overrides.remove(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Has the limit of outbound requests been reached for the given peer?
|
||||||
|
pub fn can_send(&mut self, p: &PeerId) -> bool {
|
||||||
|
self.peer_info.get(p).map(|i| i.send_budget > 0).unwrap_or(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send a request to a peer.
|
||||||
|
///
|
||||||
|
/// If the limit of outbound requests has been reached, the request is
|
||||||
|
/// returned. Sending more outbound requests should only be attempted
|
||||||
|
/// once [`Event::ResumeSending`] has been received from [`NetworkBehaviour::poll`].
|
||||||
|
pub fn send_request(&mut self, p: &PeerId, req: C::Request) -> Result<RequestId, C::Request> {
|
||||||
|
let info =
|
||||||
|
if let Some(info) = self.peer_info.get_mut(p) {
|
||||||
|
info
|
||||||
|
} else {
|
||||||
|
let limit = self.limit_overrides.get(p).copied().unwrap_or(self.default_limit);
|
||||||
|
self.peer_info.entry(p.clone()).or_insert(PeerInfo::new(limit))
|
||||||
|
};
|
||||||
|
|
||||||
|
if info.send_budget == 0 {
|
||||||
|
log::trace!("{:08x}: no more budget to send another request to {}", self.id, p);
|
||||||
|
return Err(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
info.send_budget -= 1;
|
||||||
|
|
||||||
|
let rid = self.behaviour.send_request(p, Message::request(req));
|
||||||
|
|
||||||
|
log::trace! { "{:08x}: sending request {} to {} (send budget = {})",
|
||||||
|
self.id,
|
||||||
|
rid,
|
||||||
|
p,
|
||||||
|
info.send_budget + 1
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(rid)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Answer an inbound request with a response.
|
||||||
|
///
|
||||||
|
/// See [`RequestResponse::send_response`] for details.
|
||||||
|
pub fn send_response(&mut self, ch: ResponseChannel<Message<C::Response>>, res: C::Response) {
|
||||||
|
log::trace!("{:08x}: sending response {} to peer {}", self.id, ch.request_id(), &ch.peer);
|
||||||
|
if let Some(info) = self.peer_info.get_mut(&ch.peer) {
|
||||||
|
if info.recv_budget == 0 { // need to send more credit to the remote peer
|
||||||
|
let crd = info.limit.switch();
|
||||||
|
info.recv_budget = info.limit.max_recv.get();
|
||||||
|
let cid = self.next_credit_id();
|
||||||
|
let rid = self.behaviour.send_request(&ch.peer, Message::credit(crd, cid));
|
||||||
|
log::trace!("{:08x}: sending {} as credit {} to {}", self.id, crd, cid, ch.peer);
|
||||||
|
let credit = Credit { id: cid, request: rid, amount: crd };
|
||||||
|
self.credit_messages.insert(ch.peer.clone(), credit);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
self.behaviour.send_response(ch, Message::response(res))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Add a known peer address.
|
||||||
|
///
|
||||||
|
/// See [`RequestResponse::add_address`] for details.
|
||||||
|
pub fn add_address(&mut self, p: &PeerId, a: Multiaddr) {
|
||||||
|
self.behaviour.add_address(p, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Remove a previously added peer address.
|
||||||
|
///
|
||||||
|
/// See [`RequestResponse::remove_address`] for details.
|
||||||
|
pub fn remove_address(&mut self, p: &PeerId, a: &Multiaddr) {
|
||||||
|
self.behaviour.remove_address(p, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Are we connected to the given peer?
|
||||||
|
///
|
||||||
|
/// See [`RequestResponse::is_connected`] for details.
|
||||||
|
pub fn is_connected(&self, p: &PeerId) -> bool {
|
||||||
|
self.behaviour.is_connected(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Are we waiting for a response to the given request?
|
||||||
|
///
|
||||||
|
/// See [`RequestResponse::is_pending_outbound`] for details.
|
||||||
|
pub fn is_pending_outbound(&self, p: &RequestId) -> bool {
|
||||||
|
self.behaviour.is_pending_outbound(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a new credit message ID.
|
||||||
|
fn next_credit_id(&mut self) -> u64 {
|
||||||
|
let n = self.credit_id;
|
||||||
|
self.credit_id += 1;
|
||||||
|
n
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A Wrapper around [`RequestResponseEvent`].
|
/// A Wrapper around [`RequestResponseEvent`].
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub enum Event<Req, Res> {
|
pub enum Event<Req, Res, CRes = Res> {
|
||||||
/// A regular request-response event.
|
/// A regular request-response event.
|
||||||
Event(RequestResponseEvent<Req, Res>),
|
Event(RequestResponseEvent<Req, Res, CRes>),
|
||||||
/// We received more inbound requests than allowed.
|
/// We received more inbound requests than allowed.
|
||||||
TooManyInboundRequests(PeerId),
|
TooManyInboundRequests(PeerId),
|
||||||
/// When previously reaching the send limit of a peer,
|
/// When previously reaching the send limit of a peer,
|
||||||
@ -92,211 +308,227 @@ pub enum Event<Req, Res> {
|
|||||||
ResumeSending(PeerId)
|
ResumeSending(PeerId)
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<C: RequestResponseCodec + Clone> Throttled<C> {
|
|
||||||
/// Wrap an existing `RequestResponse` behaviour and apply send/recv limits.
|
|
||||||
pub fn new(behaviour: RequestResponse<C>) -> Self {
|
|
||||||
Throttled {
|
|
||||||
id: rand::random(),
|
|
||||||
behaviour,
|
|
||||||
limits: HashMap::new(),
|
|
||||||
previous: LruCache::new(2048),
|
|
||||||
default: Limit::default(),
|
|
||||||
events: VecDeque::new()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the current default limit applied to all peers.
|
|
||||||
pub fn default_limit(&self) -> u16 {
|
|
||||||
self.default.maximum.get()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Override the global default limit.
|
|
||||||
///
|
|
||||||
/// See [`Throttled::set_limit`] to override limits for individual peers.
|
|
||||||
pub fn set_default_limit(&mut self, limit: NonZeroU16) {
|
|
||||||
log::trace!("{:08x}: new default limit: {:?}", self.id, limit);
|
|
||||||
self.default = Limit {
|
|
||||||
send_budget: limit.get(),
|
|
||||||
recv_budget: limit.get(),
|
|
||||||
maximum: limit
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Specify the send and receive limit for a single peer.
|
|
||||||
pub fn set_limit(&mut self, id: &PeerId, limit: NonZeroU16) {
|
|
||||||
log::trace!("{:08x}: new limit for {}: {:?}", self.id, id, limit);
|
|
||||||
self.previous.pop(id);
|
|
||||||
self.limits.insert(id.clone(), Limit {
|
|
||||||
send_budget: limit.get(),
|
|
||||||
recv_budget: limit.get(),
|
|
||||||
maximum: limit
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Has the limit of outbound requests been reached for the given peer?
|
|
||||||
pub fn can_send(&mut self, id: &PeerId) -> bool {
|
|
||||||
self.limits.get(id).map(|l| l.send_budget > 0).unwrap_or(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Send a request to a peer.
|
|
||||||
///
|
|
||||||
/// If the limit of outbound requests has been reached, the request is
|
|
||||||
/// returned. Sending more outbound requests should only be attempted
|
|
||||||
/// once [`Event::ResumeSending`] has been received from [`NetworkBehaviour::poll`].
|
|
||||||
pub fn send_request(&mut self, id: &PeerId, req: C::Request) -> Result<RequestId, C::Request> {
|
|
||||||
log::trace!("{:08x}: sending request to {}", self.id, id);
|
|
||||||
|
|
||||||
// Getting the limit is somewhat complicated due to the connection state.
|
|
||||||
// Applications may try to send a request to a peer we have never been connected
|
|
||||||
// to, or a peer we have previously been connected to. In the first case, the
|
|
||||||
// default limit applies and in the latter, the cached limit from the previous
|
|
||||||
// connection (if still available).
|
|
||||||
let mut limit =
|
|
||||||
if let Some(limit) = self.limits.get_mut(id) {
|
|
||||||
limit
|
|
||||||
} else {
|
|
||||||
let limit = self.previous.pop(id).unwrap_or_else(|| self.default.clone());
|
|
||||||
self.limits.entry(id.clone()).or_insert(limit)
|
|
||||||
};
|
|
||||||
|
|
||||||
if limit.send_budget == 0 {
|
|
||||||
log::trace!("{:08x}: no budget to send request to {}", self.id, id);
|
|
||||||
return Err(req)
|
|
||||||
}
|
|
||||||
|
|
||||||
limit.send_budget -= 1;
|
|
||||||
|
|
||||||
Ok(self.behaviour.send_request(id, req))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Answer an inbound request with a response.
|
|
||||||
///
|
|
||||||
/// See [`RequestResponse::send_response`] for details.
|
|
||||||
pub fn send_response(&mut self, ch: ResponseChannel<C::Response>, rs: C::Response) {
|
|
||||||
if let Some(limit) = self.limits.get_mut(&ch.peer) {
|
|
||||||
limit.recv_budget += 1;
|
|
||||||
debug_assert!(limit.recv_budget <= limit.maximum.get())
|
|
||||||
}
|
|
||||||
self.behaviour.send_response(ch, rs)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add a known peer address.
|
|
||||||
///
|
|
||||||
/// See [`RequestResponse::add_address`] for details.
|
|
||||||
pub fn add_address(&mut self, id: &PeerId, ma: Multiaddr) {
|
|
||||||
self.behaviour.add_address(id, ma)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Remove a previously added peer address.
|
|
||||||
///
|
|
||||||
/// See [`RequestResponse::remove_address`] for details.
|
|
||||||
pub fn remove_address(&mut self, id: &PeerId, ma: &Multiaddr) {
|
|
||||||
self.behaviour.remove_address(id, ma)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Are we connected to the given peer?
|
|
||||||
///
|
|
||||||
/// See [`RequestResponse::is_connected`] for details.
|
|
||||||
pub fn is_connected(&self, id: &PeerId) -> bool {
|
|
||||||
self.behaviour.is_connected(id)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Are we waiting for a response to the given request?
|
|
||||||
///
|
|
||||||
/// See [`RequestResponse::is_pending`] for details.
|
|
||||||
pub fn is_pending(&self, id: &RequestId) -> bool {
|
|
||||||
self.behaviour.is_pending(id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<C> NetworkBehaviour for Throttled<C>
|
impl<C> NetworkBehaviour for Throttled<C>
|
||||||
where
|
where
|
||||||
C: RequestResponseCodec + Send + Clone + 'static
|
C: RequestResponseCodec + Send + Clone + 'static,
|
||||||
|
C::Protocol: Sync
|
||||||
{
|
{
|
||||||
type ProtocolsHandler = RequestResponseHandler<C>;
|
type ProtocolsHandler = RequestResponseHandler<Codec<C>>;
|
||||||
type OutEvent = Event<C::Request, C::Response>;
|
type OutEvent = Event<C::Request, C::Response, Message<C::Response>>;
|
||||||
|
|
||||||
fn new_handler(&mut self) -> Self::ProtocolsHandler {
|
fn new_handler(&mut self) -> Self::ProtocolsHandler {
|
||||||
self.behaviour.new_handler()
|
self.behaviour.new_handler()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec<Multiaddr> {
|
fn addresses_of_peer(&mut self, p: &PeerId) -> Vec<Multiaddr> {
|
||||||
self.behaviour.addresses_of_peer(peer)
|
self.behaviour.addresses_of_peer(p)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn inject_connection_established(&mut self, p: &PeerId, id: &ConnectionId, end: &ConnectedPoint) {
|
fn inject_connection_established(&mut self, p: &PeerId, id: &ConnectionId, end: &ConnectedPoint) {
|
||||||
self.behaviour.inject_connection_established(p, id, end)
|
self.behaviour.inject_connection_established(p, id, end)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn inject_connection_closed(&mut self, p: &PeerId, id: &ConnectionId, end: &ConnectedPoint) {
|
fn inject_connection_closed(&mut self, peer: &PeerId, id: &ConnectionId, end: &ConnectedPoint) {
|
||||||
self.behaviour.inject_connection_closed(p, id, end);
|
self.behaviour.inject_connection_closed(peer, id, end);
|
||||||
}
|
if self.is_connected(peer) {
|
||||||
|
if let Some(credit) = self.credit_messages.get_mut(peer) {
|
||||||
fn inject_connected(&mut self, peer: &PeerId) {
|
log::debug! { "{:08x}: resending credit grant {} to {} after connection closed",
|
||||||
log::trace!("{:08x}: connected to {}", self.id, peer);
|
self.id,
|
||||||
self.behaviour.inject_connected(peer);
|
credit.id,
|
||||||
// The limit may have been added by [`Throttled::send_request`] already.
|
peer
|
||||||
if !self.limits.contains_key(peer) {
|
};
|
||||||
let limit = self.previous.pop(peer).unwrap_or_else(|| self.default.clone());
|
let msg = Message::credit(credit.amount, credit.id);
|
||||||
self.limits.insert(peer.clone(), limit);
|
credit.request = self.behaviour.send_request(peer, msg)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn inject_disconnected(&mut self, peer: &PeerId) {
|
fn inject_connected(&mut self, p: &PeerId) {
|
||||||
log::trace!("{:08x}: disconnected from {}", self.id, peer);
|
log::trace!("{:08x}: connected to {}", self.id, p);
|
||||||
self.behaviour.inject_disconnected(peer);
|
self.behaviour.inject_connected(p);
|
||||||
// Save the limit in case the peer reconnects soon.
|
// The limit may have been added by `Throttled::send_request` already.
|
||||||
if let Some(limit) = self.limits.remove(peer) {
|
if !self.peer_info.contains_key(p) {
|
||||||
self.previous.put(peer.clone(), limit);
|
let limit = self.limit_overrides.get(p).copied().unwrap_or(self.default_limit);
|
||||||
|
self.peer_info.insert(p.clone(), PeerInfo::new(limit));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn inject_dial_failure(&mut self, peer: &PeerId) {
|
fn inject_disconnected(&mut self, p: &PeerId) {
|
||||||
self.behaviour.inject_dial_failure(peer)
|
log::trace!("{:08x}: disconnected from {}", self.id, p);
|
||||||
|
self.peer_info.remove(p);
|
||||||
|
self.credit_messages.remove(p);
|
||||||
|
self.behaviour.inject_disconnected(p)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn inject_event(&mut self, p: PeerId, i: ConnectionId, e: RequestResponseHandlerEvent<C>) {
|
fn inject_dial_failure(&mut self, p: &PeerId) {
|
||||||
match e {
|
self.behaviour.inject_dial_failure(p)
|
||||||
// Cases where an outbound request has been resolved.
|
}
|
||||||
| RequestResponseHandlerEvent::Response {..}
|
|
||||||
| RequestResponseHandlerEvent::OutboundTimeout (_)
|
fn inject_event(&mut self, p: PeerId, i: ConnectionId, e: RequestResponseHandlerEvent<Codec<C>>) {
|
||||||
| RequestResponseHandlerEvent::OutboundUnsupportedProtocols (_) =>
|
|
||||||
if let Some(limit) = self.limits.get_mut(&p) {
|
|
||||||
if limit.send_budget == 0 {
|
|
||||||
log::trace!("{:08x}: sending to peer {} can resume", self.id, p);
|
|
||||||
self.events.push_back(Event::ResumeSending(p.clone()))
|
|
||||||
}
|
|
||||||
limit.send_budget = min(limit.send_budget + 1, limit.maximum.get())
|
|
||||||
}
|
|
||||||
// A new inbound request.
|
|
||||||
| RequestResponseHandlerEvent::Request {..} =>
|
|
||||||
if let Some(limit) = self.limits.get_mut(&p) {
|
|
||||||
if limit.recv_budget == 0 {
|
|
||||||
log::error!("{:08x}: peer {} exceeds its budget", self.id, p);
|
|
||||||
return self.events.push_back(Event::TooManyInboundRequests(p))
|
|
||||||
}
|
|
||||||
limit.recv_budget -= 1
|
|
||||||
}
|
|
||||||
// The inbound request has expired so grant more budget to receive another one.
|
|
||||||
| RequestResponseHandlerEvent::InboundTimeout =>
|
|
||||||
if let Some(limit) = self.limits.get_mut(&p) {
|
|
||||||
limit.recv_budget = min(limit.recv_budget + 1, limit.maximum.get())
|
|
||||||
}
|
|
||||||
// Nothing to do here ...
|
|
||||||
| RequestResponseHandlerEvent::InboundUnsupportedProtocols => {}
|
|
||||||
}
|
|
||||||
self.behaviour.inject_event(p, i, e)
|
self.behaviour.inject_event(p, i, e)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn poll(&mut self, cx: &mut Context<'_>, p: &mut impl PollParameters)
|
fn poll(&mut self, cx: &mut Context<'_>, params: &mut impl PollParameters)
|
||||||
-> Poll<NetworkBehaviourAction<RequestProtocol<C>, Self::OutEvent>>
|
-> Poll<NetworkBehaviourAction<RequestProtocol<Codec<C>>, Self::OutEvent>>
|
||||||
{
|
{
|
||||||
if let Some(ev) = self.events.pop_front() {
|
loop {
|
(This hunk replaces the former single-line delegation `self.behaviour.poll(cx, p).map(|a| a.map_out(Event::Event))` with the credit-aware event handling below.)

            if let Some(ev) = self.events.pop_front() {
                return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev))
            } else if self.events.capacity() > super::EMPTY_QUEUE_SHRINK_THRESHOLD {
                self.events.shrink_to_fit()
            }

            let event = match ready!(self.behaviour.poll(cx, params)) {
                | NetworkBehaviourAction::GenerateEvent(RequestResponseEvent::Message { peer, message }) => {
                    let message = match message {
                        | RequestResponseMessage::Response { request_id, response } =>
                            match &response.header().typ {
                                | Some(Type::Ack) => {
                                    if let Some(id) = self.credit_messages.get(&peer).map(|c| c.id) {
                                        if Some(id) == response.header().ident {
                                            log::trace!("{:08x}: received ack {} from {}", self.id, id, peer);
                                            self.credit_messages.remove(&peer);
                                        }
                                    }
                                    continue
                                }
                                | Some(Type::Response) => {
                                    log::trace!("{:08x}: received response {} from {}", self.id, request_id, peer);
                                    if let Some(rs) = response.into_parts().1 {
                                        RequestResponseMessage::Response { request_id, response: rs }
                                    } else {
                                        log::error! { "{:08x}: missing data for response {} from peer {}",
                                            self.id,
                                            request_id,
                                            peer
                                        }
                                        continue
                                    }
                                }
                                | ty => {
                                    log::trace! {
                                        "{:08x}: unknown message type: {:?} from {}; expected response or credit",
                                        self.id,
                                        ty,
                                        peer
                                    };
                                    continue
                                }
                            }
                        | RequestResponseMessage::Request { request_id, request, channel } =>
                            match &request.header().typ {
                                | Some(Type::Credit) => {
                                    if let Some(info) = self.peer_info.get_mut(&peer) {
                                        let id = if let Some(n) = request.header().ident {
                                            n
                                        } else {
                                            log::warn! { "{:08x}: missing credit id in message from {}",
                                                self.id,
                                                peer
                                            }
                                            continue
                                        };
                                        let credit = request.header().credit.unwrap_or(0);
                                        log::trace! { "{:08x}: received {} additional credit {} from {}",
                                            self.id,
                                            credit,
                                            id,
                                            peer
                                        };
                                        if info.send_budget_id < Some(id) {
                                            if info.send_budget == 0 && credit > 0 {
                                                log::trace!("{:08x}: sending to peer {} can resume", self.id, peer);
                                                self.events.push_back(Event::ResumeSending(peer.clone()))
                                            }
                                            info.send_budget += credit;
                                            info.send_budget_id = Some(id)
                                        }
                                        self.behaviour.send_response(channel, Message::ack(id))
                                    }
                                    continue
                                }
                                | Some(Type::Request) => {
                                    if let Some(info) = self.peer_info.get_mut(&peer) {
                                        log::trace! { "{:08x}: received request {} (recv. budget = {})",
                                            self.id,
                                            request_id,
                                            info.recv_budget
                                        };
                                        if info.recv_budget == 0 {
                                            log::debug!("{:08x}: peer {} exceeds its budget", self.id, peer);
                                            self.events.push_back(Event::TooManyInboundRequests(peer.clone()));
                                            continue
                                        }
                                        info.recv_budget -= 1;
                                        // We consider a request as proof that our credit grant has
                                        // reached the peer. Usually, an ACK has already been
                                        // received.
                                        self.credit_messages.remove(&peer);
                                    }
                                    if let Some(rq) = request.into_parts().1 {
                                        RequestResponseMessage::Request { request_id, request: rq, channel }
                                    } else {
                                        log::error! { "{:08x}: missing data for request {} from peer {}",
                                            self.id,
                                            request_id,
                                            peer
                                        }
                                        continue
                                    }
                                }
                                | ty => {
                                    log::trace! {
                                        "{:08x}: unknown message type: {:?} from {}; expected request or ack",
                                        self.id,
                                        ty,
                                        peer
                                    };
                                    continue
                                }
                            }
                    };
                    let event = RequestResponseEvent::Message { peer, message };
                    NetworkBehaviourAction::GenerateEvent(Event::Event(event))
                }
                | NetworkBehaviourAction::GenerateEvent(RequestResponseEvent::OutboundFailure {
                    peer,
                    request_id,
                    error
                }) => {
                    if let Some(credit) = self.credit_messages.get_mut(&peer) {
                        if credit.request == request_id {
                            log::debug! { "{:08x}: failed to send {} as credit {} to {}; retrying...",
                                self.id,
                                credit.amount,
                                credit.id,
                                peer
                            };
                            let msg = Message::credit(credit.amount, credit.id);
                            credit.request = self.behaviour.send_request(&peer, msg)
                        }
                    }
                    let event = RequestResponseEvent::OutboundFailure { peer, request_id, error };
                    NetworkBehaviourAction::GenerateEvent(Event::Event(event))
                }
                | NetworkBehaviourAction::GenerateEvent(RequestResponseEvent::InboundFailure {
                    peer,
                    request_id,
                    error
                }) => {
                    let event = RequestResponseEvent::InboundFailure { peer, request_id, error };
                    NetworkBehaviourAction::GenerateEvent(Event::Event(event))
                }
                | NetworkBehaviourAction::DialAddress { address } =>
                    NetworkBehaviourAction::DialAddress { address },
                | NetworkBehaviourAction::DialPeer { peer_id, condition } =>
                    NetworkBehaviourAction::DialPeer { peer_id, condition },
                | NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } =>
                    NetworkBehaviourAction::NotifyHandler { peer_id, handler, event },
                | NetworkBehaviourAction::ReportObservedAddr { address } =>
                    NetworkBehaviourAction::ReportObservedAddr { address }
            };

            return Poll::Ready(event)
        }
    }
}
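The loop above implements the credit accounting: every grant carries a strictly increasing identifier, so a grant resent after a connection closed (or otherwise duplicated) is applied at most once, and sending resumes only when a previously exhausted budget becomes positive again. A minimal, self-contained sketch of just that bookkeeping; the names `PeerBudget` and `apply_grant` are illustrative and not part of the crate:

#[derive(Debug, Default)]
struct PeerBudget {
    /// How many more requests we may still send to this peer.
    send_budget: u16,
    /// Identifier of the most recent credit grant we applied.
    send_budget_id: Option<u64>,
}

impl PeerBudget {
    /// Apply a credit grant; returns `true` if sending can resume.
    fn apply_grant(&mut self, id: u64, credit: u16) -> bool {
        // Grant ids increase monotonically; stale or duplicate grants are
        // ignored, mirroring the `info.send_budget_id < Some(id)` check above.
        if self.send_budget_id < Some(id) {
            let resumed = self.send_budget == 0 && credit > 0;
            self.send_budget += credit;
            self.send_budget_id = Some(id);
            resumed
        } else {
            false
        }
    }
}

fn main() {
    let mut budget = PeerBudget::default();
    assert!(budget.apply_grant(1, 5));   // fresh grant: sending resumes
    assert!(!budget.apply_grant(1, 5));  // duplicate grant: ignored
    assert_eq!(budget.send_budget, 5);
}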
protocols/request-response/src/throttled/codec.rs (new file, 251 lines)
@ -0,0 +1,251 @@
// Copyright 2020 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use async_trait::async_trait;
use bytes::{Bytes, BytesMut};
use futures::prelude::*;
use libp2p_core::ProtocolName;
use minicbor::{Encode, Decode};
use std::io;
use super::RequestResponseCodec;
use unsigned_varint::{aio, io::ReadError};

/// A protocol header.
#[derive(Debug, Default, Clone, PartialEq, Eq, Encode, Decode)]
#[cbor(map)]
pub struct Header {
    /// The type of message.
    #[n(0)] pub typ: Option<Type>,
    /// The number of additional requests the remote is willing to receive.
    #[n(1)] pub credit: Option<u16>,
    /// An identifier used for sending credit grants.
    #[n(2)] pub ident: Option<u64>
}

/// A protocol message type.
#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
pub enum Type {
    #[n(0)] Request,
    #[n(1)] Response,
    #[n(2)] Credit,
    #[n(3)] Ack
}

/// A protocol message consisting of header and data.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Message<T> {
    header: Header,
    data: Option<T>
}

impl<T> Message<T> {
    /// Create a new message of some type.
    fn new(header: Header) -> Self {
        Message { header, data: None }
    }

    /// Create a request message.
    pub fn request(data: T) -> Self {
        let mut m = Message::new(Header { typ: Some(Type::Request), .. Header::default() });
        m.data = Some(data);
        m
    }

    /// Create a response message.
    pub fn response(data: T) -> Self {
        let mut m = Message::new(Header { typ: Some(Type::Response), .. Header::default() });
        m.data = Some(data);
        m
    }

    /// Create a credit grant.
    pub fn credit(credit: u16, ident: u64) -> Self {
        Message::new(Header { typ: Some(Type::Credit), credit: Some(credit), ident: Some(ident) })
    }

    /// Create an acknowledge message.
    pub fn ack(ident: u64) -> Self {
        Message::new(Header { typ: Some(Type::Ack), credit: None, ident: Some(ident) })
    }

    /// Access the message header.
    pub fn header(&self) -> &Header {
        &self.header
    }

    /// Access the message data.
    pub fn data(&self) -> Option<&T> {
        self.data.as_ref()
    }

    /// Consume this message and return header and data.
    pub fn into_parts(self) -> (Header, Option<T>) {
        (self.header, self.data)
    }
}

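// Illustration (not part of the file): a credit grant is a header-only message,
// e.g. `Message::<Vec<u8>>::credit(5, 7)` carries `typ: Some(Type::Credit)`,
// `credit: Some(5)`, `ident: Some(7)` and no payload, whereas
// `Message::request(vec![1, 2, 3]).into_parts()` yields the request header
// together with `Some(vec![1, 2, 3])`.
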
/// A wrapper around a `ProtocolName` impl which augments the protocol name.
///
/// The type implements `ProtocolName` itself and creates a name for a
/// request-response protocol based on the protocol name of the wrapped type.
#[derive(Debug, Clone)]
pub struct ProtocolWrapper<P>(P, Bytes);

impl<P: ProtocolName> ProtocolWrapper<P> {
    pub fn new(prefix: &[u8], p: P) -> Self {
        let mut full = BytesMut::from(prefix);
        full.extend_from_slice(p.protocol_name());
        ProtocolWrapper(p, full.freeze())
    }
}

impl<P> ProtocolName for ProtocolWrapper<P> {
    fn protocol_name(&self) -> &[u8] {
        self.1.as_ref()
    }
}

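// Illustration (not part of the file): `ProtocolWrapper::new(prefix, inner)`
// simply prepends `prefix` to `inner.protocol_name()`. Assuming a hypothetical
// prefix of b"/t/1" and an inner protocol named b"/ping/1", the wrapped
// protocol negotiates as b"/t/1/ping/1", so throttled and plain
// request-response substreams never match each other by accident.
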
/// A `RequestResponseCodec` wrapper that adds headers to the payload data.
#[derive(Debug, Clone)]
pub struct Codec<C> {
    /// The wrapped codec.
    inner: C,
    /// Encoding/decoding buffer.
    buffer: Vec<u8>,
    /// Max. header length.
    max_header_len: u32
}

impl<C> Codec<C> {
    /// Create a codec by wrapping an existing one.
    pub fn new(c: C, max_header_len: u32) -> Self {
        Codec { inner: c, buffer: Vec::new(), max_header_len }
    }

    /// Read and decode a request header.
    async fn read_header<T, H>(&mut self, io: &mut T) -> io::Result<H>
    where
        T: AsyncRead + Unpin + Send,
        H: for<'a> minicbor::Decode<'a>
    {
        let header_len = aio::read_u32(&mut *io).await
            .map_err(|e| match e {
                ReadError::Io(e) => e,
                other => io::Error::new(io::ErrorKind::Other, other)
            })?;
        if header_len > self.max_header_len {
            return Err(io::Error::new(io::ErrorKind::InvalidData, "header too large to read"))
        }
        self.buffer.resize(u32_to_usize(header_len), 0u8);
        io.read_exact(&mut self.buffer).await?;
        minicbor::decode(&self.buffer).map_err(|e| io::Error::new(io::ErrorKind::Other, e))
    }

    /// Encode and write a response header.
    async fn write_header<T, H>(&mut self, hdr: &H, io: &mut T) -> io::Result<()>
    where
        T: AsyncWrite + Unpin + Send,
        H: minicbor::Encode
    {
        self.buffer.clear();
        minicbor::encode(hdr, &mut self.buffer).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
        if self.buffer.len() > u32_to_usize(self.max_header_len) {
            return Err(io::Error::new(io::ErrorKind::InvalidData, "header too large to write"))
        }
        let mut b = unsigned_varint::encode::u32_buffer();
        let header_len = unsigned_varint::encode::u32(self.buffer.len() as u32, &mut b);
        io.write_all(header_len).await?;
        io.write_all(&self.buffer).await
    }
}

#[async_trait]
impl<C> RequestResponseCodec for Codec<C>
where
    C: RequestResponseCodec + Send,
    C::Protocol: Sync
{
    type Protocol = ProtocolWrapper<C::Protocol>;
    type Request = Message<C::Request>;
    type Response = Message<C::Response>;

    async fn read_request<T>(&mut self, p: &Self::Protocol, io: &mut T) -> io::Result<Self::Request>
    where
        T: AsyncRead + Unpin + Send
    {
        let mut msg = Message::new(self.read_header(io).await?);
        match msg.header.typ {
            Some(Type::Request) => {
                msg.data = Some(self.inner.read_request(&p.0, io).await?);
                Ok(msg)
            }
            Some(Type::Credit) => Ok(msg),
            Some(Type::Response) | Some(Type::Ack) | None => {
                log::debug!("unexpected {:?} when expecting request or credit grant", msg.header.typ);
                Err(io::ErrorKind::InvalidData.into())
            }
        }
    }

    async fn read_response<T>(&mut self, p: &Self::Protocol, io: &mut T) -> io::Result<Self::Response>
    where
        T: AsyncRead + Unpin + Send
    {
        let mut msg = Message::new(self.read_header(io).await?);
        match msg.header.typ {
            Some(Type::Response) => {
                msg.data = Some(self.inner.read_response(&p.0, io).await?);
                Ok(msg)
            }
            Some(Type::Ack) => Ok(msg),
            Some(Type::Request) | Some(Type::Credit) | None => {
                log::debug!("unexpected {:?} when expecting response or ack", msg.header.typ);
                Err(io::ErrorKind::InvalidData.into())
            }
        }
    }

    async fn write_request<T>(&mut self, p: &Self::Protocol, io: &mut T, r: Self::Request) -> io::Result<()>
    where
        T: AsyncWrite + Unpin + Send
    {
        self.write_header(&r.header, io).await?;
        if let Some(data) = r.data {
            self.inner.write_request(&p.0, io, data).await?
        }
        Ok(())
    }

    async fn write_response<T>(&mut self, p: &Self::Protocol, io: &mut T, r: Self::Response) -> io::Result<()>
    where
        T: AsyncWrite + Unpin + Send
    {
        self.write_header(&r.header, io).await?;
        if let Some(data) = r.data {
            self.inner.write_response(&p.0, io, data).await?
        }
        Ok(())
    }
}

#[cfg(any(target_pointer_width = "64", target_pointer_width = "32"))]
fn u32_to_usize(n: u32) -> usize {
    n as usize
}
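On the wire, the codec above frames every message as an unsigned-varint length prefix, followed by the CBOR-encoded header, followed by whatever bytes the wrapped codec writes for the payload. The following standalone sketch is not part of the crate: `DemoHeader`, `frame` and `unframe` are hypothetical names, and a simplified header stands in for `Header`. It round-trips that framing in memory:

use minicbor::{Decode, Encode};

/// A simplified stand-in for the `Header` type above.
#[derive(Debug, PartialEq, Encode, Decode)]
#[cbor(map)]
struct DemoHeader {
    #[n(0)] credit: Option<u16>,
    #[n(1)] ident: Option<u64>,
}

/// Varint length prefix + CBOR header + payload, as `write_header` does above.
fn frame(header: &DemoHeader, payload: &[u8]) -> Vec<u8> {
    let mut hdr = Vec::new();
    minicbor::encode(header, &mut hdr).expect("encoding to a Vec does not fail");
    let mut len_buf = unsigned_varint::encode::u32_buffer();
    let len = unsigned_varint::encode::u32(hdr.len() as u32, &mut len_buf);
    let mut out = Vec::new();
    out.extend_from_slice(len);
    out.extend_from_slice(&hdr);
    out.extend_from_slice(payload);
    out
}

/// The inverse: read the varint length, decode the header, return the rest.
fn unframe(bytes: &[u8]) -> (DemoHeader, &[u8]) {
    let (len, rest) = unsigned_varint::decode::u32(bytes).expect("valid varint");
    let (hdr, payload) = rest.split_at(len as usize);
    (minicbor::decode(hdr).expect("valid header"), payload)
}

fn main() {
    let header = DemoHeader { credit: Some(5), ident: Some(1) };
    let framed = frame(&header, b"ping");
    let (decoded, payload) = unframe(&framed);
    assert_eq!(decoded, header);
    assert_eq!(payload, &b"ping"[..]);
}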
@ -36,7 +36,7 @@ use libp2p_tcp::TcpConfig;
use futures::{prelude::*, channel::mpsc};
use rand::{self, Rng};
use std::{io, iter};
// use std::{collections::HashSet, num::NonZeroU16}; // Disabled until #1706 is fixed.
use std::{collections::HashSet, num::NonZeroU16};

/// Exercises a simple ping protocol.
#[test]
@ -73,7 +73,7 @@ fn ping_protocol() {
        match swarm1.next().await {
            RequestResponseEvent::Message {
                peer,
                message: RequestResponseMessage::Request { request, channel }
                message: RequestResponseMessage::Request { request, channel, .. }
            } => {
                assert_eq!(&request, &expected_ping);
                assert_eq!(&peer, &peer2_id);
@ -117,202 +117,101 @@ fn ping_protocol() {
    let () = async_std::task::block_on(peer2);
}

(The old version of this hunk carried the throttled tests commented out and marked "Disabled until #1706 is fixed."; they are replaced by the restored test below, and the commented-out `ping_protocol_limit_violation` variant is removed.)

#[test]
fn ping_protocol_throttled() {
    let ping = Ping("ping".to_string().into_bytes());
    let pong = Pong("pong".to_string().into_bytes());

    let protocols = iter::once((PingProtocol(), ProtocolSupport::Full));
    let cfg = RequestResponseConfig::default();

    let (peer1_id, trans) = mk_transport();
    let ping_proto1 = RequestResponse::throttled(PingCodec(), protocols.clone(), cfg.clone());
    let mut swarm1 = Swarm::new(trans, ping_proto1, peer1_id.clone());

    let (peer2_id, trans) = mk_transport();
    let ping_proto2 = RequestResponse::throttled(PingCodec(), protocols, cfg);
    let mut swarm2 = Swarm::new(trans, ping_proto2, peer2_id.clone());

    let (mut tx, mut rx) = mpsc::channel::<Multiaddr>(1);

    let addr = "/ip4/127.0.0.1/tcp/0".parse().unwrap();
    Swarm::listen_on(&mut swarm1, addr).unwrap();

    let expected_ping = ping.clone();
    let expected_pong = pong.clone();

    let limit1: u16 = rand::thread_rng().gen_range(1, 10);
    let limit2: u16 = rand::thread_rng().gen_range(1, 10);
    swarm1.set_receive_limit(NonZeroU16::new(limit1).unwrap());
    swarm2.set_receive_limit(NonZeroU16::new(limit2).unwrap());

    let peer1 = async move {
        while let Some(_) = swarm1.next().now_or_never() {}

        let l = Swarm::listeners(&swarm1).next().unwrap();
        tx.send(l.clone()).await.unwrap();
        for i in 1.. {
            match swarm1.next().await {
                throttled::Event::Event(RequestResponseEvent::Message {
                    peer,
                    message: RequestResponseMessage::Request { request, channel, .. },
                }) => {
                    assert_eq!(&request, &expected_ping);
                    assert_eq!(&peer, &peer2_id);
                    swarm1.send_response(channel, pong.clone());
                },
                e => panic!("Peer1: Unexpected event: {:?}", e)
            }
            if i % 31 == 0 {
                let lim = rand::thread_rng().gen_range(1, 17);
                swarm1.override_receive_limit(&peer2_id, NonZeroU16::new(lim).unwrap());
            }
        }
    };

    let num_pings: u16 = rand::thread_rng().gen_range(100, 1000);

    let peer2 = async move {
        let mut count = 0;
        let addr = rx.next().await.unwrap();
        swarm2.add_address(&peer1_id, addr.clone());

        let mut blocked = false;
        let mut req_ids = HashSet::new();

        loop {
            if !blocked {
                while let Some(id) = swarm2.send_request(&peer1_id, ping.clone()).ok() {
                    req_ids.insert(id);
                }
                blocked = true;
            }
            match swarm2.next().await {
                throttled::Event::ResumeSending(peer) => {
                    assert_eq!(peer, peer1_id);
                    blocked = false
                }
                throttled::Event::Event(RequestResponseEvent::Message {
                    peer,
                    message: RequestResponseMessage::Response { request_id, response }
                }) => {
                    count += 1;
                    assert_eq!(&response, &expected_pong);
                    assert_eq!(&peer, &peer1_id);
                    assert!(req_ids.remove(&request_id));
                    if count >= num_pings {
                        break
                    }
                }
                e => panic!("Peer2: Unexpected event: {:?}", e)
            }
        }
    };

    async_std::task::spawn(Box::pin(peer1));
    let () = async_std::task::block_on(peer2);
}

fn mk_transport() -> (PeerId, Boxed<(PeerId, StreamMuxerBox), io::Error>) {
    let id_keys = identity::Keypair::generate_ed25519();