// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use crate::protocols_handler::{
    KeepAlive,
    ProtocolsHandler,
    IntoProtocolsHandler,
    ProtocolsHandlerEvent,
    ProtocolsHandlerUpgrErr
};
use futures::prelude::*;
use libp2p_core::{
    ConnectedPoint,
    PeerId,
    nodes::collection::ConnectionInfo,
    nodes::handled_node::{IntoNodeHandler, NodeHandler, NodeHandlerEndpoint, NodeHandlerEvent},
    upgrade::{self, InboundUpgradeApply, OutboundUpgradeApply}
};
use std::{error, fmt, time::Duration};
use wasm_timer::{Delay, Timeout};

/// Prototype for a `NodeHandlerWrapper`.
pub struct NodeHandlerWrapperBuilder<TIntoProtoHandler> {
    /// The underlying handler.
    handler: TIntoProtoHandler,
}

impl<TIntoProtoHandler> NodeHandlerWrapperBuilder<TIntoProtoHandler>
where
    TIntoProtoHandler: IntoProtocolsHandler
{
    /// Builds a `NodeHandlerWrapperBuilder`.
    #[inline]
    pub(crate) fn new(handler: TIntoProtoHandler) -> Self {
        NodeHandlerWrapperBuilder {
            handler,
        }
    }

    /// Builds the `NodeHandlerWrapper`.
    #[deprecated(note = "Pass the NodeHandlerWrapperBuilder directly")]
    #[inline]
    pub fn build(self) -> NodeHandlerWrapper<TIntoProtoHandler>
    where TIntoProtoHandler: ProtocolsHandler
    {
        NodeHandlerWrapper {
            handler: self.handler,
            negotiating_in: Vec::new(),
            negotiating_out: Vec::new(),
            queued_dial_upgrades: Vec::new(),
            unique_dial_upgrade_id: 0,
            shutdown: Shutdown::None,
        }
    }
}

impl<TIntoProtoHandler, TProtoHandler, TConnInfo> IntoNodeHandler<(TConnInfo, ConnectedPoint)>
    for NodeHandlerWrapperBuilder<TIntoProtoHandler>
where
    TIntoProtoHandler: IntoProtocolsHandler<Handler = TProtoHandler>,
    TProtoHandler: ProtocolsHandler,
    TConnInfo: ConnectionInfo<PeerId = PeerId>,
{
    type Handler = NodeHandlerWrapper<TIntoProtoHandler::Handler>;

    fn into_handler(self, remote_info: &(TConnInfo, ConnectedPoint)) -> Self::Handler {
        NodeHandlerWrapper {
            handler: self.handler.into_handler(&remote_info.0.peer_id(), &remote_info.1),
            negotiating_in: Vec::new(),
            negotiating_out: Vec::new(),
            queued_dial_upgrades: Vec::new(),
            unique_dial_upgrade_id: 0,
            shutdown: Shutdown::None,
        }
    }
}

/// Wraps around an implementation of `ProtocolsHandler`, and implements `NodeHandler`.
// TODO: add a caching system for protocols that are supported or not
pub struct NodeHandlerWrapper<TProtoHandler>
where
    TProtoHandler: ProtocolsHandler,
{
    /// The underlying handler.
    handler: TProtoHandler,
    /// Futures that upgrade incoming substreams.
    negotiating_in:
        Vec<Timeout<InboundUpgradeApply<TProtoHandler::Substream, TProtoHandler::InboundProtocol>>>,
    /// Futures that upgrade outgoing substreams. The first element of the tuple is the userdata
    /// to pass back once successfully opened.
    negotiating_out: Vec<(
        TProtoHandler::OutboundOpenInfo,
        Timeout<OutboundUpgradeApply<TProtoHandler::Substream, TProtoHandler::OutboundProtocol>>,
    )>,
    /// For each outbound substream request, how to upgrade it. The first element of the tuple
    /// is the unique identifier (see `unique_dial_upgrade_id`).
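    /// Entries are removed again in `inject_substream` once the corresponding
    /// outbound substream has been opened.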
    queued_dial_upgrades: Vec<(u64, TProtoHandler::OutboundProtocol)>,
    /// Unique identifier assigned to each queued dial upgrade.
    unique_dial_upgrade_id: u64,
    /// The currently planned connection & handler shutdown.
    shutdown: Shutdown,
}

/// The options for a planned connection & handler shutdown.
///
/// A shutdown is planned anew based on the return value of
/// [`ProtocolsHandler::connection_keep_alive`] of the underlying handler
/// after every invocation of [`ProtocolsHandler::poll`].
///
/// A planned shutdown is always postponed for as long as there are incoming
/// or outgoing substreams being negotiated, i.e. it is a graceful, "idle"
/// shutdown.
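///
/// As implemented in `poll` below, `KeepAlive::Until(t)` schedules a shutdown
/// for time `t` (`Shutdown::Later`), `KeepAlive::No` requests a shutdown as
/// soon as possible (`Shutdown::Asap`) and `KeepAlive::Yes` cancels any
/// planned shutdown (`Shutdown::None`).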
enum Shutdown {
    /// No shutdown is planned.
    None,
    /// A shutdown is planned as soon as possible.
    Asap,
    /// A shutdown is planned for when a `Delay` has elapsed.
    Later(Delay)
}

/// Error generated by the `NodeHandlerWrapper`.
#[derive(Debug)]
pub enum NodeHandlerWrapperError<TErr> {
    /// Error generated by the handler.
    Handler(TErr),
    /// The connection has been deemed useless and has been closed.
    UselessTimeout,
}

impl<TErr> From<TErr> for NodeHandlerWrapperError<TErr> {
    fn from(err: TErr) -> NodeHandlerWrapperError<TErr> {
        NodeHandlerWrapperError::Handler(err)
    }
}

impl<TErr> fmt::Display for NodeHandlerWrapperError<TErr>
where
    TErr: fmt::Display
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            NodeHandlerWrapperError::Handler(err) => write!(f, "{}", err),
            NodeHandlerWrapperError::UselessTimeout =>
                write!(f, "Node has been closed due to inactivity"),
        }
    }
}

impl<TErr> error::Error for NodeHandlerWrapperError<TErr>
where
    TErr: error::Error + 'static
{
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            NodeHandlerWrapperError::Handler(err) => Some(err),
            NodeHandlerWrapperError::UselessTimeout => None,
        }
    }
}

impl<TProtoHandler> NodeHandler for NodeHandlerWrapper<TProtoHandler>
where
    TProtoHandler: ProtocolsHandler,
{
    type InEvent = TProtoHandler::InEvent;
    type OutEvent = TProtoHandler::OutEvent;
    type Error = NodeHandlerWrapperError<TProtoHandler::Error>;
    type Substream = TProtoHandler::Substream;
    // The first element of the tuple is the unique upgrade identifier
    // (see `unique_dial_upgrade_id`).
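    // The `Duration` is the timeout that is applied to the upgrade of the
    // outbound substream once it has been opened (see `inject_substream`).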
    type OutboundOpenInfo = (u64, TProtoHandler::OutboundOpenInfo, Duration);

    fn inject_substream(
        &mut self,
        substream: Self::Substream,
        endpoint: NodeHandlerEndpoint<Self::OutboundOpenInfo>,
    ) {
        match endpoint {
            NodeHandlerEndpoint::Listener => {
                let protocol = self.handler.listen_protocol();
                let timeout = protocol.timeout().clone();
                let upgrade = upgrade::apply_inbound(substream, protocol.into_upgrade());
                let with_timeout = Timeout::new(upgrade, timeout);
                self.negotiating_in.push(with_timeout);
            }
            NodeHandlerEndpoint::Dialer((upgrade_id, user_data, timeout)) => {
                let pos = match self
                    .queued_dial_upgrades
                    .iter()
                    .position(|(id, _)| id == &upgrade_id)
                {
                    Some(p) => p,
                    None => {
                        debug_assert!(false, "Received an upgrade with an invalid upgrade ID");
                        return;
                    }
                };

                let (_, proto_upgrade) = self.queued_dial_upgrades.remove(pos);
                let upgrade = upgrade::apply_outbound(substream, proto_upgrade);
                let with_timeout = Timeout::new(upgrade, timeout);
                self.negotiating_out.push((user_data, with_timeout));
            }
        }
    }

    #[inline]
    fn inject_event(&mut self, event: Self::InEvent) {
        self.handler.inject_event(event);
    }

    fn poll(&mut self) -> Poll<NodeHandlerEvent<Self::OutboundOpenInfo, Self::OutEvent>, Self::Error> {
        // Continue negotiation of newly-opened substreams on the listening side.
        // We remove each element from `negotiating_in` one by one and add them back if not ready.
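        // Iterating over the indices in reverse with `swap_remove` ensures that a
        // future which is pushed back because it is not ready yet is not polled a
        // second time within this loop.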
        for n in (0..self.negotiating_in.len()).rev() {
            let mut in_progress = self.negotiating_in.swap_remove(n);
            match in_progress.poll() {
                Ok(Async::Ready(upgrade)) =>
                    self.handler.inject_fully_negotiated_inbound(upgrade),
                Ok(Async::NotReady) => self.negotiating_in.push(in_progress),
                // TODO: return a diagnostic event?
                Err(_err) => {}
            }
        }

        // Continue negotiation of newly-opened substreams on the dialing side.
        // We remove each element from `negotiating_out` one by one and add them back if not ready.
        for n in (0..self.negotiating_out.len()).rev() {
            let (upgr_info, mut in_progress) = self.negotiating_out.swap_remove(n);
            match in_progress.poll() {
                Ok(Async::Ready(upgrade)) => {
                    self.handler.inject_fully_negotiated_outbound(upgrade, upgr_info);
                }
                Ok(Async::NotReady) => {
                    self.negotiating_out.push((upgr_info, in_progress));
                }
                Err(err) => {
                    let err = if err.is_elapsed() {
                        ProtocolsHandlerUpgrErr::Timeout
                    } else if err.is_timer() {
                        ProtocolsHandlerUpgrErr::Timer
                    } else {
                        debug_assert!(err.is_inner());
                        let err = err.into_inner().expect("Timeout error is one of {elapsed, \
                            timer, inner}; is_elapsed and is_timer are both false; error is \
                            inner; QED");
                        ProtocolsHandlerUpgrErr::Upgrade(err)
                    };

                    self.handler.inject_dial_upgrade_error(upgr_info, err);
                }
            }
        }

        // Poll the handler at the end so that we see the consequences of the method
        // calls on `self.handler`.
        let poll_result = self.handler.poll()?;

        // Ask the handler whether it wants the connection (and the handler itself)
        // to be kept alive, which determines the planned shutdown, if any.
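        // Note that an already scheduled delay is only reset if the requested
        // deadline actually changed; an unchanged `KeepAlive::Until` leaves the
        // timer untouched.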
        match (&mut self.shutdown, self.handler.connection_keep_alive()) {
            (Shutdown::Later(d), KeepAlive::Until(t)) =>
                if d.deadline() != t {
                    d.reset(t)
                },
            (_, KeepAlive::Until(t)) => self.shutdown = Shutdown::Later(Delay::new(t)),
            (_, KeepAlive::No) => self.shutdown = Shutdown::Asap,
            (_, KeepAlive::Yes) => self.shutdown = Shutdown::None
        };

        match poll_result {
            Async::Ready(ProtocolsHandlerEvent::Custom(event)) => {
                return Ok(Async::Ready(NodeHandlerEvent::Custom(event)));
            }
            Async::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
                protocol,
                info,
            }) => {
                let id = self.unique_dial_upgrade_id;
                let timeout = protocol.timeout().clone();
                self.unique_dial_upgrade_id += 1;
                self.queued_dial_upgrades.push((id, protocol.into_upgrade()));
                return Ok(Async::Ready(
                    NodeHandlerEvent::OutboundSubstreamRequest((id, info, timeout)),
                ));
            }
            Async::NotReady => (),
        };

        // Check if the connection (and handler) should be shut down.
        // As long as we're still negotiating substreams, shutdown is always postponed.
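        // A due shutdown is carried out by returning `NodeHandlerWrapperError::UselessTimeout`
        // as the handler error.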
        if self.negotiating_in.is_empty() && self.negotiating_out.is_empty() {
            match self.shutdown {
                Shutdown::None => {},
                Shutdown::Asap => return Err(NodeHandlerWrapperError::UselessTimeout),
                Shutdown::Later(ref mut delay) => match delay.poll() {
                    Ok(Async::Ready(_)) | Err(_) =>
                        return Err(NodeHandlerWrapperError::UselessTimeout),
                    Ok(Async::NotReady) => {}
                }
            }
        }

        Ok(Async::NotReady)
    }
}