// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use crate::{
    PeerId,
    muxing::StreamMuxer,
    nodes::{
        handled_node::{HandledNode, HandledNodeError, NodeHandler},
        node::Substream
    }
};
use fnv::FnvHashMap;
use futures::{prelude::*, stream, sync::mpsc};
use smallvec::SmallVec;
use std::{
    collections::hash_map::{Entry, OccupiedEntry},
    error,
    fmt,
    mem
};
use tokio_executor::Executor;

mod tests;

// Implementor notes
// =================
//
// This collection of nodes spawns a task for each individual node to process. This means that
// events happen in the background at the same time as the `HandledNodesTasks` is being polled.
//
// In order to make the API non-racy and avoid issues, we completely separate the state in the
// `HandledNodesTasks` from the states that the task nodes can access. They are only allowed to
// exchange messages. The state in the `HandledNodesTasks` is therefore delayed compared to the
// tasks, and is updated only when `poll()` is called.
//
// The only thing that we must be careful about is substreams, as they are "detached" from the
// state of the `HandledNodesTasks` and allowed to process in parallel. This is why there is no
// "substream closed" event being reported, as it could potentially create confusion and race
// conditions in the user's code. See similar comments in the documentation of `NodeStream`.
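//
// Schematically, the communication looks like this (a sketch for orientation, not literal
// code; the names match the fields defined below):
//
//   HandledNodesTasks                  NodeTask (one background task per node)
//   -----------------                  ---------------------------------------
//   tasks[id].0 -- ExtToInMessage -->  in_events_rx
//   events_rx  <-- InToExtMessage ---  events_tx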

/// Implementation of `Stream` that handles a collection of nodes.
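///
/// # Example
///
/// A minimal sketch of driving the collection (not compiled as a doctest here; `dial_fut`
/// must resolve to a `(peer_id, muxer)` pair and `my_handler` must implement
/// `IntoNodeHandler` — both are assumptions of the sketch):
///
/// ```ignore
/// let mut tasks = HandledNodesTasks::new();
/// let task_id = tasks.add_reach_attempt(dial_fut, (), my_handler);
/// loop {
///     match tasks.poll() {
///         Async::Ready(HandledNodesEvent::NodeReached { peer_id, .. }) => {
///             println!("task {:?} connected to {:?}", task_id, peer_id);
///         },
///         Async::Ready(HandledNodesEvent::NodeEvent { event, .. }) => {
///             println!("event from node: {:?}", event);
///         },
///         Async::Ready(HandledNodesEvent::TaskClosed { result, .. }) => {
///             println!("task closed: {:?}", result);
///             break;
///         },
///         Async::NotReady => break, // Resumed when a background task notifies us.
///     }
/// }
/// ```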
pub struct HandledNodesTasks<TInEvent, TOutEvent, TIntoHandler, TReachErr, THandlerErr, TUserData, TPeerId = PeerId> {
    /// Maps each active task to an unbounded sender used to control it. Closing the sender
    /// interrupts the task. It is possible that we receive messages from tasks that used to be
    /// in this list but no longer are, in which case we should ignore them.
    tasks: FnvHashMap<TaskId, (mpsc::UnboundedSender<ExtToInMessage<TInEvent>>, TUserData)>,

    /// Identifier for the next task to spawn.
    next_task_id: TaskId,

    /// List of node tasks to spawn.
    // TODO: stronger typing?
    to_spawn: SmallVec<[Box<dyn Future<Item = (), Error = ()> + Send>; 8]>,

    /// If no tokio executor is available, we move tasks to this list, and futures are polled on
    /// the current thread instead.
    local_spawns: Vec<Box<dyn Future<Item = (), Error = ()> + Send>>,

    /// Sender to emit events to the outside. Meant to be cloned and sent to tasks.
    events_tx: mpsc::UnboundedSender<(InToExtMessage<TOutEvent, TIntoHandler, TReachErr, THandlerErr, TPeerId>, TaskId)>,
    /// Receiver side for the events.
    events_rx: mpsc::UnboundedReceiver<(InToExtMessage<TOutEvent, TIntoHandler, TReachErr, THandlerErr, TPeerId>, TaskId)>,
}

impl<TInEvent, TOutEvent, TIntoHandler, TReachErr, THandlerErr, TUserData, TPeerId> fmt::Debug for
    HandledNodesTasks<TInEvent, TOutEvent, TIntoHandler, TReachErr, THandlerErr, TUserData, TPeerId>
where
    TUserData: fmt::Debug
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.debug_map()
            .entries(self.tasks.iter().map(|(id, (_, ud))| (id, ud)))
            .finish()
    }
}

/// Error that can happen in a task.
#[derive(Debug)]
pub enum TaskClosedEvent<TReachErr, THandlerErr> {
    /// An error happened while we were trying to reach the node.
    Reach(TReachErr),
    /// An error happened after the node has been reached.
    Node(HandledNodeError<THandlerErr>),
}

impl<TReachErr, THandlerErr> fmt::Display for TaskClosedEvent<TReachErr, THandlerErr>
where
    TReachErr: fmt::Display,
    THandlerErr: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            TaskClosedEvent::Reach(err) => write!(f, "{}", err),
            TaskClosedEvent::Node(err) => write!(f, "{}", err),
        }
    }
}

impl<TReachErr, THandlerErr> error::Error for TaskClosedEvent<TReachErr, THandlerErr>
where
    TReachErr: error::Error + 'static,
    THandlerErr: error::Error + 'static
{
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            TaskClosedEvent::Reach(err) => Some(err),
            TaskClosedEvent::Node(err) => Some(err),
        }
    }
}

/// Prototype for a `NodeHandler`.
pub trait IntoNodeHandler<TPeerId = PeerId> {
    /// The node handler.
    type Handler: NodeHandler;

    /// Builds the node handler.
    ///
    /// The `TPeerId` is the id of the node the handler is going to handle.
    fn into_handler(self, remote_peer_id: &TPeerId) -> Self::Handler;
}

impl<T, TPeerId> IntoNodeHandler<TPeerId> for T
where T: NodeHandler
{
    type Handler = Self;

    #[inline]
    fn into_handler(self, _: &TPeerId) -> Self {
        self
    }
}

/// Event that can happen on the `HandledNodesTasks`.
#[derive(Debug)]
pub enum HandledNodesEvent<'a, TInEvent, TOutEvent, TIntoHandler, TReachErr, THandlerErr, TUserData, TPeerId = PeerId> {
    /// A task has been closed.
    ///
    /// This happens once the node handler closes or an error happens.
    // TODO: send back undelivered events?
    TaskClosed {
        /// The task that has been closed.
        task: ClosedTask<TInEvent, TUserData>,
        /// What happened.
        result: Result<(), TaskClosedEvent<TReachErr, THandlerErr>>,
        /// If the task closed before reaching the node, this contains the handler that was passed
        /// to `add_reach_attempt`.
        handler: Option<TIntoHandler>,
    },

    /// A task has successfully connected to a node.
    NodeReached {
        /// The task that succeeded.
        task: Task<'a, TInEvent, TUserData>,
        /// Identifier of the node.
        peer_id: TPeerId,
    },

    /// A task has produced an event.
    NodeEvent {
        /// The task that produced the event.
        task: Task<'a, TInEvent, TUserData>,
        /// The produced event.
        event: TOutEvent,
    },
}

/// Identifier for a future that attempts to reach a node.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct TaskId(usize);

impl<TInEvent, TOutEvent, TIntoHandler, TReachErr, THandlerErr, TUserData, TPeerId>
    HandledNodesTasks<TInEvent, TOutEvent, TIntoHandler, TReachErr, THandlerErr, TUserData, TPeerId>
{
    /// Creates a new empty collection.
    #[inline]
    pub fn new() -> Self {
        let (events_tx, events_rx) = mpsc::unbounded();

        HandledNodesTasks {
            tasks: Default::default(),
            next_task_id: TaskId(0),
            to_spawn: SmallVec::new(),
            local_spawns: Vec::new(),
            events_tx,
            events_rx,
        }
    }

    /// Adds to the collection a future that tries to reach a node.
    ///
    /// This method spawns a task dedicated to resolving this future and processing the node's
    /// events.
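    ///
    /// A sketch of a reach attempt (not compiled as a doctest; `dial_fut` and `my_handler`
    /// are assumptions, as in the type-level example above):
    ///
    /// ```ignore
    /// // `dial_fut` resolves to `(peer_id, muxer)` on success.
    /// let task_id = tasks.add_reach_attempt(dial_fut, "user data", my_handler);
    /// // The outcome is later reported by `poll()` as either `NodeReached` or
    /// // `TaskClosed { result: Err(TaskClosedEvent::Reach(_)), .. }`.
    /// ```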
    pub fn add_reach_attempt<TFut, TMuxer>(&mut self, future: TFut, user_data: TUserData, handler: TIntoHandler) -> TaskId
    where
        TFut: Future<Item = (TPeerId, TMuxer), Error = TReachErr> + Send + 'static,
        TIntoHandler: IntoNodeHandler<TPeerId> + Send + 'static,
        TIntoHandler::Handler: NodeHandler<Substream = Substream<TMuxer>, InEvent = TInEvent, OutEvent = TOutEvent, Error = THandlerErr> + Send + 'static,
        TReachErr: error::Error + Send + 'static,
        THandlerErr: error::Error + Send + 'static,
        TInEvent: Send + 'static,
        TOutEvent: Send + 'static,
        <TIntoHandler::Handler as NodeHandler>::OutboundOpenInfo: Send + 'static, // TODO: shouldn't be required?
        TMuxer: StreamMuxer + Send + Sync + 'static, // TODO: Send + Sync + 'static shouldn't be required
        TMuxer::OutboundSubstream: Send + 'static, // TODO: shouldn't be required
        TPeerId: Send + 'static,
    {
        let task_id = self.next_task_id;
        self.next_task_id.0 += 1;

        let (tx, rx) = mpsc::unbounded();
        self.tasks.insert(task_id, (tx, user_data));

        let task = Box::new(NodeTask {
            taken_over: SmallVec::new(),
            inner: NodeTaskInner::Future {
                future,
                handler,
                events_buffer: Vec::new(),
            },
            events_tx: self.events_tx.clone(),
            in_events_rx: rx.fuse(),
            id: task_id,
        });

        self.to_spawn.push(task);
        task_id
    }

    /// Sends an event to all the tasks, including the pending ones.
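    ///
    /// Delivery is best-effort; a task that has already finished simply misses the event.
    /// For instance (a sketch; `MyEvent::Ping` is an assumed `TInEvent` value):
    ///
    /// ```ignore
    /// tasks.broadcast_event(&MyEvent::Ping);
    /// ```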
    pub fn broadcast_event(&mut self, event: &TInEvent)
    where TInEvent: Clone,
    {
        for (sender, _) in self.tasks.values() {
            // Note: it is possible that sending an event fails if the background task has already
            // finished, but the local state hasn't reflected that yet because it hasn't been
            // polled. This is not an error situation.
            let _ = sender.unbounded_send(ExtToInMessage::HandlerEvent(event.clone()));
        }
    }

    /// Grants access to an object that allows controlling a task of the collection.
    ///
    /// Returns `None` if the task id is invalid.
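    ///
    /// A minimal sketch (not compiled as a doctest; `task_id` is assumed to come from an
    /// earlier `add_reach_attempt`):
    ///
    /// ```ignore
    /// if let Some(mut task) = tasks.task(task_id) {
    ///     // Forward an event to the node handler running in the background task.
    ///     task.send_event(my_in_event);
    /// }
    /// ```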
    #[inline]
    pub fn task(&mut self, id: TaskId) -> Option<Task<'_, TInEvent, TUserData>> {
        match self.tasks.entry(id) {
            Entry::Occupied(inner) => Some(Task { inner }),
            Entry::Vacant(_) => None,
        }
    }

    /// Returns a list of all the active tasks.
    #[inline]
    pub fn tasks<'a>(&'a self) -> impl Iterator<Item = TaskId> + 'a {
        self.tasks.keys().cloned()
    }

    /// Provides an API similar to `Stream`, except that it cannot produce an error.
    pub fn poll(&mut self) -> Async<HandledNodesEvent<TInEvent, TOutEvent, TIntoHandler, TReachErr, THandlerErr, TUserData, TPeerId>> {
        let (message, task_id) = match self.poll_inner() {
            Async::Ready(r) => r,
            Async::NotReady => return Async::NotReady,
        };

        Async::Ready(match message {
            InToExtMessage::NodeEvent(event) => {
                HandledNodesEvent::NodeEvent {
                    task: match self.tasks.entry(task_id) {
                        Entry::Occupied(inner) => Task { inner },
                        Entry::Vacant(_) => panic!("poll_inner only returns valid TaskIds; QED")
                    },
                    event
                }
            },
            InToExtMessage::NodeReached(peer_id) => {
                HandledNodesEvent::NodeReached {
                    task: match self.tasks.entry(task_id) {
                        Entry::Occupied(inner) => Task { inner },
                        Entry::Vacant(_) => panic!("poll_inner only returns valid TaskIds; QED")
                    },
                    peer_id
                }
            },
            InToExtMessage::TaskClosed(result, handler) => {
                let (channel, user_data) = self.tasks.remove(&task_id)
                    .expect("poll_inner only returns valid TaskIds; QED");
                HandledNodesEvent::TaskClosed {
                    task: ClosedTask {
                        id: task_id,
                        channel,
                        user_data,
                    },
                    result,
                    handler,
                }
            },
        })
    }

    /// Since non-lexical lifetimes still don't work very well in Rust at the moment, we have to
    /// split `poll()` in two. This method returns an `InToExtMessage` that is guaranteed to come
    /// from an alive task.
    // TODO: look into merging with `poll()`
    fn poll_inner(&mut self) -> Async<(InToExtMessage<TOutEvent, TIntoHandler, TReachErr, THandlerErr, TPeerId>, TaskId)> {
        for to_spawn in self.to_spawn.drain() {
            // We try to use the default executor, but fall back to polling the task manually if
            // no executor is available. This makes it possible to use the core in environments
            // outside of tokio.
            let mut executor = tokio_executor::DefaultExecutor::current();
            if executor.status().is_ok() {
                executor.spawn(to_spawn).expect("failed to create a node task");
            } else {
                self.local_spawns.push(to_spawn);
            }
        }

        for n in (0..self.local_spawns.len()).rev() {
            let mut task = self.local_spawns.swap_remove(n);
            match task.poll() {
                Ok(Async::Ready(())) => (),
                Ok(Async::NotReady) => self.local_spawns.push(task),
                Err(_err) => () // TODO: log this?
            }
        }

        loop {
            match self.events_rx.poll() {
                Ok(Async::Ready(Some((message, task_id)))) => {
                    // If the task id is no longer in `self.tasks`, that means that the user called
                    // `close()` on this task earlier. Therefore no new event should be generated
                    // for this task.
                    if self.tasks.contains_key(&task_id) {
                        break Async::Ready((message, task_id));
                    }
                }
                Ok(Async::NotReady) => {
                    break Async::NotReady;
                }
                Ok(Async::Ready(None)) => {
                    unreachable!("The sender is in self as well, therefore the receiver never \
                                  closes.")
                },
                Err(()) => unreachable!("An unbounded receiver never errors"),
            }
        }
    }
}

/// Access to a task in the collection.
pub struct Task<'a, TInEvent, TUserData> {
    inner: OccupiedEntry<'a, TaskId, (mpsc::UnboundedSender<ExtToInMessage<TInEvent>>, TUserData)>,
}

impl<'a, TInEvent, TUserData> Task<'a, TInEvent, TUserData> {
    /// Sends an event to the given node.
    // TODO: report back on delivery
    #[inline]
    pub fn send_event(&mut self, event: TInEvent) {
        // It is possible that the sender is closed if the background task has already finished
        // but the local state hasn't been updated yet because we haven't been polled in the
        // meanwhile.
        let _ = self.inner.get_mut().0.unbounded_send(ExtToInMessage::HandlerEvent(event));
    }

    /// Returns the user data associated with the task.
    pub fn user_data(&self) -> &TUserData {
        &self.inner.get().1
    }

    /// Returns a mutable reference to the user data associated with the task.
    pub fn user_data_mut(&mut self) -> &mut TUserData {
        &mut self.inner.get_mut().1
    }

    /// Returns the task id.
    #[inline]
    pub fn id(&self) -> TaskId {
        *self.inner.key()
    }

    /// Closes the task. Returns the user data.
    ///
    /// No further event will be generated for this task, but the connection inside the task will
    /// continue to run until the `ClosedTask` is destroyed.
    pub fn close(self) -> ClosedTask<TInEvent, TUserData> {
        let id = *self.inner.key();
        let (channel, user_data) = self.inner.remove();
        ClosedTask { id, channel, user_data }
    }

    /// Gives ownership of a closed task. As soon as our task (`self`) has some acknowledgment from
    /// the remote that its connection is alive, it will close the connection with `other`.
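    ///
    /// A minimal sketch of the intended use (not compiled as a doctest; `tasks`, `old_id`
    /// and `new_id` are assumed to be valid):
    ///
    /// ```ignore
    /// let closed = tasks.task(old_id).expect("valid id").close();
    /// // The replaced connection stays alive in the background until the new
    /// // task has an acknowledgment from the remote.
    /// let user_data = tasks.task(new_id).expect("valid id").take_over(closed);
    /// ```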
    pub fn take_over(&mut self, other: ClosedTask<TInEvent, TUserData>) -> TUserData {
        // It is possible that the sender is closed if the background task has already finished
        // but the local state hasn't been updated yet because we haven't been polled in the
        // meanwhile.
        let _ = self.inner.get_mut().0.unbounded_send(ExtToInMessage::TakeOver(other.channel));
        other.user_data
    }
}

impl<'a, TInEvent, TUserData> fmt::Debug for Task<'a, TInEvent, TUserData>
where
    TUserData: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.debug_tuple("Task")
            .field(&self.id())
            .field(self.user_data())
            .finish()
    }
}

/// Task after it has been closed. The connection to the remote is potentially still going on, but
/// no new event for this task will be received.
pub struct ClosedTask<TInEvent, TUserData> {
    /// Identifier of the task that closed. No longer corresponds to anything, but can be reported
    /// to the user.
    id: TaskId,
    /// The channel to the task. The task will continue to work for as long as this channel is
    /// alive, but events produced by it are ignored.
    channel: mpsc::UnboundedSender<ExtToInMessage<TInEvent>>,
    /// The data provided by the user.
    user_data: TUserData,
}

impl<TInEvent, TUserData> ClosedTask<TInEvent, TUserData> {
    /// Returns the task id. Note that this task is no longer part of the collection, and therefore
    /// calling `task()` with this ID will fail.
    #[inline]
    pub fn id(&self) -> TaskId {
        self.id
    }

    /// Returns the user data associated with the task.
    pub fn user_data(&self) -> &TUserData {
        &self.user_data
    }

    /// Returns a mutable reference to the user data associated with the task.
    pub fn user_data_mut(&mut self) -> &mut TUserData {
        &mut self.user_data
    }

    /// Finish destroying the task and yield the user data. This closes the connection to the
    /// remote.
    pub fn into_user_data(self) -> TUserData {
        self.user_data
    }
}

impl<TInEvent, TUserData> fmt::Debug for ClosedTask<TInEvent, TUserData>
where
    TUserData: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        f.debug_tuple("ClosedTask")
            .field(&self.id)
            .field(&self.user_data)
            .finish()
    }
}

/// Message to transmit from the public API to a task.
#[derive(Debug)]
enum ExtToInMessage<TInEvent> {
    /// An event to transmit to the node handler.
    HandlerEvent(TInEvent),
    /// When received, stores the parameter inside the task and keeps it alive until we have an
    /// acknowledgment that the remote has accepted our handshake.
    TakeOver(mpsc::UnboundedSender<ExtToInMessage<TInEvent>>),
}

/// Message to transmit from a task to the public API.
#[derive(Debug)]
enum InToExtMessage<TOutEvent, TIntoHandler, TReachErr, THandlerErr, TPeerId> {
    /// A connection to a node has succeeded.
    NodeReached(TPeerId),
    /// The task closed.
    TaskClosed(Result<(), TaskClosedEvent<TReachErr, THandlerErr>>, Option<TIntoHandler>),
    /// An event from the node.
    NodeEvent(TOutEvent),
}

/// Implementation of `Future` that handles a single node, and all the communications between
/// the various components of the `HandledNodesTasks`.
struct NodeTask<TFut, TMuxer, TIntoHandler, TInEvent, TOutEvent, TReachErr, TPeerId>
where
    TMuxer: StreamMuxer,
    TIntoHandler: IntoNodeHandler<TPeerId>,
    TIntoHandler::Handler: NodeHandler<Substream = Substream<TMuxer>>,
{
    /// Sender to transmit events to the outside.
    events_tx: mpsc::UnboundedSender<(InToExtMessage<TOutEvent, TIntoHandler, TReachErr, <TIntoHandler::Handler as NodeHandler>::Error, TPeerId>, TaskId)>,
    /// Receiving end for events sent from the main `HandledNodesTasks`.
    in_events_rx: stream::Fuse<mpsc::UnboundedReceiver<ExtToInMessage<TInEvent>>>,
    /// Inner state of the `NodeTask`.
    inner: NodeTaskInner<TFut, TMuxer, TIntoHandler, TInEvent, TPeerId>,
    /// Identifier of the attempt.
    id: TaskId,
    /// Channels to keep alive for as long as we don't have an acknowledgment from the remote.
    taken_over: SmallVec<[mpsc::UnboundedSender<ExtToInMessage<TInEvent>>; 1]>,
}

enum NodeTaskInner<TFut, TMuxer, TIntoHandler, TInEvent, TPeerId>
where
    TMuxer: StreamMuxer,
    TIntoHandler: IntoNodeHandler<TPeerId>,
    TIntoHandler::Handler: NodeHandler<Substream = Substream<TMuxer>>,
{
    /// Future to resolve to connect to the node.
    Future {
        /// The future that will attempt to reach the node.
        future: TFut,
        /// The handler that will be used to build the `HandledNode`.
        handler: TIntoHandler,
        /// While we are dialing the future, we need to buffer the events received on
        /// `in_events_rx` so that they get delivered once dialing succeeds. We can't simply leave
        /// events in `in_events_rx` because we have to detect if it gets closed.
        events_buffer: Vec<TInEvent>,
    },

    /// Fully functional node.
    Node(HandledNode<TMuxer, TIntoHandler::Handler>),

    /// A panic happened while polling.
    Poisoned,
}

impl<TFut, TMuxer, TIntoHandler, TInEvent, TOutEvent, TReachErr, TPeerId> Future for
    NodeTask<TFut, TMuxer, TIntoHandler, TInEvent, TOutEvent, TReachErr, TPeerId>
where
    TMuxer: StreamMuxer,
    TFut: Future<Item = (TPeerId, TMuxer), Error = TReachErr>,
    TIntoHandler: IntoNodeHandler<TPeerId>,
    TIntoHandler::Handler: NodeHandler<Substream = Substream<TMuxer>, InEvent = TInEvent, OutEvent = TOutEvent>,
{
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<(), ()> {
        loop {
            match mem::replace(&mut self.inner, NodeTaskInner::Poisoned) {
                // First possibility: we are still trying to reach a node.
                NodeTaskInner::Future { mut future, handler, mut events_buffer } => {
                    // If self.in_events_rx is closed, we stop the task.
                    loop {
                        match self.in_events_rx.poll() {
                            Ok(Async::Ready(None)) => return Ok(Async::Ready(())),
                            Ok(Async::Ready(Some(ExtToInMessage::HandlerEvent(event)))) => {
                                events_buffer.push(event)
                            },
                            Ok(Async::Ready(Some(ExtToInMessage::TakeOver(take_over)))) => {
                                self.taken_over.push(take_over);
                            },
                            Ok(Async::NotReady) => break,
                            Err(_) => unreachable!("An UnboundedReceiver never errors"),
                        }
                    }
                    // Check whether dialing succeeded.
                    match future.poll() {
                        Ok(Async::Ready((peer_id, muxer))) => {
                            let mut node = HandledNode::new(muxer, handler.into_handler(&peer_id));
                            let event = InToExtMessage::NodeReached(peer_id);
                            for event in events_buffer {
                                node.inject_event(event);
                            }
                            if self.events_tx.unbounded_send((event, self.id)).is_err() {
                                node.shutdown();
                            }
                            self.inner = NodeTaskInner::Node(node);
                        }
                        Ok(Async::NotReady) => {
                            self.inner = NodeTaskInner::Future { future, handler, events_buffer };
                            return Ok(Async::NotReady);
                        },
                        Err(err) => {
                            // End the task
                            let event = InToExtMessage::TaskClosed(Err(TaskClosedEvent::Reach(err)), Some(handler));
                            let _ = self.events_tx.unbounded_send((event, self.id));
                            return Ok(Async::Ready(()));
                        }
                    }
                },

                // Second possibility: we have a node.
                NodeTaskInner::Node(mut node) => {
                    // Start by handling commands received from the outside of the task.
                    if !self.in_events_rx.is_done() {
                        loop {
                            match self.in_events_rx.poll() {
                                Ok(Async::NotReady) => break,
                                Ok(Async::Ready(Some(ExtToInMessage::HandlerEvent(event)))) => {
                                    node.inject_event(event)
                                },
                                Ok(Async::Ready(Some(ExtToInMessage::TakeOver(take_over)))) => {
                                    self.taken_over.push(take_over);
                                },
                                Ok(Async::Ready(None)) => {
                                    // Node closed by the external API; start shutdown process.
                                    node.shutdown();
                                    break;
                                }
                                Err(()) => unreachable!("An unbounded receiver never errors"),
                            }
                        }
                    }

                    // Process the node.
                    loop {
                        if !self.taken_over.is_empty() && node.is_remote_acknowledged() {
                            self.taken_over.clear();
                        }

                        match node.poll() {
                            Ok(Async::NotReady) => {
                                self.inner = NodeTaskInner::Node(node);
                                return Ok(Async::NotReady);
                            },
                            Ok(Async::Ready(Some(event))) => {
                                let event = InToExtMessage::NodeEvent(event);
                                if self.events_tx.unbounded_send((event, self.id)).is_err() {
                                    node.shutdown();
                                }
                            }
                            Ok(Async::Ready(None)) => {
                                let event = InToExtMessage::TaskClosed(Ok(()), None);
                                let _ = self.events_tx.unbounded_send((event, self.id));
                                return Ok(Async::Ready(())); // End the task.
                            }
                            Err(err) => {
                                let event = InToExtMessage::TaskClosed(Err(TaskClosedEvent::Node(err)), None);
                                let _ = self.events_tx.unbounded_send((event, self.id));
                                return Ok(Async::Ready(())); // End the task.
                            }
                        }
                    }
                },

                // This happens if a previous poll has ended unexpectedly. The API of futures
                // guarantees that we shouldn't be polled again.
                NodeTaskInner::Poisoned => panic!("the node task panicked or errored earlier")
            }
        }
    }
}