// Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

use crate::protocol::{
    KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg,
    KademliaProtocolConfig,
};
use futures::prelude::*;
use libp2p_core::protocols_handler::{
    KeepAlive,
    SubstreamProtocol,
    ProtocolsHandler,
    ProtocolsHandlerEvent,
    ProtocolsHandlerUpgrErr
};
use libp2p_core::{upgrade, either::EitherOutput, InboundUpgrade, OutboundUpgrade, PeerId, upgrade::Negotiated};
use multihash::Multihash;
use std::{borrow::Cow, error, fmt, io, time::Duration};
use tokio_io::{AsyncRead, AsyncWrite};
use wasm_timer::Instant;

/// Protocol handler that handles Kademlia communications with the remote.
///
/// The handler will automatically open a Kademlia substream with the remote for each request we
/// make.
///
/// It also handles requests made by the remote.
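///
/// # Example
///
/// A minimal construction sketch (hypothetical and not compiled; `MySubstream`
/// stands in for any concrete `AsyncRead + AsyncWrite` substream type):
///
/// ```ignore
/// // Accept inbound Kademlia substreams as well as opening outbound ones.
/// let handler = KademliaHandler::<MySubstream, ()>::dial_and_listen();
/// ```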
pub struct KademliaHandler<TSubstream, TUserData>
where
    TSubstream: AsyncRead + AsyncWrite,
{
    /// Configuration for the Kademlia protocol.
    config: KademliaProtocolConfig,

    /// If false, we always refuse incoming Kademlia substreams.
    allow_listening: bool,

    /// Next unique ID of a connection.
    next_connec_unique_id: UniqueConnecId,

    /// List of active substreams with the state they are in.
    substreams: Vec<SubstreamState<Negotiated<TSubstream>, TUserData>>,

    /// Until when to keep the connection alive.
    keep_alive: KeepAlive,
}

/// State of an active substream, opened either by us or by the remote.
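///
/// Outbound substreams (opened by us) advance through `OutPendingOpen` ->
/// `OutPendingSend` -> `OutPendingFlush` -> `OutWaitingAnswer` -> `OutClosing`,
/// while inbound substreams (opened by the remote) cycle through
/// `InWaitingMessage` -> `InWaitingUser` -> `InPendingSend` -> `InPendingFlush`
/// and back to `InWaitingMessage`.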
enum SubstreamState<TSubstream, TUserData>
where
    TSubstream: AsyncRead + AsyncWrite,
{
    /// We haven't started opening the outgoing substream yet.
    /// Contains the request we want to send, and the user data if we expect an answer.
    OutPendingOpen(KadRequestMsg, Option<TUserData>),
    /// Waiting to send a message to the remote.
    OutPendingSend(
        KadOutStreamSink<TSubstream>,
        KadRequestMsg,
        Option<TUserData>,
    ),
    /// Waiting to flush the substream so that the data arrives at the remote.
    OutPendingFlush(KadOutStreamSink<TSubstream>, Option<TUserData>),
    /// Waiting for an answer back from the remote.
    // TODO: add timeout
    OutWaitingAnswer(KadOutStreamSink<TSubstream>, TUserData),
    /// An error happened on the substream and we should report the error to the user.
    OutReportError(KademliaHandlerQueryErr, TUserData),
    /// The substream is being closed.
    OutClosing(KadOutStreamSink<TSubstream>),
    /// Waiting for a request from the remote.
    InWaitingMessage(UniqueConnecId, KadInStreamSink<TSubstream>),
    /// Waiting for the user to send a `KademliaHandlerIn` event containing the response.
    InWaitingUser(UniqueConnecId, KadInStreamSink<TSubstream>),
    /// Waiting to send an answer back to the remote.
    InPendingSend(UniqueConnecId, KadInStreamSink<TSubstream>, KadResponseMsg),
    /// Waiting to flush an answer back to the remote.
    InPendingFlush(UniqueConnecId, KadInStreamSink<TSubstream>),
    /// The substream is being closed.
    InClosing(KadInStreamSink<TSubstream>),
}

impl<TSubstream, TUserData> SubstreamState<TSubstream, TUserData>
where
    TSubstream: AsyncRead + AsyncWrite,
{
    /// Consumes this state and tries to close the substream.
    ///
    /// If the substream is not ready to be closed, returns it back.
    fn try_close(self) -> AsyncSink<Self> {
        match self {
            SubstreamState::OutPendingOpen(_, _)
            | SubstreamState::OutReportError(_, _) => AsyncSink::Ready,
            SubstreamState::OutPendingSend(mut stream, _, _)
            | SubstreamState::OutPendingFlush(mut stream, _)
            | SubstreamState::OutWaitingAnswer(mut stream, _)
            | SubstreamState::OutClosing(mut stream) => match stream.close() {
                Ok(Async::Ready(())) | Err(_) => AsyncSink::Ready,
                Ok(Async::NotReady) => AsyncSink::NotReady(SubstreamState::OutClosing(stream)),
            },
            SubstreamState::InWaitingMessage(_, mut stream)
            | SubstreamState::InWaitingUser(_, mut stream)
            | SubstreamState::InPendingSend(_, mut stream, _)
            | SubstreamState::InPendingFlush(_, mut stream)
            | SubstreamState::InClosing(mut stream) => match stream.close() {
                Ok(Async::Ready(())) | Err(_) => AsyncSink::Ready,
                Ok(Async::NotReady) => AsyncSink::NotReady(SubstreamState::InClosing(stream)),
            },
        }
    }
}

/// Event produced by the Kademlia handler.
#[derive(Debug)]
pub enum KademliaHandlerEvent<TUserData> {
    /// Request for the list of nodes whose IDs are the closest to `key`. The number of nodes
    /// returned is not specified, but should be around 20.
    FindNodeReq {
        /// Identifier of the node.
        key: PeerId,
        /// Identifier of the request. Needs to be passed back when answering.
        request_id: KademliaRequestId,
    },

    /// Response to a `KademliaHandlerIn::FindNodeReq`.
    FindNodeRes {
        /// Results of the request.
        closer_peers: Vec<KadPeer>,
        /// The user data passed to the `FindNodeReq`.
        user_data: TUserData,
    },

    /// Same as `FindNodeReq`, but should also return the entries of the local providers list for
    /// this key.
    GetProvidersReq {
        /// Identifier being searched.
        key: Multihash,
        /// Identifier of the request. Needs to be passed back when answering.
        request_id: KademliaRequestId,
    },

    /// Response to a `KademliaHandlerIn::GetProvidersReq`.
    GetProvidersRes {
        /// Nodes closest to the key.
        closer_peers: Vec<KadPeer>,
        /// Known providers for this key.
        provider_peers: Vec<KadPeer>,
        /// The user data passed to the `GetProvidersReq`.
        user_data: TUserData,
    },

    /// An error happened when performing a query.
    QueryError {
        /// The error that happened.
        error: KademliaHandlerQueryErr,
        /// The user data passed to the query.
        user_data: TUserData,
    },

    /// The remote indicates that this list of providers is known for this key.
    AddProvider {
        /// Key for which we should add providers.
        key: Multihash,
        /// Known provider for this key.
        provider_peer: KadPeer,
    },
}

/// Error that can happen when requesting an RPC query.
#[derive(Debug)]
pub enum KademliaHandlerQueryErr {
    /// Error while trying to perform the query.
    Upgrade(ProtocolsHandlerUpgrErr<io::Error>),
    /// Received an answer that doesn't correspond to the request.
    UnexpectedMessage,
    /// I/O error in the substream.
    Io(io::Error),
}

impl fmt::Display for KademliaHandlerQueryErr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            KademliaHandlerQueryErr::Upgrade(err) => {
                write!(f, "Error while performing Kademlia query: {}", err)
            },
            KademliaHandlerQueryErr::UnexpectedMessage => {
                write!(f, "Remote answered our Kademlia RPC query with the wrong message type")
            },
            KademliaHandlerQueryErr::Io(err) => {
                write!(f, "I/O error during a Kademlia RPC query: {}", err)
            },
        }
    }
}

impl error::Error for KademliaHandlerQueryErr {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            KademliaHandlerQueryErr::Upgrade(err) => Some(err),
            KademliaHandlerQueryErr::UnexpectedMessage => None,
            KademliaHandlerQueryErr::Io(err) => Some(err),
        }
    }
}

impl From<ProtocolsHandlerUpgrErr<io::Error>> for KademliaHandlerQueryErr {
    #[inline]
    fn from(err: ProtocolsHandlerUpgrErr<io::Error>) -> Self {
        KademliaHandlerQueryErr::Upgrade(err)
    }
}

/// Event to send to the handler.
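///
/// # Example
///
/// A hedged sketch of answering a remote's request (`handler` and `request_id`
/// are hypothetical; `request_id` would come from a previously received
/// `KademliaHandlerEvent::FindNodeReq`):
///
/// ```ignore
/// handler.inject_event(KademliaHandlerIn::FindNodeRes {
///     closer_peers: vec![],
///     request_id,
/// });
/// ```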
pub enum KademliaHandlerIn<TUserData> {
    /// Request for the list of nodes whose IDs are the closest to `key`. The number of nodes
    /// returned is not specified, but should be around 20.
    FindNodeReq {
        /// Identifier of the node.
        key: PeerId,
        /// Custom user data. Passed back in the out event when the results arrive.
        user_data: TUserData,
    },

    /// Response to a `FindNodeReq`.
    FindNodeRes {
        /// Results of the request.
        closer_peers: Vec<KadPeer>,
        /// Identifier of the request that was made by the remote.
        ///
        /// It is a logic error to use an id issued by a different node's handler.
        request_id: KademliaRequestId,
    },

    /// Same as `FindNodeReq`, but should also return the entries of the local providers list for
    /// this key.
    GetProvidersReq {
        /// Identifier being searched.
        key: Multihash,
        /// Custom user data. Passed back in the out event when the results arrive.
        user_data: TUserData,
    },

    /// Response to a `GetProvidersReq`.
    GetProvidersRes {
        /// Nodes closest to the key.
        closer_peers: Vec<KadPeer>,
        /// Known providers for this key.
        provider_peers: Vec<KadPeer>,
        /// Identifier of the request that was made by the remote.
        ///
        /// It is a logic error to use an id issued by a different node's handler.
        request_id: KademliaRequestId,
    },

    /// Indicates that this provider is known for this key.
    ///
    /// The API of the handler doesn't expose any event that allows you to know whether this
    /// succeeded.
    AddProvider {
        /// Key for which we should add providers.
        key: Multihash,
        /// Known provider for this key.
        provider_peer: KadPeer,
    },
}

/// Unique identifier for a request. Must be passed back in order to answer a request from
/// the remote.
///
/// We don't implement `Clone` on purpose, in order to prevent users from answering the same
/// request twice.
#[derive(Debug, PartialEq, Eq)]
pub struct KademliaRequestId {
    /// Unique identifier for an incoming connection.
    connec_unique_id: UniqueConnecId,
}

/// Unique identifier for a connection.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct UniqueConnecId(u64);

impl<TSubstream, TUserData> KademliaHandler<TSubstream, TUserData>
where
    TSubstream: AsyncRead + AsyncWrite,
{
    /// Create a `KademliaHandler` that only allows sending messages to the remote and denies
    /// incoming Kademlia substreams.
    pub fn dial_only() -> Self {
        KademliaHandler::with_allow_listening(false)
    }

    /// Create a `KademliaHandler` that allows sending messages to the remote as well as
    /// receiving incoming requests.
    ///
    /// The `Default` trait implementation wraps around this function.
    pub fn dial_and_listen() -> Self {
        KademliaHandler::with_allow_listening(true)
    }

    fn with_allow_listening(allow_listening: bool) -> Self {
        KademliaHandler {
            config: Default::default(),
            allow_listening,
            next_connec_unique_id: UniqueConnecId(0),
            substreams: Vec::new(),
            keep_alive: KeepAlive::Yes,
        }
    }

    /// Modifies the protocol name used on the wire. Can be used to create incompatibilities
    /// between networks on purpose.
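    ///
    /// # Example
    ///
    /// A hedged sketch (`handler` is hypothetical and the custom protocol name is
    /// made up for illustration):
    ///
    /// ```ignore
    /// let handler = handler.with_protocol_name(b"/my-net/kad/1.0.0".to_vec());
    /// ```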
    pub fn with_protocol_name(mut self, name: impl Into<Cow<'static, [u8]>>) -> Self {
        self.config = self.config.with_protocol_name(name);
        self
    }
}

impl<TSubstream, TUserData> Default for KademliaHandler<TSubstream, TUserData>
where
    TSubstream: AsyncRead + AsyncWrite,
{
    #[inline]
    fn default() -> Self {
        KademliaHandler::dial_and_listen()
    }
}

impl<TSubstream, TUserData> ProtocolsHandler for KademliaHandler<TSubstream, TUserData>
where
    TSubstream: AsyncRead + AsyncWrite,
    TUserData: Clone,
{
    type InEvent = KademliaHandlerIn<TUserData>;
    type OutEvent = KademliaHandlerEvent<TUserData>;
    type Error = io::Error; // TODO: better error type?
    type Substream = TSubstream;
    type InboundProtocol = upgrade::EitherUpgrade<KademliaProtocolConfig, upgrade::DeniedUpgrade>;
    type OutboundProtocol = KademliaProtocolConfig;
    // Message of the request to send to the remote, and user data if we expect an answer.
    type OutboundOpenInfo = (KadRequestMsg, Option<TUserData>);

    #[inline]
    fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol> {
        if self.allow_listening {
            SubstreamProtocol::new(self.config.clone()).map_upgrade(upgrade::EitherUpgrade::A)
        } else {
            SubstreamProtocol::new(upgrade::EitherUpgrade::B(upgrade::DeniedUpgrade))
        }
    }

    fn inject_fully_negotiated_outbound(
        &mut self,
        protocol: <Self::OutboundProtocol as OutboundUpgrade<TSubstream>>::Output,
        (msg, user_data): Self::OutboundOpenInfo,
    ) {
        self.substreams
            .push(SubstreamState::OutPendingSend(protocol, msg, user_data));
    }

    fn inject_fully_negotiated_inbound(
        &mut self,
        protocol: <Self::InboundProtocol as InboundUpgrade<TSubstream>>::Output,
    ) {
        // If `self.allow_listening` is false, then we produced a `DeniedUpgrade` and `protocol`
        // is a `Void`.
        let protocol = match protocol {
            EitherOutput::First(p) => p,
            EitherOutput::Second(p) => void::unreachable(p),
        };

        debug_assert!(self.allow_listening);
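        // Assign a fresh id to this substream, so that the user's eventual response
        // (carrying a `KademliaRequestId`) can be routed back to exactly this substream.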
        let connec_unique_id = self.next_connec_unique_id;
        self.next_connec_unique_id.0 += 1;
        self.substreams
            .push(SubstreamState::InWaitingMessage(connec_unique_id, protocol));
    }

    #[inline]
    fn inject_event(&mut self, message: KademliaHandlerIn<TUserData>) {
        match message {
            KademliaHandlerIn::FindNodeReq { key, user_data } => {
                let msg = KadRequestMsg::FindNode { key: key.clone() };
                self.substreams
                    .push(SubstreamState::OutPendingOpen(msg, Some(user_data.clone())));
            }
            KademliaHandlerIn::FindNodeRes {
                closer_peers,
                request_id,
            } => {
                let pos = self.substreams.iter().position(|state| match state {
                    SubstreamState::InWaitingUser(ref conn_id, _) =>
                        conn_id == &request_id.connec_unique_id,
                    _ => false,
                });

                if let Some(pos) = pos {
                    let (conn_id, substream) = match self.substreams.remove(pos) {
                        SubstreamState::InWaitingUser(conn_id, substream) => (conn_id, substream),
                        _ => unreachable!(),
                    };

                    let msg = KadResponseMsg::FindNode {
                        closer_peers: closer_peers.clone(),
                    };
                    self.substreams
                        .push(SubstreamState::InPendingSend(conn_id, substream, msg));
                }
            }
            KademliaHandlerIn::GetProvidersReq { key, user_data } => {
                let msg = KadRequestMsg::GetProviders { key: key.clone() };
                self.substreams
                    .push(SubstreamState::OutPendingOpen(msg, Some(user_data.clone())));
            }
            KademliaHandlerIn::GetProvidersRes {
                closer_peers,
                provider_peers,
                request_id,
            } => {
                let pos = self.substreams.iter().position(|state| match state {
                    SubstreamState::InWaitingUser(ref conn_id, _) =>
                        conn_id == &request_id.connec_unique_id,
                    _ => false,
                });

                if let Some(pos) = pos {
                    let (conn_id, substream) = match self.substreams.remove(pos) {
                        SubstreamState::InWaitingUser(conn_id, substream) => (conn_id, substream),
                        _ => unreachable!(),
                    };

                    let msg = KadResponseMsg::GetProviders {
                        closer_peers: closer_peers.clone(),
                        provider_peers: provider_peers.clone(),
                    };
                    self.substreams
                        .push(SubstreamState::InPendingSend(conn_id, substream, msg));
                }
            }
            KademliaHandlerIn::AddProvider { key, provider_peer } => {
                let msg = KadRequestMsg::AddProvider {
                    key: key.clone(),
                    provider_peer: provider_peer.clone(),
                };
                self.substreams
                    .push(SubstreamState::OutPendingOpen(msg, None));
            }
        }
    }

    #[inline]
    fn inject_dial_upgrade_error(
        &mut self,
        (_, user_data): Self::OutboundOpenInfo,
        error: ProtocolsHandlerUpgrErr<io::Error>,
    ) {
        // TODO: cache the fact that the remote doesn't support kademlia at all, so that we don't
        // continue trying
        if let Some(user_data) = user_data {
            self.substreams
                .push(SubstreamState::OutReportError(error.into(), user_data));
        }
    }

    #[inline]
    fn connection_keep_alive(&self) -> KeepAlive {
        self.keep_alive
    }

    fn poll(
        &mut self,
    ) -> Poll<
        ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent>,
        io::Error,
    > {
        // We remove each element from `substreams` one by one and add them back.
        for n in (0..self.substreams.len()).rev() {
            let mut substream = self.substreams.swap_remove(n);

            loop {
                match advance_substream(substream, self.config.clone()) {
                    (Some(new_state), Some(event), _) => {
                        self.substreams.push(new_state);
                        return Ok(Async::Ready(event));
                    }
                    (None, Some(event), _) => {
                        return Ok(Async::Ready(event));
                    }
                    (Some(new_state), None, false) => {
                        self.substreams.push(new_state);
                        break;
                    }
                    (Some(new_state), None, true) => {
                        substream = new_state;
                        continue;
                    }
                    (None, None, _) => {
                        break;
                    }
                }
            }
        }

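        // With no substreams left, keep the connection alive only for a short grace
        // period; as long as any substream is active, keep it alive indefinitely.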
        if self.substreams.is_empty() {
            self.keep_alive = KeepAlive::Until(Instant::now() + Duration::from_secs(10));
        } else {
            self.keep_alive = KeepAlive::Yes;
        }

        Ok(Async::NotReady)
    }
}

/// Advances one substream.
///
/// Returns the new state for that substream, an event to generate, and whether the substream
/// should be polled again.
fn advance_substream<TSubstream, TUserData>(
    state: SubstreamState<TSubstream, TUserData>,
    upgrade: KademliaProtocolConfig,
) -> (
    Option<SubstreamState<TSubstream, TUserData>>,
    Option<
        ProtocolsHandlerEvent<
            KademliaProtocolConfig,
            (KadRequestMsg, Option<TUserData>),
            KademliaHandlerEvent<TUserData>,
        >,
    >,
    bool,
)
where
    TSubstream: AsyncRead + AsyncWrite,
{
    match state {
        SubstreamState::OutPendingOpen(msg, user_data) => {
            let ev = ProtocolsHandlerEvent::OutboundSubstreamRequest {
                protocol: SubstreamProtocol::new(upgrade),
                info: (msg, user_data),
            };
            (None, Some(ev), false)
        }
        SubstreamState::OutPendingSend(mut substream, msg, user_data) => {
            match substream.start_send(msg) {
                Ok(AsyncSink::Ready) => (
                    Some(SubstreamState::OutPendingFlush(substream, user_data)),
                    None,
                    true,
                ),
                Ok(AsyncSink::NotReady(msg)) => (
                    Some(SubstreamState::OutPendingSend(substream, msg, user_data)),
                    None,
                    false,
                ),
                Err(error) => {
                    let event = if let Some(user_data) = user_data {
                        Some(ProtocolsHandlerEvent::Custom(KademliaHandlerEvent::QueryError {
                            error: KademliaHandlerQueryErr::Io(error),
                            user_data
                        }))
                    } else {
                        None
                    };

                    (None, event, false)
                }
            }
        }
        SubstreamState::OutPendingFlush(mut substream, user_data) => {
            match substream.poll_complete() {
                Ok(Async::Ready(())) => {
                    if let Some(user_data) = user_data {
                        (
                            Some(SubstreamState::OutWaitingAnswer(substream, user_data)),
                            None,
                            true,
                        )
                    } else {
                        (Some(SubstreamState::OutClosing(substream)), None, true)
                    }
                }
                Ok(Async::NotReady) => (
                    Some(SubstreamState::OutPendingFlush(substream, user_data)),
                    None,
                    false,
                ),
                Err(error) => {
                    let event = if let Some(user_data) = user_data {
                        Some(ProtocolsHandlerEvent::Custom(KademliaHandlerEvent::QueryError {
                            error: KademliaHandlerQueryErr::Io(error),
                            user_data,
                        }))
                    } else {
                        None
                    };

                    (None, event, false)
                }
            }
        }
        SubstreamState::OutWaitingAnswer(mut substream, user_data) => match substream.poll() {
            Ok(Async::Ready(Some(msg))) => {
                let new_state = SubstreamState::OutClosing(substream);
                let event = process_kad_response(msg, user_data);
                (
                    Some(new_state),
                    Some(ProtocolsHandlerEvent::Custom(event)),
                    true,
                )
            }
            Ok(Async::NotReady) => (
                Some(SubstreamState::OutWaitingAnswer(substream, user_data)),
                None,
                false,
            ),
            Err(error) => {
                let event = KademliaHandlerEvent::QueryError {
                    error: KademliaHandlerQueryErr::Io(error),
                    user_data,
                };
                (None, Some(ProtocolsHandlerEvent::Custom(event)), false)
            }
            Ok(Async::Ready(None)) => {
                let event = KademliaHandlerEvent::QueryError {
                    error: KademliaHandlerQueryErr::Io(io::ErrorKind::UnexpectedEof.into()),
                    user_data,
                };
                (None, Some(ProtocolsHandlerEvent::Custom(event)), false)
            }
        },
        SubstreamState::OutReportError(error, user_data) => {
            let event = KademliaHandlerEvent::QueryError { error, user_data };
            (None, Some(ProtocolsHandlerEvent::Custom(event)), false)
        }
        SubstreamState::OutClosing(mut stream) => match stream.close() {
            Ok(Async::Ready(())) => (None, None, false),
            Ok(Async::NotReady) => (Some(SubstreamState::OutClosing(stream)), None, false),
            Err(_) => (None, None, false),
        },
        SubstreamState::InWaitingMessage(id, mut substream) => match substream.poll() {
            Ok(Async::Ready(Some(msg))) => {
                if let Ok(ev) = process_kad_request(msg, id) {
                    (
                        Some(SubstreamState::InWaitingUser(id, substream)),
                        Some(ProtocolsHandlerEvent::Custom(ev)),
                        false,
                    )
                } else {
                    (Some(SubstreamState::InClosing(substream)), None, true)
                }
            }
            Ok(Async::NotReady) => (
                Some(SubstreamState::InWaitingMessage(id, substream)),
                None,
                false,
            ),
            Ok(Async::Ready(None)) | Err(_) => (None, None, false),
        },
        SubstreamState::InWaitingUser(id, substream) => (
            Some(SubstreamState::InWaitingUser(id, substream)),
            None,
            false,
        ),
        SubstreamState::InPendingSend(id, mut substream, msg) => match substream.start_send(msg) {
            Ok(AsyncSink::Ready) => (
                Some(SubstreamState::InPendingFlush(id, substream)),
                None,
                true,
            ),
            Ok(AsyncSink::NotReady(msg)) => (
                Some(SubstreamState::InPendingSend(id, substream, msg)),
                None,
                false,
            ),
            Err(_) => (None, None, false),
        },
        SubstreamState::InPendingFlush(id, mut substream) => match substream.poll_complete() {
            Ok(Async::Ready(())) => (
                Some(SubstreamState::InWaitingMessage(id, substream)),
                None,
                true,
            ),
            Ok(Async::NotReady) => (
                Some(SubstreamState::InPendingFlush(id, substream)),
                None,
                false,
            ),
            Err(_) => (None, None, false),
        },
        SubstreamState::InClosing(mut stream) => match stream.close() {
            Ok(Async::Ready(())) => (None, None, false),
            Ok(Async::NotReady) => (Some(SubstreamState::InClosing(stream)), None, false),
            Err(_) => (None, None, false),
        },
    }
}

/// Processes a Kademlia message that's expected to be a request from a remote.
fn process_kad_request<TUserData>(
    event: KadRequestMsg,
    connec_unique_id: UniqueConnecId,
) -> Result<KademliaHandlerEvent<TUserData>, io::Error> {
    match event {
        KadRequestMsg::Ping => {
            // TODO: implement; although in practice the PING message is never
            // used, so we may consider removing it altogether
            Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "the PING Kademlia message is not implemented",
            ))
        }
        KadRequestMsg::FindNode { key } => Ok(KademliaHandlerEvent::FindNodeReq {
            key,
            request_id: KademliaRequestId { connec_unique_id },
        }),
        KadRequestMsg::GetProviders { key } => Ok(KademliaHandlerEvent::GetProvidersReq {
            key,
            request_id: KademliaRequestId { connec_unique_id },
        }),
        KadRequestMsg::AddProvider { key, provider_peer } => {
            Ok(KademliaHandlerEvent::AddProvider { key, provider_peer })
        }
    }
}

/// Process a Kademlia message that's supposed to be a response to one of our requests.
fn process_kad_response<TUserData>(
    event: KadResponseMsg,
    user_data: TUserData,
) -> KademliaHandlerEvent<TUserData> {
    // TODO: must check that the response corresponds to the request
    match event {
        KadResponseMsg::Pong => {
            // We never send out pings.
            KademliaHandlerEvent::QueryError {
                error: KademliaHandlerQueryErr::UnexpectedMessage,
                user_data,
            }
        }
        KadResponseMsg::FindNode { closer_peers } => {
            KademliaHandlerEvent::FindNodeRes {
                closer_peers,
                user_data,
            }
        },
        KadResponseMsg::GetProviders {
            closer_peers,
            provider_peers,
        } => KademliaHandlerEvent::GetProvidersRes {
            closer_peers,
            provider_peers,
            user_data,
        },
    }
}