Fix everything except trust-graph

folex
2021-01-07 18:31:41 +03:00
parent 263407b6f0
commit e3d6fb4823
10 changed files with 65 additions and 73 deletions

View File

@@ -52,6 +52,11 @@ impl Addresses {
self.addrs.iter()
}
/// Returns a mutable iterator over the addresses.
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut Multiaddr> {
self.addrs.iter_mut()
}
/// Returns the number of addresses in the list.
pub fn len(&self) -> usize {
self.addrs.len()
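
The new iter_mut accessor makes it possible to rewrite stored addresses in place rather than rebuilding the list. A minimal usage sketch, assuming the Addresses type from this file and Multiaddr/Protocol from libp2p-core; the rewrite_tcp_port helper is hypothetical:

    use libp2p_core::multiaddr::{Multiaddr, Protocol};

    // Hypothetical helper: change the TCP port on every stored address in place.
    fn rewrite_tcp_port(addrs: &mut Addresses, new_port: u16) {
        for addr in addrs.iter_mut() {
            let rewritten: Multiaddr = addr
                .iter()
                .map(|p| match p {
                    Protocol::Tcp(_) => Protocol::Tcp(new_port),
                    other => other,
                })
                .collect();
            *addr = rewritten;
        }
    }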

View File

@@ -386,7 +386,7 @@ where
record_ttl: config.record_ttl,
provider_record_ttl: config.provider_record_ttl,
connection_idle_timeout: config.connection_idle_timeout,
local_addrs: HashSet::new()
local_addrs: HashSet::new(),
trust,
metrics: Metrics::disabled(),
}
@@ -880,38 +880,55 @@ where
let local_addrs = &self.local_addrs;
self.store.providers(key)
.into_iter()
.filter_map(move |p|
.filter_map(move |p| {
let kad_peer = if &p.provider != source {
let node_id = p.provider;
let multiaddrs = p.addresses;
let connection_ty = if connected.contains(&node_id) {
KadConnectionType::Connected
} else {
KadConnectionType::NotConnected
};
if multiaddrs.is_empty() {
if &node_id == kbuckets.local_key().preimage() {
// The provider is either the local node and we fill in
// the local addresses on demand, or it is a legacy
// provider record without addresses, in which case we
// try to find addresses in the routing table, as was
// done before provider records were stored along with
// their addresses.
if &node_id == kbuckets.local_key().preimage() {
Some(local_addrs.iter().cloned().collect::<Vec<_>>())
} else {
let key = kbucket::Key::from(node_id);
kbuckets.entry(&key).view().map(|e| e.node.value.clone().into_vec())
}
} else {
Some(multiaddrs)
}
.map(|multiaddrs| {
KadPeer {
// the local addresses on demand,
let self_key = self.kbuckets.local_public_key();
let certificates = self.trust.get_all_certs(&self_key, &[]);
let multiaddrs = local_addrs.iter().cloned().collect::<Vec<_>>();
Some(KadPeer {
public_key: self_key,
node_id,
multiaddrs,
connection_ty,
}
})
certificates
})
} else {
let key = kbucket::Key::from(node_id);
kbuckets.entry(&key).view().map(|e| {
let contact = e.node.value;
let multiaddrs = if p.addresses.is_empty() {
// This is a legacy (pre-#1708) provider without addresses,
// so take addresses from the routing table
contact.addresses.clone().into_vec()
} else {
p.addresses
};
let certificates = node_id.as_public_key().map(|provider_pk|
self.trust.get_all_certs(provider_pk, &[])
).unwrap_or_default();
KadPeer {
node_id,
multiaddrs,
public_key: contact.public_key.clone(),
connection_ty: match e.status {
NodeStatus::Connected => KadConnectionType::Connected,
NodeStatus::Disconnected => KadConnectionType::NotConnected
},
certificates
}
})
}
} else {
None
};
@@ -925,13 +942,7 @@ where
kad_peer
})
.take(self.queries.config().replication_factor.get())
.collect::<Vec<_>>();
peers.iter_mut().for_each(|peer|
peer.certificates = self.trust.get_all_certs(&peer.public_key, &[])
);
peers
.collect::<Vec<_>>()
}
/// Starts an iterative `ADD_PROVIDER` query for the given key.
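
For orientation, the KadPeer values assembled above now carry the provider's public key and its trust-graph certificates in addition to the upstream fields. A rough sketch of the shape implied by the constructor sites in this hunk (field names are taken from the code above; the exact types, in particular the certificate type from the trust-graph crate, are assumptions):

    // Sketch only; see the protocol module of this fork for the real definition.
    pub struct KadPeer {
        pub node_id: PeerId,                   // the provider's peer id
        pub public_key: identity::PublicKey,   // used to look up trust certificates
        pub multiaddrs: Vec<Multiaddr>,        // provider addresses, or the routing-table fallback
        pub connection_ty: KadConnectionType,  // Connected / NotConnected
        pub certificates: Vec<Certificate>,    // trust-graph certificates (assumed type)
    }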
@@ -1785,8 +1796,8 @@ where
let (old, new) = (old.get_remote_address(), new.get_remote_address());
// Update routing table.
if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::from(*peer)).value() {
if addrs.replace(old, new) {
if let Some(contact) = self.kbuckets.entry(&kbucket::Key::from(*peer)).value() {
if contact.addresses.replace(old, new) {
debug!("Address '{}' replaced with '{}' for peer '{}'.", old, new, peer);
} else {
debug!(
@@ -1818,8 +1829,8 @@ where
// large performance impact. If so, the code below might be worth
// revisiting.
for query in self.queries.iter_mut() {
if let Some(addrs) = query.inner.addresses.get_mut(peer) {
for addr in addrs.iter_mut() {
if let Some(contact) = query.inner.contacts.get_mut(peer) {
for addr in contact.addresses.iter_mut() {
if addr == old {
*addr = new.clone();
}
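
Both address-change hunks now go through a Contact value instead of a bare address list; the same pairing of addresses and public key shows up in the From impls for KadPeer near the end of this file. A minimal sketch of the assumed shape (field names come from the destructuring there; the types are inferred):

    // Sketch only; the routing table stores one Contact per peer instead of plain Addresses.
    #[derive(Clone)]
    pub struct Contact {
        pub addresses: Addresses,              // the peer's known Multiaddrs
        pub public_key: identity::PublicKey,   // kept alongside so certificates can be attached
    }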
@@ -1905,7 +1916,7 @@ where
let contact = self.queries
.iter_mut()
.find_map(|q| q.inner.contacts.get(peer))
.find_map(|q| q.inner.contacts.get(&source))
.cloned()
.and_then(|mut c|
new_address.map(|addr| {
@@ -1913,7 +1924,7 @@ where
c
}));
self.connection_updated(peer.clone(), contact, NodeStatus::Connected);
self.connection_updated(source, contact, NodeStatus::Connected);
}
KademliaHandlerEvent::FindNodeReq { key, request_id } => {
@@ -2594,22 +2605,6 @@ impl AddProviderError {
}
}
impl From<kbucket::EntryRefView<'_, kbucket::Key<PeerId>, Contact>> for KadPeer {
fn from(e: kbucket::EntryRefView<'_, kbucket::Key<PeerId>, Contact>) -> KadPeer {
let Contact { addresses, public_key } = e.node.value;
KadPeer {
public_key: public_key.clone(),
node_id: e.node.key.clone().into_preimage(),
multiaddrs: addresses.clone().into_vec(),
connection_ty: match e.status {
NodeStatus::Connected => KadConnectionType::Connected,
NodeStatus::Disconnected => KadConnectionType::NotConnected
},
certificates: vec![]
}
}
}
impl From<kbucket::EntryView<kbucket::Key<PeerId>, Contact>> for KadPeer {
fn from(e: kbucket::EntryView<kbucket::Key<PeerId>, Contact>) -> KadPeer {
let Contact { addresses, public_key } = e.node.value;

View File

@@ -1117,18 +1117,13 @@ fn manual_bucket_inserts() {
#[test]
fn network_behaviour_inject_address_change() {
let local_peer_id = PeerId::random();
let (_, _, mut kademlia) = build_node();
let remote_peer_id = PeerId::random();
let connection_id = ConnectionId::new(1);
let old_address: Multiaddr = Protocol::Memory(1).into();
let new_address: Multiaddr = Protocol::Memory(2).into();
let mut kademlia = Kademlia::new(
local_peer_id.clone(),
MemoryStore::new(local_peer_id),
);
let endpoint = ConnectedPoint::Dialer { address: old_address.clone() };
// Mimic a connection being established.

View File

@@ -606,9 +606,11 @@ mod tests {
#[test]
fn buckets_are_non_overlapping_and_exhaustive() {
let local_key = Key::from(PeerId::random());
let keypair = ed25519::Keypair::generate();
let public_key = identity::PublicKey::Ed25519(keypair.public());
let local_key = Key::from(PeerId::from(public_key));
let timeout = Duration::from_secs(0);
let mut table = KBucketsTable::<KeyBytes, ()>::new(local_key.into(), timeout);
let mut table = KBucketsTable::<KeyBytes, ()>::new(keypair, local_key.into(), timeout);
let mut prev_max = U256::from(0);

View File

@@ -419,7 +419,7 @@ mod tests {
let mut bucket = Weighted::new(Duration::from_secs(100000));
for (i, (weight, status)) in weight_status.into_iter().enumerate() {
let key = Key::new(PeerId::random());
let key = Key::from(PeerId::random());
let node = Node {
key: key.clone(),
value: (),
@@ -464,7 +464,7 @@ mod tests {
let mut map: HashMap<u32, Vec<(Node<Key<PeerId>, ()>, NodeStatus)>> = HashMap::new();
for (weight, status) in weight_status {
let node: Node<Key<PeerId>, ()> = Node {
key: Key::new(PeerId::random()),
key: Key::from(PeerId::random()),
value: (),
weight,
};

View File

@@ -65,7 +65,7 @@ impl Metrics {
}
pub(super) fn enabled(registry: &Registry, peer_id: &PeerId) -> Self {
let peer_id = bs58::encode(peer_id).into_string();
let peer_id = peer_id.to_string();
let opts = |name: &str| -> Opts {
let mut opts = Opts::new(name, name)
.namespace("libp2p")
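
The label change relies on PeerId's Display implementation, which in rust-libp2p renders the peer id as the same base58 string that the old bs58::encode call produced, so the metric labels should stay identical. A small sanity check, assuming libp2p_core::PeerId:

    use libp2p_core::PeerId;

    #[test]
    fn peer_id_label_is_base58() {
        let peer_id = PeerId::random();
        // Display on PeerId is the base58 encoding of the underlying multihash,
        // matching what bs58::encode produced before this change.
        assert_eq!(peer_id.to_string(), peer_id.to_base58());
    }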

View File

@@ -537,8 +537,6 @@ mod tests {
use quickcheck::*;
use rand::{Rng, rngs::StdRng, SeedableRng};
use libp2p_core::PeerId;
use super::*;
fn random_peers<R: Rng>(n: usize, g: &mut R) -> Vec<PeerId> {