Stack allocated PeerId (#1874)

* Stack allocate PeerId.

* Update stuff.

* Upgrade rustls to fix build.

* Remove unnecessary manual implementations.

* Remove PeerId::into_bytes.

* Remove bytes dependency.

* Perform some cleanup.

* Use Into<kbucket::Key<K>>.

* Update versions and changelogs.

* Fix PR link.

* Fix benchmarks.

Co-authored-by: Roman S. Borschel <roman@parity.io>
Author: David Craven
Date: 2020-12-15 14:40:39 +01:00
Committed by: GitHub
Parent: a26f6aa674
Commit: 23b0aa016f
60 changed files with 252 additions and 223 deletions
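The diffs below move `PeerId` from a heap-allocated byte buffer to an inline `Multihash`, make it `Copy`, and replace `into_bytes`/`as_bytes` with `to_bytes` plus a borrowing `from_bytes`. A minimal sketch of the resulting API, assuming `libp2p-core` 0.26.0 as published by this commit (the `main` function and variable names are illustrative only):

```rust
use libp2p_core::{identity, PeerId};

fn main() {
    // Derive a PeerId from a freshly generated keypair, as in the benchmarks below.
    let peer_id: PeerId = identity::Keypair::generate_ed25519()
        .public()
        .into_peer_id();

    // `PeerId` now derives `Copy`, so no `.clone()` is needed to duplicate it.
    let copy = peer_id;

    // `into_bytes`/`as_bytes` are gone; `to_bytes` returns the multihash bytes
    // and `from_bytes` borrows a slice instead of consuming a `Vec<u8>`.
    let bytes: Vec<u8> = peer_id.to_bytes();
    let parsed = PeerId::from_bytes(&bytes).expect("round-trip never fails");

    assert_eq!(copy, parsed);
    println!("{}", parsed.to_base58());
}
```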


@@ -23,6 +23,10 @@
 - [`parity-multiaddr` CHANGELOG](misc/multiaddr/CHANGELOG.md)
 - [`libp2p-core-derive` CHANGELOG](misc/core-derive/CHANGELOG.md)
+# Version 0.33.0 [unreleased]
+- Update `libp2p-core` and all dependent crates.
 # Version 0.32.2 [2020-12-10]
 - Update `libp2p-websocket`.


@@ -2,7 +2,7 @@
 name = "libp2p"
 edition = "2018"
 description = "Peer-to-peer networking library"
-version = "0.32.2"
+version = "0.33.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -61,22 +61,22 @@ atomic = "0.5.0"
 bytes = "0.5"
 futures = "0.3.1"
 lazy_static = "1.2"
-libp2p-core = { version = "0.25.2", path = "core" }
+libp2p-core = { version = "0.26.0", path = "core" }
 libp2p-core-derive = { version = "0.21.0", path = "misc/core-derive" }
-libp2p-floodsub = { version = "0.25.0", path = "protocols/floodsub", optional = true }
+libp2p-floodsub = { version = "0.26.0", path = "protocols/floodsub", optional = true }
-libp2p-gossipsub = { version = "0.25.0", path = "./protocols/gossipsub", optional = true }
+libp2p-gossipsub = { version = "0.26.0", path = "./protocols/gossipsub", optional = true }
-libp2p-identify = { version = "0.25.0", path = "protocols/identify", optional = true }
+libp2p-identify = { version = "0.26.0", path = "protocols/identify", optional = true }
-libp2p-kad = { version = "0.26.0", path = "protocols/kad", optional = true }
+libp2p-kad = { version = "0.27.0", path = "protocols/kad", optional = true }
-libp2p-mplex = { version = "0.25.0", path = "muxers/mplex", optional = true }
+libp2p-mplex = { version = "0.26.0", path = "muxers/mplex", optional = true }
-libp2p-noise = { version = "0.27.0", path = "protocols/noise", optional = true }
+libp2p-noise = { version = "0.28.0", path = "protocols/noise", optional = true }
-libp2p-ping = { version = "0.25.0", path = "protocols/ping", optional = true }
+libp2p-ping = { version = "0.26.0", path = "protocols/ping", optional = true }
-libp2p-plaintext = { version = "0.25.0", path = "protocols/plaintext", optional = true }
+libp2p-plaintext = { version = "0.26.0", path = "protocols/plaintext", optional = true }
 libp2p-pnet = { version = "0.19.2", path = "protocols/pnet", optional = true }
-libp2p-request-response = { version = "0.7.0", path = "protocols/request-response", optional = true }
+libp2p-request-response = { version = "0.8.0", path = "protocols/request-response", optional = true }
-libp2p-swarm = { version = "0.25.0", path = "swarm" }
+libp2p-swarm = { version = "0.26.0", path = "swarm" }
-libp2p-uds = { version = "0.25.0", path = "transports/uds", optional = true }
+libp2p-uds = { version = "0.26.0", path = "transports/uds", optional = true }
-libp2p-wasm-ext = { version = "0.25.0", path = "transports/wasm-ext", optional = true }
+libp2p-wasm-ext = { version = "0.26.0", path = "transports/wasm-ext", optional = true }
-libp2p-yamux = { version = "0.28.0", path = "muxers/yamux", optional = true }
+libp2p-yamux = { version = "0.29.0", path = "muxers/yamux", optional = true }
 multiaddr = { package = "parity-multiaddr", version = "0.10.0", path = "misc/multiaddr" }
 parking_lot = "0.11.0"
 pin-project = "1.0.0"
@@ -84,11 +84,11 @@ smallvec = "1.0"
 wasm-timer = "0.2.4"
 [target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies]
-libp2p-deflate = { version = "0.25.0", path = "protocols/deflate", optional = true }
+libp2p-deflate = { version = "0.26.0", path = "protocols/deflate", optional = true }
-libp2p-dns = { version = "0.25.0", path = "transports/dns", optional = true }
+libp2p-dns = { version = "0.26.0", path = "transports/dns", optional = true }
-libp2p-mdns = { version = "0.26.0", path = "protocols/mdns", optional = true }
+libp2p-mdns = { version = "0.27.0", path = "protocols/mdns", optional = true }
-libp2p-tcp = { version = "0.25.1", path = "transports/tcp", optional = true }
+libp2p-tcp = { version = "0.26.0", path = "transports/tcp", optional = true }
-libp2p-websocket = { version = "0.26.3", path = "transports/websocket", optional = true }
+libp2p-websocket = { version = "0.27.0", path = "transports/websocket", optional = true }
 [dev-dependencies]
 async-std = "1.6.2"


@@ -1,3 +1,8 @@
+# 0.26.0 [unreleased]
+- Make `PeerId` be `Copy`, including small `PeerId` API changes.
+  [PR 1874](https://github.com/libp2p/rust-libp2p/pull/1874/).
 # 0.25.2 [2020-12-02]
 - Require `multistream-select-0.9.1`.


@@ -2,7 +2,7 @@
 name = "libp2p-core"
 edition = "2018"
 description = "Core traits and structs of libp2p"
-version = "0.25.2"
+version = "0.26.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -12,7 +12,6 @@ categories = ["network-programming", "asynchronous"]
 [dependencies]
 asn1_der = "0.6.1"
 bs58 = "0.4.0"
-bytes = "0.5"
 ed25519-dalek = "1.0.1"
 either = "1.5"
 fnv = "1.0"


@@ -18,18 +18,18 @@
 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 // DEALINGS IN THE SOFTWARE.
-use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput};
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
 use libp2p_core::{identity, PeerId};
 fn from_bytes(c: &mut Criterion) {
     let peer_id_bytes = identity::Keypair::generate_ed25519()
         .public()
         .into_peer_id()
-        .into_bytes();
+        .to_bytes();
     c.bench_function("from_bytes", |b| {
         b.iter(|| {
-            black_box(PeerId::from_bytes(peer_id_bytes.clone()).unwrap());
+            black_box(PeerId::from_bytes(&peer_id_bytes).unwrap());
         })
     });
 }


@@ -555,7 +555,7 @@ where
         }
         self.network.pool.add(connection, connected)
-            .map(|_id| ConnectedPeer {
+            .map(move |_id| ConnectedPeer {
                 network: self.network,
                 peer_id: self.peer_id,
             })


@@ -19,11 +19,10 @@
 // DEALINGS IN THE SOFTWARE.
 use crate::PublicKey;
-use bytes::Bytes;
-use thiserror::Error;
-use multihash::{Code, Multihash, MultihashDigest};
+use multihash::{Code, Error, Multihash, MultihashDigest};
 use rand::Rng;
-use std::{convert::TryFrom, borrow::Borrow, fmt, hash, str::FromStr, cmp};
+use std::{convert::TryFrom, fmt, str::FromStr};
+use thiserror::Error;
 /// Public keys with byte-lengths smaller than `MAX_INLINE_KEY_LENGTH` will be
 /// automatically used as the peer id using an identity multihash.
@@ -32,10 +31,9 @@ const MAX_INLINE_KEY_LENGTH: usize = 42;
 /// Identifier of a peer of the network.
 ///
 /// The data is a multihash of the public key of the peer.
-// TODO: maybe keep things in decoded version?
-#[derive(Clone, Eq)]
+#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
 pub struct PeerId {
-    multihash: Bytes,
+    multihash: Multihash,
 }
 impl fmt::Debug for PeerId {
@@ -52,21 +50,6 @@ impl fmt::Display for PeerId {
     }
 }
-impl cmp::PartialOrd for PeerId {
-    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
-        Some(Ord::cmp(self, other))
-    }
-}
-impl cmp::Ord for PeerId {
-    fn cmp(&self, other: &Self) -> cmp::Ordering {
-        // must use borrow, because as_bytes is not consistent with equality
-        let lhs: &[u8] = self.borrow();
-        let rhs: &[u8] = other.borrow();
-        lhs.cmp(rhs)
-    }
-}
 impl PeerId {
     /// Builds a `PeerId` from a public key.
     pub fn from_public_key(key: PublicKey) -> PeerId {
@@ -78,18 +61,15 @@ impl PeerId {
             Code::Sha2_256
         };
-        let multihash = hash_algorithm.digest(&key_enc).to_bytes().into();
+        let multihash = hash_algorithm.digest(&key_enc);
         PeerId { multihash }
     }
-    /// Checks whether `data` is a valid `PeerId`. If so, returns the `PeerId`. If not, returns
-    /// back the data as an error.
-    pub fn from_bytes(data: Vec<u8>) -> Result<PeerId, Vec<u8>> {
-        match Multihash::from_bytes(&data) {
-            Ok(multihash) => PeerId::from_multihash(multihash).map_err(|_| data),
-            Err(_err) => Err(data),
-        }
+    /// Parses a `PeerId` from bytes.
+    pub fn from_bytes(data: &[u8]) -> Result<PeerId, Error> {
+        Ok(PeerId::from_multihash(Multihash::from_bytes(&data)?)
+            .map_err(|mh| Error::UnsupportedCode(mh.code()))?)
     }
     /// Tries to turn a `Multihash` into a `PeerId`.
@@ -99,9 +79,9 @@ impl PeerId {
     /// peer ID, it is returned as an `Err`.
     pub fn from_multihash(multihash: Multihash) -> Result<PeerId, Multihash> {
         match Code::try_from(multihash.code()) {
-            Ok(Code::Sha2_256) => Ok(PeerId { multihash: multihash.to_bytes().into() }),
+            Ok(Code::Sha2_256) => Ok(PeerId { multihash }),
             Ok(Code::Identity) if multihash.digest().len() <= MAX_INLINE_KEY_LENGTH
-                => Ok(PeerId { multihash: multihash.to_bytes().into() }),
+                => Ok(PeerId { multihash }),
             _ => Err(multihash)
         }
     }
@@ -113,31 +93,18 @@ impl PeerId {
         let peer_id = rand::thread_rng().gen::<[u8; 32]>();
         PeerId {
             multihash: Multihash::wrap(Code::Identity.into(), &peer_id)
-                .expect("The digest size is never too large").to_bytes().into()
+                .expect("The digest size is never too large")
         }
     }
     /// Returns a raw bytes representation of this `PeerId`.
-    ///
-    /// **NOTE:** This byte representation is not necessarily consistent with
-    /// equality of peer IDs. That is, two peer IDs may be considered equal
-    /// while having a different byte representation as per `into_bytes`.
-    pub fn into_bytes(self) -> Vec<u8> {
-        self.multihash.to_vec()
-    }
-    /// Returns a raw bytes representation of this `PeerId`.
-    ///
-    /// **NOTE:** This byte representation is not necessarily consistent with
-    /// equality of peer IDs. That is, two peer IDs may be considered equal
-    /// while having a different byte representation as per `as_bytes`.
-    pub fn as_bytes(&self) -> &[u8] {
-        &self.multihash
+    pub fn to_bytes(&self) -> Vec<u8> {
+        self.multihash.to_bytes()
     }
     /// Returns a base-58 encoded string of this `PeerId`.
     pub fn to_base58(&self) -> String {
-        bs58::encode(self.borrow() as &[u8]).into_string()
+        bs58::encode(self.to_bytes()).into_string()
     }
     /// Checks whether the public key passed as parameter matches the public key of this `PeerId`.
@@ -145,22 +112,10 @@ impl PeerId {
     /// Returns `None` if this `PeerId`s hash algorithm is not supported when encoding the
     /// given public key, otherwise `Some` boolean as the result of an equality check.
     pub fn is_public_key(&self, public_key: &PublicKey) -> Option<bool> {
-        let multihash = Multihash::from_bytes(&self.multihash)
-            .expect("Internal multihash is always a valid");
-        let alg = Code::try_from(multihash.code())
+        let alg = Code::try_from(self.multihash.code())
             .expect("Internal multihash is always a valid `Code`");
         let enc = public_key.clone().into_protobuf_encoding();
-        Some(alg.digest(&enc) == multihash)
-    }
-}
-impl hash::Hash for PeerId {
-    fn hash<H>(&self, state: &mut H)
-    where
-        H: hash::Hasher
-    {
-        let digest = self.borrow() as &[u8];
-        hash::Hash::hash(digest, state)
+        Some(alg.digest(&enc) == self.multihash)
     }
 }
@@ -174,7 +129,7 @@ impl TryFrom<Vec<u8>> for PeerId {
     type Error = Vec<u8>;
     fn try_from(value: Vec<u8>) -> Result<Self, Self::Error> {
-        PeerId::from_bytes(value)
+        PeerId::from_bytes(&value).map_err(|_| value)
     }
 }
@@ -186,33 +141,21 @@ impl TryFrom<Multihash> for PeerId {
     }
 }
-impl PartialEq<PeerId> for PeerId {
-    fn eq(&self, other: &PeerId) -> bool {
-        let self_digest = self.borrow() as &[u8];
-        let other_digest = other.borrow() as &[u8];
-        self_digest == other_digest
-    }
-}
-impl Borrow<[u8]> for PeerId {
-    fn borrow(&self) -> &[u8] {
+impl AsRef<Multihash> for PeerId {
+    fn as_ref(&self) -> &Multihash {
         &self.multihash
     }
 }
-/// **NOTE:** This byte representation is not necessarily consistent with
-/// equality of peer IDs. That is, two peer IDs may be considered equal
-/// while having a different byte representation as per `AsRef<[u8]>`.
-impl AsRef<[u8]> for PeerId {
-    fn as_ref(&self) -> &[u8] {
-        self.as_bytes()
-    }
-}
 impl From<PeerId> for Multihash {
     fn from(peer_id: PeerId) -> Self {
-        Multihash::from_bytes(&peer_id.multihash)
-            .expect("PeerIds always contain valid Multihashes")
+        peer_id.multihash
+    }
+}
+impl From<PeerId> for Vec<u8> {
+    fn from(peer_id: PeerId) -> Self {
+        peer_id.to_bytes()
     }
 }
@@ -230,7 +173,7 @@ impl FromStr for PeerId {
     #[inline]
     fn from_str(s: &str) -> Result<Self, Self::Err> {
         let bytes = bs58::decode(s).into_vec()?;
-        PeerId::from_bytes(bytes).map_err(|_| ParseError::MultiHash)
+        PeerId::from_bytes(&bytes).map_err(|_| ParseError::MultiHash)
     }
 }
@@ -248,7 +191,7 @@ mod tests {
     #[test]
     fn peer_id_into_bytes_then_from_bytes() {
         let peer_id = identity::Keypair::generate_ed25519().public().into_peer_id();
-        let second = PeerId::from_bytes(peer_id.clone().into_bytes()).unwrap();
+        let second = PeerId::from_bytes(&peer_id.to_bytes()).unwrap();
         assert_eq!(peer_id, second);
     }
@@ -263,7 +206,7 @@ mod tests {
     fn random_peer_id_is_valid() {
         for _ in 0 .. 5000 {
             let peer_id = PeerId::random();
-            assert_eq!(peer_id, PeerId::from_bytes(peer_id.clone().into_bytes()).unwrap());
+            assert_eq!(peer_id, PeerId::from_bytes(&peer_id.to_bytes()).unwrap());
         }
     }
 }
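The conversions kept or added above (`TryFrom<Vec<u8>>` and `From<PeerId> for Vec<u8>`) compose into a byte round-trip; a small sketch, where `roundtrip` is a hypothetical helper and not part of the crate:

```rust
use libp2p_core::PeerId;
use std::convert::TryFrom;

// Hypothetical helper: encode a PeerId to its multihash bytes and parse it back.
fn roundtrip(peer_id: PeerId) -> PeerId {
    let bytes: Vec<u8> = peer_id.into(); // From<PeerId> for Vec<u8>
    PeerId::try_from(bytes)              // TryFrom<Vec<u8>> for PeerId
        .expect("bytes produced from a PeerId always parse back")
}
```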


@@ -1,3 +1,7 @@
+# 0.26.0 [unreleased]
+- Update `libp2p-core`.
 # 0.25.0 [2020-11-25]
 - Update `libp2p-core`.


@@ -2,7 +2,7 @@
 name = "libp2p-mplex"
 edition = "2018"
 description = "Mplex multiplexing protocol for libp2p"
-version = "0.25.0"
+version = "0.26.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"]
 bytes = "0.5"
 futures = "0.3.1"
 futures_codec = "0.4.1"
-libp2p-core = { version = "0.25.0", path = "../../core" }
+libp2p-core = { version = "0.26.0", path = "../../core" }
 log = "0.4"
 nohash-hasher = "0.2"
 parking_lot = "0.11"


@@ -1,3 +1,7 @@
+# 0.29.0 [unreleased]
+- Update `libp2p-core`.
 # 0.28.0 [2020-11-25]
 - Update `libp2p-core`.


@@ -2,7 +2,7 @@
 name = "libp2p-yamux"
 edition = "2018"
 description = "Yamux multiplexing protocol for libp2p"
-version = "0.28.0"
+version = "0.29.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -11,7 +11,7 @@ categories = ["network-programming", "asynchronous"]
 [dependencies]
 futures = "0.3.1"
-libp2p-core = { version = "0.25.0", path = "../../core" }
+libp2p-core = { version = "0.26.0", path = "../../core" }
 parking_lot = "0.11"
 thiserror = "1.0"
 yamux = "0.8.0"


@@ -1,3 +1,7 @@
+# 0.26.0 [unreleased]
+- Update `libp2p-core`.
 # 0.25.0 [2020-11-25]
 - Update `libp2p-core`.


@@ -2,7 +2,7 @@
 name = "libp2p-deflate"
 edition = "2018"
 description = "Deflate encryption protocol for libp2p"
-version = "0.25.0"
+version = "0.26.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -11,7 +11,7 @@ categories = ["network-programming", "asynchronous"]
 [dependencies]
 futures = "0.3.1"
-libp2p-core = { version = "0.25.0", path = "../../core" }
+libp2p-core = { version = "0.26.0", path = "../../core" }
 flate2 = "1.0"
 [dev-dependencies]


@@ -1,3 +1,7 @@
+# 0.26.0 [unreleased]
+- Update `libp2p-swarm` and `libp2p-core`.
 # 0.25.0 [2020-11-25]
 - Update `libp2p-swarm` and `libp2p-core`.


@@ -2,7 +2,7 @@
 name = "libp2p-floodsub"
 edition = "2018"
 description = "Floodsub protocol for libp2p"
-version = "0.25.0"
+version = "0.26.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -13,8 +13,8 @@ categories = ["network-programming", "asynchronous"]
 cuckoofilter = "0.5.0"
 fnv = "1.0"
 futures = "0.3.1"
-libp2p-core = { version = "0.25.0", path = "../../core" }
+libp2p-core = { version = "0.26.0", path = "../../core" }
-libp2p-swarm = { version = "0.25.0", path = "../../swarm" }
+libp2p-swarm = { version = "0.26.0", path = "../../swarm" }
 log = "0.4"
 prost = "0.6.1"
 rand = "0.7"


@@ -61,7 +61,7 @@ where
         let mut messages = Vec::with_capacity(rpc.publish.len());
         for publish in rpc.publish.into_iter() {
             messages.push(FloodsubMessage {
-                source: PeerId::from_bytes(publish.from.unwrap_or_default()).map_err(|_| {
+                source: PeerId::from_bytes(&publish.from.unwrap_or_default()).map_err(|_| {
                     FloodsubDecodeError::InvalidPeerId
                 })?,
                 data: publish.data.unwrap_or_default(),
@@ -179,7 +179,7 @@ impl FloodsubRpc {
             publish: self.messages.into_iter()
                 .map(|msg| {
                     rpc_proto::Message {
-                        from: Some(msg.source.into_bytes()),
+                        from: Some(msg.source.to_bytes()),
                         data: Some(msg.data),
                         seqno: Some(msg.sequence_number),
                         topic_ids: msg.topics


@@ -1,3 +1,7 @@
+# 0.26.0 [unreleased]
+- Update `libp2p-swarm` and `libp2p-core`.
 # 0.25.0 [2020-11-25]
 - Update `libp2p-swarm` and `libp2p-core`.


@@ -2,7 +2,7 @@
 name = "libp2p-gossipsub"
 edition = "2018"
 description = "Gossipsub protocol for libp2p"
-version = "0.25.0"
+version = "0.26.0"
 authors = ["Age Manning <Age@AgeManning.com>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -10,8 +10,8 @@ keywords = ["peer-to-peer", "libp2p", "networking"]
 categories = ["network-programming", "asynchronous"]
 [dependencies]
-libp2p-swarm = { version = "0.25.0", path = "../../swarm" }
+libp2p-swarm = { version = "0.26.0", path = "../../swarm" }
-libp2p-core = { version = "0.25.0", path = "../../core" }
+libp2p-core = { version = "0.26.0", path = "../../core" }
 bytes = "0.5.4"
 byteorder = "1.3.2"
 fnv = "1.0.6"


@@ -1100,7 +1100,7 @@ impl Gossipsub {
         let signature = {
             let message = rpc_proto::Message {
-                from: Some(author.clone().into_bytes()),
+                from: Some(author.clone().to_bytes()),
                 data: Some(data.clone()),
                 seqno: Some(sequence_number.to_be_bytes().to_vec()),
                 topic_ids: topics.clone().into_iter().map(|t| t.into()).collect(),


@@ -139,7 +139,7 @@ impl Default for GossipsubConfig {
             let mut source_string = if let Some(peer_id) = message.source.as_ref() {
                 peer_id.to_base58()
             } else {
-                PeerId::from_bytes(vec![0, 1, 0])
+                PeerId::from_bytes(&[0, 1, 0])
                     .expect("Valid peer id")
                     .to_base58()
             };


@@ -137,7 +137,7 @@ impl GossipsubCodec {
                 }
             };
-            let source = match PeerId::from_bytes(from.clone()) {
+            let source = match PeerId::from_bytes(&from) {
                 Ok(v) => v,
                 Err(_) => {
                     debug!("Signature verification failed: Invalid Peer Id");
@@ -161,7 +161,7 @@ impl GossipsubCodec {
                 .map(|key| PublicKey::from_protobuf_encoding(&key))
             {
                 Some(Ok(key)) => key,
-                _ => match PublicKey::from_protobuf_encoding(&source.as_bytes()[2..]) {
+                _ => match PublicKey::from_protobuf_encoding(&source.to_bytes()[2..]) {
                     Ok(v) => v,
                     Err(_) => {
                         warn!("Signature verification failed: No valid public key supplied");
@@ -200,7 +200,7 @@ impl Encoder for GossipsubCodec {
         for message in item.messages.into_iter() {
             let message = rpc_proto::Message {
-                from: message.source.map(|m| m.into_bytes()),
+                from: message.source.map(|m| m.to_bytes()),
                 data: Some(message.data),
                 seqno: message.sequence_number.map(|s| s.to_be_bytes().to_vec()),
                 topic_ids: message.topics.into_iter().map(TopicHash::into).collect(),
@@ -372,7 +372,7 @@ impl Decoder for GossipsubCodec {
             let source = if verify_source {
                 Some(
-                    PeerId::from_bytes(message.from.unwrap_or_default()).map_err(|_| {
+                    PeerId::from_bytes(&message.from.unwrap_or_default()).map_err(|_| {
                         io::Error::new(io::ErrorKind::InvalidData, "Invalid Peer Id")
                     })?,
                 )


@@ -1,3 +1,7 @@
+# 0.26.0 [unreleased]
+- Update `libp2p-swarm` and `libp2p-core`.
 # 0.25.0 [2020-11-25]
 - Update `libp2p-swarm` and `libp2p-core`.


@@ -2,7 +2,7 @@
 name = "libp2p-identify"
 edition = "2018"
 description = "Nodes identifcation protocol for libp2p"
-version = "0.25.0"
+version = "0.26.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -11,8 +11,8 @@ categories = ["network-programming", "asynchronous"]
 [dependencies]
 futures = "0.3.1"
-libp2p-core = { version = "0.25.0", path = "../../core" }
+libp2p-core = { version = "0.26.0", path = "../../core" }
-libp2p-swarm = { version = "0.25.0", path = "../../swarm" }
+libp2p-swarm = { version = "0.26.0", path = "../../swarm" }
 log = "0.4.1"
 prost = "0.6.1"
 smallvec = "1.0"


@@ -1,3 +1,7 @@
+# 0.27.0 [unreleased]
+- Update `libp2p-core` and `libp2p-swarm`.
 # 0.26.0 [2020-11-25]
 - Update `libp2p-core` and `libp2p-swarm`.


@@ -2,7 +2,7 @@
 name = "libp2p-kad"
 edition = "2018"
 description = "Kademlia protocol for libp2p"
-version = "0.26.0"
+version = "0.27.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -17,8 +17,8 @@ fnv = "1.0"
 futures_codec = "0.4"
 futures = "0.3.1"
 log = "0.4"
-libp2p-core = { version = "0.25.0", path = "../../core" }
+libp2p-core = { version = "0.26.0", path = "../../core" }
-libp2p-swarm = { version = "0.25.0", path = "../../swarm" }
+libp2p-swarm = { version = "0.26.0", path = "../../swarm" }
 prost = "0.6.1"
 rand = "0.7.2"
 sha2 = "0.9.1"


@@ -47,7 +47,7 @@ use libp2p_swarm::{
 };
 use log::{info, debug, warn};
 use smallvec::SmallVec;
-use std::{borrow::{Borrow, Cow}, error, iter, time::Duration};
+use std::{borrow::Cow, error, iter, time::Duration};
 use std::collections::{HashSet, VecDeque};
 use std::fmt;
 use std::num::NonZeroUsize;
@@ -337,7 +337,7 @@ where
     /// Creates a new `Kademlia` network behaviour with the given configuration.
     pub fn with_config(id: PeerId, store: TStore, config: KademliaConfig) -> Self {
-        let local_key = kbucket::Key::new(id.clone());
+        let local_key = kbucket::Key::from(id);
         let put_record_job = config
             .record_replication_interval
@@ -428,7 +428,7 @@ where
     /// If the routing table has been updated as a result of this operation,
     /// a [`KademliaEvent::RoutingUpdated`] event is emitted.
     pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) -> RoutingUpdate {
-        let key = kbucket::Key::new(peer.clone());
+        let key = kbucket::Key::from(*peer);
         match self.kbuckets.entry(&key) {
             kbucket::Entry::Present(mut entry, _) => {
                 if entry.value().insert(address) {
@@ -495,7 +495,7 @@ where
     pub fn remove_address(&mut self, peer: &PeerId, address: &Multiaddr)
         -> Option<kbucket::EntryView<kbucket::Key<PeerId>, Addresses>>
     {
-        let key = kbucket::Key::new(peer.clone());
+        let key = kbucket::Key::from(*peer);
         match self.kbuckets.entry(&key) {
             kbucket::Entry::Present(mut entry, _) => {
                 if entry.value().remove(address).is_err() {
@@ -524,7 +524,7 @@ where
     pub fn remove_peer(&mut self, peer: &PeerId)
         -> Option<kbucket::EntryView<kbucket::Key<PeerId>, Addresses>>
     {
-        let key = kbucket::Key::new(peer.clone());
+        let key = kbucket::Key::from(*peer);
         match self.kbuckets.entry(&key) {
             kbucket::Entry::Present(entry, _) => {
                 Some(entry.remove())
@@ -551,9 +551,9 @@ where
     pub fn kbucket<K>(&mut self, key: K)
         -> Option<kbucket::KBucketRef<'_, kbucket::Key<PeerId>, Addresses>>
     where
-        K: Borrow<[u8]> + Clone
+        K: Into<kbucket::Key<K>> + Clone
     {
-        self.kbuckets.bucket(&kbucket::Key::new(key))
+        self.kbuckets.bucket(&key.into())
     }
     /// Initiates an iterative query for the closest peers to the given key.
@@ -562,10 +562,10 @@ where
     /// [`KademliaEvent::QueryResult{QueryResult::GetClosestPeers}`].
     pub fn get_closest_peers<K>(&mut self, key: K) -> QueryId
     where
-        K: Borrow<[u8]> + Clone
+        K: Into<kbucket::Key<K>> + Into<Vec<u8>> + Clone
     {
-        let info = QueryInfo::GetClosestPeers { key: key.borrow().to_vec() };
-        let target = kbucket::Key::new(key);
+        let info = QueryInfo::GetClosestPeers { key: key.clone().into() };
+        let target: kbucket::Key<K> = key.into();
         let peers = self.kbuckets.closest_keys(&target);
         let inner = QueryInner::new(info);
         self.queries.add_iter_closest(target.clone(), peers, inner)
@@ -823,7 +823,7 @@ where
         if &node_id == kbuckets.local_key().preimage() {
             Some(local_addrs.iter().cloned().collect::<Vec<_>>())
         } else {
-            let key = kbucket::Key::new(node_id.clone());
+            let key = kbucket::Key::from(node_id);
             kbuckets.entry(&key).view().map(|e| e.node.value.clone().into_vec())
         }
     } else {
@@ -870,7 +870,7 @@ where
     /// Updates the routing table with a new connection status and address of a peer.
     fn connection_updated(&mut self, peer: PeerId, address: Option<Multiaddr>, new_status: NodeStatus) {
-        let key = kbucket::Key::new(peer.clone());
+        let key = kbucket::Key::from(peer);
         match self.kbuckets.entry(&key) {
             kbucket::Entry::Present(mut entry, old_status) => {
                 if let Some(address) = address {
@@ -985,13 +985,13 @@ where
         // Pr(bucket-253) = 1 - (7/8)^16 ~= 0.88
         // Pr(bucket-252) = 1 - (15/16)^16 ~= 0.64
         // ...
-        let mut target = kbucket::Key::new(PeerId::random());
+        let mut target = kbucket::Key::from(PeerId::random());
         for _ in 0 .. 16 {
             let d = local_key.distance(&target);
             if b.contains(&d) {
                 break;
             }
-            target = kbucket::Key::new(PeerId::random());
+            target = kbucket::Key::from(PeerId::random());
         }
         target
     }).collect::<Vec<_>>().into_iter()
@@ -1447,7 +1447,7 @@ where
    fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr> {
        // We should order addresses from decreasing likelyhood of connectivity, so start with
        // the addresses of that peer in the k-buckets.
-       let key = kbucket::Key::new(peer_id.clone());
+       let key = kbucket::Key::from(*peer_id);
        let mut peer_addrs =
            if let kbucket::Entry::Present(mut entry, _) = self.kbuckets.entry(&key) {
                let addrs = entry.value().iter().cloned().collect::<Vec<_>>();
@@ -1500,7 +1500,7 @@ where
         let (old, new) = (old.get_remote_address(), new.get_remote_address());
         // Update routing table.
-        if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::new(peer.clone())).value() {
+        if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::from(*peer)).value() {
             if addrs.replace(old, new) {
                 debug!("Address '{}' replaced with '{}' for peer '{}'.", old, new, peer);
             } else {
@@ -1550,7 +1550,7 @@ where
         err: &dyn error::Error
     ) {
         if let Some(peer_id) = peer_id {
-            let key = kbucket::Key::new(peer_id.clone());
+            let key = kbucket::Key::from(*peer_id);
             if let Some(addrs) = self.kbuckets.entry(&key).value() {
                 // TODO: Ideally, the address should only be removed if the error can
@@ -2403,7 +2403,7 @@ impl QueryInfo {
     fn to_request(&self, query_id: QueryId) -> KademliaHandlerIn<QueryId> {
         match &self {
             QueryInfo::Bootstrap { peer, .. } => KademliaHandlerIn::FindNodeReq {
-                key: peer.clone().into_bytes(),
+                key: peer.to_bytes(),
                 user_data: query_id,
             },
             QueryInfo::GetClosestPeers { key, .. } => KademliaHandlerIn::FindNodeReq {


@@ -239,13 +239,13 @@ fn query_iter() {
     // Ask the first peer in the list to search a random peer. The search should
     // propagate forwards through the list of peers.
     let search_target = PeerId::random();
-    let search_target_key = kbucket::Key::new(search_target.clone());
+    let search_target_key = kbucket::Key::from(search_target);
-    let qid = swarms[0].get_closest_peers(search_target.clone());
+    let qid = swarms[0].get_closest_peers(search_target);
     match swarms[0].query(&qid) {
         Some(q) => match q.info() {
             QueryInfo::GetClosestPeers { key } => {
-                assert_eq!(&key[..], search_target.borrow() as &[u8])
+                assert_eq!(&key[..], search_target.to_bytes().as_slice())
             },
             i => panic!("Unexpected query info: {:?}", i)
         }
@@ -268,7 +268,7 @@ fn query_iter() {
                 id, result: QueryResult::GetClosestPeers(Ok(ok)), ..
             })) => {
                 assert_eq!(id, qid);
-                assert_eq!(&ok.key[..], search_target.as_bytes());
+                assert_eq!(&ok.key[..], search_target.to_bytes().as_slice());
                 assert_eq!(swarm_ids[i], expected_swarm_id);
                 assert_eq!(swarm.queries.size(), 0);
                 assert!(expected_peer_ids.iter().all(|p| ok.peers.contains(p)));
@@ -310,7 +310,7 @@ fn unresponsive_not_returned_direct() {
     // Ask first to search a random value.
     let search_target = PeerId::random();
-    swarms[0].get_closest_peers(search_target.clone());
+    swarms[0].get_closest_peers(search_target);
     block_on(
         poll_fn(move |ctx| {
@@ -320,7 +320,7 @@ fn unresponsive_not_returned_direct() {
                 Poll::Ready(Some(KademliaEvent::QueryResult {
                     result: QueryResult::GetClosestPeers(Ok(ok)), ..
                 })) => {
-                    assert_eq!(&ok.key[..], search_target.as_bytes());
+                    assert_eq!(&ok.key[..], search_target.to_bytes().as_slice());
                     assert_eq!(ok.peers.len(), 0);
                     return Poll::Ready(());
                 }
@@ -360,7 +360,7 @@ fn unresponsive_not_returned_indirect() {
     // Ask second to search a random value.
     let search_target = PeerId::random();
-    swarms[1].get_closest_peers(search_target.clone());
+    swarms[1].get_closest_peers(search_target);
     block_on(
         poll_fn(move |ctx| {
@@ -370,7 +370,7 @@ fn unresponsive_not_returned_indirect() {
                 Poll::Ready(Some(KademliaEvent::QueryResult {
                     result: QueryResult::GetClosestPeers(Ok(ok)), ..
                 })) => {
-                    assert_eq!(&ok.key[..], search_target.as_bytes());
+                    assert_eq!(&ok.key[..], search_target.to_bytes().as_slice());
                     assert_eq!(ok.peers.len(), 1);
                     assert_eq!(ok.peers[0], first_peer_id);
                     return Poll::Ready(());
@@ -570,8 +570,8 @@ fn put_record() {
                 .cloned()
                 .collect::<Vec<_>>();
             expected.sort_by(|id1, id2|
-                kbucket::Key::new(id1.clone()).distance(&key).cmp(
-                    &kbucket::Key::new(id2.clone()).distance(&key)));
+                kbucket::Key::from(*id1).distance(&key).cmp(
+                    &kbucket::Key::from(*id2).distance(&key)));
             let expected = expected
                 .into_iter()
@@ -838,8 +838,8 @@ fn add_provider() {
                 .collect::<Vec<_>>();
             let kbucket_key = kbucket::Key::new(key);
             expected.sort_by(|id1, id2|
-                kbucket::Key::new(id1.clone()).distance(&kbucket_key).cmp(
-                    &kbucket::Key::new(id2.clone()).distance(&kbucket_key)));
+                kbucket::Key::from(*id1).distance(&kbucket_key).cmp(
+                    &kbucket::Key::from(*id2).distance(&kbucket_key)));
             let expected = expected
                 .into_iter()
@@ -1084,7 +1084,7 @@ fn manual_bucket_inserts() {
                 routable.push(peer);
                 if expected.is_empty() {
                     for peer in routable.iter() {
-                        let bucket = swarm.kbucket(peer.clone()).unwrap();
+                        let bucket = swarm.kbucket(*peer).unwrap();
                         assert!(bucket.iter().all(|e| e.node.key.preimage() != peer));
                     }
                     return Poll::Ready(())


@@ -432,7 +432,7 @@ mod tests {
             let mut bucket = KBucket::<Key<PeerId>, ()>::new(timeout);
             let num_nodes = g.gen_range(1, K_VALUE.get() + 1);
             for _ in 0 .. num_nodes {
-                let key = Key::new(PeerId::random());
+                let key = Key::from(PeerId::random());
                 let node = Node { key: key.clone(), value: () };
                 let status = NodeStatus::arbitrary(g);
                 match bucket.insert(node, status) {
@@ -464,7 +464,7 @@ mod tests {
     fn fill_bucket(bucket: &mut KBucket<Key<PeerId>, ()>, status: NodeStatus) {
         let num_entries_start = bucket.num_entries();
         for i in 0 .. K_VALUE.get() - num_entries_start {
-            let key = Key::new(PeerId::random());
+            let key = Key::from(PeerId::random());
             let node = Node { key, value: () };
             assert_eq!(InsertResult::Inserted, bucket.insert(node, status));
             assert_eq!(bucket.num_entries(), num_entries_start + i + 1);
@@ -482,7 +482,7 @@ mod tests {
         // Fill the bucket, thereby populating the expected lists in insertion order.
         for status in status {
-            let key = Key::new(PeerId::random());
+            let key = Key::from(PeerId::random());
             let node = Node { key: key.clone(), value: () };
             let full = bucket.num_entries() == K_VALUE.get();
             match bucket.insert(node, status) {
@@ -529,7 +529,7 @@ mod tests {
         fill_bucket(&mut bucket, NodeStatus::Disconnected);
         // Trying to insert another disconnected node fails.
-        let key = Key::new(PeerId::random());
+        let key = Key::from(PeerId::random());
         let node = Node { key, value: () };
         match bucket.insert(node, NodeStatus::Disconnected) {
             InsertResult::Full => {},
@@ -544,7 +544,7 @@ mod tests {
         // Add a connected node, which is expected to be pending, scheduled to
         // replace the first (i.e. least-recently connected) node.
-        let key = Key::new(PeerId::random());
+        let key = Key::from(PeerId::random());
         let node = Node { key: key.clone(), value: () };
         match bucket.insert(node.clone(), NodeStatus::Connected) {
             InsertResult::Pending { disconnected } =>
@@ -577,7 +577,7 @@ mod tests {
         assert_eq!(K_VALUE.get(), bucket.num_entries());
         // Trying to insert another connected node fails.
-        let key = Key::new(PeerId::random());
+        let key = Key::from(PeerId::random());
         let node = Node { key, value: () };
         match bucket.insert(node, NodeStatus::Connected) {
             InsertResult::Full => {},
@@ -593,7 +593,7 @@ mod tests {
         let first_disconnected = first.clone();
         // Add a connected pending node.
-        let key = Key::new(PeerId::random());
+        let key = Key::from(PeerId::random());
         let node = Node { key: key.clone(), value: () };
         if let InsertResult::Pending { disconnected } = bucket.insert(node, NodeStatus::Connected) {
             assert_eq!(&disconnected, &first_disconnected.key);


@@ -103,7 +103,11 @@ impl From<Multihash> for Key<Multihash> {
 impl From<PeerId> for Key<PeerId> {
     fn from(p: PeerId) -> Self {
-        Key::new(p)
+        let bytes = KeyBytes(Sha256::digest(&p.to_bytes()));
+        Key {
+            preimage: p,
+            bytes
+        }
     }
 }
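Since `From<PeerId> for kbucket::Key<PeerId>` now hashes the peer ID directly, call sites can build routing-table keys without the old `Key::new(peer.clone())` pattern. A sketch under the assumption that `libp2p-kad` 0.27.0 (this commit) is a dependency and its public `kbucket` module is in scope; `closer_of` is an illustrative helper, not part of the crate:

```rust
use libp2p_core::PeerId;
use libp2p_kad::kbucket::Key;

// Illustrative helper: pick whichever of `a` or `b` is XOR-closer to `target`.
fn closer_of(a: PeerId, b: PeerId, target: PeerId) -> PeerId {
    let target_key = Key::from(target); // was Key::new(target.clone()) before this change
    if Key::from(a).distance(&target_key) <= Key::from(b).distance(&target_key) {
        a
    } else {
        b
    }
}
```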


@@ -101,7 +101,7 @@ impl TryFrom<proto::message::Peer> for KadPeer {
     fn try_from(peer: proto::message::Peer) -> Result<KadPeer, Self::Error> {
         // TODO: this is in fact a CID; not sure if this should be handled in `from_bytes` or
         // as a special case here
-        let node_id = PeerId::from_bytes(peer.id)
+        let node_id = PeerId::from_bytes(&peer.id)
             .map_err(|_| invalid_data("invalid peer id"))?;
         let mut addrs = Vec::with_capacity(peer.addrs.len());
@@ -126,7 +126,7 @@ impl TryFrom<proto::message::Peer> for KadPeer {
 impl Into<proto::message::Peer> for KadPeer {
     fn into(self) -> proto::message::Peer {
         proto::message::Peer {
-            id: self.node_id.into_bytes(),
+            id: self.node_id.to_bytes(),
             addrs: self.multiaddrs.into_iter().map(|a| a.to_vec()).collect(),
             connection: {
                 let ct: proto::message::ConnectionType = self.connection_ty.into();
@@ -533,7 +533,7 @@ fn record_from_proto(record: proto::Record) -> Result<Record, io::Error> {
     let publisher =
         if !record.publisher.is_empty() {
-            PeerId::from_bytes(record.publisher)
+            PeerId::from_bytes(&record.publisher)
                 .map(Some)
                 .map_err(|_| invalid_data("Invalid publisher peer ID."))?
         } else {
@@ -554,7 +554,7 @@ fn record_to_proto(record: Record) -> proto::Record {
     proto::Record {
         key: record.key.to_vec(),
         value: record.value,
-        publisher: record.publisher.map(PeerId::into_bytes).unwrap_or_default(),
+        publisher: record.publisher.map(|id| id.to_bytes()).unwrap_or_default(),
         ttl: record.expires
             .map(|t| {
                 let now = Instant::now();


@@ -770,7 +770,7 @@ mod tests {
     impl Graph {
         fn get_closest_peer(&self, target: &KeyBytes) -> PeerId {
             self.0.iter()
-                .map(|(peer_id, _)| (target.distance(&Key::new(peer_id.clone())), peer_id))
+                .map(|(peer_id, _)| (target.distance(&Key::from(*peer_id)), peer_id))
                 .fold(None, |acc, (distance_b, peer_id_b)| {
                     match acc {
                         None => Some((distance_b, peer_id_b)),
@@ -848,7 +848,7 @@ mod tests {
         let mut known_closest_peers = graph.0.iter()
             .take(K_VALUE.get())
-            .map(|(key, _peers)| Key::new(key.clone()))
+            .map(|(key, _peers)| Key::from(*key))
             .collect::<Vec<_>>();
         known_closest_peers.sort_unstable_by(|a, b| {
             target.distance(a).cmp(&target.distance(b))
@@ -934,7 +934,7 @@ mod tests {
             }
         }
-        let mut result = iter.into_result().into_iter().map(Key::new).collect::<Vec<_>>();
+        let mut result = iter.into_result().into_iter().map(Key::from).collect::<Vec<_>>();
         result.sort_unstable_by(|a, b| {
             target.distance(a).cmp(&target.distance(b))
         });


@@ -78,7 +78,7 @@ impl MemoryStore {
     /// Creates a new `MemoryRecordStore` with the given configuration.
     pub fn with_config(local_id: PeerId, config: MemoryStoreConfig) -> Self {
         MemoryStore {
-            local_key: kbucket::Key::new(local_id),
+            local_key: kbucket::Key::from(local_id),
             config,
             records: HashMap::default(),
             provided: HashSet::default(),
@@ -161,9 +161,9 @@ impl<'a> RecordStore<'a> for MemoryStore {
             // It is a new provider record for that key.
             let local_key = self.local_key.clone();
             let key = kbucket::Key::new(record.key.clone());
-            let provider = kbucket::Key::new(record.provider.clone());
+            let provider = kbucket::Key::from(record.provider);
             if let Some(i) = providers.iter().position(|p| {
-                let pk = kbucket::Key::new(p.provider.clone());
+                let pk = kbucket::Key::from(p.provider);
                 provider.distance(&key) < pk.distance(&key)
             }) {
                 // Insert the new provider.
@@ -225,7 +225,7 @@ mod tests {
     fn distance(r: &ProviderRecord) -> kbucket::Distance {
         kbucket::Key::new(r.key.clone())
-            .distance(&kbucket::Key::new(r.provider.clone()))
+            .distance(&kbucket::Key::from(r.provider))
     }
     #[test]
@@ -318,4 +318,3 @@ mod tests {
         }
     }
 }


@@ -1,3 +1,7 @@
+# 0.27.0 [unreleased]
+- Update `libp2p-swarm` and `libp2p-core`.
 # 0.26.0 [2020-12-08]
 - Create multiple multicast response packets as required to avoid


@@ -1,7 +1,7 @@
 [package]
 name = "libp2p-mdns"
 edition = "2018"
-version = "0.26.0"
+version = "0.27.0"
 description = "Implementation of the libp2p mDNS discovery method"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
@@ -16,8 +16,8 @@ dns-parser = "0.8.0"
 futures = "0.3.8"
 if-watch = "0.1.6"
 lazy_static = "1.4.0"
-libp2p-core = { version = "0.25.0", path = "../../core" }
+libp2p-core = { version = "0.26.0", path = "../../core" }
-libp2p-swarm = { version = "0.25.0", path = "../../swarm" }
+libp2p-swarm = { version = "0.26.0", path = "../../swarm" }
 log = "0.4.11"
 rand = "0.7.3"
 smallvec = "1.5.0"


@@ -281,7 +281,7 @@ fn segment_peer_id(peer_id: String) -> String {
 /// Combines and encodes a `PeerId` and service name for a DNS query.
 fn encode_peer_id(peer_id: &PeerId) -> Vec<u8> {
     // DNS-safe encoding for the Peer ID
-    let raw_peer_id = data_encoding::BASE32_DNSCURVE.encode(&peer_id.as_bytes());
+    let raw_peer_id = data_encoding::BASE32_DNSCURVE.encode(&peer_id.to_bytes());
     // ensure we don't have any labels over 63 bytes long
     let encoded_peer_id = segment_peer_id(raw_peer_id);
     let service_name = str::from_utf8(SERVICE_NAME).expect("SERVICE_NAME is always ASCII");


@@ -450,7 +450,7 @@ impl MdnsResponse {
             peer_name.retain(|c| c != '.');
             let peer_id = match data_encoding::BASE32_DNSCURVE.decode(peer_name.as_bytes()) {
-                Ok(bytes) => match PeerId::from_bytes(bytes) {
+                Ok(bytes) => match PeerId::from_bytes(&bytes) {
                     Ok(id) => id,
                     Err(_) => return None,
                 },


@@ -1,3 +1,7 @@
+# 0.28.0 [unreleased]
+- Update `libp2p-core`.
 # 0.27.0 [2020-11-25]
 - Update `libp2p-core`.


@@ -1,7 +1,7 @@
 [package]
 name = "libp2p-noise"
 description = "Cryptographic handshake protocol using the noise framework."
-version = "0.27.0"
+version = "0.28.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -12,7 +12,7 @@ bytes = "0.5"
 curve25519-dalek = "3.0.0"
 futures = "0.3.1"
 lazy_static = "1.2"
-libp2p-core = { version = "0.25.0", path = "../../core" }
+libp2p-core = { version = "0.26.0", path = "../../core" }
 log = "0.4"
 prost = "0.6.1"
 rand = "0.7.2"


@@ -1,3 +1,7 @@
+# 0.26.0 [unreleased]
+- Update `libp2p-swarm` and `libp2p-core`.
 # 0.25.0 [2020-11-25]
 - Update `libp2p-swarm` and `libp2p-core`.


@@ -2,7 +2,7 @@
 name = "libp2p-ping"
 edition = "2018"
 description = "Ping protocol for libp2p"
-version = "0.25.0"
+version = "0.26.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
@@ -11,8 +11,8 @@ categories = ["network-programming", "asynchronous"]
 [dependencies]
 futures = "0.3.1"
-libp2p-core = { version = "0.25.0", path = "../../core" }
+libp2p-core = { version = "0.26.0", path = "../../core" }
-libp2p-swarm = { version = "0.25.0", path = "../../swarm" }
+libp2p-swarm = { version = "0.26.0", path = "../../swarm" }
 log = "0.4.1"
 rand = "0.7.2"
 void = "1.0"

@ -1,3 +1,7 @@
# 0.26.0 [unreleased]

- Update `libp2p-core`.

# 0.25.0 [2020-11-25] # 0.25.0 [2020-11-25]
- Update `libp2p-core`. - Update `libp2p-core`.

@ -2,7 +2,7 @@
name = "libp2p-plaintext" name = "libp2p-plaintext"
edition = "2018" edition = "2018"
description = "Plaintext encryption dummy protocol for libp2p" description = "Plaintext encryption dummy protocol for libp2p"
version = "0.25.0" version = "0.26.0"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT" license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p" repository = "https://github.com/libp2p/rust-libp2p"
@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"]
bytes = "0.5" bytes = "0.5"
futures = "0.3.1" futures = "0.3.1"
futures_codec = "0.4.0" futures_codec = "0.4.0"
libp2p-core = { version = "0.25.0", path = "../../core" } libp2p-core = { version = "0.26.0", path = "../../core" }
log = "0.4.8" log = "0.4.8"
prost = "0.6.1" prost = "0.6.1"
unsigned-varint = { version = "0.5.1", features = ["futures-codec"] } unsigned-varint = { version = "0.5.1", features = ["futures-codec"] }

@ -53,7 +53,7 @@ pub struct Remote {
impl HandshakeContext<Local> { impl HandshakeContext<Local> {
fn new(config: PlainText2Config) -> Result<Self, PlainTextError> { fn new(config: PlainText2Config) -> Result<Self, PlainTextError> {
let exchange = Exchange { let exchange = Exchange {
id: Some(config.local_public_key.clone().into_peer_id().into_bytes()), id: Some(config.local_public_key.clone().into_peer_id().to_bytes()),
pubkey: Some(config.local_public_key.clone().into_protobuf_encoding()) pubkey: Some(config.local_public_key.clone().into_protobuf_encoding())
}; };
let mut buf = Vec::with_capacity(exchange.encoded_len()); let mut buf = Vec::with_capacity(exchange.encoded_len());
@ -86,7 +86,7 @@ impl HandshakeContext<Local> {
return Err(PlainTextError::InvalidPayload(None)); return Err(PlainTextError::InvalidPayload(None));
}, },
}; };
let peer_id = match PeerId::from_bytes(prop.id.unwrap_or_default()) { let peer_id = match PeerId::from_bytes(&prop.id.unwrap_or_default()) {
Ok(p) => p, Ok(p) => p,
Err(_) => { Err(_) => {
debug!("failed to parse remote's exchange's id protobuf"); debug!("failed to parse remote's exchange's id protobuf");
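
A rough sketch of the conversion this handshake relies on, assuming `libp2p-core`'s `identity` module: the local side serializes `PublicKey` to `PeerId` to bytes with `to_bytes()`, and the remote side parses the received bytes with the borrowing `from_bytes`.

```rust
use libp2p_core::{identity, PeerId};

fn exchange_id_round_trip() {
    let keypair = identity::Keypair::generate_ed25519();
    let public = keypair.public();

    // What the local handshake puts into the `id` field of the exchange message.
    let id_bytes: Vec<u8> = public.clone().into_peer_id().to_bytes();

    // What the remote handshake does with the bytes it receives.
    let parsed = PeerId::from_bytes(&id_bytes).expect("bytes were produced from a valid PeerId");
    assert_eq!(parsed, public.into_peer_id());
}
```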

@ -1,3 +1,7 @@
# 0.8.0 [unreleased]

- Update `libp2p-swarm` and `libp2p-core`.

# 0.7.0 [2020-12-08] # 0.7.0 [2020-12-08]
- Refine emitted events for inbound requests, introducing - Refine emitted events for inbound requests, introducing

@ -2,7 +2,7 @@
name = "libp2p-request-response" name = "libp2p-request-response"
edition = "2018" edition = "2018"
description = "Generic Request/Response Protocols" description = "Generic Request/Response Protocols"
version = "0.7.0" version = "0.8.0"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT" license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p" repository = "https://github.com/libp2p/rust-libp2p"
@ -13,8 +13,8 @@ categories = ["network-programming", "asynchronous"]
async-trait = "0.1" async-trait = "0.1"
bytes = "0.5.6" bytes = "0.5.6"
futures = "0.3.1" futures = "0.3.1"
libp2p-core = { version = "0.25.0", path = "../../core" } libp2p-core = { version = "0.26.0", path = "../../core" }
libp2p-swarm = { version = "0.25.0", path = "../../swarm" } libp2p-swarm = { version = "0.26.0", path = "../../swarm" }
log = "0.4.11" log = "0.4.11"
lru = "0.6" lru = "0.6"
minicbor = { version = "0.7", features = ["std", "derive"] } minicbor = { version = "0.7", features = ["std", "derive"] }

@ -1,3 +1,7 @@
# 0.26.0 [unreleased]

- Update `libp2p-core`.

# 0.25.0 [2020-11-25] # 0.25.0 [2020-11-25]
- Update `libp2p-core`. - Update `libp2p-core`.

@ -2,7 +2,7 @@
name = "libp2p-secio" name = "libp2p-secio"
edition = "2018" edition = "2018"
description = "Secio encryption protocol for libp2p" description = "Secio encryption protocol for libp2p"
version = "0.25.0" version = "0.26.0"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT" license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p" repository = "https://github.com/libp2p/rust-libp2p"
@ -19,7 +19,7 @@ ctr = "0.3"
futures = "0.3.1" futures = "0.3.1"
hmac = "0.9.0" hmac = "0.9.0"
lazy_static = "1.2.0" lazy_static = "1.2.0"
libp2p-core = { version = "0.25.0", path = "../../core" } libp2p-core = { version = "0.26.0", path = "../../core" }
log = "0.4.6" log = "0.4.6"
prost = "0.6.1" prost = "0.6.1"
pin-project = "1.0.0" pin-project = "1.0.0"

@ -1,3 +1,7 @@
# 0.26.0 [unreleased]

- Update `libp2p-core`.

# 0.25.1 [2020-11-26] # 0.25.1 [2020-11-26]
- Add `ExpandedSwarm::is_connected`. - Add `ExpandedSwarm::is_connected`.

@ -2,7 +2,7 @@
name = "libp2p-swarm" name = "libp2p-swarm"
edition = "2018" edition = "2018"
description = "The libp2p swarm" description = "The libp2p swarm"
version = "0.25.1" version = "0.26.0"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT" license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p" repository = "https://github.com/libp2p/rust-libp2p"
@ -12,7 +12,7 @@ categories = ["network-programming", "asynchronous"]
[dependencies] [dependencies]
either = "1.6.0" either = "1.6.0"
futures = "0.3.1" futures = "0.3.1"
libp2p-core = { version = "0.25.0", path = "../core" } libp2p-core = { version = "0.26.0", path = "../core" }
log = "0.4" log = "0.4"
rand = "0.7" rand = "0.7"
smallvec = "1.0" smallvec = "1.0"

@ -1,3 +1,7 @@
# 0.26.0 [unreleased]

- Update `libp2p-core`.

# 0.25.0 [2020-11-25] # 0.25.0 [2020-11-25]
- Update `libp2p-core`. - Update `libp2p-core`.

@ -2,7 +2,7 @@
name = "libp2p-dns" name = "libp2p-dns"
edition = "2018" edition = "2018"
description = "DNS transport implementation for libp2p" description = "DNS transport implementation for libp2p"
version = "0.25.0" version = "0.26.0"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT" license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p" repository = "https://github.com/libp2p/rust-libp2p"
@ -10,6 +10,6 @@ keywords = ["peer-to-peer", "libp2p", "networking"]
categories = ["network-programming", "asynchronous"] categories = ["network-programming", "asynchronous"]
[dependencies] [dependencies]
libp2p-core = { version = "0.25.0", path = "../../core" } libp2p-core = { version = "0.26.0", path = "../../core" }
log = "0.4.1" log = "0.4.1"
futures = "0.3.1" futures = "0.3.1"

@ -1,3 +1,7 @@
# 0.26.0 [unreleased]

- Update `async-io`.

# 0.25.1 [2020-11-26] # 0.25.1 [2020-11-26]
- Lower `async-std` version to `1.6`, for compatibility - Lower `async-std` version to `1.6`, for compatibility

@ -2,7 +2,7 @@
name = "libp2p-tcp" name = "libp2p-tcp"
edition = "2018" edition = "2018"
description = "TCP/IP transport protocol for libp2p" description = "TCP/IP transport protocol for libp2p"
version = "0.25.1" version = "0.26.0"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT" license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p" repository = "https://github.com/libp2p/rust-libp2p"
@ -15,7 +15,7 @@ futures = "0.3.1"
futures-timer = "3.0" futures-timer = "3.0"
if-addrs = "0.6.4" if-addrs = "0.6.4"
ipnet = "2.0.0" ipnet = "2.0.0"
libp2p-core = { version = "0.25.0", path = "../../core" } libp2p-core = { version = "0.26.0", path = "../../core" }
log = "0.4.1" log = "0.4.1"
socket2 = { version = "0.3.12" } socket2 = { version = "0.3.12" }
tokio = { version = "0.3", default-features = false, features = ["net"], optional = true } tokio = { version = "0.3", default-features = false, features = ["net"], optional = true }

@ -1,3 +1,7 @@
# 0.26.0 [unreleased]

- Update `libp2p-core`.

# 0.25.0 [2020-11-25] # 0.25.0 [2020-11-25]
- Update `libp2p-core`. - Update `libp2p-core`.

@ -2,7 +2,7 @@
name = "libp2p-uds" name = "libp2p-uds"
edition = "2018" edition = "2018"
description = "Unix domain sockets transport for libp2p" description = "Unix domain sockets transport for libp2p"
version = "0.25.0" version = "0.26.0"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT" license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p" repository = "https://github.com/libp2p/rust-libp2p"
@ -11,7 +11,7 @@ categories = ["network-programming", "asynchronous"]
[target.'cfg(all(unix, not(target_os = "emscripten")))'.dependencies] [target.'cfg(all(unix, not(target_os = "emscripten")))'.dependencies]
async-std = { version = "1.6.2", optional = true } async-std = { version = "1.6.2", optional = true }
libp2p-core = { version = "0.25.0", path = "../../core" } libp2p-core = { version = "0.26.0", path = "../../core" }
log = "0.4.1" log = "0.4.1"
futures = "0.3.1" futures = "0.3.1"
tokio = { version = "0.3", default-features = false, features = ["net"], optional = true } tokio = { version = "0.3", default-features = false, features = ["net"], optional = true }

@ -1,3 +1,7 @@
# 0.26.0 [unreleased]

- Update `libp2p-core`.

# 0.25.0 [2020-11-25] # 0.25.0 [2020-11-25]
- Update `libp2p-core`. - Update `libp2p-core`.

@ -1,6 +1,6 @@
[package] [package]
name = "libp2p-wasm-ext" name = "libp2p-wasm-ext"
version = "0.25.0" version = "0.26.0"
authors = ["Pierre Krieger <pierre.krieger1708@gmail.com>"] authors = ["Pierre Krieger <pierre.krieger1708@gmail.com>"]
edition = "2018" edition = "2018"
description = "Allows passing in an external transport in a WASM environment" description = "Allows passing in an external transport in a WASM environment"
@ -12,7 +12,7 @@ categories = ["network-programming", "asynchronous"]
[dependencies] [dependencies]
futures = "0.3.1" futures = "0.3.1"
js-sys = "0.3.19" js-sys = "0.3.19"
libp2p-core = { version = "0.25.0", path = "../../core" } libp2p-core = { version = "0.26.0", path = "../../core" }
parity-send-wrapper = "0.1.0" parity-send-wrapper = "0.1.0"
wasm-bindgen = "0.2.42" wasm-bindgen = "0.2.42"
wasm-bindgen-futures = "0.4.4" wasm-bindgen-futures = "0.4.4"

@ -1,3 +1,7 @@
# 0.27.0 [unreleased]

- Update `libp2p-core`.

# 0.26.3 [2020-12-10] # 0.26.3 [2020-12-10]
- Update `async-tls`. - Update `async-tls`.

@ -2,7 +2,7 @@
name = "libp2p-websocket" name = "libp2p-websocket"
edition = "2018" edition = "2018"
description = "WebSocket transport for libp2p" description = "WebSocket transport for libp2p"
version = "0.26.3" version = "0.27.0"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT" license = "MIT"
repository = "https://github.com/libp2p/rust-libp2p" repository = "https://github.com/libp2p/rust-libp2p"
@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"]
async-tls = "0.11.0" async-tls = "0.11.0"
either = "1.5.3" either = "1.5.3"
futures = "0.3.1" futures = "0.3.1"
libp2p-core = { version = "0.25.0", path = "../../core" } libp2p-core = { version = "0.26.0", path = "../../core" }
log = "0.4.8" log = "0.4.8"
quicksink = "0.1" quicksink = "0.1"
rustls = "0.19.0" rustls = "0.19.0"

@ -168,4 +168,3 @@ impl From<io::Error> for Error {
Error::Io(e) Error::Io(e)
} }
} }