mirror of https://github.com/fluencelabs/rust-libp2p (synced 2025-06-30 02:01:35 +00:00)
Implement Kademlia peer discovery (#120)
* Impl Clone for SwarmController and remove 'static
* Implement Kademlia
* Implement ConnectionReuse correctly
* Implement ConnectionReuse correctly
* Add some tests and fixes
* Remove useless boolean in active_connections
* Correctly run tests
* Optimize the processing
* Rustfmt on libp2p-kad
* Improve Kademlia example
* Next incoming is now in two steps
* Some work
* Remove log
* Fix dialing a node even if we already have a connection
* Add a proper PeerId to Peerstore
* Turn identify into a transport layer
* Expose the dialed multiaddress
* Add identified nodes to the peerstore
* Allow configuring the TTL of the addresses
* Split identify in two modules
* Some comments and tweaks
* Run rustfmt
* More work
* Add test and bugfix
* Fix everything
* Start transition to new identify system
* More work
* Minor style
* Start implementation of Kademlia server upgrade
* Continue implementing the Kademlia server
* Start reimplementing high-level kademlia code
* Continue reimplementing high-level code
* More work
* More work
* More work
* Fix wrong address reported when dialing
* Make it work
* Remove cluster_level field everywhere
* Fix bug in varint-rs when encoding
* More work
* More work
* More work
* More work
* More work
* Bugfix
* More work
* Implement ping
* Style in kademlia_handler
* More work
* Better error handling in query.rs
* More work
* More work
* More work
* Debug impls
* Some cleanup in swarm
* More work
* Clean up changes in swarm
* Unpublish the kbucket module
* Fix examples and some warnings
* Fix websocket browser code
* Rustfmt on libp2p-kad
* Kad initialization process
* Add logging to the example
* Fix concerns
* Fix style
@@ -32,15 +32,8 @@ where
     P: Peerstore + Clone,
 {
     const ADDRESSES: &[&str] = &[
-        "/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",
-        "/ip4/104.236.179.241/tcp/4001/ipfs/QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM",
-        "/ip4/162.243.248.213/tcp/4001/ipfs/QmSoLueR4xBeUbY9WZ9xGUUxunbKWcrNFTDAadQJmocnWm",
-        "/ip4/128.199.219.111/tcp/4001/ipfs/QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu",
-        "/ip4/104.236.76.40/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64",
-        "/ip4/178.62.158.247/tcp/4001/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd",
-        "/ip4/178.62.61.185/tcp/4001/ipfs/QmSoLMeWqB7YGVLJN3pNLQpmmEk35v6wYtsMGLzSr5QBU3",
-        "/dns4/wss0.bootstrap.libp2p.io/tcp/443/wss/ipfs/QmZMxNdpMkewiVZLMRxaNxUeZpDUb34pWjZ1kZvsd16Zic",
-        "/dns4/wss1.bootstrap.libp2p.io/tcp/443/wss/ipfs/Qmbut9Ywz9YEDrz8ySBSgWyJk41Uvm2QJPhwDJzJyGFsD6"
+        "/ip4/127.0.0.1/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",
+        // TODO: add some bootstrap nodes here
     ];

     let ttl = Duration::from_secs(100 * 365 * 24 * 3600);
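Aside (not part of the patch): each bootstrap entry is a multiaddr whose final /ipfs/<peer-id> component names the peer, and the second hunk below pops that component off before the remaining transport address is stored. A minimal sketch of that decomposition, reusing the parse and pop calls visible in the diff; the main wrapper is added here only for illustration:

// Hypothetical illustration, not part of this commit: decompose one
// bootstrap entry using the same multiaddr API the patched code relies on.
extern crate multiaddr;

use multiaddr::{AddrComponent, Multiaddr};

fn main() {
    let mut addr: Multiaddr = "/ip4/127.0.0.1/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
        .parse()
        .expect("failed to parse multiaddr");

    // The last component identifies the peer; everything before it is the
    // transport address to dial.
    match addr.pop() {
        Some(AddrComponent::IPFS(peer_id_bytes)) => {
            println!("peer id bytes: {:?}", peer_id_bytes);
        }
        other => println!("unexpected trailing component: {:?}", other),
    }

    // What remains is "/ip4/127.0.0.1/tcp/4001".
    println!("dial address: {}", addr);
}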
@@ -51,14 +44,16 @@ where
             .expect("failed to parse hard-coded multiaddr");

         let ipfs_component = multiaddr.pop().expect("hard-coded multiaddr is empty");
-        let public_key = match ipfs_component {
-            multiaddr::AddrComponent::IPFS(key) => key,
+        let peer = match ipfs_component {
+            multiaddr::AddrComponent::IPFS(key) => {
+                PeerId::from_bytes(key).expect("invalid peer id")
+            }
             _ => panic!("hard-coded multiaddr didn't end with /ipfs/"),
         };

         peer_store
             .clone()
-            .peer_or_create(&PeerId::from_bytes(public_key).unwrap())
+            .peer_or_create(&peer)
             .add_addr(multiaddr, ttl.clone());
     }
 }
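For readability, here is a minimal sketch of the whole function as it reads once both hunks are applied. The function name ipfs_bootstrap, the loop structure, and the import paths are assumptions inferred from the hunk headers ("where P: Peerstore + Clone") and the surrounding context; the statements themselves come from the diff:

// A sketch of the post-patch function. Names and import paths outside the
// diff are assumptions, not necessarily the file's actual layout.
extern crate libp2p_peerstore;
extern crate multiaddr;

use libp2p_peerstore::{PeerAccess, PeerId, Peerstore}; // assumed paths
use multiaddr::{AddrComponent, Multiaddr};
use std::time::Duration;

fn ipfs_bootstrap<P>(peer_store: P)
where
    P: Peerstore + Clone,
{
    const ADDRESSES: &[&str] = &[
        "/ip4/127.0.0.1/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",
        // TODO: add some bootstrap nodes here
    ];

    // An effectively permanent TTL (~100 years) for the bootstrap addresses.
    let ttl = Duration::from_secs(100 * 365 * 24 * 3600);

    for address in ADDRESSES.iter() {
        let mut multiaddr = address
            .parse::<Multiaddr>()
            .expect("failed to parse hard-coded multiaddr");

        // Each address must end with /ipfs/<peer-id>; pop that component off
        // and decode it into a PeerId.
        let ipfs_component = multiaddr.pop().expect("hard-coded multiaddr is empty");
        let peer = match ipfs_component {
            AddrComponent::IPFS(key) => PeerId::from_bytes(key).expect("invalid peer id"),
            _ => panic!("hard-coded multiaddr didn't end with /ipfs/"),
        };

        // Register what is left of the multiaddr as a known address for that peer.
        peer_store
            .clone()
            .peer_or_create(&peer)
            .add_addr(multiaddr, ttl.clone());
    }
}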