Mirror of https://github.com/fluencelabs/go-libp2p-kad-dht (synced 2025-04-25 06:42:13 +00:00)
optimize allocations
Try to preallocate slices up-front instead of repeatedly reallocating.
parent 1677049a56
commit 491afc8e27
dht.go
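Most hunks below apply the same pattern: when the final (or maximum) length of a slice is known before the loop that fills it, reserve the capacity once with make([]T, 0, n) rather than appending to a nil slice and letting the runtime regrow the backing array. A minimal standalone sketch of the before/after shapes (filterEven and the int element type are illustrative only, not code from this repository):

package main

import "fmt"

// Before: appending to a nil slice may reallocate and copy the backing
// array several times as the result grows.
func filterEven(in []int) []int {
    var out []int
    for _, v := range in {
        if v%2 == 0 {
            out = append(out, v)
        }
    }
    return out
}

// After: reserve capacity up front. len(in) can be an over-allocation
// when many values are filtered out, but there is at most one allocation.
func filterEvenPrealloc(in []int) []int {
    out := make([]int, 0, len(in))
    for _, v := range in {
        if v%2 == 0 {
            out = append(out, v)
        }
    }
    return out
}

func main() {
    in := []int{1, 2, 3, 4, 5, 6}
    fmt.Println(filterEven(in))         // [2 4 6]
    fmt.Println(filterEvenPrealloc(in)) // [2 4 6]
}

As the diff's own comment notes, the hint can over-allocate when entries are filtered out, which is acceptable for short-lived slices.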
@@ -330,7 +330,7 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, p peer.ID, count int) [
         return nil
     }

-    var filtered []peer.ID
+    filtered := make([]peer.ID, 0, len(closer))
     for _, clp := range closer {

         // == to self? thats bad
@@ -197,8 +197,9 @@ func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Mess
         return resp, nil
     }

-    var withAddresses []pstore.PeerInfo
     closestinfos := pstore.PeerInfos(dht.peerstore, closest)
+    // possibly an over-allocation but this array is temporary anyways.
+    withAddresses := make([]pstore.PeerInfo, 0, len(closestinfos))
     for _, pi := range closestinfos {
         if len(pi.Addrs) > 0 {
             withAddresses = append(withAddresses, pi)
@@ -110,8 +110,9 @@ func (dht *IpfsDHT) closerPeersSingle(ctx context.Context, key string, p peer.ID
         return nil, err
     }

-    var out []peer.ID
-    for _, pbp := range pmes.GetCloserPeers() {
+    closer := pmes.GetCloserPeers()
+    out := make([]peer.ID, 0, len(closer))
+    for _, pbp := range closer {
         pid := peer.ID(pbp.GetId())
         if pid != dht.self { // dont add self
             dht.peerstore.AddAddrs(pid, pbp.Addresses(), pstore.TempAddrTTL)
@@ -270,6 +270,7 @@ func (pm *ProviderManager) run() {
                 log.Error("Error loading provider keys: ", err)
                 continue
             }
+            now := time.Now()
             for {
                 k, ok := keys()
                 if !ok {
@@ -281,21 +282,23 @@ func (pm *ProviderManager) run() {
                     log.Error("error loading known provset: ", err)
                     continue
                 }
-                var filtered []peer.ID
                 for p, t := range provs.set {
-                    if time.Now().Sub(t) > ProvideValidity {
+                    if now.Sub(t) > ProvideValidity {
                         delete(provs.set, p)
-                    } else {
-                        filtered = append(filtered, p)
                     }
                 }
-
-                provs.providers = filtered
-                if len(filtered) == 0 {
+                // have we run out of providers?
+                if len(provs.set) == 0 {
+                    provs.providers = nil
                     err := pm.deleteProvSet(k)
                     if err != nil {
                         log.Error("error deleting provider set: ", err)
                     }
+                } else if len(provs.providers) != len(provs.set) {
+                    provs.providers = make([]peer.ID, 0, len(provs.set))
+                    for p := range provs.set {
+                        provs.providers = append(provs.providers, p)
+                    }
                 }
             }
         case <-pm.proc.Closing():
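Besides preallocating, the two ProviderManager.run hunks above hoist time.Now() out of the per-key loop and rebuild the cached providers slice from the set map only when garbage collection actually changed its size. A rough standalone sketch of that shape, with hypothetical names (gcSet, validity) rather than the repository's types:

package main

import (
    "fmt"
    "time"
)

// gcSet stands in for the provider-set bookkeeping in the diff above:
// a map of entry -> last-seen time plus a cached slice of live entries.
type gcSet struct {
    set       map[string]time.Time
    providers []string
}

// gc mirrors the shape of the new loop body: take the time once, drop
// expired entries from the map, and rebuild the cached slice only when
// its length no longer matches the map.
func (s *gcSet) gc(validity time.Duration) {
    now := time.Now()
    for p, t := range s.set {
        if now.Sub(t) > validity {
            delete(s.set, p)
        }
    }
    if len(s.set) == 0 {
        // (the real code also deletes the persisted set here)
        s.providers = nil
    } else if len(s.providers) != len(s.set) {
        s.providers = make([]string, 0, len(s.set))
        for p := range s.set {
            s.providers = append(s.providers, p)
        }
    }
}

func main() {
    s := &gcSet{set: map[string]time.Time{
        "fresh": time.Now(),
        "stale": time.Now().Add(-48 * time.Hour),
    }}
    s.gc(24 * time.Hour)
    fmt.Println(s.providers) // [fresh]
}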
@@ -93,7 +93,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
         return nil, err
     }

-    var recs [][]byte
+    recs := make([][]byte, 0, len(vals))
     for _, v := range vals {
         if v.Val != nil {
             recs = append(recs, v.Val)
@@ -144,7 +144,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
 }

 func (dht *IpfsDHT) GetValues(ctx context.Context, key string, nvals int) ([]routing.RecvdVal, error) {
-    var vals []routing.RecvdVal
+    vals := make([]routing.RecvdVal, 0, nvals)
     var valslock sync.Mutex

     // If we have it local, dont bother doing an RPC!
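The GetValues hunk takes its capacity hint from a different source: the caller's requested maximum nvals rather than the length of an existing slice. A small illustrative sketch of sizing by an expected upper bound (collectUpTo and next are hypothetical helpers, not part of this repository):

package main

import "fmt"

// collectUpTo gathers at most max results, so that bound works as the
// capacity even though the source length is unknown in advance.
func collectUpTo(max int, next func() (string, bool)) []string {
    vals := make([]string, 0, max)
    for len(vals) < max {
        v, ok := next()
        if !ok {
            break
        }
        vals = append(vals, v)
    }
    return vals
}

func main() {
    src := []string{"a", "b", "c", "d"}
    i := 0
    next := func() (string, bool) {
        if i >= len(src) {
            return "", false
        }
        v := src[i]
        i++
        return v, true
    }
    fmt.Println(collectUpTo(3, next)) // [a b c]
}

If fewer results arrive than the requested maximum, the extra capacity is simply left unused.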