optimize allocations

Try to preallocate slices up-front instead of repeatedly reallocating.
Steven Allen 2017-12-05 11:31:34 -08:00
parent 1677049a56
commit 491afc8e27
5 changed files with 18 additions and 13 deletions
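Every hunk below applies the same pattern: when the final length of a slice, or at least an upper bound on it, is known before the loop that fills it, allocate once with make(T, 0, n) and append into the reserved capacity rather than starting from a nil slice and letting append grow it by repeated reallocation. As a rough, self-contained sketch of the two shapes (the filter and its names are invented for illustration, not taken from this commit):

package main

import "fmt"

// collectGrowing starts from a nil slice; append has to grow the backing
// array several times as the result fills up.
func collectGrowing(in []int) []int {
	var out []int
	for _, v := range in {
		if v > 0 {
			out = append(out, v)
		}
	}
	return out
}

// collectPreallocated reserves capacity once. len(in) is only an upper
// bound, so this can over-allocate slightly, but no reallocation or
// copying happens while appending.
func collectPreallocated(in []int) []int {
	out := make([]int, 0, len(in))
	for _, v := range in {
		if v > 0 {
			out = append(out, v)
		}
	}
	return out
}

func main() {
	in := []int{3, -1, 4, 0, 5}
	fmt.Println(collectGrowing(in), collectPreallocated(in))
}

The second form trades a possible slight over-allocation for doing exactly one allocation, which is the trade-off the hunks below make explicit.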

dht.go

@@ -330,7 +330,7 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, p peer.ID, count int) [
 		return nil
 	}
-	var filtered []peer.ID
+	filtered := make([]peer.ID, 0, len(closer))
 	for _, clp := range closer {
 		// == to self? thats bad

@@ -197,8 +197,9 @@ func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Mess
 		return resp, nil
 	}
-	var withAddresses []pstore.PeerInfo
 	closestinfos := pstore.PeerInfos(dht.peerstore, closest)
+	// possibly an over-allocation but this array is temporary anyways.
+	withAddresses := make([]pstore.PeerInfo, 0, len(closestinfos))
 	for _, pi := range closestinfos {
 		if len(pi.Addrs) > 0 {
 			withAddresses = append(withAddresses, pi)

@@ -110,8 +110,9 @@ func (dht *IpfsDHT) closerPeersSingle(ctx context.Context, key string, p peer.ID
 		return nil, err
 	}
-	var out []peer.ID
-	for _, pbp := range pmes.GetCloserPeers() {
+	closer := pmes.GetCloserPeers()
+	out := make([]peer.ID, 0, len(closer))
+	for _, pbp := range closer {
 		pid := peer.ID(pbp.GetId())
 		if pid != dht.self { // dont add self
 			dht.peerstore.AddAddrs(pid, pbp.Addresses(), pstore.TempAddrTTL)

@@ -270,6 +270,7 @@ func (pm *ProviderManager) run() {
 				log.Error("Error loading provider keys: ", err)
 				continue
 			}
+			now := time.Now()
 			for {
 				k, ok := keys()
 				if !ok {
@@ -281,21 +282,23 @@ func (pm *ProviderManager) run() {
 					log.Error("error loading known provset: ", err)
 					continue
 				}
-				var filtered []peer.ID
 				for p, t := range provs.set {
-					if time.Now().Sub(t) > ProvideValidity {
+					if now.Sub(t) > ProvideValidity {
 						delete(provs.set, p)
-					} else {
-						filtered = append(filtered, p)
 					}
 				}
-				provs.providers = filtered
-				if len(filtered) == 0 {
+				// have we run out of providers?
+				if len(provs.set) == 0 {
+					provs.providers = nil
 					err := pm.deleteProvSet(k)
 					if err != nil {
 						log.Error("error deleting provider set: ", err)
 					}
+				} else if len(provs.providers) != len(provs.set) {
+					provs.providers = make([]peer.ID, 0, len(provs.set))
+					for p := range provs.set {
+						provs.providers = append(provs.providers, p)
+					}
 				}
 			}
 		case <-pm.proc.Closing():
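Beyond the preallocation, the provider GC loop changes shape in two small ways: the clock is read once per pass (now := time.Now()) instead of once per provider entry, and provs.providers is rebuilt from the set only when expired entries were actually dropped, detected by comparing the two lengths. A rough sketch of that structure, with hypothetical names rather than the real ProviderManager internals:

package main

import (
	"fmt"
	"time"
)

// gcProviders mirrors the shape of the change: read the clock once per pass,
// drop expired entries from the set, and rebuild the cached slice only when
// something was removed, preallocating it to its final length.
// All names here are illustrative, not the ProviderManager internals.
func gcProviders(set map[string]time.Time, cached []string, validity time.Duration) []string {
	now := time.Now() // one clock read per pass, not one per entry
	for p, t := range set {
		if now.Sub(t) > validity {
			delete(set, p)
		}
	}
	if len(cached) == len(set) {
		return cached // nothing expired; reuse the existing slice
	}
	cached = make([]string, 0, len(set))
	for p := range set {
		cached = append(cached, p)
	}
	return cached
}

func main() {
	set := map[string]time.Time{
		"fresh": time.Now(),
		"stale": time.Now().Add(-48 * time.Hour),
	}
	fmt.Println(gcProviders(set, []string{"fresh", "stale"}, 24*time.Hour))
}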

@@ -93,7 +93,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
 		return nil, err
 	}
-	var recs [][]byte
+	recs := make([][]byte, 0, len(vals))
 	for _, v := range vals {
 		if v.Val != nil {
 			recs = append(recs, v.Val)
@@ -144,7 +144,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
 	}
 
 func (dht *IpfsDHT) GetValues(ctx context.Context, key string, nvals int) ([]routing.RecvdVal, error) {
-	var vals []routing.RecvdVal
+	vals := make([]routing.RecvdVal, 0, nvals)
 	var valslock sync.Mutex
 
 	// If we have it local, dont bother doing an RPC!
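GetValues is the one spot where the exact result count isn't known up-front; nvals is the most records the query will collect, so it serves as the capacity hint, and appends still go through valslock because results arrive from concurrent lookups. A toy reconstruction of that bounded, mutex-guarded collection (only the vals/valslock naming comes from the diff; the rest is invented):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const nvals = 4 // upper bound on how many results we want

	// Capacity matches the bound, so append never reallocates even
	// though values are appended from several goroutines.
	vals := make([]int, 0, nvals)
	var valslock sync.Mutex

	var wg sync.WaitGroup
	for i := 0; i < nvals; i++ {
		wg.Add(1)
		go func(v int) {
			defer wg.Done()
			valslock.Lock()
			vals = append(vals, v)
			valslock.Unlock()
		}(i)
	}
	wg.Wait()
	fmt.Println(vals)
}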