golangci-lint run -D errcheck (#298)

Matt Joiner 2019-03-14 11:07:15 +11:00, committed by GitHub
parent 076b93d1ce
commit 86d78dc072
7 changed files with 11 additions and 101 deletions

dht.go

@@ -4,7 +4,6 @@ import (
	"bytes"
	"context"
	"errors"
-	"fmt"
	"sync"
	"time"
@@ -20,7 +19,6 @@ import (
	logging "github.com/ipfs/go-log"
	goprocess "github.com/jbenet/goprocess"
	goprocessctx "github.com/jbenet/goprocess/context"
-	ci "github.com/libp2p/go-libp2p-crypto"
	host "github.com/libp2p/go-libp2p-host"
	kb "github.com/libp2p/go-libp2p-kbucket"
	inet "github.com/libp2p/go-libp2p-net"
@@ -253,17 +251,6 @@ func (dht *IpfsDHT) getLocal(key string) (*recpb.Record, error) {
	return rec, nil
}
-
-// getOwnPrivateKey attempts to load the local peers private
-// key from the peerstore.
-func (dht *IpfsDHT) getOwnPrivateKey() (ci.PrivKey, error) {
-	sk := dht.peerstore.PrivKey(dht.self)
-	if sk == nil {
-		logger.Warningf("%s dht cannot get own private key!", dht.self)
-		return nil, fmt.Errorf("cannot get private key to sign record!")
-	}
-	return sk, nil
-}

// putLocal stores the key value pair in the datastore
func (dht *IpfsDHT) putLocal(key string, rec *recpb.Record) error {
	logger.Debugf("putLocal: %v %v", key, rec)

dht_test.go

@@ -36,7 +36,6 @@ import (
ma "github.com/multiformats/go-multiaddr"
)
var testCaseValues = map[string][]byte{}
var testCaseCids []cid.Cid
func init() {
@@ -58,9 +57,9 @@ type testValidator struct{}
func (testValidator) Select(_ string, bs [][]byte) (int, error) {
	index := -1
	for i, b := range bs {
-		if bytes.Compare(b, []byte("newer")) == 0 {
+		if bytes.Equal(b, []byte("newer")) {
			index = i
-		} else if bytes.Compare(b, []byte("valid")) == 0 {
+		} else if bytes.Equal(b, []byte("valid")) {
			if index == -1 {
				index = i
			}
@@ -72,7 +71,7 @@ func (testValidator) Select(_ string, bs [][]byte) (int, error) {
	return index, nil
}

func (testValidator) Validate(_ string, b []byte) error {
-	if bytes.Compare(b, []byte("expired")) == 0 {
+	if bytes.Equal(b, []byte("expired")) {
		return errors.New("expired")
	}
	return nil
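
The two hunks above are the same mechanical fix, the one gosimple reports as S1004: comparing the three-way result of bytes.Compare against zero is just an equality test, and bytes.Equal states that directly. A minimal standalone sketch of the before and after (the literals are illustrative, not taken from the test):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	a, b := []byte("valid"), []byte("valid")

	// Before: equality expressed indirectly through a three-way comparison.
	fmt.Println(bytes.Compare(a, b) == 0) // true

	// After: bytes.Equal says exactly what is meant.
	fmt.Println(bytes.Equal(a, b)) // true
}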
@@ -168,8 +167,7 @@ func bootstrap(t *testing.T, ctx context.Context, dhts []*IpfsDHT) {
	// 100 sync https://gist.github.com/jbenet/6c59e7c15426e48aaedd
	// probably because results compound

-	var cfg BootstrapConfig
-	cfg = DefaultBootstrapConfig
+	cfg := DefaultBootstrapConfig
	cfg.Queries = 3

	start := rand.Intn(len(dhts)) // randomize to decrease bias.
@@ -683,8 +681,7 @@ func TestPeriodicBootstrap(t *testing.T) {
		}
	}()

-	var cfg BootstrapConfig
-	cfg = DefaultBootstrapConfig
+	cfg := DefaultBootstrapConfig
	cfg.Queries = 5

	t.Logf("dhts are not connected. %d", nDHTs)
@@ -1032,33 +1029,6 @@ func TestFindPeersConnectedToPeer(t *testing.T) {
	}
}
-
-func testPeerListsMatch(t *testing.T, p1, p2 []peer.ID) {
-	if len(p1) != len(p2) {
-		t.Fatal("did not find as many peers as should have", p1, p2)
-	}
-
-	ids1 := make([]string, len(p1))
-	ids2 := make([]string, len(p2))
-
-	for i, p := range p1 {
-		ids1[i] = string(p)
-	}
-
-	for i, p := range p2 {
-		ids2[i] = string(p)
-	}
-
-	sort.Sort(sort.StringSlice(ids1))
-	sort.Sort(sort.StringSlice(ids2))
-
-	for i := range ids1 {
-		if ids1[i] != ids2[i] {
-			t.Fatal("Didnt find expected peer", ids1[i], ids2)
-		}
-	}
-}

func TestConnectCollision(t *testing.T) {
// t.Skip("skipping test to debug another")
if testing.Short() {

dial_queue.go

@@ -2,7 +2,6 @@ package dht

import (
	"context"
-	"fmt"
	"math"
	"sync"
	"time"
@@ -76,17 +75,6 @@ func dqDefaultConfig() dqConfig {
	}
}
-
-func (dqc *dqConfig) validate() error {
-	if dqc.minParallelism > dqc.maxParallelism {
-		return fmt.Errorf("minParallelism must be below maxParallelism; actual values: min=%d, max=%d",
-			dqc.minParallelism, dqc.maxParallelism)
-	}
-	if dqc.scalingFactor < 1 {
-		return fmt.Errorf("scalingFactor must be >= 1; actual value: %f", dqc.scalingFactor)
-	}
-	return nil
-}

type waitingCh struct {
	ch chan<- peer.ID
	ts time.Time

handlers.go

@@ -118,7 +118,7 @@ func (dht *IpfsDHT) checkLocalDatastore(k []byte) (*recpb.Record, error) {
		recordIsBad = true
	}

-	if time.Now().Sub(recvtime) > MaxRecordAge {
+	if time.Since(recvtime) > MaxRecordAge {
		logger.Debug("old record found, tossing.")
		recordIsBad = true
	}
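
time.Since(t) is documented shorthand for time.Now().Sub(t), so the change above (gosimple's S1012) is purely a readability fix. A standalone sketch with an illustrative cutoff in place of MaxRecordAge:

package main

import (
	"fmt"
	"time"
)

func main() {
	recvtime := time.Now().Add(-48 * time.Hour) // pretend the record is two days old
	maxAge := 36 * time.Hour

	// Both expressions compute the same duration; time.Since is the idiomatic form.
	fmt.Println(time.Now().Sub(recvtime) > maxAge) // true
	fmt.Println(time.Since(recvtime) > maxAge)     // true
}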
@@ -320,7 +320,7 @@ func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.
		logger.Debugf("%s have the value. added self as provider", reqDesc)
	}

-	if providers != nil && len(providers) > 0 {
+	if len(providers) > 0 {
		infos := pstore.PeerInfos(dht.peerstore, providers)
		resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		logger.Debugf("%s have %d providers: %s", reqDesc, len(providers), infos)

providers/providers.go

@@ -31,12 +31,10 @@ type ProviderManager struct {
	// all non channel fields are meant to be accessed only within
	// the run method
	providers *lru.Cache
-	lpeer     peer.ID
	dstore    ds.Datastore

	newprovs chan *addProv
	getprovs chan *getProv
-	period   time.Duration
	proc     goprocess.Process

	cleanupInterval time.Duration

query.go

@@ -28,11 +28,9 @@ type dhtQuery struct {
}

type dhtQueryResult struct {
	value []byte // GetValue
-	peer          *pstore.PeerInfo   // FindPeer
-	providerPeers []pstore.PeerInfo  // GetProviders
-	closerPeers   []*pstore.PeerInfo // *
-	success       bool
+	peer        *pstore.PeerInfo   // FindPeer
+	closerPeers []*pstore.PeerInfo // *
+	success     bool

	finalSet   *pset.PeerSet
	queriedSet *pset.PeerSet
@@ -146,10 +144,8 @@ func (r *dhtQueryRunner) Run(ctx context.Context, peers []peer.ID) (*dhtQueryRes
	// ONLY AFTER spawn workers has exited.
	r.proc.Go(r.spawnWorkers)

	// so workers are working.

	// wait until they're done.
-	err := routing.ErrNotFound
+	var err error

	// now, if the context finishes, close the proc.
	// we have to do it here because the logic before is setup, which
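
The err change above swaps a preset error value for a plain zero-value declaration. A likely motivation: ineffassign and staticcheck (SA4006) flag values that are overwritten before anything reads them, and presetting err to routing.ErrNotFound only matters on a path that returns it unassigned. A hypothetical reduction of the pattern, not the actual Run logic:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// run stands in for dhtQueryRunner.Run: err is assigned inside control flow,
// so a preset initial value would be a dead store on every path that assigns it.
func run(found bool) error {
	var err error // start from the zero value
	if !found {
		err = errNotFound
	}
	return err
}

func main() {
	fmt.Println(run(true))  // <nil>
	fmt.Println(run(false)) // not found
}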

util.go

@@ -1,9 +1,5 @@
package dht
-
-import (
-	"sync"
-)

// Pool size is the number of nodes used for group find/set RPC calls
var PoolSize = 6
@@ -12,28 +8,3 @@ var KValue = 20

// Alpha is the concurrency factor for asynchronous requests.
var AlphaValue = 3
-
-// A counter for incrementing a variable across multiple threads
-type counter struct {
-	n   int
-	mut sync.Mutex
-}
-
-func (c *counter) Increment() {
-	c.mut.Lock()
-	c.n++
-	c.mut.Unlock()
-}
-
-func (c *counter) Decrement() {
-	c.mut.Lock()
-	c.n--
-	c.mut.Unlock()
-}
-
-func (c *counter) Size() (s int) {
-	c.mut.Lock()
-	s = c.n
-	c.mut.Unlock()
-	return
-}