mirror of
https://github.com/fluencelabs/tendermint
synced 2025-06-10 03:51:20 +00:00
add staticcheck linting (#3828)
Cleanup to add the linter.

gRPC change: replace the deprecated grpc.WithDialer with grpc.WithContextDialer (https://godoc.org/google.golang.org/grpc#WithContextDialer, https://godoc.org/google.golang.org/grpc#WithDialer, grpc/grpc-go#2627), and grpc.FailFast with grpc.WaitForReady.

Prometheus change: stop using UninstrumentedHandler, which is being deprecated.

Empty branches (empty if or else statements): commented out rather than deleted entirely, although no reason was found to keep them.

Could not replicate issue #3406, but if we want to keep that body commented out, then the if statement should be commented out (or nolint-ed) as well.
This commit is contained in:
parent 0335add437
commit ff9e08a32f
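Below, the abci and rpc/grpc clients migrate their custom dialers from grpc.WithDialer to grpc.WithContextDialer, and per-call grpc.FailFast(true) options become grpc.WaitForReady(true). As a rough standalone sketch of the same migration (the address and dialer names here are illustrative, not from this repo; note grpc-go documents FailFast(f) as equivalent to WaitForReady(!f), so the swap also flips the call semantics from fail-fast to wait-for-ready):

package main

import (
    "context"
    "net"

    "google.golang.org/grpc"
)

// contextDialer has the signature grpc.WithContextDialer expects; the old
// grpc.WithDialer signature was func(addr string, timeout time.Duration),
// with the timeout now carried by ctx instead of a separate argument.
func contextDialer(ctx context.Context, addr string) (net.Conn, error) {
    var d net.Dialer
    return d.DialContext(ctx, "tcp", addr)
}

func main() {
    // Before: grpc.Dial(addr, grpc.WithInsecure(), grpc.WithDialer(oldDialer))
    conn, err := grpc.Dial("127.0.0.1:26658", grpc.WithInsecure(),
        grpc.WithContextDialer(contextDialer))
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    // Per-call option migration:
    // Before: client.Echo(ctx, req, grpc.FailFast(true))
    // After:  client.Echo(ctx, req, grpc.WaitForReady(true))
    _ = grpc.WaitForReady(true)
}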
@@ -8,7 +8,6 @@ linters:
   - golint
   - maligned
   - errcheck
-  - staticcheck
   - interfacer
   - unconvert
   - goconst
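With staticcheck no longer disabled, the rest of the commit addresses its findings. Where a flagged empty branch is kept on purpose (the issue #3406 case below), the finding is silenced inline instead; a schematic sketch of that pattern (the function here is hypothetical, not from the repo):

package main

import "errors"

// handleMsgError is a made-up stand-in: staticcheck's empty-branch check
// would flag the if statement, but the branch documents logging that was
// intentionally disabled, so the finding is suppressed with an inline
// nolint directive rather than by deleting the branch.
func handleMsgError(err error) {
    if err != nil { // nolint:staticcheck
        // cs.Logger.Error(...) intentionally disabled, see
        // https://github.com/tendermint/tendermint/issues/3406
    }
}

func main() {
    handleMsgError(errors.New("example"))
}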
@@ -6,8 +6,8 @@ import (
     "sync"
     "time"

-    context "golang.org/x/net/context"
-    grpc "google.golang.org/grpc"
+    "golang.org/x/net/context"
+    "google.golang.org/grpc"

     "github.com/tendermint/tendermint/abci/types"
     cmn "github.com/tendermint/tendermint/libs/common"
@@ -39,7 +39,7 @@ func NewGRPCClient(addr string, mustConnect bool) *grpcClient {
     return cli
 }

-func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
+func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
     return cmn.Connect(addr)
 }
@@ -49,7 +49,7 @@ func (cli *grpcClient) OnStart() error {
     }
 RETRY_LOOP:
     for {
-        conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
+        conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
         if err != nil {
             if cli.mustConnect {
                 return err
@@ -65,7 +65,7 @@ RETRY_LOOP:

 ENSURE_CONNECTED:
     for {
-        _, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.FailFast(true))
+        _, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
         if err == nil {
             break ENSURE_CONNECTED
         }
@@ -125,7 +125,7 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {

 func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
     req := types.ToRequestEcho(msg)
-    res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.FailFast(true))
+    res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.WaitForReady(true))
     if err != nil {
         cli.StopForError(err)
     }
@@ -134,7 +134,7 @@ func (cli *grpcClient) EchoAsync(msg string) *ReqRes {

 func (cli *grpcClient) FlushAsync() *ReqRes {
     req := types.ToRequestFlush()
-    res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.FailFast(true))
+    res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.WaitForReady(true))
     if err != nil {
         cli.StopForError(err)
     }
@@ -143,7 +143,7 @@ func (cli *grpcClient) FlushAsync() *ReqRes {

 func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
     req := types.ToRequestInfo(params)
-    res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.FailFast(true))
+    res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.WaitForReady(true))
     if err != nil {
         cli.StopForError(err)
     }
@@ -152,7 +152,7 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {

 func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
     req := types.ToRequestSetOption(params)
-    res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.FailFast(true))
+    res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true))
     if err != nil {
         cli.StopForError(err)
     }
@@ -161,7 +161,7 @@ func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {

 func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
     req := types.ToRequestDeliverTx(params)
-    res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.FailFast(true))
+    res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true))
     if err != nil {
         cli.StopForError(err)
     }
@@ -170,7 +170,7 @@ func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {

 func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {
     req := types.ToRequestCheckTx(params)
-    res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.FailFast(true))
+    res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true))
     if err != nil {
         cli.StopForError(err)
     }
@@ -179,7 +179,7 @@ func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {

 func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
     req := types.ToRequestQuery(params)
-    res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.FailFast(true))
+    res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.WaitForReady(true))
     if err != nil {
         cli.StopForError(err)
     }
@@ -188,7 +188,7 @@ func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {

 func (cli *grpcClient) CommitAsync() *ReqRes {
     req := types.ToRequestCommit()
-    res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.FailFast(true))
+    res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.WaitForReady(true))
     if err != nil {
         cli.StopForError(err)
     }
@@ -197,7 +197,7 @@ func (cli *grpcClient) CommitAsync() *ReqRes {

 func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
     req := types.ToRequestInitChain(params)
-    res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.FailFast(true))
+    res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.WaitForReady(true))
     if err != nil {
         cli.StopForError(err)
     }
@@ -206,7 +206,7 @@ func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {

 func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
     req := types.ToRequestBeginBlock(params)
-    res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.FailFast(true))
+    res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.WaitForReady(true))
     if err != nil {
         cli.StopForError(err)
     }
@@ -215,7 +215,7 @@ func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {

 func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
     req := types.ToRequestEndBlock(params)
-    res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.FailFast(true))
+    res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.WaitForReady(true))
     if err != nil {
         cli.StopForError(err)
     }
@@ -107,7 +107,7 @@ func testStream(t *testing.T, app types.Application) {
 //-------------------------
 // test grpc

-func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
+func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
     return cmn.Connect(addr)
 }
@@ -123,7 +123,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
     defer server.Stop()

     // Connect to the socket
-    conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
+    conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
     if err != nil {
         t.Fatalf("Error dialing GRPC server: %v", err.Error())
     }
@@ -141,9 +141,9 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
 // AddPeer implements Reactor by sending our state to peer.
 func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
     msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
-    if !peer.Send(BlockchainChannel, msgBytes) {
-        // doing nothing, will try later in `poolRoutine`
-    }
+    peer.Send(BlockchainChannel, msgBytes)
+    // it's OK if send fails. will try later in poolRoutine

     // peer is added to the pool once we receive the first
     // bcStatusResponseMessage from the peer and call pool.SetPeerHeight
 }
@@ -191,18 +191,13 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)

     switch msg := msg.(type) {
     case *bcBlockRequestMessage:
-        if queued := bcR.respondToPeer(msg, src); !queued {
-            // Unfortunately not queued since the queue is full.
-        }
+        bcR.respondToPeer(msg, src)
     case *bcBlockResponseMessage:
         bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
     case *bcStatusRequestMessage:
         // Send peer our state.
         msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
-        queued := src.TrySend(BlockchainChannel, msgBytes)
-        if !queued {
-            // sorry
-        }
+        src.TrySend(BlockchainChannel, msgBytes)
     case *bcStatusResponseMessage:
         // Got a peer status. Unverified.
         bcR.pool.SetPeerHeight(src.ID(), msg.Height)
@@ -274,9 +269,10 @@ FOR_LOOP:
             conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
             if ok {
                 conR.SwitchToConsensus(state, blocksSynced)
-            } else {
-                // should only happen during testing
-            }
+            }
+            // else {
+            // should only happen during testing
+            // }

             break FOR_LOOP
         }
@@ -169,9 +169,9 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
 // AddPeer implements Reactor by sending our state to peer.
 func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
     msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
-    if !peer.Send(BlockchainChannel, msgBytes) {
-        // doing nothing, will try later in `poolRoutine`
-    }
+    peer.Send(BlockchainChannel, msgBytes)
+    // it's OK if send fails. will try later in poolRoutine

     // peer is added to the pool once we receive the first
     // bcStatusResponseMessage from the peer and call pool.updatePeer()
 }
@@ -381,10 +381,11 @@ ForLoop:
                     err: msg.data.err,
                 },
             })
-        } else {
-            // For slow peers, or errors due to blocks received from wrong peer
-            // the FSM had already removed the peers
-        }
+        }
+        // else {
+        // For slow peers, or errors due to blocks received from wrong peer
+        // the FSM had already removed the peers
+        // }
     default:
         bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
     }
@@ -465,9 +466,10 @@ func (bcR *BlockchainReactor) switchToConsensus() {
     if ok {
         conR.SwitchToConsensus(bcR.state, bcR.blocksSynced)
         bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
-    } else {
-        // Should only happen during testing.
-    }
+    }
+    // else {
+    // Should only happen during testing.
+    // }
 }

 // Implements bcRNotifier
@@ -155,12 +155,14 @@ func TestMempoolRmBadTx(t *testing.T) {
     // and the tx should get removed from the pool
     err := assertMempool(cs.txNotifier).CheckTx(txBytes, func(r *abci.Response) {
         if r.GetCheckTx().Code != code.CodeTypeBadNonce {
-            t.Fatalf("expected checktx to return bad nonce, got %v", r)
+            t.Errorf("expected checktx to return bad nonce, got %v", r)
+            return
         }
         checkTxRespCh <- struct{}{}
     })
     if err != nil {
-        t.Fatalf("Error after CheckTx: %v", err)
+        t.Errorf("Error after CheckTx: %v", err)
+        return
     }

     // check for the tx
@@ -180,7 +182,8 @@ func TestMempoolRmBadTx(t *testing.T) {
     case <-checkTxRespCh:
         // success
     case <-ticker:
-        t.Fatalf("Timed out waiting for tx to return")
+        t.Errorf("Timed out waiting for tx to return")
+        return
     }

     // Wait until the tx is removed
@@ -189,7 +192,8 @@ func TestMempoolRmBadTx(t *testing.T) {
     case <-emptyMempoolCh:
         // success
     case <-ticker:
-        t.Fatalf("Timed out waiting for tx to be removed")
+        t.Errorf("Timed out waiting for tx to be removed")
+        return
     }
 }
@@ -235,7 +235,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {

     // send a tx
     if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil); err != nil {
-        //t.Fatal(err)
+        t.Error(err)
     }

     // wait till everyone makes the first new block
@@ -690,13 +690,13 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
             cs.statsMsgQueue <- mi
         }

-        if err == ErrAddingVote {
-            // TODO: punish peer
-            // We probably don't want to stop the peer here. The vote does not
-            // necessarily comes from a malicious peer but can be just broadcasted by
-            // a typical peer.
-            // https://github.com/tendermint/tendermint/issues/1281
-        }
+        // if err == ErrAddingVote {
+        // TODO: punish peer
+        // We probably don't want to stop the peer here. The vote does not
+        // necessarily comes from a malicious peer but can be just broadcasted by
+        // a typical peer.
+        // https://github.com/tendermint/tendermint/issues/1281
+        // }

         // NOTE: the vote is broadcast to peers by the reactor listening
         // for vote events
@@ -709,7 +709,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
         return
     }

-    if err != nil {
+    if err != nil { // nolint:staticcheck
         // Causes TestReactorValidatorSetChanges to timeout
         // https://github.com/tendermint/tendermint/issues/3406
         // cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round,
@@ -1227,9 +1227,10 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
             cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
             cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent())
             cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState)
-        } else {
-            // We just need to keep waiting.
-        }
+        }
+        // else {
+        // We just need to keep waiting.
+        // }
     }
 }
@@ -621,8 +621,6 @@ func TestStateLockPOLUnlock(t *testing.T) {
     // the proposed block should now be locked and our precommit added
     validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)

-    rs = cs1.GetRoundState()
-
     // add precommits from the rest
     signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4)
     signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
@@ -1317,8 +1315,6 @@ func TestStartNextHeightCorrectly(t *testing.T) {
     // the proposed block should now be locked and our precommit added
     validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)

-    rs = cs1.GetRoundState()
-
     // add precommits
     signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
     signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
@@ -1370,8 +1366,6 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
     ensurePrecommit(voteCh, height, round)
     validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)

-    rs = cs1.GetRoundState()
-
     // add precommits
     signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
    signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
go.sum
@@ -84,6 +84,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 h1:Cto4X6SVMWRPBkJ/3YHn1iDGDGc/Z+sW+AEMKHMVvN4=
@@ -61,9 +61,10 @@ func (trs *TaskResultSet) Reap() *TaskResultSet {
                 TaskResult: result,
                 OK:         true,
             }
-        } else {
-            // We already wrote it.
-        }
+        }
+        // else {
+        // We already wrote it.
+        // }
     default:
         // Do nothing.
     }
@@ -83,9 +84,10 @@ func (trs *TaskResultSet) Wait() *TaskResultSet {
                 TaskResult: result,
                 OK:         true,
             }
-        } else {
-            // We already wrote it.
-        }
+        }
+        // else {
+        // We already wrote it.
+        // }
     }
     return trs
 }
@@ -40,9 +40,10 @@ func TestParallel(t *testing.T) {
         } else if !assert.Equal(t, -1*i, taskResult.Value.(int)) {
             assert.Fail(t, "Task should have returned %v but got %v", -1*i, taskResult.Value.(int))
             failedTasks++
-        } else {
-            // Good!
-        }
+        }
+        // else {
+        // Good!
+        // }
     }
     assert.Equal(t, failedTasks, 0, "No task should have failed")
     assert.Nil(t, trs.FirstError(), "There should be no errors")
@@ -273,11 +273,11 @@ func TestResubscribe(t *testing.T) {
     defer s.Stop()

     ctx := context.Background()
-    subscription, err := s.Subscribe(ctx, clientID, query.Empty{})
+    _, err := s.Subscribe(ctx, clientID, query.Empty{})
     require.NoError(t, err)
     err = s.Unsubscribe(ctx, clientID, query.Empty{})
     require.NoError(t, err)
-    subscription, err = s.Subscribe(ctx, clientID, query.Empty{})
+    subscription, err := s.Subscribe(ctx, clientID, query.Empty{})
     require.NoError(t, err)

     err = s.Publish(ctx, "Cable")
@@ -143,13 +143,13 @@ func TestTxProofs(t *testing.T) {

     // First let's make sure a bogus transaction hash returns a valid non-existence proof.
     key := types.Tx([]byte("bogus")).Hash()
-    res, err := cl.Tx(key, true)
+    _, err = cl.Tx(key, true)
     require.NotNil(err)
     require.Contains(err.Error(), "not found")

     // Now let's check with the real tx root hash.
     key = types.Tx(tx).Hash()
-    res, err = cl.Tx(key, true)
+    res, err := cl.Tx(key, true)
     require.NoError(err, "%#v", err)
     require.NotNil(res)
     keyHash := merkle.SimpleHashFromByteSlices([][]byte{key})
@@ -250,11 +250,11 @@ func (mem *CListMempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), t
         // so we only record the sender for txs still in the mempool.
         if e, ok := mem.txsMap.Load(txKey(tx)); ok {
             memTx := e.(*clist.CElement).Value.(*mempoolTx)
-            if _, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true); loaded {
-                // TODO: consider punishing peer for dups,
-                // its non-trivial since invalid txs can become valid,
-                // but they can spam the same tx with little cost to them atm.
-            }
+            memTx.senders.LoadOrStore(txInfo.SenderID, true)
+            // TODO: consider punishing peer for dups,
+            // its non-trivial since invalid txs can become valid,
+            // but they can spam the same tx with little cost to them atm.
+
         }

         return ErrTxInCache
@@ -57,7 +57,8 @@ func TestMConnectionSendFlushStop(t *testing.T) {
         msgB := make([]byte, aminoMsgLength)
         _, err := server.Read(msgB)
         if err != nil {
-            t.Fatal(err)
+            t.Error(err)
+            return
         }
         errCh <- err
     }()
@@ -192,7 +192,8 @@ func writeLots(t *testing.T, wg *sync.WaitGroup, conn net.Conn, txt string, n in
     for i := 0; i < n; i++ {
         _, err := conn.Write([]byte(txt))
         if err != nil {
-            t.Fatalf("Failed to write to fooSecConn: %v", err)
+            t.Errorf("Failed to write to fooSecConn: %v", err)
+            return
         }
     }
 }
@@ -408,7 +409,8 @@ func BenchmarkWriteSecretConnection(b *testing.B) {
             if err == io.EOF {
                 return
             } else if err != nil {
-                b.Fatalf("Failed to read from barSecConn: %v", err)
+                b.Errorf("Failed to read from barSecConn: %v", err)
+                return
             }
         }
     }()
@@ -418,7 +420,8 @@ func BenchmarkWriteSecretConnection(b *testing.B) {
         idx := cmn.RandIntn(len(fooWriteBytes))
         _, err := fooSecConn.Write(fooWriteBytes[idx])
         if err != nil {
-            b.Fatalf("Failed to write to fooSecConn: %v", err)
+            b.Errorf("Failed to write to fooSecConn: %v", err)
+            return
         }
     }
     b.StopTimer()
@@ -451,7 +454,8 @@ func BenchmarkReadSecretConnection(b *testing.B) {
             idx := cmn.RandIntn(len(fooWriteBytes))
             _, err := fooSecConn.Write(fooWriteBytes[idx])
             if err != nil {
-                b.Fatalf("Failed to write to fooSecConn: %v, %v,%v", err, i, b.N)
+                b.Errorf("Failed to write to fooSecConn: %v, %v,%v", err, i, b.N)
+                return
             }
         }
     }()
|
@ -16,7 +16,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
@@ -348,7 +348,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
 }

 func TestSwitchStopPeerForError(t *testing.T) {
-    s := httptest.NewServer(stdprometheus.UninstrumentedHandler())
+    s := httptest.NewServer(promhttp.Handler())
     defer s.Close()

     scrapeMetrics := func() string {
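For reference, a self-contained sketch of the replacement made above: prometheus.UninstrumentedHandler is deprecated, and promhttp.Handler() serves the default registry instead (the scrape below is illustrative only, not from the test):

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "net/http/httptest"

    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    // Before: httptest.NewServer(stdprometheus.UninstrumentedHandler())
    s := httptest.NewServer(promhttp.Handler())
    defer s.Close()

    resp, err := http.Get(s.URL) // scrape the default-registry metrics once
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := ioutil.ReadAll(resp.Body)
    fmt.Println(len(body) > 0) // expect non-empty metrics output
}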
@@ -331,9 +331,10 @@ func TestErrUnexpectedResponse(t *testing.T) {
     // we do not want to Start() the remote signer here and instead use the connection to
     // reply with intentionally wrong replies below:
     rsConn, err := serviceEndpoint.connect()
-    defer rsConn.Close()
     require.NoError(t, err)
     require.NotNil(t, rsConn)
+    defer rsConn.Close()

     // send over public key to get the remote signer running:
     go testReadWriteResponse(t, &PubKeyResponse{}, rsConn)
     <-readyCh
@@ -2,8 +2,8 @@ package core_grpc

 import (
     "net"
-    "time"

+    "golang.org/x/net/context"
     "google.golang.org/grpc"

     cmn "github.com/tendermint/tendermint/libs/common"
@@ -26,13 +26,13 @@ func StartGRPCServer(ln net.Listener) error {
 // StartGRPCClient dials the gRPC server using protoAddr and returns a new
 // BroadcastAPIClient.
 func StartGRPCClient(protoAddr string) BroadcastAPIClient {
-    conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
+    conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
     if err != nil {
         panic(err)
     }
     return NewBroadcastAPIClient(conn)
 }

-func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
+func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
     return cmn.Connect(addr)
 }
@@ -369,10 +369,11 @@ func (c *WSClient) writeRoutine() {

     defer func() {
         ticker.Stop()
-        if err := c.conn.Close(); err != nil {
-            // ignore error; it will trigger in tests
-            // likely because it's closing an already closed connection
-        }
+        c.conn.Close()
+        // err != nil {
+        // ignore error; it will trigger in tests
+        // likely because it's closing an already closed connection
+        // }
         c.wg.Done()
     }()
@@ -421,10 +422,11 @@ func (c *WSClient) writeRoutine() {
 // executing all reads from this goroutine.
 func (c *WSClient) readRoutine() {
     defer func() {
-        if err := c.conn.Close(); err != nil {
-            // ignore error; it will trigger in tests
-            // likely because it's closing an already closed connection
-        }
+        c.conn.Close()
+        // err != nil {
+        // ignore error; it will trigger in tests
+        // likely because it's closing an already closed connection
+        // }
         c.wg.Done()
     }()
@@ -212,7 +212,8 @@ func callWgDoneOnResult(t *testing.T, c *WSClient, wg *sync.WaitGroup) {
     select {
     case resp := <-c.ResponsesCh:
         if resp.Error != nil {
-            t.Fatalf("unexpected error: %v", resp.Error)
+            t.Errorf("unexpected error: %v", resp.Error)
+            return
         }
         if resp.Result != nil {
             wg.Done()
@@ -185,11 +185,11 @@ func TestValidatorSimpleSaveLoad(t *testing.T) {
     assert := assert.New(t)

     // Can't load anything for height 0.
-    v, err := sm.LoadValidators(stateDB, 0)
+    _, err := sm.LoadValidators(stateDB, 0)
     assert.IsType(sm.ErrNoValSetForHeight{}, err, "expected err at height 0")

     // Should be able to load for height 1.
-    v, err = sm.LoadValidators(stateDB, 1)
+    v, err := sm.LoadValidators(stateDB, 1)
     assert.Nil(err, "expected no err at height 1")
     assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match")
@@ -54,7 +54,7 @@ func (c *RpcClient) Call(method string, params map[string]interface{}, result in
     }

     rv, rt := reflect.ValueOf(result), reflect.TypeOf(result)
-    rv, rt = rv.Elem(), rt.Elem()
+    rv, _ = rv.Elem(), rt.Elem()
     rv.Set(reflect.ValueOf(s))

     return s, nil
@@ -68,7 +68,7 @@ func TestGenesisGood(t *testing.T) {
     genDoc.ConsensusParams.Block.MaxBytes = 0
     genDocBytes, err = cdc.MarshalJSON(genDoc)
     assert.NoError(t, err, "error marshalling genDoc")
-    genDoc, err = GenesisDocFromJSON(genDocBytes)
+    _, err = GenesisDocFromJSON(genDocBytes)
     assert.Error(t, err, "expected error for genDoc json with block size of 0")

     // Genesis doc from raw json
@@ -619,10 +619,11 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i
         // Good precommit!
         if blockID.Equals(precommit.BlockID) {
             talliedVotingPower += val.VotingPower
-        } else {
-            // It's OK that the BlockID doesn't match. We include stray
-            // precommits to measure validator availability.
-        }
+        }
+        // else {
+        // It's OK that the BlockID doesn't match. We include stray
+        // precommits to measure validator availability.
+        // }
     }

     if talliedVotingPower > vals.TotalVotingPower()*2/3 {
@@ -703,10 +704,11 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin
         // Good precommit!
         if blockID.Equals(precommit.BlockID) {
             oldVotingPower += val.VotingPower
-        } else {
-            // It's OK that the BlockID doesn't match. We include stray
-            // precommits to measure validator availability.
-        }
+        }
+        // else {
+        // It's OK that the BlockID doesn't match. We include stray
+        // precommits to measure validator availability.
+        // }
     }

     if oldVotingPower <= oldVals.TotalVotingPower()*2/3 {