remove or comment out unused code

Anton Kaliaev 2019-02-06 15:16:38 +04:00
parent da33dd04cc
commit ffd3bf8448
No known key found for this signature in database
GPG Key ID: 7B6881D965918214
10 changed files with 163 additions and 185 deletions

View File

@@ -26,8 +26,6 @@ linters:
- stylecheck
- deadcode
- prealloc
- unused
- gosimple
# linters-settings:
# govet:
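
For context: assuming this is the golangci-lint disable list (consistent with the commit title), dropping `unused` and `gosimple` here turns both checks on, which is what forces the dead code below to be removed or commented out. A minimal, hypothetical sketch of the kind of code each check reports:

```go
package example

// debugDump is never referenced anywhere in its package, so the `unused`
// check reports it once the linter runs.
func debugDump(xs []int) int {
	total := 0
	for _, x := range xs {
		total += x
	}
	return total
}

// hasItems is written in a form `gosimple` flags: the if/else can be
// collapsed to `return len(xs) > 0`.
func hasItems(xs []int) bool {
	if len(xs) > 0 {
		return true
	}
	return false
}
```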

View File

@@ -363,23 +363,23 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
pool.errorsCh <- peerError{err, peerID}
}
// unused by tendermint; left for debugging purposes
func (pool *BlockPool) debug() string {
pool.mtx.Lock()
defer pool.mtx.Unlock()
// for debugging purposes
// func (pool *BlockPool) debug() string {
// pool.mtx.Lock()
// defer pool.mtx.Unlock()
str := ""
nextHeight := pool.height + pool.requestersLen()
for h := pool.height; h < nextHeight; h++ {
if pool.requesters[h] == nil {
str += fmt.Sprintf("H(%v):X ", h)
} else {
str += fmt.Sprintf("H(%v):", h)
str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil)
}
}
return str
}
// str := ""
// nextHeight := pool.height + pool.requestersLen()
// for h := pool.height; h < nextHeight; h++ {
// if pool.requesters[h] == nil {
// str += fmt.Sprintf("H(%v):X ", h)
// } else {
// str += fmt.Sprintf("H(%v):", h)
// str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil)
// }
// }
// return str
// }
//-------------------------------------
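
Commenting the helper out keeps it greppable while satisfying the now-enabled `unused` check. A hypothetical alternative, if the helper is worth keeping compiled, is a lint directive; a minimal sketch with stand-in types, assuming golangci-lint's `//nolint:unused` directive suppresses the report at this position:

```go
package blockpool

import (
	"fmt"
	"sync"
)

// pool is a stand-in for the real BlockPool, just to keep the sketch self-contained.
type pool struct {
	mtx    sync.Mutex
	blocks map[int64][]byte
}

// debug is only called by hand while debugging; the directive keeps the
// `unused` check quiet without commenting the code out.
//
//nolint:unused
func (p *pool) debug() string {
	p.mtx.Lock()
	defer p.mtx.Unlock()

	str := ""
	for h, b := range p.blocks {
		str += fmt.Sprintf("H(%v):B?(%v) ", h, b != nil)
	}
	return str
}
```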

View File

@@ -378,35 +378,35 @@ func ensureNewEvent(
}
}
func ensureNewRoundStep(stepCh <-chan interface{}, height int64, round int) {
ensureNewEvent(
stepCh,
height,
round,
ensureTimeout,
"Timeout expired while waiting for NewStep event")
}
// func ensureNewRoundStep(stepCh <-chan interface{}, height int64, round int) {
// ensureNewEvent(
// stepCh,
// height,
// round,
// ensureTimeout,
// "Timeout expired while waiting for NewStep event")
// }
func ensureNewVote(voteCh <-chan interface{}, height int64, round int) {
select {
case <-time.After(ensureTimeout):
break
case v := <-voteCh:
edv, ok := v.(types.EventDataVote)
if !ok {
panic(fmt.Sprintf("expected a *types.Vote, "+
"got %v. wrong subscription channel?",
reflect.TypeOf(v)))
}
vote := edv.Vote
if vote.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height))
}
if vote.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round))
}
}
}
// func ensureNewVote(voteCh <-chan interface{}, height int64, round int) {
// select {
// case <-time.After(ensureTimeout):
// break
// case v := <-voteCh:
// edv, ok := v.(types.EventDataVote)
// if !ok {
// panic(fmt.Sprintf("expected a *types.Vote, "+
// "got %v. wrong subscription channel?",
// reflect.TypeOf(v)))
// }
// vote := edv.Vote
// if vote.Height != height {
// panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height))
// }
// if vote.Round != round {
// panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round))
// }
// }
// }
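
All of the helpers being commented out here follow one pattern: receive a single event from a subscription channel or fail after a timeout. A minimal, self-contained sketch of that pattern with a hypothetical event type (it panics on timeout, like ensureNewEvent, whereas ensureNewVote silently gave up):

```go
package main

import (
	"fmt"
	"time"
)

type voteEvent struct {
	Height int64
	Round  int
}

// ensureEvent waits up to timeout for one event on ch and checks it against
// the expected height and round, mirroring the commented-out helpers.
func ensureEvent(ch <-chan voteEvent, height int64, round int, timeout time.Duration) {
	select {
	case <-time.After(timeout):
		panic("timeout expired while waiting for event")
	case ev := <-ch:
		if ev.Height != height || ev.Round != round {
			panic(fmt.Sprintf("expected height/round %v/%v, got %v/%v",
				height, round, ev.Height, ev.Round))
		}
	}
}

func main() {
	ch := make(chan voteEvent, 1)
	ch <- voteEvent{Height: 1, Round: 0}
	ensureEvent(ch, 1, 0, time.Second)
	fmt.Println("got the expected event")
}
```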
func ensureNewRound(roundCh <-chan interface{}, height int64, round int) {
select {

View File

@@ -22,10 +22,6 @@ func init() {
config = ResetConfig("consensus_state_test")
}
func ensureProposeTimeout(timeoutPropose time.Duration) time.Duration {
return time.Duration(timeoutPropose.Nanoseconds()*2) * time.Nanosecond
}
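
The deleted helper doubled the propose timeout by round-tripping through Nanoseconds(); if such a helper is ever needed again, multiplying the Duration directly is equivalent and simpler. A minimal, runnable sketch:

```go
package main

import (
	"fmt"
	"time"
)

// ensureProposeTimeout doubles the configured propose timeout. Multiplying a
// time.Duration by an untyped constant yields the same value as the removed
// Nanoseconds()*2 round-trip.
func ensureProposeTimeout(timeoutPropose time.Duration) time.Duration {
	return 2 * timeoutPropose
}

func main() {
	fmt.Println(ensureProposeTimeout(3 * time.Second)) // prints 6s
}
```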
/*
ProposeSuite

View File

@@ -26,17 +26,17 @@ func NewDominoOp(key, input, output string) DominoOp {
}
}
func DominoOpDecoder(pop ProofOp) (ProofOperator, error) {
if pop.Type != ProofOpDomino {
panic("unexpected proof op type")
}
var op DominoOp // a bit strange as we'll discard this, but it works.
err := amino.UnmarshalBinaryLengthPrefixed(pop.Data, &op)
if err != nil {
return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp")
}
return NewDominoOp(string(pop.Key), op.Input, op.Output), nil
}
// func DominoOpDecoder(pop ProofOp) (ProofOperator, error) {
// if pop.Type != ProofOpDomino {
// panic("unexpected proof op type")
// }
// var op DominoOp // a bit strange as we'll discard this, but it works.
// err := amino.UnmarshalBinaryLengthPrefixed(pop.Data, &op)
// if err != nil {
// return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp")
// }
// return NewDominoOp(string(pop.Key), op.Input, op.Output), nil
// }
func (dop DominoOp) ProofOp() ProofOp {
bz := amino.MustMarshalBinaryLengthPrefixed(dop)

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -21,7 +20,7 @@ import (
var node *nm.Node
var chainID = "tendermint_test" // TODO use from config.
var waitForEventTimeout = 5 * time.Second
// var waitForEventTimeout = 5 * time.Second
// TODO fix tests!!
@@ -42,83 +41,83 @@ func kvstoreTx(k, v []byte) []byte {
// TODO: enable it after general proof format has been adapted
// in abci/examples/kvstore.go
func _TestAppProofs(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// func TestAppProofs(t *testing.T) {
// assert, require := assert.New(t), require.New(t)
prt := defaultProofRuntime()
cl := client.NewLocal(node)
client.WaitForHeight(cl, 1, nil)
// prt := defaultProofRuntime()
// cl := client.NewLocal(node)
// client.WaitForHeight(cl, 1, nil)
// This sets up our trust on the node based on some past point.
source := certclient.NewProvider(chainID, cl)
seed, err := source.LatestFullCommit(chainID, 1, 1)
require.NoError(err, "%#v", err)
cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
// // This sets up our trust on the node based on some past point.
// source := certclient.NewProvider(chainID, cl)
// seed, err := source.LatestFullCommit(chainID, 1, 1)
// require.NoError(err, "%#v", err)
// cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
// Wait for tx confirmation.
done := make(chan int64)
go func() {
evtTyp := types.EventTx
_, err = client.WaitForOneEvent(cl, evtTyp, waitForEventTimeout)
require.Nil(err, "%#v", err)
close(done)
}()
// // Wait for tx confirmation.
// done := make(chan int64)
// go func() {
// evtTyp := types.EventTx
// _, err = client.WaitForOneEvent(cl, evtTyp, waitForEventTimeout)
// require.Nil(err, "%#v", err)
// close(done)
// }()
// Submit a transaction.
k := []byte("my-key")
v := []byte("my-value")
tx := kvstoreTx(k, v)
br, err := cl.BroadcastTxCommit(tx)
require.NoError(err, "%#v", err)
require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx)
require.EqualValues(0, br.DeliverTx.Code)
brh := br.Height
// // Submit a transaction.
// k := []byte("my-key")
// v := []byte("my-value")
// tx := kvstoreTx(k, v)
// br, err := cl.BroadcastTxCommit(tx)
// require.NoError(err, "%#v", err)
// require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx)
// require.EqualValues(0, br.DeliverTx.Code)
// brh := br.Height
// Fetch latest after tx commit.
<-done
latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1)
require.NoError(err, "%#v", err)
rootHash := latest.SignedHeader.AppHash
if rootHash == nil {
// Fetch one block later, AppHash hasn't been committed yet.
// TODO find a way to avoid doing this.
client.WaitForHeight(cl, latest.SignedHeader.Height+1, nil)
latest, err = source.LatestFullCommit(chainID, latest.SignedHeader.Height+1, 1<<63-1)
require.NoError(err, "%#v", err)
rootHash = latest.SignedHeader.AppHash
}
require.NotNil(rootHash)
// // Fetch latest after tx commit.
// <-done
// latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1)
// require.NoError(err, "%#v", err)
// rootHash := latest.SignedHeader.AppHash
// if rootHash == nil {
// // Fetch one block later, AppHash hasn't been committed yet.
// // TODO find a way to avoid doing this.
// client.WaitForHeight(cl, latest.SignedHeader.Height+1, nil)
// latest, err = source.LatestFullCommit(chainID, latest.SignedHeader.Height+1, 1<<63-1)
// require.NoError(err, "%#v", err)
// rootHash = latest.SignedHeader.AppHash
// }
// require.NotNil(rootHash)
// verify a query before the tx block has no data (and valid non-exist proof)
bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert)
require.NoError(err, "%#v", err)
// // verify a query before the tx block has no data (and valid non-exist proof)
// bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert)
// require.NoError(err, "%#v", err)
// // require.NotNil(proof)
// // TODO: Ensure that *some* keys will be there, ensuring that proof is nil,
// // (currently there's a race condition)
// // and ensure that proof proves absence of k.
// require.Nil(bs)
// // but given that block it is good
// bs, height, proof, err = GetWithProof(prt, k, brh, cl, cert)
// require.NoError(err, "%#v", err)
// require.NotNil(proof)
// TODO: Ensure that *some* keys will be there, ensuring that proof is nil,
// (currently there's a race condition)
// and ensure that proof proves absence of k.
require.Nil(bs)
// require.Equal(height, brh)
// but given that block it is good
bs, height, proof, err = GetWithProof(prt, k, brh, cl, cert)
require.NoError(err, "%#v", err)
require.NotNil(proof)
require.Equal(height, brh)
// assert.EqualValues(v, bs)
// err = prt.VerifyValue(proof, rootHash, string(k), bs) // XXX key encoding
// assert.NoError(err, "%#v", err)
assert.EqualValues(v, bs)
err = prt.VerifyValue(proof, rootHash, string(k), bs) // XXX key encoding
assert.NoError(err, "%#v", err)
// Test non-existing key.
missing := []byte("my-missing-key")
bs, _, proof, err = GetWithProof(prt, missing, 0, cl, cert)
require.NoError(err)
require.Nil(bs)
require.NotNil(proof)
err = prt.VerifyAbsence(proof, rootHash, string(missing)) // XXX VerifyAbsence(), keyencoding
assert.NoError(err, "%#v", err)
err = prt.VerifyAbsence(proof, rootHash, string(k)) // XXX VerifyAbsence(), keyencoding
assert.Error(err, "%#v", err)
}
// // Test non-existing key.
// missing := []byte("my-missing-key")
// bs, _, proof, err = GetWithProof(prt, missing, 0, cl, cert)
// require.NoError(err)
// require.Nil(bs)
// require.NotNil(proof)
// err = prt.VerifyAbsence(proof, rootHash, string(missing)) // XXX VerifyAbsence(), keyencoding
// assert.NoError(err, "%#v", err)
// err = prt.VerifyAbsence(proof, rootHash, string(k)) // XXX VerifyAbsence(), keyencoding
// assert.Error(err, "%#v", err)
// }
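
The test was already disabled by the underscore prefix (`_TestAppProofs`), which is exactly the kind of uncalled unexported function the newly enabled `unused` check reports — hence commenting the body out wholesale. An alternative that keeps a disabled test compiling and lint-clean is to skip it at runtime; a minimal sketch (hypothetical, with the body elided):

```go
package proxy

import "testing"

func TestAppProofs(t *testing.T) {
	// A properly named Test* function is a root for the `unused` check, so
	// nothing is reported, and the body keeps compiling instead of rotting
	// inside comments.
	t.Skip("disabled until the general proof format is adapted in abci/examples/kvstore.go")

	// ... original proof-verification steps would go here ...
}
```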
func TestTxProofs(t *testing.T) {
assert, require := assert.New(t), require.New(t)

View File

@@ -398,12 +398,3 @@ func BenchmarkSecretConnection(b *testing.B) {
}
//barSecConn.Close() race condition
}
func fingerprint(bz []byte) []byte {
const fbsize = 40
if len(bz) < fbsize {
return bz
} else {
return bz[:fbsize]
}
}
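
The deleted fingerprint helper was simply unused in this package. For reference, if it were ever reinstated, an early return reads a little cleaner than the if/else the old version carried; a minimal, runnable sketch:

```go
package main

import "fmt"

// fingerprint returns at most the first 40 bytes of bz — enough to identify
// a payload in log output without dumping all of it.
func fingerprint(bz []byte) []byte {
	const fbsize = 40
	if len(bz) < fbsize {
		return bz
	}
	return bz[:fbsize]
}

func main() {
	fmt.Printf("%x\n", fingerprint([]byte("a short payload")))
}
```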

View File

@@ -480,14 +480,12 @@ func (sw *Switch) acceptRoutine() {
metrics: sw.metrics,
})
if err != nil {
switch err.(type) {
switch err := err.(type) {
case ErrRejected:
rErr := err.(ErrRejected)
if rErr.IsSelf() {
if err.IsSelf() {
// Remove the given address from the address book and add to our addresses
// to avoid dialing in the future.
addr := rErr.Addr()
addr := err.Addr()
sw.addrBook.RemoveAddress(&addr)
sw.addrBook.AddOurAddress(&addr)
}
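
This hunk is the one genuine rewrite rather than a deletion: binding the value in the type switch (`switch err := err.(type)`) removes the separate `err.(ErrRejected)` assertion, the kind of pattern gosimple's simplification checks flag. A minimal, self-contained sketch of the before/after with a hypothetical error type:

```go
package main

import "fmt"

type ErrRejected struct{ self bool }

func (e ErrRejected) Error() string { return "connection rejected" }
func (e ErrRejected) IsSelf() bool  { return e.self }

func handleOld(err error) {
	// Before: switch on the type only, then re-assert to get the value.
	switch err.(type) {
	case ErrRejected:
		rErr := err.(ErrRejected)
		fmt.Println("self?", rErr.IsSelf())
	}
}

func handleNew(err error) {
	// After: the type switch binds the concrete value directly.
	switch err := err.(type) {
	case ErrRejected:
		fmt.Println("self?", err.IsSelf())
	}
}

func main() {
	handleOld(ErrRejected{self: true})
	handleNew(ErrRejected{self: true})
}
```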

View File

@@ -65,44 +65,44 @@ func TestTrustMetricCopyNilPointer(t *testing.T) {
}
// XXX: This test fails non-deterministically
func _TestTrustMetricStopPause(t *testing.T) {
// The TestTicker will provide manual control over
// the passing of time within the metric
tt := NewTestTicker()
tm := NewMetric()
tm.SetTicker(tt)
tm.Start()
// Allow some time intervals to pass and pause
tt.NextTick()
tt.NextTick()
tm.Pause()
// func _TestTrustMetricStopPause(t *testing.T) {
// // The TestTicker will provide manual control over
// // the passing of time within the metric
// tt := NewTestTicker()
// tm := NewMetric()
// tm.SetTicker(tt)
// tm.Start()
// // Allow some time intervals to pass and pause
// tt.NextTick()
// tt.NextTick()
// tm.Pause()
// could be 1 or 2 because Pause and NextTick race
first := tm.Copy().numIntervals
// // could be 1 or 2 because Pause and NextTick race
// first := tm.Copy().numIntervals
// Allow more time to pass and check the intervals are unchanged
tt.NextTick()
tt.NextTick()
assert.Equal(t, first, tm.Copy().numIntervals)
// // Allow more time to pass and check the intervals are unchanged
// tt.NextTick()
// tt.NextTick()
// assert.Equal(t, first, tm.Copy().numIntervals)
// Get the trust metric activated again
tm.GoodEvents(5)
// Allow some time intervals to pass and stop
tt.NextTick()
tt.NextTick()
tm.Stop()
tm.Wait()
// // Get the trust metric activated again
// tm.GoodEvents(5)
// // Allow some time intervals to pass and stop
// tt.NextTick()
// tt.NextTick()
// tm.Stop()
// tm.Wait()
second := tm.Copy().numIntervals
// Allow more intervals to pass while the metric is stopped
// and check that the number of intervals match
tm.NextTimeInterval()
tm.NextTimeInterval()
// XXX: fails non-deterministically:
// expected 5, got 6
assert.Equal(t, second+2, tm.Copy().numIntervals)
// second := tm.Copy().numIntervals
// // Allow more intervals to pass while the metric is stopped
// // and check that the number of intervals match
// tm.NextTimeInterval()
// tm.NextTimeInterval()
// // XXX: fails non-deterministically:
// // expected 5, got 6
// assert.Equal(t, second+2, tm.Copy().numIntervals)
if first > second {
t.Fatalf("numIntervals should always increase or stay the same over time")
}
}
// if first > second {
// t.Fatalf("numIntervals should always increase or stay the same over time")
// }
// }

View File

@@ -938,10 +938,6 @@ func makeParams(blockBytes, blockGas, evidenceAge int64) types.ConsensusParams {
}
}
func pk() []byte {
return ed25519.GenPrivKey().PubKey().Bytes()
}
func TestApplyUpdates(t *testing.T) {
initParams := makeParams(1, 2, 3)