From f571ee8876d56d3b80a96019ba326fe8932ef116 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 7 Feb 2019 19:34:01 -0500 Subject: [PATCH 01/16] prepare v0.29.2 (#3272) * update changelog * linkify * bump version * update main changelog * final fixes * entry for wal fix * changelog preamble * remove a line --- CHANGELOG.md | 41 +++++++++++++++++++++++++++++++++++++++++ CHANGELOG_PENDING.md | 23 ++--------------------- version/version.go | 2 +- 3 files changed, 44 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 437b7970..779cb886 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,46 @@ # Changelog +## v0.29.2 + +*February 7th, 2019* + +Special thanks to external contributors on this release: +@ackratos, @rickyyangz + +**Note**: This release contains security sensitive patches in the `p2p` and +`crypto` packages: +- p2p: + - Partial fix for MITM attacks on the p2p connection. MITM conditions may + still exist. See \#3010. +- crypto: + - Eliminate our fork of `btcd` and use the `btcd/btcec` library directly for + native secp256k1 signing. Note we still modify the signature encoding to + prevent malleability. + - Support the libsecp256k1 library via CGo through the `go-ethereum/crypto/secp256k1` package. + +### BREAKING CHANGES: + +* Go API + - [types] [\#3245](https://github.com/tendermint/tendermint/issues/3245) Commit uses `type CommitSig Vote` instead of `Vote` directly. + In preparation for removing redundant fields from the commit [\#1648](https://github.com/tendermint/tendermint/issues/1648) + +### IMPROVEMENTS: +- [consensus] [\#3246](https://github.com/tendermint/tendermint/issues/3246) Better logging and notes on recovery for corrupted WAL file +- [crypto] [\#3163](https://github.com/tendermint/tendermint/issues/3163) Use ethereum's libsecp256k1 go-wrapper for signatures when cgo is available +- [crypto] [\#3162](https://github.com/tendermint/tendermint/issues/3162) Wrap btcd instead of forking it to keep up with fixes (used if cgo is not available) +- [makefile] [\#3233](https://github.com/tendermint/tendermint/issues/3233) Use golangci-lint instead of go-metalinter +- [tools] [\#3218](https://github.com/tendermint/tendermint/issues/3218) Add go-deadlock tool to help detect deadlocks +- [tools] [\#3106](https://github.com/tendermint/tendermint/issues/3106) Add tm-signer-harness test harness for remote signers +- [tests] [\#3258](https://github.com/tendermint/tendermint/issues/3258) Fixed a bunch of non-deterministic test failures + +### BUG FIXES: +- [node] [\#3186](https://github.com/tendermint/tendermint/issues/3186) EventBus and indexerService should be started before first block (for replay last block on handshake) execution (@ackratos) +- [p2p] [\#3232](https://github.com/tendermint/tendermint/issues/3232) Fix infinite loop leading to addrbook deadlock for seed nodes +- [p2p] [\#3247](https://github.com/tendermint/tendermint/issues/3247) Fix panic in SeedMode when calling FlushStop and OnStop + concurrently +- [p2p] [\#3040](https://github.com/tendermint/tendermint/issues/3040) Fix MITM on secret connection by checking low-order points +- [privval] [\#3258](https://github.com/tendermint/tendermint/issues/3258) Fix race between sign requests and ping requests in socket + ## v0.29.1 *January 24, 2019* diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 2c70ee5a..4548eb1e 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,32 +1,13 @@ -## v0.30.0 +## v0.30 -*TBD* +** Special thanks to external contributors on this 
release: ### BREAKING CHANGES: -* CLI/RPC/Config - -* Apps - -* Go API - - [types] \#3245 Commit uses `type CommitSig Vote` instead of `Vote` directly. - -* Blockchain Protocol - -* P2P Protocol - ### FEATURES: ### IMPROVEMENTS: -- [tools] Add go-deadlock tool to help detect deadlocks -- [tools] \#3106 Add tm-signer-harness test harness for remote signers -- [crypto] \#3163 Use ethereum's libsecp256k1 go-wrapper for signatures when cgo is available -- [crypto] \#3162 Wrap btcd instead of forking it to keep up with fixes (used if cgo is not available) ### BUG FIXES: -- [node] \#3186 EventBus and indexerService should be started before first block (for replay last block on handshake) execution -- [p2p] \#3232 Fix infinite loop leading to addrbook deadlock for seed nodes -- [p2p] \#3247 Fix panic in SeedMode when calling FlushStop and OnStop - concurrently diff --git a/version/version.go b/version/version.go index 86c38c03..b20223c2 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ const ( // Must be a string because scripts like dist.sh read this file. // XXX: Don't change the name of this variable or you will break // automation :) - TMCoreSemVer = "0.29.1" + TMCoreSemVer = "0.29.2" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.15.0" From ad4bd92fec3bcba8715935c2c42eb27f68f5109a Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 7 Feb 2019 19:57:30 -0500 Subject: [PATCH 02/16] secp256k1: change build tags (#3277) --- crypto/secp256k1/secp256k1_cgo.go | 2 +- crypto/secp256k1/secp256k1_nocgo.go | 2 +- crypto/secp256k1/secp256k1_nocgo_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto/secp256k1/secp256k1_cgo.go b/crypto/secp256k1/secp256k1_cgo.go index 30414d2b..3e5b1ddd 100644 --- a/crypto/secp256k1/secp256k1_cgo.go +++ b/crypto/secp256k1/secp256k1_cgo.go @@ -1,4 +1,4 @@ -// +build cgo +// +build libsecp256k1 package secp256k1 diff --git a/crypto/secp256k1/secp256k1_nocgo.go b/crypto/secp256k1/secp256k1_nocgo.go index 34b006fa..052c3d14 100644 --- a/crypto/secp256k1/secp256k1_nocgo.go +++ b/crypto/secp256k1/secp256k1_nocgo.go @@ -1,4 +1,4 @@ -// +build !cgo +// +build !libsecp256k1 package secp256k1 diff --git a/crypto/secp256k1/secp256k1_nocgo_test.go b/crypto/secp256k1/secp256k1_nocgo_test.go index 95966478..a06a0e3d 100644 --- a/crypto/secp256k1/secp256k1_nocgo_test.go +++ b/crypto/secp256k1/secp256k1_nocgo_test.go @@ -1,4 +1,4 @@ -// +build !cgo +// +build !libsecp256k1 package secp256k1 From af6e6cd350541147aed50c4e936f6ba596231027 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 7 Feb 2019 20:12:57 -0500 Subject: [PATCH 03/16] remove MixEntropy (#3278) * remove MixEntropy * changelog --- CHANGELOG.md | 3 + README.md | 1 + crypto/random.go | 104 --------------------- crypto/xsalsa20symmetric/symmetric.go | 1 - crypto/xsalsa20symmetric/symmetric_test.go | 4 - 5 files changed, 4 insertions(+), 109 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 779cb886..a0e736bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,10 +17,13 @@ Special thanks to external contributors on this release: native secp256k1 signing. Note we still modify the signature encoding to prevent malleability. - Support the libsecp256k1 library via CGo through the `go-ethereum/crypto/secp256k1` package. 
+ - Eliminate MixEntropy functions ### BREAKING CHANGES: * Go API + - [crypto] [\#3278](https://github.com/tendermint/tendermint/issues/3278) Remove + MixEntropy functions - [types] [\#3245](https://github.com/tendermint/tendermint/issues/3245) Commit uses `type CommitSig Vote` instead of `Vote` directly. In preparation for removing redundant fields from the commit [\#1648](https://github.com/tendermint/tendermint/issues/1648) diff --git a/README.md b/README.md index 601e3830..9251e3ca 100644 --- a/README.md +++ b/README.md @@ -96,6 +96,7 @@ include the in-process Go APIs. That said, breaking changes in the following packages will be documented in the CHANGELOG even if they don't lead to MINOR version bumps: +- crypto - types - rpc/client - config diff --git a/crypto/random.go b/crypto/random.go index 914c321b..275fb104 100644 --- a/crypto/random.go +++ b/crypto/random.go @@ -1,42 +1,11 @@ package crypto import ( - "crypto/cipher" crand "crypto/rand" - "crypto/sha256" "encoding/hex" "io" - "sync" - - "golang.org/x/crypto/chacha20poly1305" ) -// NOTE: This is ignored for now until we have time -// to properly review the MixEntropy function - https://github.com/tendermint/tendermint/issues/2099. -// -// The randomness here is derived from xoring a chacha20 keystream with -// output from crypto/rand's OS Entropy Reader. (Due to fears of the OS' -// entropy being backdoored) -// -// For forward secrecy of produced randomness, the internal chacha key is hashed -// and thereby rotated after each call. -var gRandInfo *randInfo - -func init() { - gRandInfo = &randInfo{} - - // TODO: uncomment after reviewing MixEntropy - - // https://github.com/tendermint/tendermint/issues/2099 - // gRandInfo.MixEntropy(randBytes(32)) // Init -} - -// WARNING: This function needs review - https://github.com/tendermint/tendermint/issues/2099. -// Mix additional bytes of randomness, e.g. from hardware, user-input, etc. -// It is OK to call it multiple times. It does not diminish security. -func MixEntropy(seedBytes []byte) { - gRandInfo.MixEntropy(seedBytes) -} - // This only uses the OS's randomness func randBytes(numBytes int) []byte { b := make([]byte, numBytes) @@ -52,19 +21,6 @@ func CRandBytes(numBytes int) []byte { return randBytes(numBytes) } -/* TODO: uncomment after reviewing MixEntropy - https://github.com/tendermint/tendermint/issues/2099 -// This uses the OS and the Seed(s). -func CRandBytes(numBytes int) []byte { - return randBytes(numBytes) - b := make([]byte, numBytes) - _, err := gRandInfo.Read(b) - if err != nil { - panic(err) - } - return b -} -*/ - // CRandHex returns a hex encoded string that's floor(numDigits/2) * 2 long. // // Note: CRandHex(24) gives 96 bits of randomness that @@ -77,63 +33,3 @@ func CRandHex(numDigits int) string { func CReader() io.Reader { return crand.Reader } - -/* TODO: uncomment after reviewing MixEntropy - https://github.com/tendermint/tendermint/issues/2099 -// Returns a crand.Reader mixed with user-supplied entropy -func CReader() io.Reader { - return gRandInfo -} -*/ - -//-------------------------------------------------------------------------------- - -type randInfo struct { - mtx sync.Mutex - seedBytes [chacha20poly1305.KeySize]byte - chacha cipher.AEAD - reader io.Reader -} - -// You can call this as many times as you'd like. 
-// XXX/TODO: review - https://github.com/tendermint/tendermint/issues/2099 -func (ri *randInfo) MixEntropy(seedBytes []byte) { - ri.mtx.Lock() - defer ri.mtx.Unlock() - // Make new ri.seedBytes using passed seedBytes and current ri.seedBytes: - // ri.seedBytes = sha256( seedBytes || ri.seedBytes ) - h := sha256.New() - h.Write(seedBytes) - h.Write(ri.seedBytes[:]) - hashBytes := h.Sum(nil) - copy(ri.seedBytes[:], hashBytes) - chacha, err := chacha20poly1305.New(ri.seedBytes[:]) - if err != nil { - panic("Initializing chacha20 failed") - } - ri.chacha = chacha - // Create new reader - ri.reader = &cipher.StreamReader{S: ri, R: crand.Reader} -} - -func (ri *randInfo) XORKeyStream(dst, src []byte) { - // nonce being 0 is safe due to never re-using a key. - emptyNonce := make([]byte, 12) - tmpDst := ri.chacha.Seal([]byte{}, emptyNonce, src, []byte{0}) - // this removes the poly1305 tag as well, since chacha is a stream cipher - // and we truncate at input length. - copy(dst, tmpDst[:len(src)]) - // hash seedBytes for forward secrecy, and initialize new chacha instance - newSeed := sha256.Sum256(ri.seedBytes[:]) - chacha, err := chacha20poly1305.New(newSeed[:]) - if err != nil { - panic("Initializing chacha20 failed") - } - ri.chacha = chacha -} - -func (ri *randInfo) Read(b []byte) (n int, err error) { - ri.mtx.Lock() - n, err = ri.reader.Read(b) - ri.mtx.Unlock() - return -} diff --git a/crypto/xsalsa20symmetric/symmetric.go b/crypto/xsalsa20symmetric/symmetric.go index 3228a935..10a0f6f3 100644 --- a/crypto/xsalsa20symmetric/symmetric.go +++ b/crypto/xsalsa20symmetric/symmetric.go @@ -17,7 +17,6 @@ const secretLen = 32 // secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase)) // The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. -// NOTE: call crypto.MixEntropy() first. 
func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { if len(secret) != secretLen { cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) diff --git a/crypto/xsalsa20symmetric/symmetric_test.go b/crypto/xsalsa20symmetric/symmetric_test.go index bca0b336..160d49a9 100644 --- a/crypto/xsalsa20symmetric/symmetric_test.go +++ b/crypto/xsalsa20symmetric/symmetric_test.go @@ -13,8 +13,6 @@ import ( func TestSimple(t *testing.T) { - crypto.MixEntropy([]byte("someentropy")) - plaintext := []byte("sometext") secret := []byte("somesecretoflengththirtytwo===32") ciphertext := EncryptSymmetric(plaintext, secret) @@ -26,8 +24,6 @@ func TestSimple(t *testing.T) { func TestSimpleWithKDF(t *testing.T) { - crypto.MixEntropy([]byte("someentropy")) - plaintext := []byte("sometext") secretPass := []byte("somesecret") secret, err := bcrypt.GenerateFromPassword(secretPass, 12) From c1f7399a86f61bcf26791b9e0a6cf30b28e2048c Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Fri, 8 Feb 2019 15:48:09 +0100 Subject: [PATCH 04/16] review comment: cleaner constant for N/2, delete secp256k1N and use (#3279) `secp256k1.S256().N` directly instead --- crypto/secp256k1/secp256k1_nocgo.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crypto/secp256k1/secp256k1_nocgo.go b/crypto/secp256k1/secp256k1_nocgo.go index 052c3d14..cd1655a5 100644 --- a/crypto/secp256k1/secp256k1_nocgo.go +++ b/crypto/secp256k1/secp256k1_nocgo.go @@ -14,8 +14,7 @@ import ( // see: // - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93 // - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39 -var secp256k1N, _ = new(big.Int).SetString("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16) -var secp256k1halfN = new(big.Int).Div(secp256k1N, big.NewInt(2)) +var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1) // Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg. // The returned signature will be of the form R || S (in lower-S form). 
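Before the next patch, a short aside on the lower-S form referenced in the comment above: the sketch below shows how a half-order constant like `secp256k1halfN` is typically used to detect and canonicalize a high-S signature component, which is the malleability concern the v0.29.2 notes mention. This is illustrative only, not the package's actual Sign/Verify code; the helper names `isLowS` and `normalizeS` are hypothetical, and it assumes the same `btcd/btcec` import that `secp256k1_nocgo.go` uses.

```go
// Illustrative only; not part of the patch. isLowS and normalizeS are hypothetical
// helpers; the real enforcement lives in Sign/Verify in secp256k1_nocgo.go.
package main

import (
	"fmt"
	"math/big"

	secp256k1 "github.com/btcsuite/btcd/btcec" // the library the patch wraps
)

// Half of the curve order N, computed with a right shift as in the patch above.
var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)

// isLowS reports whether s lies in the lower half of the group order,
// i.e. the canonical, non-malleable encoding of the S component.
func isLowS(s *big.Int) bool {
	return s.Cmp(secp256k1halfN) <= 0
}

// normalizeS maps a valid s to its canonical low-S counterpart (N - s when needed).
func normalizeS(s *big.Int) *big.Int {
	if isLowS(s) {
		return s
	}
	return new(big.Int).Sub(secp256k1.S256().N, s)
}

func main() {
	highS := new(big.Int).Sub(secp256k1.S256().N, big.NewInt(1)) // deliberately in the upper half
	fmt.Println(isLowS(highS), isLowS(normalizeS(highS)))        // false true
}
```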
From cce4d21ccbaa94163b393dd4dc1fd7c202d4feb7 Mon Sep 17 00:00:00 2001 From: Anca Zamfir Date: Fri, 8 Feb 2019 19:05:09 +0100 Subject: [PATCH 05/16] treat validator updates as set (#3222) * Initial commit for 3181..still early * unit test updates * unit test updates * fix check of dups accross updates and deletes * simplify the processChange() func * added overflow check utest * Added checks for empty valset, new utest * deepcopy changes in processUpdate() * moved to new API, fixed tests * test cleanup * address review comments * make sure votePower > 0 * gofmt fixes * handle duplicates and invalid values * more work on tests, review comments * Renamed and explained K * make TestVal private * split verifyUpdatesAndComputeNewPriorities.., added check for deletes * return error if validator set is empty after processing changes * address review comments * lint err * Fixed the total voting power and added comments * fix lint * fix lint --- state/execution.go | 79 +------- state/execution_test.go | 2 +- state/state_test.go | 137 ++++++++----- types/validator.go | 11 ++ types/validator_set.go | 363 +++++++++++++++++++++++++++------- types/validator_set_test.go | 382 ++++++++++++++++++++++++++++++++++-- 6 files changed, 756 insertions(+), 218 deletions(-) diff --git a/state/execution.go b/state/execution.go index 85477eeb..470e22bc 100644 --- a/state/execution.go +++ b/state/execution.go @@ -2,7 +2,6 @@ package state import ( "fmt" - "strings" "time" abci "github.com/tendermint/tendermint/abci/types" @@ -143,7 +142,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b return state, err } if len(validatorUpdates) > 0 { - blockExec.logger.Info("Updates to validators", "updates", makeValidatorUpdatesLogString(validatorUpdates)) + blockExec.logger.Info("Updates to validators", "updates", types.ValidatorListString(validatorUpdates)) } // Update the state with the block and responses. @@ -368,70 +367,6 @@ func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, return nil } -// If more or equal than 1/3 of total voting power changed in one block, then -// a light client could never prove the transition externally. See -// ./lite/doc.go for details on how a light client tracks validators. -func updateValidators(currentSet *types.ValidatorSet, updates []*types.Validator) error { - for _, valUpdate := range updates { - // should already have been checked - if valUpdate.VotingPower < 0 { - return fmt.Errorf("Voting power can't be negative %v", valUpdate) - } - - address := valUpdate.Address - _, val := currentSet.GetByAddress(address) - // valUpdate.VotingPower is ensured to be non-negative in validation method - if valUpdate.VotingPower == 0 { // remove val - _, removed := currentSet.Remove(address) - if !removed { - return fmt.Errorf("Failed to remove validator %X", address) - } - } else if val == nil { // add val - // make sure we do not exceed MaxTotalVotingPower by adding this validator: - totalVotingPower := currentSet.TotalVotingPower() - updatedVotingPower := valUpdate.VotingPower + totalVotingPower - overflow := updatedVotingPower > types.MaxTotalVotingPower || updatedVotingPower < 0 - if overflow { - return fmt.Errorf( - "Failed to add new validator %v. 
Adding it would exceed max allowed total voting power %v", - valUpdate, - types.MaxTotalVotingPower) - } - // TODO: issue #1558 update spec according to the following: - // Set ProposerPriority to -C*totalVotingPower (with C ~= 1.125) to make sure validators can't - // unbond/rebond to reset their (potentially previously negative) ProposerPriority to zero. - // - // Contract: totalVotingPower < MaxTotalVotingPower to ensure ProposerPriority does - // not exceed the bounds of int64. - // - // Compute ProposerPriority = -1.125*totalVotingPower == -(totalVotingPower + (totalVotingPower >> 3)). - valUpdate.ProposerPriority = -(totalVotingPower + (totalVotingPower >> 3)) - added := currentSet.Add(valUpdate) - if !added { - return fmt.Errorf("Failed to add new validator %v", valUpdate) - } - } else { // update val - // make sure we do not exceed MaxTotalVotingPower by updating this validator: - totalVotingPower := currentSet.TotalVotingPower() - curVotingPower := val.VotingPower - updatedVotingPower := totalVotingPower - curVotingPower + valUpdate.VotingPower - overflow := updatedVotingPower > types.MaxTotalVotingPower || updatedVotingPower < 0 - if overflow { - return fmt.Errorf( - "Failed to update existing validator %v. Updating it would exceed max allowed total voting power %v", - valUpdate, - types.MaxTotalVotingPower) - } - - updated := currentSet.Update(valUpdate) - if !updated { - return fmt.Errorf("Failed to update validator %X to %v", address, valUpdate) - } - } - } - return nil -} - // updateState returns a new State updated according to the header and responses. func updateState( state State, @@ -448,7 +383,7 @@ func updateState( // Update the validator set with the latest abciResponses. lastHeightValsChanged := state.LastHeightValidatorsChanged if len(validatorUpdates) > 0 { - err := updateValidators(nValSet, validatorUpdates) + err := nValSet.UpdateWithChangeSet(validatorUpdates) if err != nil { return state, fmt.Errorf("Error changing validator set: %v", err) } @@ -552,13 +487,3 @@ func ExecCommitBlock( // ResponseCommit has no error or log, just data return res.Data, nil } - -// Make pretty string for validatorUpdates logging -func makeValidatorUpdatesLogString(vals []*types.Validator) string { - chunks := make([]string, len(vals)) - for i, val := range vals { - chunks[i] = fmt.Sprintf("%s:%d", val.Address, val.VotingPower) - } - - return strings.Join(chunks, ",") -} diff --git a/state/execution_test.go b/state/execution_test.go index 041fb558..e0c9b4b9 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -280,7 +280,7 @@ func TestUpdateValidators(t *testing.T) { t.Run(tc.name, func(t *testing.T) { updates, err := types.PB2TM.ValidatorUpdates(tc.abciUpdates) assert.NoError(t, err) - err = updateValidators(tc.currentSet, updates) + err = tc.currentSet.UpdateWithChangeSet(updates) if tc.shouldErr { assert.Error(t, err) } else { diff --git a/state/state_test.go b/state/state_test.go index 904d7a10..9cbe8342 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -322,7 +322,8 @@ func TestProposerFrequency(t *testing.T) { vals := make([]*types.Validator, N) totalVotePower := int64(0) for j := 0; j < N; j++ { - votePower := int64(cmn.RandInt() % maxPower) + // make sure votePower > 0 + votePower := int64(cmn.RandInt()%maxPower) + 1 totalVotePower += votePower privVal := types.NewMockPV() pubKey := privVal.GetPubKey() @@ -424,49 +425,71 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { require.Equal(t, len(updatedState2.NextValidators.Validators), 
2) _, updatedVal1 := updatedState2.NextValidators.GetByAddress(val1PubKey.Address()) _, addedVal2 := updatedState2.NextValidators.GetByAddress(val2PubKey.Address()) + // adding a validator should not lead to a ProposerPriority equal to zero (unless the combination of averaging and // incrementing would cause so; which is not the case here) - totalPowerBefore2 := curTotal - // while adding we compute prio == -1.125 * total: - wantVal2ProposerPrio := -(totalPowerBefore2 + (totalPowerBefore2 >> 3)) - wantVal2ProposerPrio = wantVal2ProposerPrio + val2VotingPower - // then increment: + // Steps from adding new validator: + // 0 - val1 prio is 0, TVP after add: + wantVal1Prio := int64(0) totalPowerAfter := val1VotingPower + val2VotingPower - // mostest: - wantVal2ProposerPrio = wantVal2ProposerPrio - totalPowerAfter - avg := big.NewInt(0).Add(big.NewInt(val1VotingPower), big.NewInt(wantVal2ProposerPrio)) + // 1. Add - Val2 should be initially added with (-123) => + wantVal2Prio := -(totalPowerAfter + (totalPowerAfter >> 3)) + // 2. Scale - noop + // 3. Center - with avg, resulting val2:-61, val1:62 + avg := big.NewInt(0).Add(big.NewInt(wantVal1Prio), big.NewInt(wantVal2Prio)) avg.Div(avg, big.NewInt(2)) - wantVal2ProposerPrio = wantVal2ProposerPrio - avg.Int64() - wantVal1Prio := 0 + val1VotingPower - avg.Int64() + wantVal2Prio = wantVal2Prio - avg.Int64() // -61 + wantVal1Prio = wantVal1Prio - avg.Int64() // 62 + + // 4. Steps from IncrementProposerPriority + wantVal1Prio = wantVal1Prio + val1VotingPower // 72 + wantVal2Prio = wantVal2Prio + val2VotingPower // 39 + wantVal1Prio = wantVal1Prio - totalPowerAfter // -38 as val1 is proposer + assert.Equal(t, wantVal1Prio, updatedVal1.ProposerPriority) - assert.Equal(t, wantVal2ProposerPrio, addedVal2.ProposerPriority) + assert.Equal(t, wantVal2Prio, addedVal2.ProposerPriority) // Updating a validator does not reset the ProposerPriority to zero: + // 1. 
Add - Val2 VotingPower change to 1 => updatedVotingPowVal2 := int64(1) updateVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: updatedVotingPowVal2} validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateVal}) assert.NoError(t, err) - // this will cause the diff of priorities (31) + // this will cause the diff of priorities (77) // to be larger than threshold == 2*totalVotingPower (22): updatedState3, err := updateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) require.Equal(t, len(updatedState3.NextValidators.Validators), 2) _, prevVal1 := updatedState3.Validators.GetByAddress(val1PubKey.Address()) + _, prevVal2 := updatedState3.Validators.GetByAddress(val2PubKey.Address()) + _, updatedVal1 = updatedState3.NextValidators.GetByAddress(val1PubKey.Address()) _, updatedVal2 := updatedState3.NextValidators.GetByAddress(val2PubKey.Address()) - // divide previous priorities by 2 == CEIL(31/22) as diff > threshold: - expectedVal1PrioBeforeAvg := prevVal1.ProposerPriority/2 + prevVal1.VotingPower - wantVal2ProposerPrio = wantVal2ProposerPrio/2 + updatedVotingPowVal2 - // val1 will be proposer: - total := val1VotingPower + updatedVotingPowVal2 - expectedVal1PrioBeforeAvg = expectedVal1PrioBeforeAvg - total - avgI64 := (wantVal2ProposerPrio + expectedVal1PrioBeforeAvg) / 2 - wantVal2ProposerPrio = wantVal2ProposerPrio - avgI64 - wantVal1Prio = expectedVal1PrioBeforeAvg - avgI64 - assert.Equal(t, wantVal2ProposerPrio, updatedVal2.ProposerPriority) - _, updatedVal1 = updatedState3.NextValidators.GetByAddress(val1PubKey.Address()) + // 2. Scale + // old prios: v1(10):-38, v2(1):39 + wantVal1Prio = prevVal1.ProposerPriority + wantVal2Prio = prevVal2.ProposerPriority + // scale to diffMax = 22 = 2 * tvp, diff=39-(-38)=77 + // new totalPower + totalPower := updatedVal1.VotingPower + updatedVal2.VotingPower + dist := wantVal2Prio - wantVal1Prio + // ratio := (dist + 2*totalPower - 1) / 2*totalPower = 98/22 = 4 + ratio := (dist + 2*totalPower - 1) / (2 * totalPower) + // v1(10):-38/4, v2(1):39/4 + wantVal1Prio /= ratio // -9 + wantVal2Prio /= ratio // 9 + + // 3. Center - noop + // 4. 
IncrementProposerPriority() -> + // v1(10):-9+10, v2(1):9+1 -> v2 proposer so subsract tvp(11) + // v1(10):1, v2(1):-1 + wantVal2Prio += updatedVal2.VotingPower // 10 -> prop + wantVal1Prio += updatedVal1.VotingPower // 1 + wantVal2Prio -= totalPower // -1 + + assert.Equal(t, wantVal2Prio, updatedVal2.ProposerPriority) assert.Equal(t, wantVal1Prio, updatedVal1.ProposerPriority) } @@ -527,22 +550,22 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { _, oldVal1 := updatedState2.Validators.GetByAddress(val1PubKey.Address()) _, updatedVal2 := updatedState2.NextValidators.GetByAddress(val2PubKey.Address()) - totalPower = val1VotingPower // no update - v2PrioWhenAddedVal2 := -(totalPower + (totalPower >> 3)) - v2PrioWhenAddedVal2 = v2PrioWhenAddedVal2 + val1VotingPower // -11 + 10 == -1 - v1PrioWhenAddedVal2 := oldVal1.ProposerPriority + val1VotingPower // -10 + 10 == 0 - totalPower = 2 * val1VotingPower // now we have to validators with that power - v1PrioWhenAddedVal2 = v1PrioWhenAddedVal2 - totalPower // mostest - // have to express the AVG in big.Ints as -1/2 == -1 in big.Int while -1/2 == 0 in int64 - avgSum := big.NewInt(0).Add(big.NewInt(v2PrioWhenAddedVal2), big.NewInt(v1PrioWhenAddedVal2)) - avg := avgSum.Div(avgSum, big.NewInt(2)) - expectedVal2Prio := v2PrioWhenAddedVal2 - avg.Int64() - totalPower = 2 * val1VotingPower // 10 + 10 - expectedVal1Prio := oldVal1.ProposerPriority + val1VotingPower - avg.Int64() - totalPower - // val1's ProposerPriority story: -10 (see above) + 10 (voting pow) - (-1) (avg) - 20 (total) == -19 + // 1. Add + val2VotingPower := val1VotingPower + totalPower = val1VotingPower + val2VotingPower // 20 + v2PrioWhenAddedVal2 := -(totalPower + (totalPower >> 3)) // -22 + // 2. Scale - noop + // 3. Center + avgSum := big.NewInt(0).Add(big.NewInt(v2PrioWhenAddedVal2), big.NewInt(oldVal1.ProposerPriority)) + avg := avgSum.Div(avgSum, big.NewInt(2)) // -11 + expectedVal2Prio := v2PrioWhenAddedVal2 - avg.Int64() // -11 + expectedVal1Prio := oldVal1.ProposerPriority - avg.Int64() // 11 + // 4. 
Increment + expectedVal2Prio = expectedVal2Prio + val2VotingPower // -11 + 10 = -1 + expectedVal1Prio = expectedVal1Prio + val1VotingPower // 11 + 10 == 21 + expectedVal1Prio = expectedVal1Prio - totalPower // 1, val1 proposer + assert.EqualValues(t, expectedVal1Prio, updatedVal1.ProposerPriority) - // val2 prio when added: -(totalVotingPower + (totalVotingPower >> 3)) == -11 - // -> -11 + 10 (voting power) - (-1) (avg) == 0 assert.EqualValues(t, expectedVal2Prio, updatedVal2.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2) validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) @@ -551,34 +574,40 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { updatedState3, err := updateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) - // proposer changes from now on (every iteration) as long as there are no changes in the validator set: - assert.NotEqual(t, updatedState3.Validators.Proposer.Address, updatedState3.NextValidators.Proposer.Address) + assert.Equal(t, updatedState3.Validators.Proposer.Address, updatedState3.NextValidators.Proposer.Address) assert.Equal(t, updatedState3.Validators, updatedState2.NextValidators) _, updatedVal1 = updatedState3.NextValidators.GetByAddress(val1PubKey.Address()) - _, oldVal1 = updatedState3.Validators.GetByAddress(val1PubKey.Address()) _, updatedVal2 = updatedState3.NextValidators.GetByAddress(val2PubKey.Address()) - _, oldVal2 := updatedState3.Validators.GetByAddress(val2PubKey.Address()) - // val2 will be proposer: - assert.Equal(t, val2PubKey.Address(), updatedState3.NextValidators.Proposer.Address) + // val1 will still be proposer: + assert.Equal(t, val1PubKey.Address(), updatedState3.NextValidators.Proposer.Address) + // check if expected proposer prio is matched: + // Increment + expectedVal2Prio2 := expectedVal2Prio + val2VotingPower // -1 + 10 = 9 + expectedVal1Prio2 := expectedVal1Prio + val1VotingPower // 1 + 10 == 11 + expectedVal1Prio2 = expectedVal1Prio2 - totalPower // -9, val1 proposer - expectedVal1Prio2 := oldVal1.ProposerPriority + val1VotingPower - expectedVal2Prio2 := oldVal2.ProposerPriority + val1VotingPower - totalPower - avgSum = big.NewInt(expectedVal1Prio + expectedVal2Prio) - avg = avgSum.Div(avgSum, big.NewInt(2)) - expectedVal1Prio -= avg.Int64() - expectedVal2Prio -= avg.Int64() - - // -19 + 10 - 0 (avg) == -9 assert.EqualValues(t, expectedVal1Prio2, updatedVal1.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2) - // 0 + 10 - 0 (avg) - 20 (total) == -10 assert.EqualValues(t, expectedVal2Prio2, updatedVal2.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2) // no changes in voting power and both validators have same voting power // -> proposers should alternate: oldState := updatedState3 + abciResponses = &ABCIResponses{ + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + } + validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + require.NoError(t, err) + + oldState, err = updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + assert.NoError(t, err) + expectedVal1Prio2 = 1 + expectedVal2Prio2 = -1 + expectedVal1Prio = -9 + expectedVal2Prio = 9 + for i := 0; i < 1000; i++ { // no validator updates: abciResponses := &ABCIResponses{ diff --git a/types/validator.go b/types/validator.go index 0b8967b2..325d20f5 100644 --- a/types/validator.go +++ b/types/validator.go @@ 
-3,6 +3,7 @@ package types import ( "bytes" "fmt" + "strings" "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" @@ -68,6 +69,16 @@ func (v *Validator) String() string { v.ProposerPriority) } +// ValidatorListString returns a prettified validator list for logging purposes. +func ValidatorListString(vals []*Validator) string { + chunks := make([]string, len(vals)) + for i, val := range vals { + chunks[i] = fmt.Sprintf("%s:%d", val.Address, val.VotingPower) + } + + return strings.Join(chunks, ",") +} + // Bytes computes the unique encoding of a validator with a given voting power. // These are the bytes that gets hashed in consensus. It excludes address // as its redundant with the pubkey. This also excludes ProposerPriority diff --git a/types/validator_set.go b/types/validator_set.go index 2edec595..c70f3396 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -12,14 +12,20 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" ) -// The maximum allowed total voting power. -// It needs to be sufficiently small to, in all cases:: +// MaxTotalVotingPower - the maximum allowed total voting power. +// It needs to be sufficiently small to, in all cases: // 1. prevent clipping in incrementProposerPriority() -// 2. let (diff+diffMax-1) not overflow in IncrementPropposerPriotity() +// 2. let (diff+diffMax-1) not overflow in IncrementProposerPriority() // (Proof of 1 is tricky, left to the reader). // It could be higher, but this is sufficiently large for our purposes, // and leaves room for defensive purposes. -const MaxTotalVotingPower = int64(math.MaxInt64) / 8 +// PriorityWindowSizeFactor - is a constant that when multiplied with the total voting power gives +// the maximum allowed distance between validator priorities. + +const ( + MaxTotalVotingPower = int64(math.MaxInt64) / 8 + PriorityWindowSizeFactor = 2 +) // ValidatorSet represent a set of *Validator at a given height. // The validators can be fetched by address or index. @@ -42,19 +48,17 @@ type ValidatorSet struct { // NewValidatorSet initializes a ValidatorSet by copying over the // values from `valz`, a list of Validators. If valz is nil or empty, // the new ValidatorSet will have an empty list of Validators. +// The addresses of validators in `valz` must be unique otherwise the +// function panics. func NewValidatorSet(valz []*Validator) *ValidatorSet { - validators := make([]*Validator, len(valz)) - for i, val := range valz { - validators[i] = val.Copy() - } - sort.Sort(ValidatorsByAddress(validators)) - vals := &ValidatorSet{ - Validators: validators, + vals := &ValidatorSet{} + err := vals.updateWithChangeSet(valz, false) + if err != nil { + panic(fmt.Sprintf("cannot create validator set: %s", err)) } if len(valz) > 0 { vals.IncrementProposerPriority(1) } - return vals } @@ -74,6 +78,9 @@ func (vals *ValidatorSet) CopyIncrementProposerPriority(times int) *ValidatorSet // proposer. Panics if validator set is empty. // `times` must be positive. 
func (vals *ValidatorSet) IncrementProposerPriority(times int) { + if vals.IsNilOrEmpty() { + panic("empty validator set") + } if times <= 0 { panic("Cannot call IncrementProposerPriority with non-positive times") } @@ -81,20 +88,23 @@ func (vals *ValidatorSet) IncrementProposerPriority(times int) { // Cap the difference between priorities to be proportional to 2*totalPower by // re-normalizing priorities, i.e., rescale all priorities by multiplying with: // 2*totalVotingPower/(maxPriority - minPriority) - diffMax := 2 * vals.TotalVotingPower() + diffMax := PriorityWindowSizeFactor * vals.TotalVotingPower() vals.RescalePriorities(diffMax) + vals.shiftByAvgProposerPriority() var proposer *Validator // call IncrementProposerPriority(1) times times: for i := 0; i < times; i++ { proposer = vals.incrementProposerPriority() } - vals.shiftByAvgProposerPriority() vals.Proposer = proposer } func (vals *ValidatorSet) RescalePriorities(diffMax int64) { + if vals.IsNilOrEmpty() { + panic("empty validator set") + } // NOTE: This check is merely a sanity check which could be // removed if all tests would init. voting power appropriately; // i.e. diffMax should always be > 0 @@ -102,7 +112,7 @@ func (vals *ValidatorSet) RescalePriorities(diffMax int64) { return } - // Caculating ceil(diff/diffMax): + // Calculating ceil(diff/diffMax): // Re-normalization is performed by dividing by an integer for simplicity. // NOTE: This may make debugging priority issues easier as well. diff := computeMaxMinPriorityDiff(vals) @@ -146,6 +156,9 @@ func (vals *ValidatorSet) computeAvgProposerPriority() int64 { // compute the difference between the max and min ProposerPriority of that set func computeMaxMinPriorityDiff(vals *ValidatorSet) int64 { + if vals.IsNilOrEmpty() { + panic("empty validator set") + } max := int64(math.MinInt64) min := int64(math.MaxInt64) for _, v := range vals.Validators { @@ -173,21 +186,31 @@ func (vals *ValidatorSet) getValWithMostPriority() *Validator { } func (vals *ValidatorSet) shiftByAvgProposerPriority() { + if vals.IsNilOrEmpty() { + panic("empty validator set") + } avgProposerPriority := vals.computeAvgProposerPriority() for _, val := range vals.Validators { val.ProposerPriority = safeSubClip(val.ProposerPriority, avgProposerPriority) } } +// Makes a copy of the validator list +func validatorListCopy(valsList []*Validator) []*Validator { + if valsList == nil { + return nil + } + valsCopy := make([]*Validator, len(valsList)) + for i, val := range valsList { + valsCopy[i] = val.Copy() + } + return valsCopy +} + // Copy each validator into a new ValidatorSet func (vals *ValidatorSet) Copy() *ValidatorSet { - validators := make([]*Validator, len(vals.Validators)) - for i, val := range vals.Validators { - // NOTE: must copy, since IncrementProposerPriority updates in place. - validators[i] = val.Copy() - } return &ValidatorSet{ - Validators: validators, + Validators: validatorListCopy(vals.Validators), Proposer: vals.Proposer, totalVotingPower: vals.totalVotingPower, } @@ -284,57 +307,6 @@ func (vals *ValidatorSet) Hash() []byte { return merkle.SimpleHashFromByteSlices(bzs) } -// Add adds val to the validator set and returns true. It returns false if val -// is already in the set. 
-func (vals *ValidatorSet) Add(val *Validator) (added bool) { - val = val.Copy() - idx := sort.Search(len(vals.Validators), func(i int) bool { - return bytes.Compare(val.Address, vals.Validators[i].Address) <= 0 - }) - if idx >= len(vals.Validators) { - vals.Validators = append(vals.Validators, val) - // Invalidate cache - vals.Proposer = nil - vals.totalVotingPower = 0 - return true - } else if bytes.Equal(vals.Validators[idx].Address, val.Address) { - return false - } else { - newValidators := make([]*Validator, len(vals.Validators)+1) - copy(newValidators[:idx], vals.Validators[:idx]) - newValidators[idx] = val - copy(newValidators[idx+1:], vals.Validators[idx:]) - vals.Validators = newValidators - // Invalidate cache - vals.Proposer = nil - vals.totalVotingPower = 0 - return true - } -} - -// Update updates the ValidatorSet by copying in the val. -// If the val is not found, it returns false; otherwise, -// it returns true. The val.ProposerPriority field is ignored -// and unchanged by this method. -func (vals *ValidatorSet) Update(val *Validator) (updated bool) { - index, sameVal := vals.GetByAddress(val.Address) - if sameVal == nil { - return false - } - // Overwrite the ProposerPriority so it doesn't change. - // During block execution, the val passed in here comes - // from ABCI via PB2TM.ValidatorUpdates. Since ABCI - // doesn't know about ProposerPriority, PB2TM.ValidatorUpdates - // uses the default value of 0, which would cause issues for - // proposer selection every time a validator's voting power changes. - val.ProposerPriority = sameVal.ProposerPriority - vals.Validators[index] = val.Copy() - // Invalidate cache - vals.Proposer = nil - vals.totalVotingPower = 0 - return true -} - // Remove deletes the validator with address. It returns the validator removed // and true. If returns nil and false if validator is not present in the set. func (vals *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) { @@ -366,6 +338,253 @@ func (vals *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { } } +// Checks changes against duplicates, splits the changes in updates and removals, sorts them by address +// +// Returns: +// updates, removals - the sorted lists of updates and removals +// err - non-nil if duplicate entries or entries with negative voting power are seen +// +// No changes are made to 'origChanges' +func processChanges(origChanges []*Validator) (updates, removals []*Validator, err error) { + // Make a deep copy of the changes and sort by address + changes := validatorListCopy(origChanges) + sort.Sort(ValidatorsByAddress(changes)) + + removals = make([]*Validator, 0, len(changes)) + updates = make([]*Validator, 0, len(changes)) + var prevAddr Address + + // Scan changes by address and append valid validators to updates or removals lists + for _, valUpdate := range changes { + if bytes.Equal(valUpdate.Address, prevAddr) { + err = fmt.Errorf("duplicate entry %v in %v", valUpdate, changes) + return nil, nil, err + } + if valUpdate.VotingPower < 0 { + err = fmt.Errorf("voting power can't be negative %v", valUpdate) + return nil, nil, err + } + if valUpdate.VotingPower == 0 { + removals = append(removals, valUpdate) + } else { + updates = append(updates, valUpdate) + } + prevAddr = valUpdate.Address + } + return updates, removals, err +} + +// Verifies a list of updates against a validator set, making sure the allowed +// total voting power would not be exceeded if these updates would be applied to the set. 
+// It also computes the total voting power of the set that would result after the updates but +// before the removals. +// +// Returns: +// updatedTotalVotingPower - the new total voting power if these updates would be applied +// err - non-nil if the maximum allowed total voting power would be exceeded +// +// 'updates' should be a list of proper validator changes, i.e. they have been scanned +// by processChanges for duplicates and invalid values. +// No changes are made to the validator set 'vals'. +func verifyUpdates(updates []*Validator, vals *ValidatorSet) (updatedTotalVotingPower int64, err error) { + + // Scan the updates, compute new total voting power, check for overflow + updatedTotalVotingPower = vals.TotalVotingPower() + + for _, valUpdate := range updates { + address := valUpdate.Address + _, val := vals.GetByAddress(address) + if val == nil { + // new validator, add its voting power the the total + updatedTotalVotingPower += valUpdate.VotingPower + } else { + // updated validator, add the difference in power to the total + updatedTotalVotingPower += valUpdate.VotingPower - val.VotingPower + } + + if updatedTotalVotingPower < 0 { + err = fmt.Errorf( + "failed to add/update validator with negative voting power %v", + valUpdate) + return 0, err + } + overflow := updatedTotalVotingPower > MaxTotalVotingPower + if overflow { + err = fmt.Errorf( + "failed to add/update validator %v, total voting power would exceed the max allowed %v", + valUpdate, MaxTotalVotingPower) + return 0, err + } + } + + return updatedTotalVotingPower, nil +} + +// Computes the proposer priority for the validators not present in the set based on 'updatedTotalVotingPower' +// Leaves unchanged the priorities of validators that are changed. +// +// 'updates' parameter must be a list of unique validators to be added or updated. +// No changes are made to the validator set 'vals'. +func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotalVotingPower int64) int { + + numNew := 0 + // Scan and update the proposerPriority for newly added and updated validators + for _, valUpdate := range updates { + address := valUpdate.Address + _, val := vals.GetByAddress(address) + if val == nil { + // add val + // Set ProposerPriority to -C*totalVotingPower (with C ~= 1.125) to make sure validators can't + // un-bond and then re-bond to reset their (potentially previously negative) ProposerPriority to zero. + // + // Contract: updatedVotingPower < MaxTotalVotingPower to ensure ProposerPriority does + // not exceed the bounds of int64. + // + // Compute ProposerPriority = -1.125*totalVotingPower == -(updatedVotingPower + (updatedVotingPower >> 3)). + valUpdate.ProposerPriority = -(updatedTotalVotingPower + (updatedTotalVotingPower >> 3)) + numNew++ + } else { + valUpdate.ProposerPriority = val.ProposerPriority + } + } + + return numNew +} + +// Merges the vals' validator list with the updates list. +// When two elements with same address are seen, the one from updates is selected. +// Expects updates to be a list of updates sorted by address with no duplicates or errors, +// must have been validated with verifyUpdates() and priorities computed with computeNewPriorities(). 
+func (vals *ValidatorSet) applyUpdates(updates []*Validator) { + + existing := make([]*Validator, len(vals.Validators)) + copy(existing, vals.Validators) + + merged := make([]*Validator, len(existing)+len(updates)) + i := 0 + + for len(existing) > 0 && len(updates) > 0 { + if bytes.Compare(existing[0].Address, updates[0].Address) < 0 { + merged[i] = existing[0] + existing = existing[1:] + } else { + merged[i] = updates[0] + if bytes.Equal(existing[0].Address, updates[0].Address) { + // validator present in both, advance existing + existing = existing[1:] + } + updates = updates[1:] + } + i++ + } + + for j := 0; j < len(existing); j++ { + merged[i] = existing[j] + i++ + } + + for j := 0; j < len(updates); j++ { + merged[i] = updates[j] + i++ + } + + vals.Validators = merged[:i] + vals.totalVotingPower = 0 +} + +// Checks that the validators to be removed are part of the validator set. +// No changes are made to the validator set 'vals'. +func verifyRemovals(deletes []*Validator, vals *ValidatorSet) error { + + for _, valUpdate := range deletes { + address := valUpdate.Address + _, val := vals.GetByAddress(address) + if val == nil { + return fmt.Errorf("failed to find validator %X to remove", address) + } + } + return nil +} + +// Removes the validators specified in 'deletes' from validator set 'vals'. +// Should not fail as verification has been done before. +func (vals *ValidatorSet) applyRemovals(deletes []*Validator) { + + for _, valUpdate := range deletes { + address := valUpdate.Address + _, removed := vals.Remove(address) + if !removed { + // Should never happen + panic(fmt.Sprintf("failed to remove validator %X", address)) + } + } +} + +// UpdateWithChangeSet attempts to update the validator set with 'changes' +// It performs the following steps: +// - validates the changes making sure there are no duplicates and splits them in updates and deletes +// - verifies that applying the changes will not result in errors +// - computes the total voting power BEFORE removals to ensure that in the next steps the relative priorities +// across old and newly added validators is fair +// - computes the priorities of new validators against the final set +// - applies the updates against the validator set +// - applies the removals against the validator set +// - performs scaling and centering of priority values +// If error is detected during verification steps it is returned and the validator set +// is not changed. +func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error { + return vals.updateWithChangeSet(changes, true) +} + +// main function used by UpdateWithChangeSet() and NewValidatorSet() +// If 'allowDeletes' is false then delete operations are not allowed and must be reported if +// present in 'changes' +func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes bool) error { + + if len(changes) <= 0 { + return nil + } + + // Check for duplicates within changes, split in 'updates' and 'deletes' lists (sorted) + updates, deletes, err := processChanges(changes) + if err != nil { + return err + } + + if !allowDeletes && len(deletes) != 0 { + err = fmt.Errorf("cannot process validators with voting power 0: %v", deletes) + return err + } + + // Verify that applying the 'deletes' against 'vals' will not result in error. + if err := verifyRemovals(deletes, vals); err != nil { + return err + } + + // Verify that applying the 'updates' against 'vals' will not result in error. 
+ updatedTotalVotingPower, err := verifyUpdates(updates, vals) + if err != nil { + return err + } + + // Compute the priorities for updates + numNewValidators := computeNewPriorities(updates, vals, updatedTotalVotingPower) + if len(vals.Validators)+numNewValidators <= len(deletes) { + err = fmt.Errorf("applying the validator changes would result in empty set") + return err + } + + // Apply updates and removals + vals.applyUpdates(updates) + vals.applyRemovals(deletes) + + // Scale and center + vals.RescalePriorities(PriorityWindowSizeFactor * vals.TotalVotingPower()) + vals.shiftByAvgProposerPriority() + + return nil +} + // Verify that +2/3 of the set had signed the given signBytes. func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error { diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 72b2f661..04874c19 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "math" + "math/rand" "strings" "testing" "testing/quick" @@ -45,31 +46,29 @@ func TestValidatorSetBasic(t *testing.T) { assert.Nil(t, vset.Hash()) // add - val = randValidator_(vset.TotalVotingPower()) - assert.True(t, vset.Add(val)) + assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) + assert.True(t, vset.HasAddress(val.Address)) - idx, val2 := vset.GetByAddress(val.Address) + idx, _ = vset.GetByAddress(val.Address) assert.Equal(t, 0, idx) - assert.Equal(t, val, val2) - addr, val2 = vset.GetByIndex(0) + addr, _ = vset.GetByIndex(0) assert.Equal(t, []byte(val.Address), addr) - assert.Equal(t, val, val2) assert.Equal(t, 1, vset.Size()) assert.Equal(t, val.VotingPower, vset.TotalVotingPower()) - assert.Equal(t, val, vset.GetProposer()) assert.NotNil(t, vset.Hash()) assert.NotPanics(t, func() { vset.IncrementProposerPriority(1) }) + assert.Equal(t, val.Address, vset.GetProposer().Address) // update - assert.False(t, vset.Update(randValidator_(vset.TotalVotingPower()))) + val = randValidator_(vset.TotalVotingPower()) + assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) _, val = vset.GetByAddress(val.Address) val.VotingPower += 100 proposerPriority := val.ProposerPriority - // Mimic update from types.PB2TM.ValidatorUpdates which does not know about ProposerPriority - // and hence defaults to 0. 
+ val.ProposerPriority = 0 - assert.True(t, vset.Update(val)) + assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) _, val = vset.GetByAddress(val.Address) assert.Equal(t, proposerPriority, val.ProposerPriority) @@ -116,8 +115,9 @@ func BenchmarkValidatorSetCopy(b *testing.B) { for i := 0; i < 1000; i++ { privKey := ed25519.GenPrivKey() pubKey := privKey.PubKey() - val := NewValidator(pubKey, 0) - if !vset.Add(val) { + val := NewValidator(pubKey, 10) + err := vset.UpdateWithChangeSet([]*Validator{val}) + if err != nil { panic("Failed to add validator") } } @@ -284,7 +284,7 @@ func randPubKey() crypto.PubKey { func randValidator_(totalVotingPower int64) *Validator { // this modulo limits the ProposerPriority/VotingPower to stay in the // bounds of MaxTotalVotingPower minus the already existing voting power: - val := NewValidator(randPubKey(), cmn.RandInt64()%(MaxTotalVotingPower-totalVotingPower)) + val := NewValidator(randPubKey(), int64(cmn.RandUint64()%uint64((MaxTotalVotingPower-totalVotingPower)))) val.ProposerPriority = cmn.RandInt64() % (MaxTotalVotingPower - totalVotingPower) return val } @@ -599,3 +599,357 @@ func TestValidatorSetVerifyCommit(t *testing.T) { err = vset.VerifyCommit(chainID, blockID, height, commit) assert.Nil(t, err) } + +func TestEmptySet(t *testing.T) { + + var valList []*Validator + valSet := NewValidatorSet(valList) + assert.Panics(t, func() { valSet.IncrementProposerPriority(1) }) + assert.Panics(t, func() { valSet.RescalePriorities(100) }) + assert.Panics(t, func() { valSet.shiftByAvgProposerPriority() }) + assert.Panics(t, func() { assert.Zero(t, computeMaxMinPriorityDiff(valSet)) }) + valSet.GetProposer() + + // Add to empty set + v1 := newValidator([]byte("v1"), 100) + v2 := newValidator([]byte("v2"), 100) + valList = []*Validator{v1, v2} + assert.NoError(t, valSet.UpdateWithChangeSet(valList)) + verifyValidatorSet(t, valSet) + + // Delete all validators from set + v1 = newValidator([]byte("v1"), 0) + v2 = newValidator([]byte("v2"), 0) + delList := []*Validator{v1, v2} + assert.Error(t, valSet.UpdateWithChangeSet(delList)) + + // Attempt delete from empty set + assert.Error(t, valSet.UpdateWithChangeSet(delList)) + +} + +func TestUpdatesForNewValidatorSet(t *testing.T) { + + v1 := newValidator([]byte("v1"), 100) + v2 := newValidator([]byte("v2"), 100) + valList := []*Validator{v1, v2} + valSet := NewValidatorSet(valList) + verifyValidatorSet(t, valSet) + + // Verify duplicates are caught in NewValidatorSet() and it panics + v111 := newValidator([]byte("v1"), 100) + v112 := newValidator([]byte("v1"), 123) + v113 := newValidator([]byte("v1"), 234) + valList = []*Validator{v111, v112, v113} + assert.Panics(t, func() { NewValidatorSet(valList) }) + + // Verify set including validator with voting power 0 cannot be created + v1 = newValidator([]byte("v1"), 0) + v2 = newValidator([]byte("v2"), 22) + v3 := newValidator([]byte("v3"), 33) + valList = []*Validator{v1, v2, v3} + assert.Panics(t, func() { NewValidatorSet(valList) }) + + // Verify set including validator with negative voting power cannot be created + v1 = newValidator([]byte("v1"), 10) + v2 = newValidator([]byte("v2"), -20) + v3 = newValidator([]byte("v3"), 30) + valList = []*Validator{v1, v2, v3} + assert.Panics(t, func() { NewValidatorSet(valList) }) + +} + +type testVal struct { + name string + power int64 +} + +func TestValSetUpdatesBasicTestsExecute(t *testing.T) { + valSetUpdatesBasicTests := []struct { + startVals []testVal + updateVals []testVal + expectedVals []testVal + expError 
bool + }{ + // Operations that should result in error + 0: { // updates leading to overflows + []testVal{{"v1", 10}, {"v2", 10}}, + []testVal{{"v1", math.MaxInt64}}, + []testVal{{"v1", 10}, {"v2", 10}}, + true}, + 1: { // duplicate entries in changes + []testVal{{"v1", 10}, {"v2", 10}}, + []testVal{{"v1", 11}, {"v1", 22}}, + []testVal{{"v1", 10}, {"v2", 10}}, + true}, + 2: { // duplicate entries in removes + []testVal{{"v1", 10}, {"v2", 10}}, + []testVal{{"v1", 0}, {"v1", 0}}, + []testVal{{"v1", 10}, {"v2", 10}}, + true}, + 3: { // duplicate entries in removes + changes + []testVal{{"v1", 10}, {"v2", 10}}, + []testVal{{"v1", 0}, {"v2", 20}, {"v2", 30}, {"v1", 0}}, + []testVal{{"v1", 10}, {"v2", 10}}, + true}, + 4: { // update with negative voting power + []testVal{{"v1", 10}, {"v2", 10}}, + []testVal{{"v1", -123}}, + []testVal{{"v1", 10}, {"v2", 10}}, + true}, + 5: { // delete non existing validator + []testVal{{"v1", 10}, {"v2", 10}}, + []testVal{{"v3", 0}}, + []testVal{{"v1", 10}, {"v2", 10}}, + true}, + + // Operations that should be successful + 6: { // no changes + []testVal{{"v1", 10}, {"v2", 10}}, + []testVal{}, + []testVal{{"v1", 10}, {"v2", 10}}, + false}, + 7: { // voting power changes + []testVal{{"v1", 10}, {"v2", 10}}, + []testVal{{"v1", 11}, {"v2", 22}}, + []testVal{{"v1", 11}, {"v2", 22}}, + false}, + 8: { // add new validators + []testVal{{"v1", 10}, {"v2", 20}}, + []testVal{{"v3", 30}, {"v4", 40}}, + []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}, {"v4", 40}}, + false}, + 9: { // delete validators + []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}}, + []testVal{{"v2", 0}}, + []testVal{{"v1", 10}, {"v3", 30}}, + false}, + 10: { // delete all validators + []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}}, + []testVal{{"v1", 0}, {"v2", 0}, {"v3", 0}}, + []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}}, + true}, + } + + for i, tt := range valSetUpdatesBasicTests { + // create a new set and apply updates, keeping copies for the checks + valSet := createNewValidatorSet(tt.startVals) + valSetCopy := valSet.Copy() + valList := createNewValidatorList(tt.updateVals) + valListCopy := validatorListCopy(valList) + err := valSet.UpdateWithChangeSet(valList) + + if tt.expError { + // for errors check the validator set has not been changed + assert.Error(t, err, "test %d", i) + assert.Equal(t, valSet, valSetCopy, "test %v", i) + } else { + assert.NoError(t, err, "test %d", i) + } + // check the parameter list has not changed + assert.Equal(t, valList, valListCopy, "test %v", i) + + // check the final validator list is as expected and the set is properly scaled and centered. + assert.Equal(t, getValidatorResults(valSet.Validators), tt.expectedVals, "test %v", i) + verifyValidatorSet(t, valSet) + } +} + +func getValidatorResults(valList []*Validator) []testVal { + testList := make([]testVal, len(valList)) + for i, val := range valList { + testList[i].name = string(val.Address) + testList[i].power = val.VotingPower + } + return testList +} + +// Test that different permutations of an update give the same result. +func TestValSetUpdatesOrderTestsExecute(t *testing.T) { + // startVals - initial validators to create the set with + // updateVals - a sequence of updates to be applied to the set. + // updateVals is shuffled a number of times during testing to check for same resulting validator set. 
+ valSetUpdatesOrderTests := []struct { + startVals []testVal + updateVals []testVal + }{ + 0: { // order of changes should not matter, the final validator sets should be the same + []testVal{{"v1", 10}, {"v2", 10}, {"v3", 30}, {"v4", 40}}, + []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, {"v4", 44}}}, + + 1: { // order of additions should not matter + []testVal{{"v1", 10}, {"v2", 20}}, + []testVal{{"v3", 30}, {"v4", 40}, {"v5", 50}, {"v6", 60}}}, + + 2: { // order of removals should not matter + []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}, {"v4", 40}}, + []testVal{{"v1", 0}, {"v3", 0}, {"v4", 0}}}, + + 3: { // order of mixed operations should not matter + []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}, {"v4", 40}}, + []testVal{{"v1", 0}, {"v3", 0}, {"v2", 22}, {"v5", 50}, {"v4", 44}}}, + } + + for i, tt := range valSetUpdatesOrderTests { + // create a new set and apply updates + valSet := createNewValidatorSet(tt.startVals) + valSetCopy := valSet.Copy() + valList := createNewValidatorList(tt.updateVals) + assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList)) + + // save the result as expected for next updates + valSetExp := valSetCopy.Copy() + + // perform at most 20 permutations on the updates and call UpdateWithChangeSet() + n := len(tt.updateVals) + maxNumPerms := cmn.MinInt(20, n*n) + for j := 0; j < maxNumPerms; j++ { + // create a copy of original set and apply a random permutation of updates + valSetCopy := valSet.Copy() + valList := createNewValidatorList(permutation(tt.updateVals)) + + // check there was no error and the set is properly scaled and centered. + assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList), + "test %v failed for permutation %v", i, valList) + verifyValidatorSet(t, valSetCopy) + + // verify the resulting test is same as the expected + assert.Equal(t, valSetCopy, valSetExp, + "test %v failed for permutation %v", i, valList) + } + } +} + +// This tests the private function validator_set.go:applyUpdates() function, used only for additions and changes. 
+// Should perform a proper merge of updatedVals and startVals +func TestValSetApplyUpdatesTestsExecute(t *testing.T) { + valSetUpdatesBasicTests := []struct { + startVals []testVal + updateVals []testVal + expectedVals []testVal + }{ + // additions + 0: { // prepend + []testVal{{"v4", 44}, {"v5", 55}}, + []testVal{{"v1", 11}}, + []testVal{{"v1", 11}, {"v4", 44}, {"v5", 55}}}, + 1: { // append + []testVal{{"v4", 44}, {"v5", 55}}, + []testVal{{"v6", 66}}, + []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}}, + 2: { // insert + []testVal{{"v4", 44}, {"v6", 66}}, + []testVal{{"v5", 55}}, + []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}}, + 3: { // insert multi + []testVal{{"v4", 44}, {"v6", 66}, {"v9", 99}}, + []testVal{{"v5", 55}, {"v7", 77}, {"v8", 88}}, + []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}, {"v7", 77}, {"v8", 88}, {"v9", 99}}}, + // changes + 4: { // head + []testVal{{"v1", 111}, {"v2", 22}}, + []testVal{{"v1", 11}}, + []testVal{{"v1", 11}, {"v2", 22}}}, + 5: { // tail + []testVal{{"v1", 11}, {"v2", 222}}, + []testVal{{"v2", 22}}, + []testVal{{"v1", 11}, {"v2", 22}}}, + 6: { // middle + []testVal{{"v1", 11}, {"v2", 222}, {"v3", 33}}, + []testVal{{"v2", 22}}, + []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}}, + 7: { // multi + []testVal{{"v1", 111}, {"v2", 222}, {"v3", 333}}, + []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}, + []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}}, + // additions and changes + 8: { + []testVal{{"v1", 111}, {"v2", 22}}, + []testVal{{"v1", 11}, {"v3", 33}, {"v4", 44}}, + []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, {"v4", 44}}}, + } + + for i, tt := range valSetUpdatesBasicTests { + // create a new validator set with the start values + valSet := createNewValidatorSet(tt.startVals) + + // applyUpdates() with the update values + valList := createNewValidatorList(tt.updateVals) + valSet.applyUpdates(valList) + + // check the new list of validators for proper merge + assert.Equal(t, getValidatorResults(valSet.Validators), tt.expectedVals, "test %v", i) + verifyValidatorSet(t, valSet) + } +} + +func permutation(valList []testVal) []testVal { + if len(valList) == 0 { + return nil + } + permList := make([]testVal, len(valList)) + perm := rand.Perm(len(valList)) + for i, v := range perm { + permList[v] = valList[i] + } + return permList +} + +func createNewValidatorList(testValList []testVal) []*Validator { + valList := make([]*Validator, 0, len(testValList)) + for _, val := range testValList { + valList = append(valList, newValidator([]byte(val.name), val.power)) + } + return valList +} + +func createNewValidatorSet(testValList []testVal) *ValidatorSet { + valList := createNewValidatorList(testValList) + valSet := NewValidatorSet(valList) + return valSet +} + +func verifyValidatorSet(t *testing.T, valSet *ValidatorSet) { + // verify that the vals' tvp is set to the sum of the all vals voting powers + tvp := valSet.TotalVotingPower() + assert.Equal(t, valSet.totalVotingPower, tvp, + "expected TVP %d. Got %d, valSet=%s", tvp, valSet.totalVotingPower, valSet) + + // verify that validator priorities are centered + l := int64(len(valSet.Validators)) + tpp := valSet.TotalVotingPower() + assert.True(t, tpp <= l || tpp >= -l, + "expected total priority in (-%d, %d). Got %d", l, l, tpp) + + // verify that priorities are scaled + dist := computeMaxMinPriorityDiff(valSet) + assert.True(t, dist <= PriorityWindowSizeFactor*tvp, + "expected priority distance < %d. 
Got %d", PriorityWindowSizeFactor*tvp, dist) +} + +func BenchmarkUpdates(b *testing.B) { + const ( + n = 100 + m = 2000 + ) + // Init with n validators + vs := make([]*Validator, n) + for j := 0; j < n; j++ { + vs[j] = newValidator([]byte(fmt.Sprintf("v%d", j)), 100) + } + valSet := NewValidatorSet(vs) + l := len(valSet.Validators) + + // Make m new validators + newValList := make([]*Validator, m) + for j := 0; j < m; j++ { + newValList[j] = newValidator([]byte(fmt.Sprintf("v%d", j+l)), 1000) + } + b.ResetTimer() + + for i := 0; i < b.N; i++ { + // Add m validators to valSetCopy + valSetCopy := valSet.Copy() + assert.NoError(b, valSetCopy.UpdateWithChangeSet(newValList)) + } +} From 90ba63948ad48856fbe23c725b9ffd85c956c174 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 8 Feb 2019 18:25:48 -0500 Subject: [PATCH 06/16] Sec/bucky/35 commit duplicate evidence (#36) Don't add committed evidence to evpool --- evidence/pool.go | 3 ++- evidence/pool_test.go | 4 ++-- evidence/reactor_test.go | 4 ++-- evidence/store.go | 38 +++++++++++++++++++++----------------- evidence/store_test.go | 22 +++++++++++++++++++--- node/node.go | 3 +-- node/node_test.go | 6 ++---- state/execution.go | 1 - state/services.go | 1 + state/validation.go | 2 ++ 10 files changed, 52 insertions(+), 32 deletions(-) diff --git a/evidence/pool.go b/evidence/pool.go index b5fdbdf1..3df0ec70 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -28,7 +28,8 @@ type EvidencePool struct { state sm.State } -func NewEvidencePool(stateDB dbm.DB, evidenceStore *EvidenceStore) *EvidencePool { +func NewEvidencePool(stateDB, evidenceDB dbm.DB) *EvidencePool { + evidenceStore := NewEvidenceStore(evidenceDB) evpool := &EvidencePool{ stateDB: stateDB, state: sm.LoadState(stateDB), diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 1f4f1a06..677ce78b 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -56,8 +56,8 @@ func TestEvidencePool(t *testing.T) { valAddr := []byte("val1") height := int64(5) stateDB := initializeValidatorState(valAddr, height) - store := NewEvidenceStore(dbm.NewMemDB()) - pool := NewEvidencePool(stateDB, store) + evidenceDB := dbm.NewMemDB() + pool := NewEvidencePool(stateDB, evidenceDB) goodEvidence := types.NewMockGoodEvidence(height, 0, valAddr) badEvidence := types.MockBadEvidence{goodEvidence} diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index 1c4e731a..635e9553 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -37,8 +37,8 @@ func makeAndConnectEvidenceReactors(config *cfg.Config, stateDBs []dbm.DB) []*Ev logger := evidenceLogger() for i := 0; i < N; i++ { - store := NewEvidenceStore(dbm.NewMemDB()) - pool := NewEvidencePool(stateDBs[i], store) + evidenceDB := dbm.NewMemDB() + pool := NewEvidencePool(stateDBs[i], evidenceDB) reactors[i] = NewEvidenceReactor(pool) reactors[i].SetLogger(logger.With("validator", i)) } diff --git a/evidence/store.go b/evidence/store.go index 17b37aab..998a763d 100644 --- a/evidence/store.go +++ b/evidence/store.go @@ -117,32 +117,33 @@ func (store *EvidenceStore) listEvidence(prefixKey string, maxNum int64) (eviden return evidence } -// GetEvidence fetches the evidence with the given height and hash. -func (store *EvidenceStore) GetEvidence(height int64, hash []byte) *EvidenceInfo { +// GetEvidenceInfo fetches the EvidenceInfo with the given height and hash. +// If not found, ei.Evidence is nil. 
+func (store *EvidenceStore) GetEvidenceInfo(height int64, hash []byte) EvidenceInfo { key := keyLookupFromHeightAndHash(height, hash) val := store.db.Get(key) if len(val) == 0 { - return nil + return EvidenceInfo{} } var ei EvidenceInfo err := cdc.UnmarshalBinaryBare(val, &ei) if err != nil { panic(err) } - return &ei + return ei } // AddNewEvidence adds the given evidence to the database. // It returns false if the evidence is already stored. func (store *EvidenceStore) AddNewEvidence(evidence types.Evidence, priority int64) bool { // check if we already have seen it - ei_ := store.GetEvidence(evidence.Height(), evidence.Hash()) - if ei_ != nil && ei_.Evidence != nil { + ei := store.getEvidenceInfo(evidence) + if ei.Evidence != nil { return false } - ei := EvidenceInfo{ + ei = EvidenceInfo{ Committed: false, Priority: priority, Evidence: evidence, @@ -165,6 +166,11 @@ func (store *EvidenceStore) AddNewEvidence(evidence types.Evidence, priority int // MarkEvidenceAsBroadcasted removes evidence from Outqueue. func (store *EvidenceStore) MarkEvidenceAsBroadcasted(evidence types.Evidence) { ei := store.getEvidenceInfo(evidence) + if ei.Evidence == nil { + // nothin to do + return + } + // remove from the outqueue key := keyOutqueue(evidence, ei.Priority) store.db.Delete(key) } @@ -177,8 +183,12 @@ func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) { pendingKey := keyPending(evidence) store.db.Delete(pendingKey) - ei := store.getEvidenceInfo(evidence) - ei.Committed = true + // committed EvidenceInfo doens't need priority + ei := EvidenceInfo{ + Committed: true, + Evidence: evidence, + Priority: 0, + } lookupKey := keyLookup(evidence) store.db.SetSync(lookupKey, cdc.MustMarshalBinaryBare(ei)) @@ -187,13 +197,7 @@ func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) { //--------------------------------------------------- // utils +// getEvidenceInfo is convenience for calling GetEvidenceInfo if we have the full evidence. func (store *EvidenceStore) getEvidenceInfo(evidence types.Evidence) EvidenceInfo { - key := keyLookup(evidence) - var ei EvidenceInfo - b := store.db.Get(key) - err := cdc.UnmarshalBinaryBare(b, &ei) - if err != nil { - panic(err) - } - return ei + return store.GetEvidenceInfo(evidence.Height(), evidence.Hash()) } diff --git a/evidence/store_test.go b/evidence/store_test.go index 35eb28d0..5a7a8bd3 100644 --- a/evidence/store_test.go +++ b/evidence/store_test.go @@ -27,6 +27,21 @@ func TestStoreAddDuplicate(t *testing.T) { assert.False(added) } +func TestStoreCommitDuplicate(t *testing.T) { + assert := assert.New(t) + + db := dbm.NewMemDB() + store := NewEvidenceStore(db) + + priority := int64(10) + ev := types.NewMockGoodEvidence(2, 1, []byte("val1")) + + store.MarkEvidenceAsCommitted(ev) + + added := store.AddNewEvidence(ev, priority) + assert.False(added) +} + func TestStoreMark(t *testing.T) { assert := assert.New(t) @@ -46,7 +61,7 @@ func TestStoreMark(t *testing.T) { assert.True(added) // get the evidence. verify. 
should be uncommitted - ei := store.GetEvidence(ev.Height(), ev.Hash()) + ei := store.GetEvidenceInfo(ev.Height(), ev.Hash()) assert.Equal(ev, ei.Evidence) assert.Equal(priority, ei.Priority) assert.False(ei.Committed) @@ -72,9 +87,10 @@ func TestStoreMark(t *testing.T) { assert.Equal(0, len(pendingEv)) // evidence should show committed - ei = store.GetEvidence(ev.Height(), ev.Hash()) + newPriority := int64(0) + ei = store.GetEvidenceInfo(ev.Height(), ev.Hash()) assert.Equal(ev, ei.Evidence) - assert.Equal(priority, ei.Priority) + assert.Equal(newPriority, ei.Priority) assert.True(ei.Committed) } diff --git a/node/node.go b/node/node.go index 1b731981..969452c4 100644 --- a/node/node.go +++ b/node/node.go @@ -345,8 +345,7 @@ func NewNode(config *cfg.Config, return nil, err } evidenceLogger := logger.With("module", "evidence") - evidenceStore := evidence.NewEvidenceStore(evidenceDB) - evidencePool := evidence.NewEvidencePool(stateDB, evidenceStore) + evidencePool := evidence.NewEvidencePool(stateDB, evidenceDB) evidencePool.SetLogger(evidenceLogger) evidenceReactor := evidence.NewEvidenceReactor(evidencePool) evidenceReactor.SetLogger(evidenceLogger) diff --git a/node/node_test.go b/node/node_test.go index 3218c832..4b4610e1 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -227,11 +227,10 @@ func TestCreateProposalBlock(t *testing.T) { mempool.SetLogger(logger) // Make EvidencePool - types.RegisterMockEvidencesGlobal() + types.RegisterMockEvidencesGlobal() // XXX! evidence.RegisterMockEvidences() evidenceDB := dbm.NewMemDB() - evidenceStore := evidence.NewEvidenceStore(evidenceDB) - evidencePool := evidence.NewEvidencePool(stateDB, evidenceStore) + evidencePool := evidence.NewEvidencePool(stateDB, evidenceDB) evidencePool.SetLogger(logger) // fill the evidence pool with more evidence @@ -270,7 +269,6 @@ func TestCreateProposalBlock(t *testing.T) { err = blockExec.ValidateBlock(state, block) assert.NoError(t, err) - } func state(nVals int, height int64) (sm.State, dbm.DB) { diff --git a/state/execution.go b/state/execution.go index 470e22bc..e3668c77 100644 --- a/state/execution.go +++ b/state/execution.go @@ -94,7 +94,6 @@ func (blockExec *BlockExecutor) CreateProposalBlock( txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) return state.MakeBlock(height, txs, commit, evidence, proposerAddr) - } // ValidateBlock validates the given block against the given state. diff --git a/state/services.go b/state/services.go index b8f1febe..90f0cd01 100644 --- a/state/services.go +++ b/state/services.go @@ -80,6 +80,7 @@ type BlockStore interface { // evidence pool // EvidencePool defines the EvidencePool interface used by the ConsensusState. +// Get/Set/Commit type EvidencePool interface { PendingEvidence(int64) []types.Evidence AddEvidence(types.Evidence) error diff --git a/state/validation.go b/state/validation.go index cd571e34..82c479ac 100644 --- a/state/validation.go +++ b/state/validation.go @@ -185,6 +185,8 @@ func VerifyEvidence(stateDB dbm.DB, state State, evidence types.Evidence) error // The address must have been an active validator at the height. 
// NOTE: we will ignore evidence from H if the key was not a validator // at H, even if it is a validator at some nearby H' + // XXX: this makes lite-client bisection as is unsafe + // See https://github.com/tendermint/tendermint/issues/3244 ev := evidence height, addr := ev.Height(), ev.Address() _, val := valset.GetByAddress(addr) From 87bdc42bf827c659aaa4b3c9c796bc3f8e698a87 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Sat, 9 Feb 2019 00:30:45 +0100 Subject: [PATCH 07/16] Reject blocks with committed evidence (#37) * evidence: NewEvidencePool takes evidenceDB * evidence: failing TestStoreCommitDuplicate tendermint/security#35 * GetEvidence -> GetEvidenceInfo * fix TestStoreCommitDuplicate * comment in VerifyEvidence * add check if evidence was already seen - modify EventPool interface (EventStore is not known in ApplyBlock): - add IsCommitted method to iface - add test * update changelog * fix TestStoreMark: - priority in evidence info gets reset to zero after evidence gets committed * review comments: simplify EvidencePool.IsCommitted - delete obsolete EvidenceStore.IsCommitted * add simple test for IsCommitted * update changelog: this is actually breaking (PR number still missing) * fix TestStoreMark: - priority in evidence info gets reset to zero after evidence gets committed * review suggestion: simplify return --- CHANGELOG_PENDING.md | 15 +++++++++++++++ consensus/reactor_test.go | 1 + evidence/pool.go | 6 ++++++ evidence/pool_test.go | 21 +++++++++++++++++++++ evidence/store.go | 2 +- state/execution.go | 2 +- state/services.go | 3 +++ state/validation.go | 5 ++++- state/validation_test.go | 25 +++++++++++++++++++++++++ 9 files changed, 77 insertions(+), 3 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 4548eb1e..91eed188 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -6,8 +6,23 @@ Special thanks to external contributors on this release: ### BREAKING CHANGES: +* CLI/RPC/Config + +* Apps + +* Go API + +* Blockchain Protocol + + - [types] Reject blocks which contain already committed evidence + +* P2P Protocol + ### FEATURES: ### IMPROVEMENTS: ### BUG FIXES: + + - [evidence] Do not store evidence which was already marked as committed + diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 28e245ae..d35eaf3c 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -211,6 +211,7 @@ func (m *mockEvidencePool) Update(block *types.Block, state sm.State) { } m.height++ } +func (m *mockEvidencePool) IsCommitted(types.Evidence) bool { return false } //------------------------------------ diff --git a/evidence/pool.go b/evidence/pool.go index 3df0ec70..18ccb334 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -133,6 +133,12 @@ func (evpool *EvidencePool) MarkEvidenceAsCommitted(height int64, evidence []typ } +// IsCommitted returns true if we have already seen this exact evidence and it is already marked as committed. 
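+// Evidence that was never added to the pool is reported as not committed, since its stored
+// EvidenceInfo is empty (ei.Evidence == nil).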
+func (evpool *EvidencePool) IsCommitted(evidence types.Evidence) bool { + ei := evpool.evidenceStore.getEvidenceInfo(evidence) + return ei.Evidence != nil && ei.Committed +} + func (evpool *EvidencePool) removeEvidence(height, maxAge int64, blockEvidenceMap map[string]struct{}) { for e := evpool.evidenceList.Front(); e != nil; e = e.Next() { ev := e.Value.(types.Evidence) diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 677ce78b..30b20011 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -84,3 +84,24 @@ func TestEvidencePool(t *testing.T) { assert.Nil(t, err) assert.Equal(t, 1, pool.evidenceList.Len()) } + +func TestEvidencePoolIsCommitted(t *testing.T) { + // Initialization: + valAddr := []byte("validator_address") + height := int64(42) + stateDB := initializeValidatorState(valAddr, height) + evidenceDB := dbm.NewMemDB() + pool := NewEvidencePool(stateDB, evidenceDB) + + // evidence not seen yet: + evidence := types.NewMockGoodEvidence(height, 0, valAddr) + assert.False(t, pool.IsCommitted(evidence)) + + // evidence seen but not yet committed: + assert.NoError(t, pool.AddEvidence(evidence)) + assert.False(t, pool.IsCommitted(evidence)) + + // evidence seen and committed: + pool.MarkEvidenceAsCommitted(height, []types.Evidence{evidence}) + assert.True(t, pool.IsCommitted(evidence)) +} diff --git a/evidence/store.go b/evidence/store.go index 998a763d..464d6138 100644 --- a/evidence/store.go +++ b/evidence/store.go @@ -167,7 +167,7 @@ func (store *EvidenceStore) AddNewEvidence(evidence types.Evidence, priority int func (store *EvidenceStore) MarkEvidenceAsBroadcasted(evidence types.Evidence) { ei := store.getEvidenceInfo(evidence) if ei.Evidence == nil { - // nothin to do + // nothing to do; we did not store the evidence yet (AddNewEvidence): return } // remove from the outqueue diff --git a/state/execution.go b/state/execution.go index e3668c77..8ab95839 100644 --- a/state/execution.go +++ b/state/execution.go @@ -101,7 +101,7 @@ func (blockExec *BlockExecutor) CreateProposalBlock( // Validation does not mutate state, but does require historical information from the stateDB, // ie. to verify evidence from a validator at an old height. func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) error { - return validateBlock(blockExec.db, state, block) + return validateBlock(blockExec.evpool, blockExec.db, state, block) } // ApplyBlock validates the block against the state, executes it against the app, diff --git a/state/services.go b/state/services.go index 90f0cd01..02c3aa7d 100644 --- a/state/services.go +++ b/state/services.go @@ -85,6 +85,8 @@ type EvidencePool interface { PendingEvidence(int64) []types.Evidence AddEvidence(types.Evidence) error Update(*types.Block, State) + // IsCommitted indicates if this evidence was already marked committed in another block. + IsCommitted(types.Evidence) bool } // MockMempool is an empty implementation of a Mempool, useful for testing. 
@@ -93,3 +95,4 @@ type MockEvidencePool struct{} func (m MockEvidencePool) PendingEvidence(int64) []types.Evidence { return nil } func (m MockEvidencePool) AddEvidence(types.Evidence) error { return nil } func (m MockEvidencePool) Update(*types.Block, State) {} +func (m MockEvidencePool) IsCommitted(types.Evidence) bool { return false } diff --git a/state/validation.go b/state/validation.go index 82c479ac..3cb0ee8f 100644 --- a/state/validation.go +++ b/state/validation.go @@ -13,7 +13,7 @@ import ( //----------------------------------------------------- // Validate block -func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { +func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, block *types.Block) error { // Validate internal consistency. if err := block.ValidateBasic(); err != nil { return err @@ -145,6 +145,9 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { if err := VerifyEvidence(stateDB, state, ev); err != nil { return types.NewErrEvidenceInvalid(ev, err) } + if evidencePool != nil && evidencePool.IsCommitted(ev) { + return types.NewErrEvidenceInvalid(ev, errors.New("evidence was already committed")) + } } // NOTE: We can't actually verify it's the right proposer because we dont diff --git a/state/validation_test.go b/state/validation_test.go index 12aaf636..a873855a 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -121,6 +121,31 @@ func TestValidateBlockEvidence(t *testing.T) { require.True(t, ok) } +// always returns true if asked if any evidence was already committed. +type mockEvPoolAlwaysCommitted struct{} + +func (m mockEvPoolAlwaysCommitted) PendingEvidence(int64) []types.Evidence { return nil } +func (m mockEvPoolAlwaysCommitted) AddEvidence(types.Evidence) error { return nil } +func (m mockEvPoolAlwaysCommitted) Update(*types.Block, State) {} +func (m mockEvPoolAlwaysCommitted) IsCommitted(types.Evidence) bool { return true } + +func TestValidateFailBlockOnCommittedEvidence(t *testing.T) { + var height int64 = 1 + state, stateDB := state(1, int(height)) + + blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, mockEvPoolAlwaysCommitted{}) + // A block with a couple pieces of evidence passes. 
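+	// Here the mock pool reports every piece of evidence as already committed, so
+	// ValidateBlock is expected to reject the block with ErrEvidenceInvalid.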
+ block := makeBlock(state, height) + addr, _ := state.Validators.GetByIndex(0) + alreadyCommittedEvidence := types.NewMockGoodEvidence(height, 0, addr) + block.Evidence.Evidence = []types.Evidence{alreadyCommittedEvidence} + block.EvidenceHash = block.Evidence.Hash() + err := blockExec.ValidateBlock(state, block) + + require.Error(t, err) + require.IsType(t, err, &types.ErrEvidenceInvalid{}) +} + /* TODO(#2589): - test unmarshalling BlockParts that are too big into a Block that From 4f2ef3670143e8bc46fc76e734be80416053cc16 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 8 Feb 2019 18:40:41 -0500 Subject: [PATCH 08/16] types.NewCommit (#3275) * types.NewCommit * use types.NewCommit everywhere * fix log in unsafe_reset * memoize height and round in constructor * notes about deprecating toVote * bring back memoizeHeightRound --- blockchain/reactor_test.go | 4 +-- blockchain/store_test.go | 7 ++-- .../commands/reset_priv_validator.go | 2 +- consensus/replay_test.go | 6 ++-- consensus/state.go | 2 +- consensus/types/round_state_test.go | 7 ++-- lite/helpers.go | 8 ++--- lite/proxy/validate_test.go | 8 ++--- node/node_test.go | 2 +- state/execution_test.go | 4 +-- types/block.go | 34 +++++++++++-------- types/validator_set_test.go | 10 ++---- types/vote_set.go | 5 +-- 13 files changed, 42 insertions(+), 57 deletions(-) diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index 138e1622..1e8666f1 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -95,13 +95,13 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals // let's add some blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - lastCommit := &types.Commit{} + lastCommit := types.NewCommit(types.BlockID{}, nil) if blockHeight > 1 { lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) lastBlock := blockStore.LoadBlock(blockHeight - 1) vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]).CommitSig() - lastCommit = &types.Commit{Precommits: []*types.CommitSig{vote}, BlockID: lastBlockMeta.BlockID} + lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{vote}) } thisBlock := makeBlock(blockHeight, state, lastCommit) diff --git a/blockchain/store_test.go b/blockchain/store_test.go index 9abc210b..931faf6a 100644 --- a/blockchain/store_test.go +++ b/blockchain/store_test.go @@ -23,11 +23,8 @@ import ( // make a Commit with a single vote containing just the height and a timestamp func makeTestCommit(height int64, timestamp time.Time) *types.Commit { - return &types.Commit{ - Precommits: []*types.CommitSig{ - {Height: height, Timestamp: timestamp}, - }, - } + commitSigs := []*types.CommitSig{{Height: height, Timestamp: timestamp}} + return types.NewCommit(types.BlockID{}, commitSigs) } func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) { diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index 122c2a72..055a76c5 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -61,7 +61,7 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) { } else { pv := privval.GenFilePV(privValKeyFile, privValStateFile) pv.Save() - logger.Info("Generated private validator file", "file", "keyFile", privValKeyFile, + logger.Info("Generated private validator file", "keyFile", privValKeyFile, "stateFile", privValStateFile) } } diff --git 
a/consensus/replay_test.go b/consensus/replay_test.go index e7269254..297c13c3 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -537,10 +537,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { } case *types.Vote: if p.Type == types.PrecommitType { - thisBlockCommit = &types.Commit{ - BlockID: p.BlockID, - Precommits: []*types.CommitSig{p.CommitSig()}, - } + commitSigs := []*types.CommitSig{p.CommitSig()} + thisBlockCommit = types.NewCommit(p.BlockID, commitSigs) } } } diff --git a/consensus/state.go b/consensus/state.go index 74165801..c6c49d87 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -954,7 +954,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts if cs.Height == 1 { // We're creating a proposal for the first block. // The commit is empty, but not nil. - commit = &types.Commit{} + commit = types.NewCommit(types.BlockID{}, nil) } else if cs.LastCommit.HasTwoThirdsMajority() { // Make the commit from LastCommit commit = cs.LastCommit.MakeCommit() diff --git a/consensus/types/round_state_test.go b/consensus/types/round_state_test.go index cb16f939..a9bc8e14 100644 --- a/consensus/types/round_state_test.go +++ b/consensus/types/round_state_test.go @@ -53,11 +53,8 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { Data: types.Data{ Txs: txs, }, - Evidence: types.EvidenceData{}, - LastCommit: &types.Commit{ - BlockID: blockID, - Precommits: precommits, - }, + Evidence: types.EvidenceData{}, + LastCommit: types.NewCommit(blockID, precommits), } parts := block.MakePartSet(4096) // Random Proposal diff --git a/lite/helpers.go b/lite/helpers.go index 6b18b351..119797f3 100644 --- a/lite/helpers.go +++ b/lite/helpers.go @@ -80,12 +80,8 @@ func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Com vote := makeVote(header, vset, pkz[i]) commitSigs[vote.ValidatorIndex] = vote.CommitSig() } - - res := &types.Commit{ - BlockID: types.BlockID{Hash: header.Hash()}, - Precommits: commitSigs, - } - return res + blockID := types.BlockID{Hash: header.Hash()} + return types.NewCommit(blockID, commitSigs) } func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey) *types.Vote { diff --git a/lite/proxy/validate_test.go b/lite/proxy/validate_test.go index 1ce4d667..dce177d7 100644 --- a/lite/proxy/validate_test.go +++ b/lite/proxy/validate_test.go @@ -70,7 +70,7 @@ func TestValidateBlock(t *testing.T) { }, signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11}, - Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("0xDEADBEEF")}}, + Commit: types.NewCommit(types.BlockID{Hash: []byte("0xDEADBEEF")}, nil), }, wantErr: "Data hash doesn't match header", }, @@ -81,7 +81,7 @@ func TestValidateBlock(t *testing.T) { }, signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11}, - Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}}, + Commit: types.NewCommit(types.BlockID{Hash: []byte("DEADBEEF")}, nil), }, }, // End Header.Data hash mismatch test @@ -169,7 +169,7 @@ func TestValidateBlockMeta(t *testing.T) { ValidatorsHash: []byte("Tendermint"), Time: testTime2, }, - Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}}, + Commit: types.NewCommit(types.BlockID{Hash: []byte("DEADBEEF")}, nil), }, wantErr: "Headers don't match", }, @@ -188,7 +188,7 @@ func TestValidateBlockMeta(t *testing.T) { ValidatorsHash: []byte("Tendermint-x"), Time: testTime2, }, - Commit: &types.Commit{BlockID: types.BlockID{Hash: 
[]byte("DEADBEEF")}}, + Commit: types.NewCommit(types.BlockID{Hash: []byte("DEADBEEF")}, nil), }, wantErr: "Headers don't match", }, diff --git a/node/node_test.go b/node/node_test.go index 4b4610e1..d7907e88 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -260,7 +260,7 @@ func TestCreateProposalBlock(t *testing.T) { evidencePool, ) - commit := &types.Commit{} + commit := types.NewCommit(types.BlockID{}, nil) block, _ := blockExec.CreateProposalBlock( height, state, commit, diff --git a/state/execution_test.go b/state/execution_test.go index e0c9b4b9..8cd90f96 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -79,7 +79,7 @@ func TestBeginBlockValidators(t *testing.T) { } for _, tc := range testCases { - lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: tc.lastCommitPrecommits} + lastCommit := types.NewCommit(prevBlockID, tc.lastCommitPrecommits) // block for height 2 block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) @@ -139,7 +139,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { commitSig0 := (&types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.PrecommitType}).CommitSig() commitSig1 := (&types.Vote{ValidatorIndex: 1, Timestamp: now}).CommitSig() commitSigs := []*types.CommitSig{commitSig0, commitSig1} - lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: commitSigs} + lastCommit := types.NewCommit(prevBlockID, commitSigs) for _, tc := range testCases { block, _ := state.MakeBlock(10, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) diff --git a/types/block.go b/types/block.go index ec09fd44..bcaf0725 100644 --- a/types/block.go +++ b/types/block.go @@ -490,8 +490,8 @@ func (cs *CommitSig) String() string { } // toVote converts the CommitSig to a vote. -// Once CommitSig has fewer fields than vote, -// converting to a Vote will require more information. +// TODO: deprecate for #1648. Converting to Vote will require +// access to ValidatorSet. func (cs *CommitSig) toVote() *Vote { if cs == nil { return nil @@ -509,18 +509,30 @@ type Commit struct { BlockID BlockID `json:"block_id"` Precommits []*CommitSig `json:"precommits"` - // Volatile + // memoized in first call to corresponding method + // NOTE: can't memoize in constructor because constructor + // isn't used for unmarshaling height int64 round int hash cmn.HexBytes bitArray *cmn.BitArray } +// NewCommit returns a new Commit with the given blockID and precommits. +// TODO: memoize ValidatorSet in constructor so votes can be easily reconstructed +// from CommitSig after #1648. +func NewCommit(blockID BlockID, precommits []*CommitSig) *Commit { + return &Commit{ + BlockID: blockID, + Precommits: precommits, + } +} + // VoteSignBytes constructs the SignBytes for the given CommitSig. // The only unique part of the SignBytes is the Timestamp - all other fields // signed over are otherwise the same for all validators. func (commit *Commit) VoteSignBytes(chainID string, cs *CommitSig) []byte { - return cs.toVote().SignBytes(chainID) + return commit.ToVote(cs).SignBytes(chainID) } // memoizeHeightRound memoizes the height and round of the commit using @@ -543,27 +555,20 @@ func (commit *Commit) memoizeHeightRound() { // ToVote converts a CommitSig to a Vote. // If the CommitSig is nil, the Vote will be nil. -// When CommitSig is reduced to contain fewer fields, -// this will need access to the ValidatorSet to properly -// reconstruct the vote. 
func (commit *Commit) ToVote(cs *CommitSig) *Vote { + // TODO: use commit.validatorSet to reconstruct vote + // and deprecate .toVote return cs.toVote() } // Height returns the height of the commit func (commit *Commit) Height() int64 { - if len(commit.Precommits) == 0 { - return 0 - } commit.memoizeHeightRound() return commit.height } // Round returns the round of the commit func (commit *Commit) Round() int { - if len(commit.Precommits) == 0 { - return 0 - } commit.memoizeHeightRound() return commit.round } @@ -595,9 +600,10 @@ func (commit *Commit) BitArray() *cmn.BitArray { } // GetByIndex returns the vote corresponding to a given validator index. +// Panics if `index >= commit.Size()`. // Implements VoteSetReader. func (commit *Commit) GetByIndex(index int) *Vote { - return commit.Precommits[index].toVote() + return commit.ToVote(commit.Precommits[index]) } // IsCommit returns true if there is at least one vote. diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 04874c19..cdc04d45 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -563,18 +563,12 @@ func TestValidatorSetVerifyCommit(t *testing.T) { sig, err := privKey.Sign(vote.SignBytes(chainID)) assert.NoError(t, err) vote.Signature = sig - commit := &Commit{ - BlockID: blockID, - Precommits: []*CommitSig{vote.CommitSig()}, - } + commit := NewCommit(blockID, []*CommitSig{vote.CommitSig()}) badChainID := "notmychainID" badBlockID := BlockID{Hash: []byte("goodbye")} badHeight := height + 1 - badCommit := &Commit{ - BlockID: blockID, - Precommits: []*CommitSig{nil}, - } + badCommit := NewCommit(blockID, []*CommitSig{nil}) // test some error cases // TODO: test more cases! diff --git a/types/vote_set.go b/types/vote_set.go index 14930da4..1cd0f228 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -545,10 +545,7 @@ func (voteSet *VoteSet) MakeCommit() *Commit { for i, v := range voteSet.votes { commitSigs[i] = v.CommitSig() } - return &Commit{ - BlockID: *voteSet.maj23, - Precommits: commitSigs, - } + return NewCommit(*voteSet.maj23, commitSigs) } //-------------------------------------------------------------------------------- From 792b12573eee1a58b574decce2bd62399b0b24c3 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 8 Feb 2019 18:50:02 -0500 Subject: [PATCH 09/16] Prepare v0.30.0 (#3287) * changelog, upgrading, version * update for evidence fixes * linkify * fix an entry --- CHANGELOG.md | 49 +++++++++++++++++++++++++++++++++++++++++++- CHANGELOG_PENDING.md | 7 +------ UPGRADING.md | 23 +++++++++++++++++++++ version/version.go | 8 +++++--- 4 files changed, 77 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a0e736bc..592a7c6b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,52 @@ # Changelog +## v0.30.0 + +*February 8th, 2019* + +This release fixes yet another issue with the proposer selection algorithm. +We hope it's the last one, but we won't be surprised if it's not. +We plan to one day expose the selection algorithm more directly to +the application ([\#3285](https://github.com/tendermint/tendermint/issues/3285)), and even to support randomness ([\#763](https://github.com/tendermint/tendermint/issues/763)). +For more, see issues marked +[proposer-selection](https://github.com/tendermint/tendermint/labels/proposer-selection). + +This release also includes a fix to prevent Tendermint from including the same +piece of evidence in more than one block. 
This issue was reported by @chengwenxi in our +[bug bounty program](https://hackerone.com/tendermint). + +### BREAKING CHANGES: + +* Apps + - [state] [\#3222](https://github.com/tendermint/tendermint/issues/3222) + Duplicate updates for the same validator are forbidden. Apps must ensure + that a given `ResponseEndBlock.ValidatorUpdates` contains only one entry per pubkey. + +* Go API + - [types] [\#3222](https://github.com/tendermint/tendermint/issues/3222) + Remove `Add` and `Update` methods from `ValidatorSet` in favor of new + `UpdateWithChangeSet`. This allows updates to be applied as a set, instead of + one at a time. + +* Block Protocol + - [state] [\#3286](https://github.com/tendermint/tendermint/issues/3286) Blocks that include already committed evidence are invalid. + +* P2P Protocol + - [consensus] [\#3222](https://github.com/tendermint/tendermint/issues/3222) + Validator updates are applied as a set, instead of one at a time, thus + impacting the proposer priority calculation. This ensures that the proposer + selection algorithm does not depend on the order of updates in + `ResponseEndBlock.ValidatorUpdates`. + +### IMPROVEMENTS: +- [crypto] [\#3279](https://github.com/tendermint/tendermint/issues/3279) Use `btcec.S256().N` directly instead of hard coding a copy. + +### BUG FIXES: +- [state] [\#3222](https://github.com/tendermint/tendermint/issues/3222) Fix validator set updates so they are applied as a set, rather + than one at a time. This makes the proposer selection algorithm independent of + the order of updates in `ResponseEndBlock.ValidatorUpdates`. +- [evidence] [\#3286](https://github.com/tendermint/tendermint/issues/3286) Don't add committed evidence to evidence pool. + ## v0.29.2 *February 7th, 2019* @@ -11,7 +58,7 @@ Special thanks to external contributors on this release: `crypto` packages: - p2p: - Partial fix for MITM attacks on the p2p connection. MITM conditions may - still exist. See \#3010. + still exist. See [\#3010](https://github.com/tendermint/tendermint/issues/3010). - crypto: - Eliminate our fork of `btcd` and use the `btcd/btcec` library directly for native secp256k1 signing. Note we still modify the signature encoding to diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 91eed188..64098583 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,4 +1,4 @@ -## v0.30 +## v0.31.0 ** @@ -14,8 +14,6 @@ Special thanks to external contributors on this release: * Blockchain Protocol - - [types] Reject blocks which contain already committed evidence - * P2P Protocol ### FEATURES: @@ -23,6 +21,3 @@ Special thanks to external contributors on this release: ### IMPROVEMENTS: ### BUG FIXES: - - - [evidence] Do not store evidence which was already marked as committed - diff --git a/UPGRADING.md b/UPGRADING.md index dd35ff26..f3fecb5e 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -3,6 +3,29 @@ This guide provides steps to be followed when you upgrade your applications to a newer version of Tendermint Core. +## v0.30.0 + +This release contains a breaking change to both the block and p2p protocols, +however it may be compatible with blockchains created with v0.29.0 depending on +the chain history. If your blockchain has not included any pieces of evidence, +or no piece of evidence has been included in more than one block, +and if your application has never returned multiple updates +for the same validator in a single block, then v0.30.0 will work fine with +blockchains created with v0.29.0. 
+ +The p2p protocol change is to fix the proposer selection algorithm again. +Note that proposer selection is purely a p2p concern right +now since the algorithm is only relevant during real time consensus. +This change is thus compatible with v0.29.0, but +all nodes must be upgraded to avoid disagreements on the proposer. + +### Applications + +Applications must ensure they do not return duplicates in +`ResponseEndBlock.ValidatorUpdates`. A pubkey must only appear once per set of +updates. Duplicates will cause irrecoverable failure. If you have a very good +reason why we shouldn't do this, please open an issue. + ## v0.29.0 This release contains some breaking changes to the block and p2p protocols, diff --git a/version/version.go b/version/version.go index b20223c2..37a0da78 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ const ( // Must be a string because scripts like dist.sh read this file. // XXX: Don't change the name of this variable or you will break // automation :) - TMCoreSemVer = "0.29.2" + TMCoreSemVer = "0.30.0" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.15.0" @@ -38,10 +38,12 @@ func (p Protocol) Uint64() uint64 { var ( // P2PProtocol versions all p2p behaviour and msgs. - P2PProtocol Protocol = 6 + // This includes proposer selection. + P2PProtocol Protocol = 7 // BlockProtocol versions all block data structures and processing. - BlockProtocol Protocol = 9 + // This includes validity of blocks and state updates. + BlockProtocol Protocol = 10 ) //------------------------------------------------------------------------ From 966b5bdf6ecb89f548a9bffa6c328f918ba7e541 Mon Sep 17 00:00:00 2001 From: Anca Zamfir Date: Mon, 11 Feb 2019 13:21:51 +0100 Subject: [PATCH 10/16] fix failure in TestProposerFrequency (#3293) ``` --- FAIL: TestProposerFrequency (2.50s) panic: empty validator set [recovered] panic: empty validator set goroutine 91 [running]: testing.tRunner.func1(0xc000a98c00) /usr/local/go/src/testing/testing.go:792 +0x6a7 panic(0xeae7e0, 0x11fbb30) /usr/local/go/src/runtime/panic.go:513 +0x1b9 github.com/tendermint/tendermint/types.(*ValidatorSet).RescalePriorities(0xc0000e7380, 0x0) /go/src/github.com/tendermint/tendermint/types/validator_set.go:106 +0x1ac github.com/tendermint/tendermint/state.TestProposerFrequency(0xc000a98c00) /go/src/github.com/tendermint/tendermint/state/state_test.go:335 +0xb44 testing.tRunner(0xc000a98c00, 0x111a4d8) /usr/local/go/src/testing/testing.go:827 +0x163 created by testing.(*T).Run /usr/local/go/src/testing/testing.go:878 +0x65a FAIL github.com/tendermint/tendermint/state 3.139s ``` --- state/state_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/state/state_test.go b/state/state_test.go index 9cbe8342..6d5f8f46 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -313,12 +313,12 @@ func TestProposerFrequency(t *testing.T) { } } - // some random test cases with up to 300 validators + // some random test cases with up to 100 validators maxVals := 100 maxPower := 1000 nTestCases := 5 for i := 0; i < nTestCases; i++ { - N := cmn.RandInt() % maxVals + N := cmn.RandInt()%maxVals + 1 vals := make([]*types.Validator, N) totalVotePower := int64(0) for j := 0; j < N; j++ { From 7fd51e6ade5bd3d6820acdd53ad443c509a4fc7a Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 11 Feb 2019 16:31:34 +0400 Subject: [PATCH 11/16] make govet linter pass (#3292) * make govet linter pass Refs #3262 * close PipeReader and check for err --- .golangci.yml | 1 - 
abci/client/grpc_client.go | 22 +++++++++++----------- abci/cmd/abci-cli/abci-cli.go | 1 - blockchain/reactor.go | 2 +- cmd/tendermint/commands/run_node.go | 2 -- consensus/common_test.go | 4 +--- consensus/reactor.go | 2 +- consensus/replay.go | 2 +- consensus/replay_file.go | 2 -- consensus/state.go | 10 +++++----- consensus/types/height_vote_set_test.go | 1 - crypto/encoding/amino/encode_test.go | 2 ++ crypto/secp256k1/secp256k1_nocgo.go | 4 ++-- libs/common/bit_array.go | 2 +- p2p/conn/secret_connection_test.go | 9 ++++----- privval/file.go | 1 - privval/server.go | 2 +- rpc/client/mock/abci.go | 12 ++++++------ rpc/client/rpc_test.go | 2 +- rpc/core/abci.go | 4 ++-- rpc/core/blocks.go | 6 ++++-- rpc/core/consensus.go | 14 ++++++++++---- rpc/core/events.go | 2 +- rpc/core/mempool.go | 2 +- rpc/core/net.go | 6 +++--- state/execution.go | 2 +- tools/tm-monitor/monitor/node.go | 4 ++-- 27 files changed, 61 insertions(+), 62 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 4cae2f76..0bea5e3e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -18,7 +18,6 @@ linters: - nakedret - lll - gochecknoglobals - - govet - gocritic - gosec - gochecknoinits diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 94aabc5e..d04f42b6 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -129,7 +129,7 @@ func (cli *grpcClient) EchoAsync(msg string) *ReqRes { if err != nil { cli.StopForError(err) } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{res}}) + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{Echo: res}}) } func (cli *grpcClient) FlushAsync() *ReqRes { @@ -138,7 +138,7 @@ func (cli *grpcClient) FlushAsync() *ReqRes { if err != nil { cli.StopForError(err) } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{res}}) + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{Flush: res}}) } func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes { @@ -147,7 +147,7 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes { if err != nil { cli.StopForError(err) } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{res}}) + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}}) } func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes { @@ -156,7 +156,7 @@ func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes { if err != nil { cli.StopForError(err) } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{res}}) + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}}) } func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes { @@ -165,7 +165,7 @@ func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes { if err != nil { cli.StopForError(err) } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{res}}) + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}}) } func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes { @@ -174,7 +174,7 @@ func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes { if err != nil { cli.StopForError(err) } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{res}}) + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}}) } func (cli *grpcClient) QueryAsync(params types.RequestQuery) 
*ReqRes { @@ -183,7 +183,7 @@ func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes { if err != nil { cli.StopForError(err) } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{res}}) + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{Query: res}}) } func (cli *grpcClient) CommitAsync() *ReqRes { @@ -192,7 +192,7 @@ func (cli *grpcClient) CommitAsync() *ReqRes { if err != nil { cli.StopForError(err) } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{res}}) + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{Commit: res}}) } func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes { @@ -201,7 +201,7 @@ func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes { if err != nil { cli.StopForError(err) } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{res}}) + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{InitChain: res}}) } func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes { @@ -210,7 +210,7 @@ func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes { if err != nil { cli.StopForError(err) } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{res}}) + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}}) } func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes { @@ -219,7 +219,7 @@ func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes { if err != nil { cli.StopForError(err) } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{res}}) + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}}) } func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes { diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index cc3f9c45..8823f7be 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -394,7 +394,6 @@ func cmdConsole(cmd *cobra.Command, args []string) error { return err } } - return nil } func muxOnCommands(cmd *cobra.Command, pArgs []string) error { diff --git a/blockchain/reactor.go b/blockchain/reactor.go index bed082cd..847398b7 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -302,7 +302,7 @@ FOR_LOOP: firstParts := first.MakePartSet(types.BlockPartSizeBytes) firstPartsHeader := firstParts.Header() - firstID := types.BlockID{first.Hash(), firstPartsHeader} + firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader} // Finally, verify the first block using the second's commit // NOTE: we can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index ef205aa6..c720de7d 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -77,8 +77,6 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command { // Run forever select {} - - return nil }, } diff --git a/consensus/common_test.go b/consensus/common_test.go index e6e64c25..7e24d3ff 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -621,7 +621,6 @@ func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int { } } panic("didnt find peer in 
switches") - return -1 } //------------------------------------------------------------------------------- @@ -699,8 +698,7 @@ func (m *mockTicker) Chan() <-chan timeoutInfo { return m.c } -func (mockTicker) SetLogger(log.Logger) { -} +func (*mockTicker) SetLogger(log.Logger) {} //------------------------------------ diff --git a/consensus/reactor.go b/consensus/reactor.go index b92ae1f7..604e54b4 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -896,7 +896,7 @@ type PeerState struct { peer p2p.Peer logger log.Logger - mtx sync.Mutex `json:"-"` // NOTE: Modify below using setters, never directly. + mtx sync.Mutex // NOTE: Modify below using setters, never directly. PRS cstypes.PeerRoundState `json:"round_state"` // Exposed. Stats *peerStateStats `json:"stats"` // Exposed. } diff --git a/consensus/replay.go b/consensus/replay.go index 21fef6b2..5453c700 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -334,7 +334,7 @@ func (h *Handshaker) ReplayBlocks( } else if storeBlockHeight < appBlockHeight { // the app should never be ahead of the store (but this is under app's control) - return appHash, sm.ErrAppBlockHeightTooHigh{storeBlockHeight, appBlockHeight} + return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight} } else if storeBlockHeight < stateBlockHeight { // the state should never be ahead of the store (this is under tendermint's control) diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 3e92bad6..15246d22 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -103,7 +103,6 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error { } pb.count++ } - return nil } //------------------------------------------------ @@ -295,7 +294,6 @@ func (pb *playback) replayConsoleLoop() int { fmt.Println(pb.count) } } - return 0 } //-------------------------------------------------------------------------------- diff --git a/consensus/state.go b/consensus/state.go index c6c49d87..940318c0 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -909,7 +909,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) { } // Make proposal - propBlockId := types.BlockID{block.Hash(), blockParts.Header()} + propBlockId := types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()} proposal := types.NewProposal(height, round, cs.ValidRound, propBlockId) if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal); err == nil { @@ -1320,7 +1320,7 @@ func (cs *ConsensusState) finalizeCommit(height int64) { // Execute and commit the block, update and save the state, and update the mempool. // NOTE The block.AppHash wont reflect these txs until the next block. var err error - stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block) + stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}, block) if err != nil { cs.Logger.Error("Error on ApplyBlock. Did the application crash? 
Please restart tendermint", "err", err) err := cmn.Kill() @@ -1543,7 +1543,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, } cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort())) - cs.eventBus.PublishEventVote(types.EventDataVote{vote}) + cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}) cs.evsw.FireEvent(types.EventVote, vote) // if we can skip timeoutCommit and have all the votes now, @@ -1571,7 +1571,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, return } - cs.eventBus.PublishEventVote(types.EventDataVote{vote}) + cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}) cs.evsw.FireEvent(types.EventVote, vote) switch vote.Type { @@ -1683,7 +1683,7 @@ func (cs *ConsensusState) signVote(type_ types.SignedMsgType, hash []byte, heade Round: cs.Round, Timestamp: cs.voteTime(), Type: type_, - BlockID: types.BlockID{hash, header}, + BlockID: types.BlockID{Hash: hash, PartsHeader: header}, } err := cs.privValidator.SignVote(cs.state.ChainID, vote) return vote, err diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 4460cd3e..afb74162 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -64,7 +64,6 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali err := privVal.SignVote(chainID, vote) if err != nil { panic(fmt.Sprintf("Error signing vote: %v", err)) - return nil } return vote } diff --git a/crypto/encoding/amino/encode_test.go b/crypto/encoding/amino/encode_test.go index 95510306..d4e34945 100644 --- a/crypto/encoding/amino/encode_test.go +++ b/crypto/encoding/amino/encode_test.go @@ -47,6 +47,8 @@ func checkAminoJSON(t *testing.T, src interface{}, dst interface{}, isNil bool) require.Nil(t, err, "%+v", err) } +// ExamplePrintRegisteredTypes refers to unknown identifier: PrintRegisteredTypes +//nolint:govet func ExamplePrintRegisteredTypes() { cdc.PrintTypes(os.Stdout) // Output: | Type | Name | Prefix | Length | Notes | diff --git a/crypto/secp256k1/secp256k1_nocgo.go b/crypto/secp256k1/secp256k1_nocgo.go index cd1655a5..18782b37 100644 --- a/crypto/secp256k1/secp256k1_nocgo.go +++ b/crypto/secp256k1/secp256k1_nocgo.go @@ -52,8 +52,8 @@ func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sigStr []byte) bool { // that len(sigStr) == 64. 
func signatureFromBytes(sigStr []byte) *secp256k1.Signature { return &secp256k1.Signature{ - new(big.Int).SetBytes(sigStr[:32]), - new(big.Int).SetBytes(sigStr[32:64]), + R: new(big.Int).SetBytes(sigStr[:32]), + S: new(big.Int).SetBytes(sigStr[32:64]), } } diff --git a/libs/common/bit_array.go b/libs/common/bit_array.go index ebd6cc4a..2e6c255c 100644 --- a/libs/common/bit_array.go +++ b/libs/common/bit_array.go @@ -412,6 +412,6 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error { bA2.SetIndex(i, true) } } - *bA = *bA2 + *bA = *bA2 //nolint:govet return nil } diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go index 6b285476..5c389ee3 100644 --- a/p2p/conn/secret_connection_test.go +++ b/p2p/conn/secret_connection_test.go @@ -238,6 +238,10 @@ func TestSecretConnectionReadWrite(t *testing.T) { for { n, err := nodeSecretConn.Read(readBuffer) if err == io.EOF { + if err := nodeConn.PipeReader.Close(); err != nil { + t.Error(err) + return nil, err, true + } return nil, nil, false } else if err != nil { t.Errorf("Failed to read from nodeSecretConn: %v", err) @@ -245,11 +249,6 @@ func TestSecretConnectionReadWrite(t *testing.T) { } *nodeReads = append(*nodeReads, string(readBuffer[:n])) } - if err := nodeConn.PipeReader.Close(); err != nil { - t.Error(err) - return nil, err, true - } - return nil, nil, false }, ) assert.True(t, ok, "Unexpected task abortion") diff --git a/privval/file.go b/privval/file.go index d27d7a78..8eb38e80 100644 --- a/privval/file.go +++ b/privval/file.go @@ -31,7 +31,6 @@ func voteToStep(vote *types.Vote) int8 { return stepPrecommit default: panic("Unknown vote type") - return 0 } } diff --git a/privval/server.go b/privval/server.go index 8b22c69e..cce65952 100644 --- a/privval/server.go +++ b/privval/server.go @@ -67,7 +67,7 @@ func DialTCPFn(addr string, connTimeout time.Duration, privKey ed25519.PrivKeyEd // DialUnixFn dials the given unix socket. 
func DialUnixFn(addr string) Dialer { return func() (net.Conn, error) { - unixAddr := &net.UnixAddr{addr, "unix"} + unixAddr := &net.UnixAddr{Name: addr, Net: "unix"} return net.DialUnix("unix", nil, unixAddr) } } diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index e63d22e0..2ab62a42 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -23,7 +23,7 @@ var ( ) func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - return &ctypes.ResultABCIInfo{a.App.Info(proxy.RequestInfo)}, nil + return &ctypes.ResultABCIInfo{Response: a.App.Info(proxy.RequestInfo)}, nil } func (a ABCIApp) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { @@ -37,7 +37,7 @@ func (a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts clien Height: opts.Height, Prove: opts.Prove, }) - return &ctypes.ResultABCIQuery{q}, nil + return &ctypes.ResultABCIQuery{Response: q}, nil } // NOTE: Caller should call a.App.Commit() separately, @@ -60,7 +60,7 @@ func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error if !c.IsErr() { go func() { a.App.DeliverTx(tx) }() // nolint: errcheck } - return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil + return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil } func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { @@ -69,7 +69,7 @@ func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) if !c.IsErr() { go func() { a.App.DeliverTx(tx) }() // nolint: errcheck } - return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil + return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil } // ABCIMock will send all abci related request to the named app, @@ -87,7 +87,7 @@ func (m ABCIMock) ABCIInfo() (*ctypes.ResultABCIInfo, error) { if err != nil { return nil, err } - return &ctypes.ResultABCIInfo{res.(abci.ResponseInfo)}, nil + return &ctypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil } func (m ABCIMock) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { @@ -100,7 +100,7 @@ func (m ABCIMock) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts clie return nil, err } resQuery := res.(abci.ResponseQuery) - return &ctypes.ResultABCIQuery{resQuery}, nil + return &ctypes.ResultABCIQuery{Response: resQuery}, nil } func (m ABCIMock) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 8ae88f43..d3dc90b8 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -42,9 +42,9 @@ func TestCorsEnabled(t *testing.T) { req.Header.Set("Origin", origin) c := &http.Client{} resp, err := c.Do(req) + require.Nil(t, err, "%+v", err) defer resp.Body.Close() - require.Nil(t, err, "%+v", err) assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) } diff --git a/rpc/core/abci.go b/rpc/core/abci.go index c9d516f9..aa6089b6 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -63,7 +63,7 @@ func ABCIQuery(path string, data cmn.HexBytes, height int64, prove bool) (*ctype return nil, err } logger.Info("ABCIQuery", "path", path, "data", data, "result", resQuery) - return &ctypes.ResultABCIQuery{*resQuery}, nil + return &ctypes.ResultABCIQuery{Response: *resQuery}, nil } // Get some info about the application. 
@@ -101,5 +101,5 @@ func ABCIInfo() (*ctypes.ResultABCIInfo, error) { if err != nil { return nil, err } - return &ctypes.ResultABCIInfo{*resInfo}, nil + return &ctypes.ResultABCIInfo{Response: *resInfo}, nil } diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index ee4009e5..906aea7b 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -85,7 +85,9 @@ func BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, e blockMetas = append(blockMetas, blockMeta) } - return &ctypes.ResultBlockchainInfo{blockStore.Height(), blockMetas}, nil + return &ctypes.ResultBlockchainInfo{ + LastHeight: blockStore.Height(), + BlockMetas: blockMetas}, nil } // error if either min or max are negative or min < max @@ -233,7 +235,7 @@ func Block(heightPtr *int64) (*ctypes.ResultBlock, error) { blockMeta := blockStore.LoadBlockMeta(height) block := blockStore.LoadBlock(height) - return &ctypes.ResultBlock{blockMeta, block}, nil + return &ctypes.ResultBlock{BlockMeta: blockMeta, Block: block}, nil } // Get block commit at a given height. diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 9968a1b2..81694b7e 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -60,7 +60,9 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { if err != nil { return nil, err } - return &ctypes.ResultValidators{height, validators.Validators}, nil + return &ctypes.ResultValidators{ + BlockHeight: height, + Validators: validators.Validators}, nil } // DumpConsensusState dumps consensus state. @@ -223,7 +225,9 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { if err != nil { return nil, err } - return &ctypes.ResultDumpConsensusState{roundState, peerStates}, nil + return &ctypes.ResultDumpConsensusState{ + RoundState: roundState, + Peers: peerStates}, nil } // ConsensusState returns a concise summary of the consensus state. @@ -276,7 +280,7 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { func ConsensusState() (*ctypes.ResultConsensusState, error) { // Get self round state. bz, err := consensusState.GetRoundStateSimpleJSON() - return &ctypes.ResultConsensusState{bz}, err + return &ctypes.ResultConsensusState{RoundState: bz}, err } // Get the consensus parameters at the given block height. 
@@ -327,5 +331,7 @@ func ConsensusParams(heightPtr *int64) (*ctypes.ResultConsensusParams, error) { if err != nil { return nil, err } - return &ctypes.ResultConsensusParams{BlockHeight: height, ConsensusParams: consensusparams}, nil + return &ctypes.ResultConsensusParams{ + BlockHeight: height, + ConsensusParams: consensusparams}, nil } diff --git a/rpc/core/events.go b/rpc/core/events.go index e4fd2041..21979f01 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -109,7 +109,7 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscri go func() { for event := range ch { - tmResult := &ctypes.ResultEvent{query, event.(tmtypes.TMEventData)} + tmResult := &ctypes.ResultEvent{Query: query, Data: event.(tmtypes.TMEventData)} wsCtx.TryWriteRPCResponse(rpctypes.NewRPCSuccessResponse(wsCtx.Codec(), rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", wsCtx.Request.ID)), tmResult)) } }() diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index ff6b029c..d6dcc93e 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -275,7 +275,7 @@ func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { limit = validatePerPage(limit) txs := mempool.ReapMaxTxs(limit) - return &ctypes.ResultUnconfirmedTxs{len(txs), txs}, nil + return &ctypes.ResultUnconfirmedTxs{N: len(txs), Txs: txs}, nil } // Get number of unconfirmed transactions. diff --git a/rpc/core/net.go b/rpc/core/net.go index 4d95c2ef..56e9624d 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -77,7 +77,7 @@ func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { if err != nil { return &ctypes.ResultDialSeeds{}, err } - return &ctypes.ResultDialSeeds{"Dialing seeds in progress. See /net_info for details"}, nil + return &ctypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil } func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { @@ -90,7 +90,7 @@ func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, if err != nil { return &ctypes.ResultDialPeers{}, err } - return &ctypes.ResultDialPeers{"Dialing peers in progress. See /net_info for details"}, nil + return &ctypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil } // Get genesis file. @@ -136,5 +136,5 @@ func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, // } // ``` func Genesis() (*ctypes.ResultGenesis, error) { - return &ctypes.ResultGenesis{genDoc}, nil + return &ctypes.ResultGenesis{Genesis: genDoc}, nil } diff --git a/state/execution.go b/state/execution.go index 8ab95839..c4b94fb9 100644 --- a/state/execution.go +++ b/state/execution.go @@ -446,7 +446,7 @@ func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *ty }) for i, tx := range block.Data.Txs { - eventBus.PublishEventTx(types.EventDataTx{types.TxResult{ + eventBus.PublishEventTx(types.EventDataTx{TxResult: types.TxResult{ Height: block.Height, Index: uint32(i), Tx: tx, diff --git a/tools/tm-monitor/monitor/node.go b/tools/tm-monitor/monitor/node.go index 6f705145..c16f0609 100644 --- a/tools/tm-monitor/monitor/node.go +++ b/tools/tm-monitor/monitor/node.go @@ -21,8 +21,8 @@ const maxRestarts = 25 type Node struct { rpcAddr string - IsValidator bool `json:"is_validator"` // validator or non-validator? - pubKey crypto.PubKey `json:"pub_key"` + IsValidator bool `json:"is_validator"` // validator or non-validator? 
+ pubKey crypto.PubKey Name string `json:"name"` Online bool `json:"online"` From b089587b42e7d4a6f7e78971fb4bfadcadb798d0 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Tue, 12 Feb 2019 05:54:12 +0100 Subject: [PATCH 12/16] make gosec linter pass (#3294) * not related to linter: remove obsolete constants: - `Insecure` and `Secure` and type `Security` are not used anywhere * not related to linter: update example - NewInsecure was deleted; change example to NewRemoteDB * address: Binds to all network interfaces (gosec): - bind to localhost instead of 0.0.0.0 - regenerate test key and cert for this purpose (was valid for ::) and otherwise we would see: transport: authentication handshake failed: x509: certificate is valid for ::, not 127.0.0.1\" (used https://github.com/google/keytransparency/blob/master/scripts/gen_server_keys.sh to regenerate certs) * use sha256 in tests instead of md5; time difference is negligible * nolint usage of math/rand in test and add comment on its import - crypto/rand is slower and we do not need sth more secure in tests * enable linter in circle-ci * another nolint math/rand in test * replace another occurrence of md5 * consistent comment about importing math/rand --- .golangci.yml | 1 - crypto/merkle/proof_key_path_test.go | 4 ++- libs/db/remotedb/doc.go | 2 +- libs/db/remotedb/grpcdb/client.go | 8 ----- libs/db/remotedb/remotedb_test.go | 2 +- libs/db/remotedb/test.crt | 40 ++++++++++------------ libs/db/remotedb/test.key | 50 ++++++++++++++-------------- mempool/mempool_test.go | 4 +-- tools/tm-bench/transacter.go | 13 +++++--- tools/tm-bench/transacter_test.go | 6 ++-- types/block_test.go | 6 ++-- 11 files changed, 64 insertions(+), 72 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 0bea5e3e..45cabe20 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -19,7 +19,6 @@ linters: - lll - gochecknoglobals - gocritic - - gosec - gochecknoinits - scopelint - stylecheck diff --git a/crypto/merkle/proof_key_path_test.go b/crypto/merkle/proof_key_path_test.go index 48fda303..34c918f4 100644 --- a/crypto/merkle/proof_key_path_test.go +++ b/crypto/merkle/proof_key_path_test.go @@ -1,6 +1,8 @@ package merkle import ( + // it is ok to use math/rand here: we do not need a cryptographically secure random + // number generator here and we can run the tests a bit faster "math/rand" "testing" @@ -24,7 +26,7 @@ func TestKeyPath(t *testing.T) { keys[i][j] = alphanum[rand.Intn(len(alphanum))] } case KeyEncodingHex: - rand.Read(keys[i]) + rand.Read(keys[i]) //nolint: gosec default: panic("Unexpected encoding") } diff --git a/libs/db/remotedb/doc.go b/libs/db/remotedb/doc.go index 07c95a56..93d9c8a2 100644 --- a/libs/db/remotedb/doc.go +++ b/libs/db/remotedb/doc.go @@ -11,7 +11,7 @@ remotedb's RemoteDB implements db.DB so can be used normally like other databases. One just has to explicitly connect to the remote database with a client setup such as: - client, err := remotedb.NewInsecure(addr) + client, err := remotedb.NewRemoteDB(addr, cert) // Make sure to invoke InitRemote! 
if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil { log.Fatalf("Failed to initialize the remote db") diff --git a/libs/db/remotedb/grpcdb/client.go b/libs/db/remotedb/grpcdb/client.go index e11b7839..b3c69ff2 100644 --- a/libs/db/remotedb/grpcdb/client.go +++ b/libs/db/remotedb/grpcdb/client.go @@ -7,14 +7,6 @@ import ( protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto" ) -// Security defines how the client will talk to the gRPC server. -type Security uint - -const ( - Insecure Security = iota - Secure -) - // NewClient creates a gRPC client connected to the bound gRPC server at serverAddr. // Use kind to set the level of security to either Secure or Insecure. func NewClient(serverAddr, serverCert string) (protodb.DBClient, error) { diff --git a/libs/db/remotedb/remotedb_test.go b/libs/db/remotedb/remotedb_test.go index 0e731997..f5c8e2cb 100644 --- a/libs/db/remotedb/remotedb_test.go +++ b/libs/db/remotedb/remotedb_test.go @@ -14,7 +14,7 @@ import ( func TestRemoteDB(t *testing.T) { cert := "test.crt" key := "test.key" - ln, err := net.Listen("tcp", "0.0.0.0:0") + ln, err := net.Listen("tcp", "localhost:0") require.Nil(t, err, "expecting a port to have been assigned on which we can listen") srv, err := grpcdb.NewServer(cert, key) require.Nil(t, err) diff --git a/libs/db/remotedb/test.crt b/libs/db/remotedb/test.crt index bdc8a0f2..06ffec1d 100644 --- a/libs/db/remotedb/test.crt +++ b/libs/db/remotedb/test.crt @@ -1,25 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIEQTCCAimgAwIBAgIRANqF1HD19i/uvQ3n62TAKTwwDQYJKoZIhvcNAQELBQAw -GTEXMBUGA1UEAxMOdGVuZGVybWludC5jb20wHhcNMTgwNzAyMDMwNzMyWhcNMjAw -MTAyMDMwNzMwWjANMQswCQYDVQQDEwI6OjCCASIwDQYJKoZIhvcNAQEBBQADggEP -ADCCAQoCggEBAOuWUMCSzYJmvKU1vsouDTe7OxnPWO3oV0FjSH8vKYoi2zpZQX35 -dQDPtLDF2/v/ANZJ5pzMJR8yMMtEQ4tWxKuGzJw1ZgTgHtASPbj/M5fDnDO7Hqg4 -D09eLTkZAUfiBf6BzDyQIHn22CUexhaS70TbIT9AOAoOsGXMZz9d+iImKIm+gbzf -pR52LNbBGesHWGjwIuGF4InstIMsKSwGv2DctzhWI+i/m5Goi3rd1V8z/lzUbsf1 -0uXqQcSfTyv3ee6YiCWj2W8vcdc5H+B6KzSlGjAR4sRcHTHOQJYO9BgA9evQ3qsJ -Pp00iez13RdheJWPtbfUqQy4gdpu8HFeZx8CAwEAAaOBjzCBjDAOBgNVHQ8BAf8E -BAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRc -XBo+bJILrLcJiGkTWeMPpXb1TDAfBgNVHSMEGDAWgBQqk1Xu65Ww7EBCROw4KLGw -KuToaDAbBgNVHREEFDAShxAAAAAAAAAAAAAAAAAAAAAAMA0GCSqGSIb3DQEBCwUA -A4ICAQAbGsIMhL8clczNmhGl9xZhmyNz6FbLq6g163x9LTgfvwHPt+7urthtd++O -uy4Ut8zFurh/yk7eooPlzf8jO7QUJBAFVy4vj8IcsvpWbFa7cuEOIulbjIzyAm/v -lgy7vUQ6xrWn8x8O9K1ww9z7wugwCyl22BD0wSHZKclJz++AwpL6vUVOD76IIuJO -+S6bE6z26/0ndpundh2AkA++2eIleD6ygnTeTl0PWu6aGoCggBmos50f8KgYHZF/ -OZVef203kDls9xCaOiMzaU91VsgLqq/gNcT+2cBd5r3IZTY3C8Rve6EEHS+/4zxf -PKlmiLN7lU9GFZogKecYzY+zPT7OArY7OVFnGTo4qdhdmxnXzHsI+anMCjxLOgEJ -381hyplQGPQOouEupCBxFcwa7oMYoGu20+1nLWYEqFcIXCeyH+s77MyteJSsseqL -xivG5PT+jKJn9hrnFb39bBmht9Vsa+Th6vk953zi5wCSe1j2wXsxFaENDq6BQZOK -f86Kp86M2elYnv3lJ3j2DE2ZTMpw+PA5ThYUnB+HVqYeeB2Y3ErRS8P1FOp1LBE8 -+eTz7yXQO5OM2wdYhNNL1zDri/41fHXi9b6337PZVqc39GM+N74x/O4Q7xEBiWgQ -T0dT8SNwf55kv63MeZh63ImxFV0FNRkCteYLcJMle3ohIY4zyQ== +MIIDAjCCAeqgAwIBAgIJAOGCVedOwRbOMA0GCSqGSIb3DQEBBQUAMCExCzAJBgNV +BAYTAlVTMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTkwMjExMTU0NjQ5WhcNMjAw +MjExMTU0NjQ5WjAhMQswCQYDVQQGEwJVUzESMBAGA1UEAwwJbG9jYWxob3N0MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA60S/fNUWoHm1PYI/yrlnZNtr +dRqDORHe0hPwl/lttLz7+a7HzQZFnpiXnuxbDJtpIq/h1vhAl0sFy86Ip26LhbWc +GjxJL24tVwiOwqYRzTPZ/rK3JYuNcIvcztXjMqdzPrHSZy5YZgrQB6yhTiqpBc4D +h/XgWjEt4DhpHwf/zuIK9XkJw0IaTWjFmoyKRoWW3q4bHzoKNxS9bXP117Tz7tn0 
+AdsQCjt1GKcIROkcOGUHqByINJ2XlBkb7SQPjQVBLDVJKdRDUt+yHkkdbn97UDhq +HRTCt5UELWs/53Gj1ffNuhjECOVjG1HkZweLgZjJRQYe8X2OOLNOyfVY1KsDnQID +AQABoz0wOzAMBgNVHRMEBTADAQH/MCsGA1UdEQQkMCKCCWxvY2FsaG9zdIIJbG9j +YWxob3N0hwQAAAAAhwR/AAABMA0GCSqGSIb3DQEBBQUAA4IBAQCe2A5gDc3jiZwT +a5TJrc2J2KouqxB/PCddw5VY8jPsZJfsr9gxHi+Xa5g8p3oqmEOIlqM5BVhrZRUG +RWHDmL+bCsuzMoA/vGHtHmUIwLeZQLWgT3kv12Dc8M9flNNjmXWxdMR9lOMwcL83 +F0CdElxSmaEbNvCIJBDetJJ7vMCqS2lnTLWurbH4ZGeGwvjzNgpgGCKwbyK/gU+j +UXiTQbVvPQ3WWACDnfH6rg0TpxU9jOBkd+4/9tUrBG7UclQBfGULk3sObLO9kx4N +8RxJmtp8jljIXVPX3udExI05pz039pAgvaeZWtP17QSbYcKF1jFtKo6ckrv2GKXX +M5OXGXdw -----END CERTIFICATE----- diff --git a/libs/db/remotedb/test.key b/libs/db/remotedb/test.key index 14d28558..e1adb3e1 100644 --- a/libs/db/remotedb/test.key +++ b/libs/db/remotedb/test.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEpgIBAAKCAQEA65ZQwJLNgma8pTW+yi4NN7s7Gc9Y7ehXQWNIfy8piiLbOllB -ffl1AM+0sMXb+/8A1knmnMwlHzIwy0RDi1bEq4bMnDVmBOAe0BI9uP8zl8OcM7se -qDgPT14tORkBR+IF/oHMPJAgefbYJR7GFpLvRNshP0A4Cg6wZcxnP136IiYoib6B -vN+lHnYs1sEZ6wdYaPAi4YXgiey0gywpLAa/YNy3OFYj6L+bkaiLet3VXzP+XNRu -x/XS5epBxJ9PK/d57piIJaPZby9x1zkf4HorNKUaMBHixFwdMc5Alg70GAD169De -qwk+nTSJ7PXdF2F4lY+1t9SpDLiB2m7wcV5nHwIDAQABAoIBAQCB2/ilPgaUE8d2 -ldqWHa5hgw4/2uCdO04ll/GVUczm/PG1BxAnvYL2MIfcTSRGkrjGZjP9SDZKLONi -mD1XKDv+hK5yiKi0lUnGzddCC0JILKYEieeLOGOQD0yERblEA13kfW20EIomUJ+y -TnVIajQD03pPIDoDqTco1fQvpMDFYw5Q//UhH7VBC261GO1akvhT2Gqdb4aKLaYQ -iDW9IEButL5cRKIJuRxToB/JbmPVEF7xIZtm0sf9dtYVOlBQLeID0uHXgaci0enc -de6GMajmj7NFqc36ypb+Ct18fqEwQBYD+TSQdKs7/lMsAXwRjd5HW4RbYiMZyYnf -Dxgh7QVBAoGBAP9aLLIUcIG7+gk1x7xd+8wRhfo+dhsungeCluSigI9AsfDr6dpR -G9/0lEJH56noZZKQueACTmj7shmRB40xFFLc8w0IDRZCnofsl+Z15k9K84uFPA3W -hdZH9nMieU/mRKdcUYK7pHGqbicHTaJQ5ydZ+xb2E+zYQHOzYpQacHv/AoGBAOwv -TjDZSiassnAPYmmfcHtkUF4gf7PTpiZfH0hXHGAb0mJX4cXAoktAeDeHSi2tz3LW -dAc0ReP8Pdf3uSNv7wkJ1KpNRxAhU5bhnDFmjRc7gMZknVOU+az2M+4yGOn/SOiJ -I6uMHgQDS/VsI+N583n6gbGxVHbQfr9TOc4bLpThAoGBAKin0JmWMnEdzRnEMbZS -hPrWIB2Wn794XNws/qjoQ+1aF60+xGhz5etXyYy1nWd1nZDekkZIf62LgKiuR8ST -xA6u7MGQrcQkID06oWGQQZvhr1ZZm76wEBnl0ftdq66AMpwvt46XjReeL78LbdVl -hidRoSwbQDHQ61EADH4xsFXVAoGBAISXqhXSZsZ/fU1b1avmTod3MYcmR4r07vnr -vOwnu05ZUCrVm3IhSvtkHhlOYl5yjVuy+UByICp1mWJ9N/qlBFTWqAVTjOmJTBwQ -XFd/cwXv6cN3CLu7js+DCHRYu5PiNVQWaWgNKWynTSViqGM0O3PnJphTLU/mjMFs -P69toyEBAoGBALh9YsqxHdYdS5WK9chzDfGlaTQ79jwN+gEzQuP1ooLF0JkMgh5W -//2C6kCrgBsGTm1gfHAjEfC04ZDZLFbKLm56YVKUGL6JJNapm6e5kfiZGjbRKWAg -ViCeRS2qQnVbH74GfHyimeTPDI9cJMiJfDDTPbfosqWSsPEcg2jfsySJ +MIIEogIBAAKCAQEA60S/fNUWoHm1PYI/yrlnZNtrdRqDORHe0hPwl/lttLz7+a7H +zQZFnpiXnuxbDJtpIq/h1vhAl0sFy86Ip26LhbWcGjxJL24tVwiOwqYRzTPZ/rK3 +JYuNcIvcztXjMqdzPrHSZy5YZgrQB6yhTiqpBc4Dh/XgWjEt4DhpHwf/zuIK9XkJ +w0IaTWjFmoyKRoWW3q4bHzoKNxS9bXP117Tz7tn0AdsQCjt1GKcIROkcOGUHqByI +NJ2XlBkb7SQPjQVBLDVJKdRDUt+yHkkdbn97UDhqHRTCt5UELWs/53Gj1ffNuhjE +COVjG1HkZweLgZjJRQYe8X2OOLNOyfVY1KsDnQIDAQABAoIBAAb5n8+8pZIWaags +L2X8PzN/Sd1L7u4HOJrz2mM3EuiT3ciWRPgwImpETeJ5UW27Qc+0dTahX5DcuYxE +UErefSZ2ru0cMnNEifWVnF3q/IYf7mudss5bJ9NZYi+Dqdu7mTAXp4xFlHtaALbp +iFK/8wjoBbTHNmKWKK0IHx27Z/sjK+7QnoKij+rRzvhmNyN2r3dT7EO4VePriesr +zyVaGexNPFhtd1HLJLQ5GqRAidtLM4x1ubvp3NLTCvvoQKKYFOg7WqKycZ2VllOg +ApcpZb/kB/sNTacLvum5HgMNWuWwgREISuQJR+esz/5WaSTQ04L2+vMVomGM18X+ +9n4KYwECgYEA/Usajzl3tWv1IIairSk9Md7Z2sbaPVBNKv4IDJy3mLwt+2VN2mqo +fpeV5rBaFNWzJR0M0JwLbdlsvSfXgVFkUePg1UiJyFqOKmMO8Bd/nxV9NAewVg1D +KXQLsfrojBfka7HtFmfk/GA2swEMCGzUcY23bwah1JUTLhvbl19GNMECgYEA7chW +Ip/IvYBiaaD/qgklwJE8QoAVzi9zqlI1MOJJNf1r/BTeZ2R8oXlRk8PVxFglliuA +vMgwCkfuqxA8irIdHReLzqcLddPtaHo6R8zKP2cpYBo61C3CPzEAucasaOXQFpjs 
+DPnp4QFeboNPgiEGLVGHFvD5TwZpideBpWTwud0CgYEAy04MDGfJEQKNJ0VJr4mJ +R80iubqgk1QwDFEILu9fYiWxFrbSTX0Mr0eGlzp3o39/okt17L9DYTGCWTVwgajN +x/kLjsYBaaJdt+H4rHeABTWfYDLHs9pDTTOK65mELGZE/rg6n6BWqMelP/qYKO8J +efeRA3mkTVg2o+zSTea4GEECgYEA3DB4EvgD2/fXKhl8puhxnTDgrHQPvS8T3NTj +jLD/Oo/CP1zT1sqm3qCJelwOyBMYO0dtn2OBmQOjb6VJauYlL5tuS59EbYgigG0v +Ku3pG21cUzH26CS3i+zEz0O6xCiL2WEitaF3gnTSDWRrbAVIww6MGiJru1IkyRBX +beFbScECf1n00W9qrXnqsWefk73ucggfV0gQQmDnauMA9J7B96+MvGprE54Tx9vl +SBodgvJsCod9Y9Q7QsMcXb4CuEgTgWKDBp5cA/KUOQmK5buOrysosLnnm12LaHiF +O7IIh8Cmb9TbdldgW+8ndZ4EQ3lfIS0zN3/7rWD34bs19JDYkRY= -----END RSA PRIVATE KEY----- diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index 9d21e734..dd624262 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -1,8 +1,8 @@ package mempool import ( - "crypto/md5" "crypto/rand" + "crypto/sha256" "encoding/binary" "fmt" "io/ioutil" @@ -451,7 +451,7 @@ func TestMempoolMaxMsgSize(t *testing.T) { } func checksumIt(data []byte) string { - h := md5.New() + h := sha256.New() h.Write(data) return fmt.Sprintf("%x", h.Sum(nil)) } diff --git a/tools/tm-bench/transacter.go b/tools/tm-bench/transacter.go index c20aa5b5..3347fdfb 100644 --- a/tools/tm-bench/transacter.go +++ b/tools/tm-bench/transacter.go @@ -1,11 +1,14 @@ package main import ( - "crypto/md5" + "crypto/sha256" "encoding/binary" "encoding/hex" "encoding/json" "fmt" + + // it is ok to use math/rand here: we do not need a cryptographically secure random + // number generator here and we can run the tests a bit faster "math/rand" "net" "net/http" @@ -154,12 +157,12 @@ func (t *transacter) sendLoop(connIndex int) { }() // hash of the host name is a part of each tx - var hostnameHash [md5.Size]byte + var hostnameHash [sha256.Size]byte hostname, err := os.Hostname() if err != nil { hostname = "127.0.0.1" } - hostnameHash = md5.Sum([]byte(hostname)) + hostnameHash = sha256.Sum256([]byte(hostname)) // each transaction embeds connection index, tx number and hash of the hostname // we update the tx number between successive txs tx := generateTx(connIndex, txNumber, t.Size, hostnameHash) @@ -257,7 +260,7 @@ func connect(host string) (*websocket.Conn, *http.Response, error) { return websocket.DefaultDialer.Dial(u.String(), nil) } -func generateTx(connIndex int, txNumber int, txSize int, hostnameHash [md5.Size]byte) []byte { +func generateTx(connIndex int, txNumber int, txSize int, hostnameHash [sha256.Size]byte) []byte { tx := make([]byte, txSize) binary.PutUvarint(tx[:8], uint64(connIndex)) @@ -266,7 +269,7 @@ func generateTx(connIndex int, txNumber int, txSize int, hostnameHash [md5.Size] binary.PutUvarint(tx[32:40], uint64(time.Now().Unix())) // 40-* random data - if _, err := rand.Read(tx[40:]); err != nil { + if _, err := rand.Read(tx[40:]); err != nil { //nolint: gosec panic(errors.Wrap(err, "failed to read random bytes")) } diff --git a/tools/tm-bench/transacter_test.go b/tools/tm-bench/transacter_test.go index 03f30460..7379da07 100644 --- a/tools/tm-bench/transacter_test.go +++ b/tools/tm-bench/transacter_test.go @@ -1,7 +1,7 @@ package main import ( - "crypto/md5" + "crypto/sha256" "encoding/hex" "encoding/json" "fmt" @@ -28,7 +28,7 @@ func TestGenerateTxUpdateTxConsistentency(t *testing.T) { } for tcIndex, tc := range cases { - hostnameHash := md5.Sum([]byte(tc.hostname)) + hostnameHash := sha256.Sum256([]byte(tc.hostname)) // Tx generated from update tx. 
This is defined outside of the loop, since we have // to a have something initially to update updatedTx := generateTx(tc.connIndex, tc.startingTxNumber, tc.txSize, hostnameHash) @@ -69,7 +69,7 @@ func BenchmarkIterationOfSendLoop(b *testing.B) { // something too far away to matter endTime := now.Add(time.Hour) txNumber := 0 - hostnameHash := md5.Sum([]byte{0}) + hostnameHash := sha256.Sum256([]byte{0}) tx := generateTx(connIndex, txNumber, txSize, hostnameHash) txHex := make([]byte, len(tx)*2) hex.Encode(txHex, tx) diff --git a/types/block_test.go b/types/block_test.go index 31e7983f..75b5c19d 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -1,6 +1,8 @@ package types import ( + // it is ok to use math/rand here: we do not need a cryptographically secure random + // number generator here and we can run the tests a bit faster "crypto/rand" "math" "os" @@ -162,8 +164,8 @@ func TestBlockString(t *testing.T) { func makeBlockIDRandom() BlockID { blockHash := make([]byte, tmhash.Size) partSetHash := make([]byte, tmhash.Size) - rand.Read(blockHash) - rand.Read(partSetHash) + rand.Read(blockHash) //nolint: gosec + rand.Read(partSetHash) //nolint: gosec blockPartsHeader := PartSetHeader{123, partSetHash} return BlockID{blockHash, blockPartsHeader} } From 8a9eecce7fd00b8510186aa03290a8c4e252d4c4 Mon Sep 17 00:00:00 2001 From: Anca Zamfir Date: Tue, 12 Feb 2019 06:02:44 +0100 Subject: [PATCH 13/16] test blockExec does not panic if all vals removed (#3241) Fix for #3224 Also address #2084 --- blockchain/reactor.go | 4 +--- state/execution_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/blockchain/reactor.go b/blockchain/reactor.go index 847398b7..4a3f9049 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -8,7 +8,6 @@ import ( amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" sm "github.com/tendermint/tendermint/state" @@ -338,8 +337,7 @@ FOR_LOOP: state, err = bcR.blockExec.ApplyBlock(state, firstID, first) if err != nil { // TODO This is bad, are we zombie? 
- cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v", - first.Height, first.Hash(), err)) + panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) } blocksSynced++ diff --git a/state/execution_test.go b/state/execution_test.go index 8cd90f96..143faa1a 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -354,6 +354,33 @@ func TestEndBlockValidatorUpdates(t *testing.T) { } } +// TestEndBlockValidatorUpdatesResultingInEmptySet checks that processing validator updates that +// would result in empty set causes no panic, an error is raised and NextValidators is not updated +func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { + app := &testApp{} + cc := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(cc) + err := proxyApp.Start() + require.Nil(t, err) + defer proxyApp.Stop() + + state, stateDB := state(1, 1) + blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), MockMempool{}, MockEvidencePool{}) + + block := makeBlock(state, 1) + blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + + // Remove the only validator + app.ValidatorUpdates = []abci.ValidatorUpdate{ + {PubKey: types.TM2PB.PubKey(state.Validators.Validators[0].PubKey), Power: 0}, + } + + assert.NotPanics(t, func() { state, err = blockExec.ApplyBlock(state, blockID, block) }) + assert.NotNil(t, err) + assert.NotEmpty(t, state.NextValidators.Validators) + +} + //---------------------------------------------------------------------------- // make some bogus txs From 08dabab024b88d6ed9a2416a6e8b9a2604e6d59b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Feb 2019 06:04:23 +0100 Subject: [PATCH 14/16] types: validator set update tests (#3284) * types: validator set update tests * docs: abci val updates must not include duplicates --- docs/spec/abci/apps.md | 4 + types/validator_set_test.go | 217 ++++++++++++++++++++++++++---------- 2 files changed, 163 insertions(+), 58 deletions(-) diff --git a/docs/spec/abci/apps.md b/docs/spec/abci/apps.md index a3b34240..caffaaea 100644 --- a/docs/spec/abci/apps.md +++ b/docs/spec/abci/apps.md @@ -171,6 +171,10 @@ Note that the maximum total power of the validator set is bounded by they do not make changes to the validator set that cause it to exceed this limit. +Additionally, applications must ensure that a single set of updates does not contain any duplicates - +a given public key can only appear in an update once. If an update includes +duplicates, the block execution will fail irrecoverably. + ### InitChain ResponseInitChain can return a list of validators. 
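
To make the duplicate-update constraint documented in `apps.md` above concrete, a minimal application-side guard might look like the sketch below. It assumes the `abci.ValidatorUpdate` and `abci.PubKey` types from `github.com/tendermint/tendermint/abci/types` as used elsewhere in this patch series; the helper name `validateNoDuplicates` and the sample key bytes are hypothetical, not part of Tendermint.

```go
package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// validateNoDuplicates is a hypothetical application-side check: it rejects a
// set of validator updates in which the same public key appears more than
// once, mirroring the constraint added to the ABCI spec above. Keys are
// compared via the proto String() form of the PubKey message.
func validateNoDuplicates(updates []abci.ValidatorUpdate) error {
	seen := make(map[string]struct{}, len(updates))
	for _, u := range updates {
		k := u.PubKey.String()
		if _, ok := seen[k]; ok {
			return fmt.Errorf("duplicate validator update for pubkey %s", k)
		}
		seen[k] = struct{}{}
	}
	return nil
}

func main() {
	updates := []abci.ValidatorUpdate{
		{PubKey: abci.PubKey{Type: "ed25519", Data: []byte{0x01}}, Power: 10},
		{PubKey: abci.PubKey{Type: "ed25519", Data: []byte{0x01}}, Power: 20}, // duplicate key
	}
	fmt.Println(validateNoDuplicates(updates)) // prints a duplicate error
}
```

An application would run a check like this before returning updates from EndBlock, since (per the spec change above) Tendermint treats duplicate entries as an irrecoverable block-execution failure.
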
diff --git a/types/validator_set_test.go b/types/validator_set_test.go index cdc04d45..d9558d13 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -658,88 +658,189 @@ type testVal struct { power int64 } +func testValSet(nVals int, power int64) []testVal { + vals := make([]testVal, nVals) + for i := 0; i < nVals; i++ { + vals[i] = testVal{fmt.Sprintf("v%d", i+1), power} + } + return vals +} + +type valSetErrTestCase struct { + startVals []testVal + updateVals []testVal +} + +func executeValSetErrTestCase(t *testing.T, idx int, tt valSetErrTestCase) { + // create a new set and apply updates, keeping copies for the checks + valSet := createNewValidatorSet(tt.startVals) + valSetCopy := valSet.Copy() + valList := createNewValidatorList(tt.updateVals) + valListCopy := validatorListCopy(valList) + err := valSet.UpdateWithChangeSet(valList) + + // for errors check the validator set has not been changed + assert.Error(t, err, "test %d", idx) + assert.Equal(t, valSet, valSetCopy, "test %v", idx) + + // check the parameter list has not changed + assert.Equal(t, valList, valListCopy, "test %v", idx) +} + +func TestValSetUpdatesDuplicateEntries(t *testing.T) { + testCases := []valSetErrTestCase{ + // Duplicate entries in changes + { // first entry is duplicated change + testValSet(2, 10), + []testVal{{"v1", 11}, {"v1", 22}}, + }, + { // second entry is duplicated change + testValSet(2, 10), + []testVal{{"v2", 11}, {"v2", 22}}, + }, + { // change duplicates are separated by a valid change + testValSet(2, 10), + []testVal{{"v1", 11}, {"v2", 22}, {"v1", 12}}, + }, + { // change duplicates are separated by a valid change + testValSet(3, 10), + []testVal{{"v1", 11}, {"v3", 22}, {"v1", 12}}, + }, + + // Duplicate entries in remove + { // first entry is duplicated remove + testValSet(2, 10), + []testVal{{"v1", 0}, {"v1", 0}}, + }, + { // second entry is duplicated remove + testValSet(2, 10), + []testVal{{"v2", 0}, {"v2", 0}}, + }, + { // remove duplicates are separated by a valid remove + testValSet(2, 10), + []testVal{{"v1", 0}, {"v2", 0}, {"v1", 0}}, + }, + { // remove duplicates are separated by a valid remove + testValSet(3, 10), + []testVal{{"v1", 0}, {"v3", 0}, {"v1", 0}}, + }, + + { // remove and update same val + testValSet(2, 10), + []testVal{{"v1", 0}, {"v2", 20}, {"v1", 30}}, + }, + { // duplicate entries in removes + changes + testValSet(2, 10), + []testVal{{"v1", 0}, {"v2", 20}, {"v2", 30}, {"v1", 0}}, + }, + { // duplicate entries in removes + changes + testValSet(3, 10), + []testVal{{"v1", 0}, {"v3", 5}, {"v2", 20}, {"v2", 30}, {"v1", 0}}, + }, + } + + for i, tt := range testCases { + executeValSetErrTestCase(t, i, tt) + } +} + +func TestValSetUpdatesOverflows(t *testing.T) { + maxVP := MaxTotalVotingPower + testCases := []valSetErrTestCase{ + { // single update leading to overflow + testValSet(2, 10), + []testVal{{"v1", math.MaxInt64}}, + }, + { // single update leading to overflow + testValSet(2, 10), + []testVal{{"v2", math.MaxInt64}}, + }, + { // add validator leading to exceed Max + testValSet(1, maxVP-1), + []testVal{{"v2", 5}}, + }, + { // add validator leading to exceed Max + testValSet(2, maxVP/3), + []testVal{{"v3", maxVP / 2}}, + }, + } + + for i, tt := range testCases { + executeValSetErrTestCase(t, i, tt) + } +} + +func TestValSetUpdatesOtherErrors(t *testing.T) { + testCases := []valSetErrTestCase{ + { // update with negative voting power + testValSet(2, 10), + []testVal{{"v1", -123}}, + }, + { // update with negative voting power + testValSet(2, 10), + 
[]testVal{{"v2", -123}}, + }, + { // remove non-existing validator + testValSet(2, 10), + []testVal{{"v3", 0}}, + }, + { // delete all validators + []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}}, + []testVal{{"v1", 0}, {"v2", 0}, {"v3", 0}}, + }, + } + + for i, tt := range testCases { + executeValSetErrTestCase(t, i, tt) + } +} + func TestValSetUpdatesBasicTestsExecute(t *testing.T) { valSetUpdatesBasicTests := []struct { startVals []testVal updateVals []testVal expectedVals []testVal - expError bool }{ - // Operations that should result in error - 0: { // updates leading to overflows - []testVal{{"v1", 10}, {"v2", 10}}, - []testVal{{"v1", math.MaxInt64}}, - []testVal{{"v1", 10}, {"v2", 10}}, - true}, - 1: { // duplicate entries in changes - []testVal{{"v1", 10}, {"v2", 10}}, - []testVal{{"v1", 11}, {"v1", 22}}, - []testVal{{"v1", 10}, {"v2", 10}}, - true}, - 2: { // duplicate entries in removes - []testVal{{"v1", 10}, {"v2", 10}}, - []testVal{{"v1", 0}, {"v1", 0}}, - []testVal{{"v1", 10}, {"v2", 10}}, - true}, - 3: { // duplicate entries in removes + changes - []testVal{{"v1", 10}, {"v2", 10}}, - []testVal{{"v1", 0}, {"v2", 20}, {"v2", 30}, {"v1", 0}}, - []testVal{{"v1", 10}, {"v2", 10}}, - true}, - 4: { // update with negative voting power - []testVal{{"v1", 10}, {"v2", 10}}, - []testVal{{"v1", -123}}, - []testVal{{"v1", 10}, {"v2", 10}}, - true}, - 5: { // delete non existing validator - []testVal{{"v1", 10}, {"v2", 10}}, - []testVal{{"v3", 0}}, - []testVal{{"v1", 10}, {"v2", 10}}, - true}, - - // Operations that should be successful - 6: { // no changes - []testVal{{"v1", 10}, {"v2", 10}}, + { // no changes + testValSet(2, 10), []testVal{}, - []testVal{{"v1", 10}, {"v2", 10}}, - false}, - 7: { // voting power changes - []testVal{{"v1", 10}, {"v2", 10}}, + testValSet(2, 10), + }, + { // voting power changes + testValSet(2, 10), []testVal{{"v1", 11}, {"v2", 22}}, []testVal{{"v1", 11}, {"v2", 22}}, - false}, - 8: { // add new validators + }, + { // add new validators []testVal{{"v1", 10}, {"v2", 20}}, []testVal{{"v3", 30}, {"v4", 40}}, []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}, {"v4", 40}}, - false}, - 9: { // delete validators + }, + { // add new validator to middle + []testVal{{"v1", 10}, {"v3", 20}}, + []testVal{{"v2", 30}}, + []testVal{{"v1", 10}, {"v2", 30}, {"v3", 20}}, + }, + { // add new validator to beginning + []testVal{{"v2", 10}, {"v3", 20}}, + []testVal{{"v1", 30}}, + []testVal{{"v1", 30}, {"v2", 10}, {"v3", 20}}, + }, + { // delete validators []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}}, []testVal{{"v2", 0}}, []testVal{{"v1", 10}, {"v3", 30}}, - false}, - 10: { // delete all validators - []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}}, - []testVal{{"v1", 0}, {"v2", 0}, {"v3", 0}}, - []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}}, - true}, + }, } for i, tt := range valSetUpdatesBasicTests { // create a new set and apply updates, keeping copies for the checks valSet := createNewValidatorSet(tt.startVals) - valSetCopy := valSet.Copy() valList := createNewValidatorList(tt.updateVals) valListCopy := validatorListCopy(valList) err := valSet.UpdateWithChangeSet(valList) + assert.NoError(t, err, "test %d", i) - if tt.expError { - // for errors check the validator set has not been changed - assert.Error(t, err, "test %d", i) - assert.Equal(t, valSet, valSetCopy, "test %v", i) - } else { - assert.NoError(t, err, "test %d", i) - } // check the parameter list has not changed assert.Equal(t, valList, valListCopy, "test %v", i) From dc6567c677dfdfed7278759927ec47e6e94e7bff Mon Sep 17 
00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Feb 2019 06:32:40 +0100 Subject: [PATCH 15/16] consensus: flush wal on stop (#3297) Should fix #3295 Also partial fix of #3043 --- CHANGELOG.md | 2 +- CHANGELOG_PENDING.md | 2 ++ consensus/wal.go | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 592a7c6b..1842c8e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -89,7 +89,7 @@ Special thanks to external contributors on this release: - [p2p] [\#3247](https://github.com/tendermint/tendermint/issues/3247) Fix panic in SeedMode when calling FlushStop and OnStop concurrently - [p2p] [\#3040](https://github.com/tendermint/tendermint/issues/3040) Fix MITM on secret connection by checking low-order points -- [privval] [\#3258](https://github.com/tendermint/tendermint/issues/3258) Fix race between sign requests and ping requests in socket +- [privval] [\#3258](https://github.com/tendermint/tendermint/issues/3258) Fix race between sign requests and ping requests in socket that was causing messages to be corrupted ## v0.29.1 diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 64098583..f23ec5bf 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -21,3 +21,5 @@ Special thanks to external contributors on this release: ### IMPROVEMENTS: ### BUG FIXES: +- [consensus] \#3297 Flush WAL on stop to prevent data corruption during + graceful shutdown diff --git a/consensus/wal.go b/consensus/wal.go index d56ede26..26428a4c 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -116,6 +116,7 @@ func (wal *baseWAL) OnStart() error { // Use Wait() to ensure it's finished shutting down // before cleaning up files. func (wal *baseWAL) OnStop() { + wal.group.Flush() wal.group.Stop() wal.group.Close() } From d32f7d241608c8079f0755a38dbd1b9bd2262724 Mon Sep 17 00:00:00 2001 From: Zach Date: Tue, 12 Feb 2019 23:26:01 -0500 Subject: [PATCH 16/16] update codeowners (#3305) limit the scope of @zramsay because he's annoyed by notifications --- .github/CODEOWNERS | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 9d2fc15b..df77531c 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -4,4 +4,6 @@ * @ebuchman @melekes @xla # Precious documentation -/docs/ @zramsay +/docs/README.md @zramsay +/docs/DOCS_README.md @zramsay +/docs/.vuepress/ @zramsay
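
The consensus WAL fix earlier in this series (adding `wal.group.Flush()` at the top of `baseWAL.OnStop`) is an instance of a general shutdown rule: flush buffered writes before stopping the writer, or a graceful shutdown can silently drop the tail of the log. The following is a self-contained sketch of that ordering using only the standard library's `bufio.Writer` as a stand-in; it is not Tendermint's autofile group, and the `walWriter` type and file name are made up for illustration.

```go
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
)

// walWriter is a toy stand-in for the consensus WAL: writes are buffered,
// so anything not flushed before Close is lost on shutdown.
type walWriter struct {
	f   *os.File
	buf *bufio.Writer
}

func openWAL(path string) (*walWriter, error) {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
	if err != nil {
		return nil, err
	}
	return &walWriter{f: f, buf: bufio.NewWriter(f)}, nil
}

func (w *walWriter) Write(msg string) error {
	_, err := w.buf.WriteString(msg + "\n")
	return err
}

// Close mirrors the ordering of the fix in baseWAL.OnStop: flush first, then
// close, so a clean shutdown cannot truncate the last entries.
func (w *walWriter) Close() error {
	if err := w.buf.Flush(); err != nil {
		return err
	}
	return w.f.Close()
}

func main() {
	w, err := openWAL("demo.wal")
	if err != nil {
		log.Fatal(err)
	}
	if err := w.Write("ENDHEIGHT 42"); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("wal flushed and closed")
}
```

The essential detail matches the one-line patch: the flush happens before the stop/close, so nothing still buffered in memory is lost when the node shuts down cleanly.
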