Compare commits


14 Commits

Author SHA1 Message Date
mossid
f89daec13e address comments in progress 2019-02-08 21:48:41 +01:00
mossid
29a7bea364 Merge branch 'joon/2252-evidence-rpc' of github.com:mossid/tendermint into joon/2252-evidence-rpc 2019-02-08 20:33:36 +01:00
mossid
c4f1b7560d Merge branch 'develop' into joon/2252-evidence-rpc 2019-02-08 20:32:47 +01:00
Anton Kaliaev
0e0e0e8b5a Update rpc/client/main_test.go
Co-Authored-By: mossid <torecursedivine@gmail.com>
2019-02-08 11:26:55 -08:00
Anton Kaliaev
44be0393b5 Update abci/example/kvstore/persistent_kvstore.go
Co-Authored-By: mossid <torecursedivine@gmail.com>
2019-02-08 11:26:38 -08:00
Anca Zamfir
cce4d21ccb treat validator updates as set (#3222)
* Initial commit for 3181..still early

* unit test updates

* unit test updates

* fix check of dups across updates and deletes

* simplify the processChange() func

* added overflow check utest

* Added checks for empty valset, new utest

* deepcopy changes in processUpdate()

* moved to new API, fixed tests

* test cleanup

* address review comments

* make sure votePower > 0

* gofmt fixes

* handle duplicates and invalid values

* more work on tests, review comments

* Renamed and explained K

* make TestVal private

* split verifyUpdatesAndComputeNewPriorities.., added check for deletes

* return error if validator set is empty after processing changes

* address review comments

* lint err

* Fixed the total voting power and added comments

* fix lint

* fix lint
2019-02-08 13:05:09 -05:00
Ismail Khoffi
c1f7399a86 review comment: cleaner constant for N/2, delete secp256k1N and use (#3279)
`secp256k1.S256().N` directly instead
2019-02-08 09:48:09 -05:00
Ethan Buchman
44a89a3537 Merge pull request #3282 from tendermint/master
Merge master back to develop
2019-02-08 09:43:36 -05:00
Ethan Buchman
a8dbc64319 Merge pull request #3276 from tendermint/release/v0.29.2
Release/v0.29.2
2019-02-08 09:42:52 -05:00
Ethan Buchman
0d7f17ce57 Merge branch 'develop' into joon/2252-evidence-rpc 2018-12-15 18:58:14 -05:00
mossid
1b978c0e9a address comments 2018-11-06 18:55:34 +01:00
mossid
2420536bfa address comments 2018-11-06 18:11:10 +01:00
mossid
1a339fdd6e fix test_cover 2018-10-13 20:08:03 +09:00
mossid
93e2350270 implement broadcast_duplicate_vote endpoint 2018-10-13 18:58:32 +09:00
18 changed files with 1019 additions and 226 deletions

View File

@@ -9,8 +9,10 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/ed25519"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
tmtypes "github.com/tendermint/tendermint/types"
)
const (
@@ -27,6 +29,8 @@ type PersistentKVStoreApplication struct {
// validator set
ValUpdates []types.ValidatorUpdate
valAddrToPubKeyMap map[string]types.PubKey
logger log.Logger
}
@@ -40,8 +44,9 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
state := loadState(db)
return &PersistentKVStoreApplication{
app: &KVStoreApplication{state: state},
logger: log.NewNopLogger(),
app: &KVStoreApplication{state: state},
valAddrToPubKeyMap: make(map[string]types.PubKey),
logger: log.NewNopLogger(),
}
}
@@ -83,8 +88,18 @@ func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
return app.app.Commit()
}
func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
return app.app.Query(reqQuery)
func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
switch reqQuery.Path {
case "/val":
key := []byte("val:" + string(reqQuery.Data))
value := app.app.state.db.Get(key)
resQuery.Key = reqQuery.Data
resQuery.Value = value
return
default:
return app.app.Query(reqQuery)
}
}
// Save the validators in the merkle tree
@@ -102,6 +117,20 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
// reset valset changes
app.ValUpdates = make([]types.ValidatorUpdate, 0)
for _, ev := range req.ByzantineValidators {
switch ev.Type {
case tmtypes.ABCIEvidenceTypeDuplicateVote:
// decrease voting power by 1
if ev.TotalVotingPower == 0 {
continue
}
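// look up the offender's pubkey by address (cached in updateValidator) and re-submit it with the reduced power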
app.updateValidator(types.ValidatorUpdate{
PubKey: app.valAddrToPubKeyMap[string(ev.Validator.Address)],
Power: ev.TotalVotingPower - 1,
})
}
}
return types.ResponseBeginBlock{}
}
@@ -173,6 +202,10 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
// add, update, or remove a validator
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
key := []byte("val:" + string(v.PubKey.Data))
pubkey := ed25519.PubKeyEd25519{}
copy(pubkey[:], v.PubKey.Data)
if v.Power == 0 {
// remove validator
if !app.app.state.db.Has(key) {
@@ -181,6 +214,9 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
Log: fmt.Sprintf("Cannot remove non-existent validator %X", key)}
}
app.app.state.db.Delete(key)
delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
} else {
// add or update validator
value := bytes.NewBuffer(make([]byte, 0))
@@ -190,6 +226,8 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
Log: fmt.Sprintf("Error encoding validator: %v", err)}
}
app.app.state.db.Set(key, value.Bytes())
app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
}
// we only update the changes array if we successfully updated the tree

View File

@@ -14,8 +14,7 @@ import (
// see:
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
var secp256k1N, _ = new(big.Int).SetString("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
var secp256k1halfN = new(big.Int).Div(secp256k1N, big.NewInt(2))
var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)
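// NOTE: right-shifting the (odd) curve order N by one bit yields floor(N/2), identical to the previous Div(secp256k1N, big.NewInt(2)).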
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
// The returned signature will be of the form R || S (in lower-S form).

View File

@@ -7,6 +7,7 @@ import (
"github.com/pkg/errors"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto"
cmn "github.com/tendermint/tendermint/libs/common"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
@@ -247,6 +248,15 @@ func (c *HTTP) Validators(height *int64) (*ctypes.ResultValidators, error) {
return result, nil
}
func (c *HTTP) BroadcastDuplicateVote(pubkey crypto.PubKey, vote1 types.Vote, vote2 types.Vote) (*ctypes.ResultBroadcastDuplicateVote, error) {
result := new(ctypes.ResultBroadcastDuplicateVote)
_, err := c.rpc.Call("broadcast_duplicate_vote", map[string]interface{}{"pubkey": pubkey, "vote1": vote1, "vote2": vote2}, result)
if err != nil {
return nil, errors.Wrap(err, "BroadcastDuplicateVote")
}
return result, nil
}
/** websocket event stuff here... **/
type WSEvents struct {

View File

@@ -21,6 +21,7 @@ implementation.
*/
import (
"github.com/tendermint/tendermint/crypto"
cmn "github.com/tendermint/tendermint/libs/common"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
@@ -74,6 +75,7 @@ type Client interface {
HistoryClient
StatusClient
EventsClient
EvidenceClient
}
// NetworkClient is general info about the network state. May not
@@ -99,3 +101,8 @@ type MempoolClient interface {
UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error)
NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error)
}
// EvidenceClient is used for submitting evidence of malicious behaviour
type EvidenceClient interface {
BroadcastDuplicateVote(pubkey crypto.PubKey, vote1 types.Vote, vote2 types.Vote) (*ctypes.ResultBroadcastDuplicateVote, error)
}

View File

@@ -3,6 +3,7 @@ package client
import (
"context"
"github.com/tendermint/tendermint/crypto"
cmn "github.com/tendermint/tendermint/libs/common"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
nm "github.com/tendermint/tendermint/node"
@@ -140,6 +141,10 @@ func (Local) TxSearch(query string, prove bool, page, perPage int) (*ctypes.Resu
return core.TxSearch(query, prove, page, perPage)
}
func (Local) BroadcastDuplicateVote(pubkey crypto.PubKey, vote1 types.Vote, vote2 types.Vote) (*ctypes.ResultBroadcastDuplicateVote, error) {
return core.BroadcastDuplicateVote(pubkey, vote1, vote2)
}
func (c *Local) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error {
return c.EventBus.Subscribe(ctx, subscriber, query, out)
}

View File

@@ -1,6 +1,8 @@
package client_test
import (
"fmt"
"io/ioutil"
"os"
"testing"
@@ -13,7 +15,12 @@ var node *nm.Node
func TestMain(m *testing.M) {
// start a tendermint node (and kvstore) in the background to test against
app := kvstore.NewKVStoreApplication()
dir, err := ioutil.TempDir("/tmp", "rpc-client-test")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
app := kvstore.NewPersistentKVStoreApplication(dir)
node = rpctest.StartTendermint(app)
code := m.Run()

View File

@@ -35,6 +35,7 @@ type Client struct {
client.HistoryClient
client.StatusClient
client.EventsClient
client.EvidenceClient
cmn.Service
}
@@ -134,3 +135,7 @@ func (c Client) Commit(height *int64) (*ctypes.ResultCommit, error) {
func (c Client) Validators(height *int64) (*ctypes.ResultValidators, error) {
return core.Validators(height)
}
func (c Client) BroadcastDuplicateVote(pubkey crypto.PubKey, vote1, vote2 types.Vote) (*ctypes.ResultBroadcastDuplicateVote, error) {
return core.BroadcastDuplicateVote(pubkey, vote1, vote2)
}

View File

@@ -1,7 +1,9 @@
package client_test
import (
"bytes"
"fmt"
"math/rand"
"net/http"
"strings"
"testing"
@@ -11,6 +13,8 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/rpc/client"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
@@ -435,3 +439,137 @@ func TestTxSearch(t *testing.T) {
require.Len(t, result.Txs, 0)
}
}
func deepcpVote(vote *types.Vote) (res *types.Vote) {
res = &types.Vote{
ValidatorAddress: make([]byte, len(vote.ValidatorAddress)),
ValidatorIndex: vote.ValidatorIndex,
Height: vote.Height,
Round: vote.Round,
Type: vote.Type,
BlockID: types.BlockID{
Hash: make([]byte, len(vote.BlockID.Hash)),
PartsHeader: vote.BlockID.PartsHeader,
},
Signature: make([]byte, len(vote.Signature)),
}
copy(res.ValidatorAddress, vote.ValidatorAddress)
copy(res.BlockID.Hash, vote.BlockID.Hash)
copy(res.Signature, vote.Signature)
return
}
func newEvidence(t *testing.T, val *privval.FilePV, vote *types.Vote, vote2 *types.Vote, chainID string) types.DuplicateVoteEvidence {
var err error
vote2_ := deepcpVote(vote2)
vote2_.Signature, err = val.PrivKey.Sign(vote2_.SignBytes(chainID))
require.NoError(t, err)
return types.DuplicateVoteEvidence{
PubKey: val.PubKey,
VoteA: vote,
VoteB: vote2_,
}
}
func makeEvidences(t *testing.T, val *privval.FilePV, chainID string) (ev types.DuplicateVoteEvidence, fakes []types.DuplicateVoteEvidence) {
vote := &types.Vote{
ValidatorAddress: val.Address,
ValidatorIndex: 0,
Height: 1,
Round: 0,
Type: types.VoteTypePrevote,
BlockID: types.BlockID{
Hash: []byte{0x00},
PartsHeader: types.PartSetHeader{},
},
}
var err error
vote.Signature, err = val.PrivKey.Sign(vote.SignBytes(chainID))
require.NoError(t, err)
vote2 := deepcpVote(vote)
vote2.BlockID.Hash = []byte{0x01}
ev = newEvidence(t, val, vote, vote2, chainID)
fakes = make([]types.DuplicateVoteEvidence, 42)
// different address
vote2 = deepcpVote(vote)
for i := 0; i < 10; i++ {
rand.Read(vote2.ValidatorAddress)
fakes[i] = newEvidence(t, val, vote, vote2, chainID)
}
// different index
vote2 = deepcpVote(vote)
for i := 10; i < 20; i++ {
vote2.ValidatorIndex = rand.Int()%100 + 1
fakes[i] = newEvidence(t, val, vote, vote2, chainID)
}
// different height
vote2 = deepcpVote(vote)
for i := 20; i < 30; i++ {
vote2.Height = rand.Int63()%1000 + 100
fakes[i] = newEvidence(t, val, vote, vote2, chainID)
}
// different round
vote2 = deepcpVote(vote)
for i := 30; i < 40; i++ {
vote2.Round = rand.Int()%10 + 1
fakes[i] = newEvidence(t, val, vote, vote2, chainID)
}
// different type
vote2 = deepcpVote(vote)
vote2.Type = types.VoteTypePrecommit
fakes[40] = newEvidence(t, val, vote, vote2, chainID)
// exactly same vote
vote2 = deepcpVote(vote)
fakes[41] = newEvidence(t, val, vote, vote2, chainID)
return
}
func TestBroadcastDuplicateVote(t *testing.T) {
config := rpctest.GetConfig()
chainID := config.ChainID()
pv := privval.LoadOrGenFilePV(config.PrivValidatorFile())
ev, fakes := makeEvidences(t, pv, chainID)
t.Logf("evidence %v", ev)
for i, c := range GetClients() {
t.Logf("client %d", i)
result, err := c.BroadcastDuplicateVote(ev.PubKey, *ev.VoteA, *ev.VoteB)
require.Nil(t, err, "Error broadcasting evidence")
info, err := c.BlockchainInfo(0, 0)
require.NoError(t, err)
client.WaitForHeight(c, info.LastHeight+1, nil)
require.Equal(t, ev.Hash(), result.Hash, "Invalid response, result %+v", result)
ed25519pub := ev.PubKey.(ed25519.PubKeyEd25519)
rawpub := ed25519pub[:]
result2, err := c.ABCIQuery("/val", rawpub)
require.Nil(t, err, "Error querying evidence, err %v", err)
qres := result2.Response
require.True(t, qres.IsOK(), "Response not OK")
var v abci.ValidatorUpdate
err = abci.ReadMessage(bytes.NewReader(qres.Value), &v)
require.NoError(t, err, "Error reading query result, value %v", qres.Value)
require.EqualValues(t, rawpub, v.PubKey.Data, "Stored PubKey not equal with expected, value %v", string(qres.Value))
require.Equal(t, int64(9), v.Power, "Stored Power not equal with expected, value %v", string(qres.Value))
for _, fake := range fakes {
_, err := c.BroadcastDuplicateVote(fake.PubKey, *fake.VoteA, *fake.VoteB)
require.Error(t, err, "Broadcasting fake evidence succeeded: %s", fake.String())
require.True(t, strings.HasPrefix(err.Error(), "Error broadcasting evidence, adding evidence"), "Broadcasting fake evidence failed on HTTP call: %s", fake.String())
}
}
}

rpc/client/wire.go (new file, +12)
View File

@@ -0,0 +1,12 @@
package client
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/types"
)
var cdc = amino.NewCodec()
func init() {
types.RegisterEvidences(cdc)
}

rpc/core/evidence.go (new file, +26)
View File

@@ -0,0 +1,26 @@
package core
import (
"fmt"
"github.com/tendermint/tendermint/crypto"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
// ### Broadcast Duplicate Vote Parameters
//
// | Parameter | Type | Default | Required | Description |
// |-----------+--------+---------+----------+-------------------------------|
// | pubkey | PubKey | nil | true | PubKey of the byzantine actor |
// | vote1 | Vote | nil | true | First vote |
// | vote2 | Vote | nil | true | Second vote |
func BroadcastDuplicateVote(pubkey crypto.PubKey, vote1 types.Vote, vote2 types.Vote) (*ctypes.ResultBroadcastDuplicateVote, error) {
ev := &types.DuplicateVoteEvidence{PubKey: pubkey, VoteA: &vote1, VoteB: &vote2}
err := evidencePool.AddEvidence(ev)
if err != nil {
return nil, fmt.Errorf("Error broadcasting evidence, adding evidence: %v", err)
}
return &ctypes.ResultBroadcastDuplicateVote{Hash: ev.Hash()}, nil
}
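For reference, a minimal sketch (not part of the diff) of driving the new endpoint through the BroadcastDuplicateVote method added to the HTTP client above. The node address, key-file path and chain ID are assumptions; the two votes must be conflicting votes signed by the same validator, built the same way as in makeEvidences in the test above.

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/privval"
    "github.com/tendermint/tendermint/rpc/client"
    "github.com/tendermint/tendermint/types"
)

func main() {
    // Assumed node address, key file and chain ID.
    c := client.NewHTTP("tcp://localhost:26657", "/websocket")
    pv := privval.LoadOrGenFilePV("/path/to/priv_validator.json")
    chainID := "test-chain"

    // Build two conflicting prevotes for the same height/round, signed by pv.
    makeVote := func(blockHash byte) types.Vote {
        v := types.Vote{
            ValidatorAddress: pv.Address,
            Height:           1,
            Round:            0,
            Type:             types.VoteTypePrevote,
            BlockID:          types.BlockID{Hash: []byte{blockHash}},
        }
        sig, err := pv.PrivKey.Sign(v.SignBytes(chainID))
        if err != nil {
            panic(err)
        }
        v.Signature = sig
        return v
    }
    voteA, voteB := makeVote(0x00), makeVote(0x01)

    res, err := c.BroadcastDuplicateVote(pv.PubKey, voteA, voteB)
    if err != nil {
        panic(err)
    }
    fmt.Printf("evidence accepted, hash %X\n", res.Hash)
}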

View File

@@ -30,7 +30,7 @@ var Routes = map[string]*rpc.RPCFunc{
"unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"),
"num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""),
// broadcast API
// tx broadcast API
"broadcast_tx_commit": rpc.NewRPCFunc(BroadcastTxCommit, "tx"),
"broadcast_tx_sync": rpc.NewRPCFunc(BroadcastTxSync, "tx"),
"broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"),
@@ -38,6 +38,9 @@ var Routes = map[string]*rpc.RPCFunc{
// abci API
"abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"),
"abci_info": rpc.NewRPCFunc(ABCIInfo, ""),
// evidence API
"broadcast_duplicate_vote": rpc.NewRPCFunc(BroadcastDuplicateVote, "pubkey,vote1,vote2"),
}
func AddUnsafeRoutes() {

View File

@@ -193,6 +193,11 @@ type ResultABCIQuery struct {
Response abci.ResponseQuery `json:"response"`
}
// Result of broadcasting evidence
type ResultBroadcastDuplicateVote struct {
Hash []byte `json:"hash"`
}
// empty results
type (
ResultUnsafeFlushMempool struct{}

View File

@@ -2,7 +2,6 @@ package state
import (
"fmt"
"strings"
"time"
abci "github.com/tendermint/tendermint/abci/types"
@@ -143,7 +142,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b
return state, err
}
if len(validatorUpdates) > 0 {
blockExec.logger.Info("Updates to validators", "updates", makeValidatorUpdatesLogString(validatorUpdates))
blockExec.logger.Info("Updates to validators", "updates", types.ValidatorListString(validatorUpdates))
}
// Update the state with the block and responses.
@@ -368,70 +367,6 @@ func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate,
return nil
}
// If more or equal than 1/3 of total voting power changed in one block, then
// a light client could never prove the transition externally. See
// ./lite/doc.go for details on how a light client tracks validators.
func updateValidators(currentSet *types.ValidatorSet, updates []*types.Validator) error {
for _, valUpdate := range updates {
// should already have been checked
if valUpdate.VotingPower < 0 {
return fmt.Errorf("Voting power can't be negative %v", valUpdate)
}
address := valUpdate.Address
_, val := currentSet.GetByAddress(address)
// valUpdate.VotingPower is ensured to be non-negative in validation method
if valUpdate.VotingPower == 0 { // remove val
_, removed := currentSet.Remove(address)
if !removed {
return fmt.Errorf("Failed to remove validator %X", address)
}
} else if val == nil { // add val
// make sure we do not exceed MaxTotalVotingPower by adding this validator:
totalVotingPower := currentSet.TotalVotingPower()
updatedVotingPower := valUpdate.VotingPower + totalVotingPower
overflow := updatedVotingPower > types.MaxTotalVotingPower || updatedVotingPower < 0
if overflow {
return fmt.Errorf(
"Failed to add new validator %v. Adding it would exceed max allowed total voting power %v",
valUpdate,
types.MaxTotalVotingPower)
}
// TODO: issue #1558 update spec according to the following:
// Set ProposerPriority to -C*totalVotingPower (with C ~= 1.125) to make sure validators can't
// unbond/rebond to reset their (potentially previously negative) ProposerPriority to zero.
//
// Contract: totalVotingPower < MaxTotalVotingPower to ensure ProposerPriority does
// not exceed the bounds of int64.
//
// Compute ProposerPriority = -1.125*totalVotingPower == -(totalVotingPower + (totalVotingPower >> 3)).
valUpdate.ProposerPriority = -(totalVotingPower + (totalVotingPower >> 3))
added := currentSet.Add(valUpdate)
if !added {
return fmt.Errorf("Failed to add new validator %v", valUpdate)
}
} else { // update val
// make sure we do not exceed MaxTotalVotingPower by updating this validator:
totalVotingPower := currentSet.TotalVotingPower()
curVotingPower := val.VotingPower
updatedVotingPower := totalVotingPower - curVotingPower + valUpdate.VotingPower
overflow := updatedVotingPower > types.MaxTotalVotingPower || updatedVotingPower < 0
if overflow {
return fmt.Errorf(
"Failed to update existing validator %v. Updating it would exceed max allowed total voting power %v",
valUpdate,
types.MaxTotalVotingPower)
}
updated := currentSet.Update(valUpdate)
if !updated {
return fmt.Errorf("Failed to update validator %X to %v", address, valUpdate)
}
}
}
return nil
}
// updateState returns a new State updated according to the header and responses.
func updateState(
state State,
@@ -448,7 +383,7 @@ func updateState(
// Update the validator set with the latest abciResponses.
lastHeightValsChanged := state.LastHeightValidatorsChanged
if len(validatorUpdates) > 0 {
err := updateValidators(nValSet, validatorUpdates)
err := nValSet.UpdateWithChangeSet(validatorUpdates)
if err != nil {
return state, fmt.Errorf("Error changing validator set: %v", err)
}
@@ -552,13 +487,3 @@ func ExecCommitBlock(
// ResponseCommit has no error or log, just data
return res.Data, nil
}
// Make pretty string for validatorUpdates logging
func makeValidatorUpdatesLogString(vals []*types.Validator) string {
chunks := make([]string, len(vals))
for i, val := range vals {
chunks[i] = fmt.Sprintf("%s:%d", val.Address, val.VotingPower)
}
return strings.Join(chunks, ",")
}

View File

@@ -280,7 +280,7 @@ func TestUpdateValidators(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
updates, err := types.PB2TM.ValidatorUpdates(tc.abciUpdates)
assert.NoError(t, err)
err = updateValidators(tc.currentSet, updates)
err = tc.currentSet.UpdateWithChangeSet(updates)
if tc.shouldErr {
assert.Error(t, err)
} else {

View File

@@ -322,7 +322,8 @@ func TestProposerFrequency(t *testing.T) {
vals := make([]*types.Validator, N)
totalVotePower := int64(0)
for j := 0; j < N; j++ {
votePower := int64(cmn.RandInt() % maxPower)
// make sure votePower > 0
votePower := int64(cmn.RandInt()%maxPower) + 1
totalVotePower += votePower
privVal := types.NewMockPV()
pubKey := privVal.GetPubKey()
@@ -424,49 +425,71 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) {
require.Equal(t, len(updatedState2.NextValidators.Validators), 2)
_, updatedVal1 := updatedState2.NextValidators.GetByAddress(val1PubKey.Address())
_, addedVal2 := updatedState2.NextValidators.GetByAddress(val2PubKey.Address())
// adding a validator should not lead to a ProposerPriority equal to zero (unless the combination of averaging and
// incrementing would cause so; which is not the case here)
totalPowerBefore2 := curTotal
// while adding we compute prio == -1.125 * total:
wantVal2ProposerPrio := -(totalPowerBefore2 + (totalPowerBefore2 >> 3))
wantVal2ProposerPrio = wantVal2ProposerPrio + val2VotingPower
// then increment:
// Steps from adding new validator:
// 0 - val1 prio is 0, TVP after add:
wantVal1Prio := int64(0)
totalPowerAfter := val1VotingPower + val2VotingPower
// mostest:
wantVal2ProposerPrio = wantVal2ProposerPrio - totalPowerAfter
avg := big.NewInt(0).Add(big.NewInt(val1VotingPower), big.NewInt(wantVal2ProposerPrio))
// 1. Add - Val2 should be initially added with (-123) =>
wantVal2Prio := -(totalPowerAfter + (totalPowerAfter >> 3))
// 2. Scale - noop
// 3. Center - with avg, resulting val2:-61, val1:62
avg := big.NewInt(0).Add(big.NewInt(wantVal1Prio), big.NewInt(wantVal2Prio))
avg.Div(avg, big.NewInt(2))
wantVal2ProposerPrio = wantVal2ProposerPrio - avg.Int64()
wantVal1Prio := 0 + val1VotingPower - avg.Int64()
wantVal2Prio = wantVal2Prio - avg.Int64() // -61
wantVal1Prio = wantVal1Prio - avg.Int64() // 62
// 4. Steps from IncrementProposerPriority
wantVal1Prio = wantVal1Prio + val1VotingPower // 72
wantVal2Prio = wantVal2Prio + val2VotingPower // 39
wantVal1Prio = wantVal1Prio - totalPowerAfter // -38 as val1 is proposer
assert.Equal(t, wantVal1Prio, updatedVal1.ProposerPriority)
assert.Equal(t, wantVal2ProposerPrio, addedVal2.ProposerPriority)
assert.Equal(t, wantVal2Prio, addedVal2.ProposerPriority)
// Updating a validator does not reset the ProposerPriority to zero:
// 1. Add - Val2 VotingPower change to 1 =>
updatedVotingPowVal2 := int64(1)
updateVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: updatedVotingPowVal2}
validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateVal})
assert.NoError(t, err)
// this will cause the diff of priorities (31)
// this will cause the diff of priorities (77)
// to be larger than threshold == 2*totalVotingPower (22):
updatedState3, err := updateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates)
assert.NoError(t, err)
require.Equal(t, len(updatedState3.NextValidators.Validators), 2)
_, prevVal1 := updatedState3.Validators.GetByAddress(val1PubKey.Address())
_, prevVal2 := updatedState3.Validators.GetByAddress(val2PubKey.Address())
_, updatedVal1 = updatedState3.NextValidators.GetByAddress(val1PubKey.Address())
_, updatedVal2 := updatedState3.NextValidators.GetByAddress(val2PubKey.Address())
// divide previous priorities by 2 == CEIL(31/22) as diff > threshold:
expectedVal1PrioBeforeAvg := prevVal1.ProposerPriority/2 + prevVal1.VotingPower
wantVal2ProposerPrio = wantVal2ProposerPrio/2 + updatedVotingPowVal2
// val1 will be proposer:
total := val1VotingPower + updatedVotingPowVal2
expectedVal1PrioBeforeAvg = expectedVal1PrioBeforeAvg - total
avgI64 := (wantVal2ProposerPrio + expectedVal1PrioBeforeAvg) / 2
wantVal2ProposerPrio = wantVal2ProposerPrio - avgI64
wantVal1Prio = expectedVal1PrioBeforeAvg - avgI64
assert.Equal(t, wantVal2ProposerPrio, updatedVal2.ProposerPriority)
_, updatedVal1 = updatedState3.NextValidators.GetByAddress(val1PubKey.Address())
// 2. Scale
// old prios: v1(10):-38, v2(1):39
wantVal1Prio = prevVal1.ProposerPriority
wantVal2Prio = prevVal2.ProposerPriority
// scale to diffMax = 22 = 2 * tvp, diff=39-(-38)=77
// new totalPower
totalPower := updatedVal1.VotingPower + updatedVal2.VotingPower
dist := wantVal2Prio - wantVal1Prio
// ratio := (dist + 2*totalPower - 1) / 2*totalPower = 98/22 = 4
ratio := (dist + 2*totalPower - 1) / (2 * totalPower)
// v1(10):-38/4, v2(1):39/4
wantVal1Prio /= ratio // -9
wantVal2Prio /= ratio // 9
// 3. Center - noop
// 4. IncrementProposerPriority() ->
// v1(10):-9+10, v2(1):9+1 -> v2 proposer so subtract tvp(11)
// v1(10):1, v2(1):-1
wantVal2Prio += updatedVal2.VotingPower // 10 -> prop
wantVal1Prio += updatedVal1.VotingPower // 1
wantVal2Prio -= totalPower // -1
assert.Equal(t, wantVal2Prio, updatedVal2.ProposerPriority)
assert.Equal(t, wantVal1Prio, updatedVal1.ProposerPriority)
}
@@ -527,22 +550,22 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
_, oldVal1 := updatedState2.Validators.GetByAddress(val1PubKey.Address())
_, updatedVal2 := updatedState2.NextValidators.GetByAddress(val2PubKey.Address())
totalPower = val1VotingPower // no update
v2PrioWhenAddedVal2 := -(totalPower + (totalPower >> 3))
v2PrioWhenAddedVal2 = v2PrioWhenAddedVal2 + val1VotingPower // -11 + 10 == -1
v1PrioWhenAddedVal2 := oldVal1.ProposerPriority + val1VotingPower // -10 + 10 == 0
totalPower = 2 * val1VotingPower // now we have to validators with that power
v1PrioWhenAddedVal2 = v1PrioWhenAddedVal2 - totalPower // mostest
// have to express the AVG in big.Ints as -1/2 == -1 in big.Int while -1/2 == 0 in int64
avgSum := big.NewInt(0).Add(big.NewInt(v2PrioWhenAddedVal2), big.NewInt(v1PrioWhenAddedVal2))
avg := avgSum.Div(avgSum, big.NewInt(2))
expectedVal2Prio := v2PrioWhenAddedVal2 - avg.Int64()
totalPower = 2 * val1VotingPower // 10 + 10
expectedVal1Prio := oldVal1.ProposerPriority + val1VotingPower - avg.Int64() - totalPower
// val1's ProposerPriority story: -10 (see above) + 10 (voting pow) - (-1) (avg) - 20 (total) == -19
// 1. Add
val2VotingPower := val1VotingPower
totalPower = val1VotingPower + val2VotingPower // 20
v2PrioWhenAddedVal2 := -(totalPower + (totalPower >> 3)) // -22
// 2. Scale - noop
// 3. Center
avgSum := big.NewInt(0).Add(big.NewInt(v2PrioWhenAddedVal2), big.NewInt(oldVal1.ProposerPriority))
avg := avgSum.Div(avgSum, big.NewInt(2)) // -11
expectedVal2Prio := v2PrioWhenAddedVal2 - avg.Int64() // -11
expectedVal1Prio := oldVal1.ProposerPriority - avg.Int64() // 11
// 4. Increment
expectedVal2Prio = expectedVal2Prio + val2VotingPower // -11 + 10 = -1
expectedVal1Prio = expectedVal1Prio + val1VotingPower // 11 + 10 == 21
expectedVal1Prio = expectedVal1Prio - totalPower // 1, val1 proposer
assert.EqualValues(t, expectedVal1Prio, updatedVal1.ProposerPriority)
// val2 prio when added: -(totalVotingPower + (totalVotingPower >> 3)) == -11
// -> -11 + 10 (voting power) - (-1) (avg) == 0
assert.EqualValues(t, expectedVal2Prio, updatedVal2.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2)
validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
@@ -551,34 +574,40 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
updatedState3, err := updateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates)
assert.NoError(t, err)
// proposer changes from now on (every iteration) as long as there are no changes in the validator set:
assert.NotEqual(t, updatedState3.Validators.Proposer.Address, updatedState3.NextValidators.Proposer.Address)
assert.Equal(t, updatedState3.Validators.Proposer.Address, updatedState3.NextValidators.Proposer.Address)
assert.Equal(t, updatedState3.Validators, updatedState2.NextValidators)
_, updatedVal1 = updatedState3.NextValidators.GetByAddress(val1PubKey.Address())
_, oldVal1 = updatedState3.Validators.GetByAddress(val1PubKey.Address())
_, updatedVal2 = updatedState3.NextValidators.GetByAddress(val2PubKey.Address())
_, oldVal2 := updatedState3.Validators.GetByAddress(val2PubKey.Address())
// val2 will be proposer:
assert.Equal(t, val2PubKey.Address(), updatedState3.NextValidators.Proposer.Address)
// val1 will still be proposer:
assert.Equal(t, val1PubKey.Address(), updatedState3.NextValidators.Proposer.Address)
// check if expected proposer prio is matched:
// Increment
expectedVal2Prio2 := expectedVal2Prio + val2VotingPower // -1 + 10 = 9
expectedVal1Prio2 := expectedVal1Prio + val1VotingPower // 1 + 10 == 11
expectedVal1Prio2 = expectedVal1Prio2 - totalPower // -9, val1 proposer
expectedVal1Prio2 := oldVal1.ProposerPriority + val1VotingPower
expectedVal2Prio2 := oldVal2.ProposerPriority + val1VotingPower - totalPower
avgSum = big.NewInt(expectedVal1Prio + expectedVal2Prio)
avg = avgSum.Div(avgSum, big.NewInt(2))
expectedVal1Prio -= avg.Int64()
expectedVal2Prio -= avg.Int64()
// -19 + 10 - 0 (avg) == -9
assert.EqualValues(t, expectedVal1Prio2, updatedVal1.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2)
// 0 + 10 - 0 (avg) - 20 (total) == -10
assert.EqualValues(t, expectedVal2Prio2, updatedVal2.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2)
// no changes in voting power and both validators have same voting power
// -> proposers should alternate:
oldState := updatedState3
abciResponses = &ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
oldState, err = updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
assert.NoError(t, err)
expectedVal1Prio2 = 1
expectedVal2Prio2 = -1
expectedVal1Prio = -9
expectedVal2Prio = 9
for i := 0; i < 1000; i++ {
// no validator updates:
abciResponses := &ABCIResponses{

View File

@@ -3,6 +3,7 @@ package types
import (
"bytes"
"fmt"
"strings"
"github.com/tendermint/tendermint/crypto"
cmn "github.com/tendermint/tendermint/libs/common"
@@ -68,6 +69,16 @@ func (v *Validator) String() string {
v.ProposerPriority)
}
// ValidatorListString returns a prettified validator list for logging purposes.
func ValidatorListString(vals []*Validator) string {
chunks := make([]string, len(vals))
for i, val := range vals {
chunks[i] = fmt.Sprintf("%s:%d", val.Address, val.VotingPower)
}
return strings.Join(chunks, ",")
}
// Bytes computes the unique encoding of a validator with a given voting power.
// These are the bytes that gets hashed in consensus. It excludes address
// as its redundant with the pubkey. This also excludes ProposerPriority

View File

@@ -12,14 +12,20 @@ import (
cmn "github.com/tendermint/tendermint/libs/common"
)
// The maximum allowed total voting power.
// It needs to be sufficiently small to, in all cases::
// MaxTotalVotingPower - the maximum allowed total voting power.
// It needs to be sufficiently small to, in all cases:
// 1. prevent clipping in incrementProposerPriority()
// 2. let (diff+diffMax-1) not overflow in IncrementPropposerPriotity()
// 2. let (diff+diffMax-1) not overflow in IncrementProposerPriority()
// (Proof of 1 is tricky, left to the reader).
// It could be higher, but this is sufficiently large for our purposes,
// and leaves room for defensive purposes.
const MaxTotalVotingPower = int64(math.MaxInt64) / 8
// PriorityWindowSizeFactor - is a constant that when multiplied with the total voting power gives
// the maximum allowed distance between validator priorities.
const (
MaxTotalVotingPower = int64(math.MaxInt64) / 8
PriorityWindowSizeFactor = 2
)
// ValidatorSet represent a set of *Validator at a given height.
// The validators can be fetched by address or index.
@@ -42,19 +48,17 @@ type ValidatorSet struct {
// NewValidatorSet initializes a ValidatorSet by copying over the
// values from `valz`, a list of Validators. If valz is nil or empty,
// the new ValidatorSet will have an empty list of Validators.
// The addresses of validators in `valz` must be unique otherwise the
// function panics.
func NewValidatorSet(valz []*Validator) *ValidatorSet {
validators := make([]*Validator, len(valz))
for i, val := range valz {
validators[i] = val.Copy()
}
sort.Sort(ValidatorsByAddress(validators))
vals := &ValidatorSet{
Validators: validators,
vals := &ValidatorSet{}
err := vals.updateWithChangeSet(valz, false)
if err != nil {
panic(fmt.Sprintf("cannot create validator set: %s", err))
}
if len(valz) > 0 {
vals.IncrementProposerPriority(1)
}
return vals
}
@@ -74,6 +78,9 @@ func (vals *ValidatorSet) CopyIncrementProposerPriority(times int) *ValidatorSet
// proposer. Panics if validator set is empty.
// `times` must be positive.
func (vals *ValidatorSet) IncrementProposerPriority(times int) {
if vals.IsNilOrEmpty() {
panic("empty validator set")
}
if times <= 0 {
panic("Cannot call IncrementProposerPriority with non-positive times")
}
@@ -81,20 +88,23 @@ func (vals *ValidatorSet) IncrementProposerPriority(times int) {
// Cap the difference between priorities to be proportional to 2*totalPower by
// re-normalizing priorities, i.e., rescale all priorities by multiplying with:
// 2*totalVotingPower/(maxPriority - minPriority)
diffMax := 2 * vals.TotalVotingPower()
diffMax := PriorityWindowSizeFactor * vals.TotalVotingPower()
vals.RescalePriorities(diffMax)
vals.shiftByAvgProposerPriority()
var proposer *Validator
// call IncrementProposerPriority(1) times times:
for i := 0; i < times; i++ {
proposer = vals.incrementProposerPriority()
}
vals.shiftByAvgProposerPriority()
vals.Proposer = proposer
}
func (vals *ValidatorSet) RescalePriorities(diffMax int64) {
if vals.IsNilOrEmpty() {
panic("empty validator set")
}
// NOTE: This check is merely a sanity check which could be
// removed if all tests would init. voting power appropriately;
// i.e. diffMax should always be > 0
@@ -102,7 +112,7 @@ func (vals *ValidatorSet) RescalePriorities(diffMax int64) {
return
}
// Caculating ceil(diff/diffMax):
// Calculating ceil(diff/diffMax):
// Re-normalization is performed by dividing by an integer for simplicity.
// NOTE: This may make debugging priority issues easier as well.
diff := computeMaxMinPriorityDiff(vals)
@@ -146,6 +156,9 @@ func (vals *ValidatorSet) computeAvgProposerPriority() int64 {
// compute the difference between the max and min ProposerPriority of that set
func computeMaxMinPriorityDiff(vals *ValidatorSet) int64 {
if vals.IsNilOrEmpty() {
panic("empty validator set")
}
max := int64(math.MinInt64)
min := int64(math.MaxInt64)
for _, v := range vals.Validators {
@@ -173,21 +186,31 @@ func (vals *ValidatorSet) getValWithMostPriority() *Validator {
}
func (vals *ValidatorSet) shiftByAvgProposerPriority() {
if vals.IsNilOrEmpty() {
panic("empty validator set")
}
avgProposerPriority := vals.computeAvgProposerPriority()
for _, val := range vals.Validators {
val.ProposerPriority = safeSubClip(val.ProposerPriority, avgProposerPriority)
}
}
// Makes a copy of the validator list
func validatorListCopy(valsList []*Validator) []*Validator {
if valsList == nil {
return nil
}
valsCopy := make([]*Validator, len(valsList))
for i, val := range valsList {
valsCopy[i] = val.Copy()
}
return valsCopy
}
// Copy each validator into a new ValidatorSet
func (vals *ValidatorSet) Copy() *ValidatorSet {
validators := make([]*Validator, len(vals.Validators))
for i, val := range vals.Validators {
// NOTE: must copy, since IncrementProposerPriority updates in place.
validators[i] = val.Copy()
}
return &ValidatorSet{
Validators: validators,
Validators: validatorListCopy(vals.Validators),
Proposer: vals.Proposer,
totalVotingPower: vals.totalVotingPower,
}
@@ -284,57 +307,6 @@ func (vals *ValidatorSet) Hash() []byte {
return merkle.SimpleHashFromByteSlices(bzs)
}
// Add adds val to the validator set and returns true. It returns false if val
// is already in the set.
func (vals *ValidatorSet) Add(val *Validator) (added bool) {
val = val.Copy()
idx := sort.Search(len(vals.Validators), func(i int) bool {
return bytes.Compare(val.Address, vals.Validators[i].Address) <= 0
})
if idx >= len(vals.Validators) {
vals.Validators = append(vals.Validators, val)
// Invalidate cache
vals.Proposer = nil
vals.totalVotingPower = 0
return true
} else if bytes.Equal(vals.Validators[idx].Address, val.Address) {
return false
} else {
newValidators := make([]*Validator, len(vals.Validators)+1)
copy(newValidators[:idx], vals.Validators[:idx])
newValidators[idx] = val
copy(newValidators[idx+1:], vals.Validators[idx:])
vals.Validators = newValidators
// Invalidate cache
vals.Proposer = nil
vals.totalVotingPower = 0
return true
}
}
// Update updates the ValidatorSet by copying in the val.
// If the val is not found, it returns false; otherwise,
// it returns true. The val.ProposerPriority field is ignored
// and unchanged by this method.
func (vals *ValidatorSet) Update(val *Validator) (updated bool) {
index, sameVal := vals.GetByAddress(val.Address)
if sameVal == nil {
return false
}
// Overwrite the ProposerPriority so it doesn't change.
// During block execution, the val passed in here comes
// from ABCI via PB2TM.ValidatorUpdates. Since ABCI
// doesn't know about ProposerPriority, PB2TM.ValidatorUpdates
// uses the default value of 0, which would cause issues for
// proposer selection every time a validator's voting power changes.
val.ProposerPriority = sameVal.ProposerPriority
vals.Validators[index] = val.Copy()
// Invalidate cache
vals.Proposer = nil
vals.totalVotingPower = 0
return true
}
// Remove deletes the validator with address. It returns the validator removed
and true. It returns nil and false if the validator is not present in the set.
func (vals *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) {
@@ -366,6 +338,253 @@ func (vals *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) {
}
}
// Checks changes against duplicates, splits the changes in updates and removals, sorts them by address
//
// Returns:
// updates, removals - the sorted lists of updates and removals
// err - non-nil if duplicate entries or entries with negative voting power are seen
//
// No changes are made to 'origChanges'
func processChanges(origChanges []*Validator) (updates, removals []*Validator, err error) {
// Make a deep copy of the changes and sort by address
changes := validatorListCopy(origChanges)
sort.Sort(ValidatorsByAddress(changes))
removals = make([]*Validator, 0, len(changes))
updates = make([]*Validator, 0, len(changes))
var prevAddr Address
// Scan changes by address and append valid validators to updates or removals lists
for _, valUpdate := range changes {
if bytes.Equal(valUpdate.Address, prevAddr) {
err = fmt.Errorf("duplicate entry %v in %v", valUpdate, changes)
return nil, nil, err
}
if valUpdate.VotingPower < 0 {
err = fmt.Errorf("voting power can't be negative %v", valUpdate)
return nil, nil, err
}
if valUpdate.VotingPower == 0 {
removals = append(removals, valUpdate)
} else {
updates = append(updates, valUpdate)
}
prevAddr = valUpdate.Address
}
return updates, removals, err
}
// Verifies a list of updates against a validator set, making sure the allowed
// total voting power would not be exceeded if these updates would be applied to the set.
// It also computes the total voting power of the set that would result after the updates but
// before the removals.
//
// Returns:
// updatedTotalVotingPower - the new total voting power if these updates would be applied
// err - non-nil if the maximum allowed total voting power would be exceeded
//
// 'updates' should be a list of proper validator changes, i.e. they have been scanned
// by processChanges for duplicates and invalid values.
// No changes are made to the validator set 'vals'.
func verifyUpdates(updates []*Validator, vals *ValidatorSet) (updatedTotalVotingPower int64, err error) {
// Scan the updates, compute new total voting power, check for overflow
updatedTotalVotingPower = vals.TotalVotingPower()
for _, valUpdate := range updates {
address := valUpdate.Address
_, val := vals.GetByAddress(address)
if val == nil {
// new validator, add its voting power to the total
updatedTotalVotingPower += valUpdate.VotingPower
} else {
// updated validator, add the difference in power to the total
updatedTotalVotingPower += valUpdate.VotingPower - val.VotingPower
}
if updatedTotalVotingPower < 0 {
err = fmt.Errorf(
"failed to add/update validator with negative voting power %v",
valUpdate)
return 0, err
}
overflow := updatedTotalVotingPower > MaxTotalVotingPower
if overflow {
err = fmt.Errorf(
"failed to add/update validator %v, total voting power would exceed the max allowed %v",
valUpdate, MaxTotalVotingPower)
return 0, err
}
}
return updatedTotalVotingPower, nil
}
// Computes the proposer priority for the validators not present in the set based on 'updatedTotalVotingPower'
// The priorities of validators already in the set (i.e. the ones being updated) are left unchanged.
//
// 'updates' parameter must be a list of unique validators to be added or updated.
// No changes are made to the validator set 'vals'.
func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotalVotingPower int64) int {
numNew := 0
// Scan and update the proposerPriority for newly added and updated validators
for _, valUpdate := range updates {
address := valUpdate.Address
_, val := vals.GetByAddress(address)
if val == nil {
// add val
// Set ProposerPriority to -C*totalVotingPower (with C ~= 1.125) to make sure validators can't
// un-bond and then re-bond to reset their (potentially previously negative) ProposerPriority to zero.
//
// Contract: updatedVotingPower < MaxTotalVotingPower to ensure ProposerPriority does
// not exceed the bounds of int64.
//
// Compute ProposerPriority = -1.125*totalVotingPower == -(updatedVotingPower + (updatedVotingPower >> 3)).
valUpdate.ProposerPriority = -(updatedTotalVotingPower + (updatedTotalVotingPower >> 3))
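// e.g. updatedTotalVotingPower == 8 gives -(8 + (8 >> 3)) == -9 == -1.125 * 8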
numNew++
} else {
valUpdate.ProposerPriority = val.ProposerPriority
}
}
return numNew
}
// Merges the vals' validator list with the updates list.
// When two elements with same address are seen, the one from updates is selected.
// Expects updates to be a list of updates sorted by address with no duplicates or errors,
// must have been validated with verifyUpdates() and priorities computed with computeNewPriorities().
func (vals *ValidatorSet) applyUpdates(updates []*Validator) {
existing := make([]*Validator, len(vals.Validators))
copy(existing, vals.Validators)
merged := make([]*Validator, len(existing)+len(updates))
i := 0
for len(existing) > 0 && len(updates) > 0 {
if bytes.Compare(existing[0].Address, updates[0].Address) < 0 {
merged[i] = existing[0]
existing = existing[1:]
} else {
merged[i] = updates[0]
if bytes.Equal(existing[0].Address, updates[0].Address) {
// validator present in both, advance existing
existing = existing[1:]
}
updates = updates[1:]
}
i++
}
for j := 0; j < len(existing); j++ {
merged[i] = existing[j]
i++
}
for j := 0; j < len(updates); j++ {
merged[i] = updates[j]
i++
}
vals.Validators = merged[:i]
vals.totalVotingPower = 0
}
// Checks that the validators to be removed are part of the validator set.
// No changes are made to the validator set 'vals'.
func verifyRemovals(deletes []*Validator, vals *ValidatorSet) error {
for _, valUpdate := range deletes {
address := valUpdate.Address
_, val := vals.GetByAddress(address)
if val == nil {
return fmt.Errorf("failed to find validator %X to remove", address)
}
}
return nil
}
// Removes the validators specified in 'deletes' from validator set 'vals'.
// Should not fail as verification has been done before.
func (vals *ValidatorSet) applyRemovals(deletes []*Validator) {
for _, valUpdate := range deletes {
address := valUpdate.Address
_, removed := vals.Remove(address)
if !removed {
// Should never happen
panic(fmt.Sprintf("failed to remove validator %X", address))
}
}
}
// UpdateWithChangeSet attempts to update the validator set with 'changes'
// It performs the following steps:
// - validates the changes making sure there are no duplicates and splits them in updates and deletes
// - verifies that applying the changes will not result in errors
// - computes the total voting power BEFORE removals to ensure that in the next steps the relative priorities
// across old and newly added validators are fair
// - computes the priorities of new validators against the final set
// - applies the updates against the validator set
// - applies the removals against the validator set
// - performs scaling and centering of priority values
// If an error is detected during the verification steps, it is returned and the validator set
// is not changed.
func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error {
return vals.updateWithChangeSet(changes, true)
}
// main function used by UpdateWithChangeSet() and NewValidatorSet()
// If 'allowDeletes' is false then delete operations are not allowed and must be reported if
// present in 'changes'
func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes bool) error {
if len(changes) <= 0 {
return nil
}
// Check for duplicates within changes, split in 'updates' and 'deletes' lists (sorted)
updates, deletes, err := processChanges(changes)
if err != nil {
return err
}
if !allowDeletes && len(deletes) != 0 {
err = fmt.Errorf("cannot process validators with voting power 0: %v", deletes)
return err
}
// Verify that applying the 'deletes' against 'vals' will not result in error.
if err := verifyRemovals(deletes, vals); err != nil {
return err
}
// Verify that applying the 'updates' against 'vals' will not result in error.
updatedTotalVotingPower, err := verifyUpdates(updates, vals)
if err != nil {
return err
}
// Compute the priorities for updates
numNewValidators := computeNewPriorities(updates, vals, updatedTotalVotingPower)
if len(vals.Validators)+numNewValidators <= len(deletes) {
err = fmt.Errorf("applying the validator changes would result in empty set")
return err
}
// Apply updates and removals
vals.applyUpdates(updates)
vals.applyRemovals(deletes)
// Scale and center
vals.RescalePriorities(PriorityWindowSizeFactor * vals.TotalVotingPower())
vals.shiftByAvgProposerPriority()
return nil
}
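A minimal usage sketch (not from the diff) of the new set-based API; the keys and voting powers are illustrative.

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/types"
)

func main() {
    pk1 := ed25519.GenPrivKey().PubKey()
    pk2 := ed25519.GenPrivKey().PubKey()
    pk3 := ed25519.GenPrivKey().PubKey()

    // NewValidatorSet now goes through updateWithChangeSet (deletes disallowed)
    // and panics on duplicates, zero or negative voting powers.
    vals := types.NewValidatorSet([]*types.Validator{
        types.NewValidator(pk1, 10),
        types.NewValidator(pk2, 20),
    })

    changes := []*types.Validator{
        types.NewValidator(pk2, 0),  // voting power 0 => removal
        types.NewValidator(pk3, 30), // unknown address => addition, priority seeded at -1.125x the total power before removals
    }

    // Duplicates, negative powers, overflow, removing unknown validators or
    // emptying the set are rejected and leave vals unchanged.
    if err := vals.UpdateWithChangeSet(changes); err != nil {
        panic(err)
    }
    fmt.Println(vals.TotalVotingPower()) // 10 + 30 = 40
}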
// Verify that +2/3 of the set had signed the given signBytes.
func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error {

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"math"
"math/rand"
"strings"
"testing"
"testing/quick"
@@ -45,31 +46,29 @@ func TestValidatorSetBasic(t *testing.T) {
assert.Nil(t, vset.Hash())
// add
val = randValidator_(vset.TotalVotingPower())
assert.True(t, vset.Add(val))
assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
assert.True(t, vset.HasAddress(val.Address))
idx, val2 := vset.GetByAddress(val.Address)
idx, _ = vset.GetByAddress(val.Address)
assert.Equal(t, 0, idx)
assert.Equal(t, val, val2)
addr, val2 = vset.GetByIndex(0)
addr, _ = vset.GetByIndex(0)
assert.Equal(t, []byte(val.Address), addr)
assert.Equal(t, val, val2)
assert.Equal(t, 1, vset.Size())
assert.Equal(t, val.VotingPower, vset.TotalVotingPower())
assert.Equal(t, val, vset.GetProposer())
assert.NotNil(t, vset.Hash())
assert.NotPanics(t, func() { vset.IncrementProposerPriority(1) })
assert.Equal(t, val.Address, vset.GetProposer().Address)
// update
assert.False(t, vset.Update(randValidator_(vset.TotalVotingPower())))
val = randValidator_(vset.TotalVotingPower())
assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
_, val = vset.GetByAddress(val.Address)
val.VotingPower += 100
proposerPriority := val.ProposerPriority
// Mimic update from types.PB2TM.ValidatorUpdates which does not know about ProposerPriority
// and hence defaults to 0.
val.ProposerPriority = 0
assert.True(t, vset.Update(val))
assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
_, val = vset.GetByAddress(val.Address)
assert.Equal(t, proposerPriority, val.ProposerPriority)
@@ -116,8 +115,9 @@ func BenchmarkValidatorSetCopy(b *testing.B) {
for i := 0; i < 1000; i++ {
privKey := ed25519.GenPrivKey()
pubKey := privKey.PubKey()
val := NewValidator(pubKey, 0)
if !vset.Add(val) {
val := NewValidator(pubKey, 10)
err := vset.UpdateWithChangeSet([]*Validator{val})
if err != nil {
panic("Failed to add validator")
}
}
@@ -284,7 +284,7 @@ func randPubKey() crypto.PubKey {
func randValidator_(totalVotingPower int64) *Validator {
// this modulo limits the ProposerPriority/VotingPower to stay in the
// bounds of MaxTotalVotingPower minus the already existing voting power:
val := NewValidator(randPubKey(), cmn.RandInt64()%(MaxTotalVotingPower-totalVotingPower))
val := NewValidator(randPubKey(), int64(cmn.RandUint64()%uint64((MaxTotalVotingPower-totalVotingPower))))
val.ProposerPriority = cmn.RandInt64() % (MaxTotalVotingPower - totalVotingPower)
return val
}
@@ -599,3 +599,357 @@ func TestValidatorSetVerifyCommit(t *testing.T) {
err = vset.VerifyCommit(chainID, blockID, height, commit)
assert.Nil(t, err)
}
func TestEmptySet(t *testing.T) {
var valList []*Validator
valSet := NewValidatorSet(valList)
assert.Panics(t, func() { valSet.IncrementProposerPriority(1) })
assert.Panics(t, func() { valSet.RescalePriorities(100) })
assert.Panics(t, func() { valSet.shiftByAvgProposerPriority() })
assert.Panics(t, func() { assert.Zero(t, computeMaxMinPriorityDiff(valSet)) })
valSet.GetProposer()
// Add to empty set
v1 := newValidator([]byte("v1"), 100)
v2 := newValidator([]byte("v2"), 100)
valList = []*Validator{v1, v2}
assert.NoError(t, valSet.UpdateWithChangeSet(valList))
verifyValidatorSet(t, valSet)
// Delete all validators from set
v1 = newValidator([]byte("v1"), 0)
v2 = newValidator([]byte("v2"), 0)
delList := []*Validator{v1, v2}
assert.Error(t, valSet.UpdateWithChangeSet(delList))
// Attempt delete from empty set
assert.Error(t, valSet.UpdateWithChangeSet(delList))
}
func TestUpdatesForNewValidatorSet(t *testing.T) {
v1 := newValidator([]byte("v1"), 100)
v2 := newValidator([]byte("v2"), 100)
valList := []*Validator{v1, v2}
valSet := NewValidatorSet(valList)
verifyValidatorSet(t, valSet)
// Verify duplicates are caught in NewValidatorSet() and it panics
v111 := newValidator([]byte("v1"), 100)
v112 := newValidator([]byte("v1"), 123)
v113 := newValidator([]byte("v1"), 234)
valList = []*Validator{v111, v112, v113}
assert.Panics(t, func() { NewValidatorSet(valList) })
// Verify set including validator with voting power 0 cannot be created
v1 = newValidator([]byte("v1"), 0)
v2 = newValidator([]byte("v2"), 22)
v3 := newValidator([]byte("v3"), 33)
valList = []*Validator{v1, v2, v3}
assert.Panics(t, func() { NewValidatorSet(valList) })
// Verify set including validator with negative voting power cannot be created
v1 = newValidator([]byte("v1"), 10)
v2 = newValidator([]byte("v2"), -20)
v3 = newValidator([]byte("v3"), 30)
valList = []*Validator{v1, v2, v3}
assert.Panics(t, func() { NewValidatorSet(valList) })
}
type testVal struct {
name string
power int64
}
func TestValSetUpdatesBasicTestsExecute(t *testing.T) {
valSetUpdatesBasicTests := []struct {
startVals []testVal
updateVals []testVal
expectedVals []testVal
expError bool
}{
// Operations that should result in error
0: { // updates leading to overflows
[]testVal{{"v1", 10}, {"v2", 10}},
[]testVal{{"v1", math.MaxInt64}},
[]testVal{{"v1", 10}, {"v2", 10}},
true},
1: { // duplicate entries in changes
[]testVal{{"v1", 10}, {"v2", 10}},
[]testVal{{"v1", 11}, {"v1", 22}},
[]testVal{{"v1", 10}, {"v2", 10}},
true},
2: { // duplicate entries in removes
[]testVal{{"v1", 10}, {"v2", 10}},
[]testVal{{"v1", 0}, {"v1", 0}},
[]testVal{{"v1", 10}, {"v2", 10}},
true},
3: { // duplicate entries in removes + changes
[]testVal{{"v1", 10}, {"v2", 10}},
[]testVal{{"v1", 0}, {"v2", 20}, {"v2", 30}, {"v1", 0}},
[]testVal{{"v1", 10}, {"v2", 10}},
true},
4: { // update with negative voting power
[]testVal{{"v1", 10}, {"v2", 10}},
[]testVal{{"v1", -123}},
[]testVal{{"v1", 10}, {"v2", 10}},
true},
5: { // delete non existing validator
[]testVal{{"v1", 10}, {"v2", 10}},
[]testVal{{"v3", 0}},
[]testVal{{"v1", 10}, {"v2", 10}},
true},
// Operations that should be successful
6: { // no changes
[]testVal{{"v1", 10}, {"v2", 10}},
[]testVal{},
[]testVal{{"v1", 10}, {"v2", 10}},
false},
7: { // voting power changes
[]testVal{{"v1", 10}, {"v2", 10}},
[]testVal{{"v1", 11}, {"v2", 22}},
[]testVal{{"v1", 11}, {"v2", 22}},
false},
8: { // add new validators
[]testVal{{"v1", 10}, {"v2", 20}},
[]testVal{{"v3", 30}, {"v4", 40}},
[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}, {"v4", 40}},
false},
9: { // delete validators
[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}},
[]testVal{{"v2", 0}},
[]testVal{{"v1", 10}, {"v3", 30}},
false},
10: { // delete all validators
[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}},
[]testVal{{"v1", 0}, {"v2", 0}, {"v3", 0}},
[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}},
true},
}
for i, tt := range valSetUpdatesBasicTests {
// create a new set and apply updates, keeping copies for the checks
valSet := createNewValidatorSet(tt.startVals)
valSetCopy := valSet.Copy()
valList := createNewValidatorList(tt.updateVals)
valListCopy := validatorListCopy(valList)
err := valSet.UpdateWithChangeSet(valList)
if tt.expError {
// for errors check the validator set has not been changed
assert.Error(t, err, "test %d", i)
assert.Equal(t, valSet, valSetCopy, "test %v", i)
} else {
assert.NoError(t, err, "test %d", i)
}
// check the parameter list has not changed
assert.Equal(t, valList, valListCopy, "test %v", i)
// check the final validator list is as expected and the set is properly scaled and centered.
assert.Equal(t, getValidatorResults(valSet.Validators), tt.expectedVals, "test %v", i)
verifyValidatorSet(t, valSet)
}
}
func getValidatorResults(valList []*Validator) []testVal {
testList := make([]testVal, len(valList))
for i, val := range valList {
testList[i].name = string(val.Address)
testList[i].power = val.VotingPower
}
return testList
}
// Test that different permutations of an update give the same result.
func TestValSetUpdatesOrderTestsExecute(t *testing.T) {
// startVals - initial validators to create the set with
// updateVals - a sequence of updates to be applied to the set.
// updateVals is shuffled a number of times during testing to check for same resulting validator set.
valSetUpdatesOrderTests := []struct {
startVals []testVal
updateVals []testVal
}{
0: { // order of changes should not matter, the final validator sets should be the same
[]testVal{{"v1", 10}, {"v2", 10}, {"v3", 30}, {"v4", 40}},
[]testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, {"v4", 44}}},
1: { // order of additions should not matter
[]testVal{{"v1", 10}, {"v2", 20}},
[]testVal{{"v3", 30}, {"v4", 40}, {"v5", 50}, {"v6", 60}}},
2: { // order of removals should not matter
[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}, {"v4", 40}},
[]testVal{{"v1", 0}, {"v3", 0}, {"v4", 0}}},
3: { // order of mixed operations should not matter
[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}, {"v4", 40}},
[]testVal{{"v1", 0}, {"v3", 0}, {"v2", 22}, {"v5", 50}, {"v4", 44}}},
}
for i, tt := range valSetUpdatesOrderTests {
// create a new set and apply updates
valSet := createNewValidatorSet(tt.startVals)
valSetCopy := valSet.Copy()
valList := createNewValidatorList(tt.updateVals)
assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList))
// save the result as expected for next updates
valSetExp := valSetCopy.Copy()
// perform at most 20 permutations on the updates and call UpdateWithChangeSet()
n := len(tt.updateVals)
maxNumPerms := cmn.MinInt(20, n*n)
for j := 0; j < maxNumPerms; j++ {
// create a copy of original set and apply a random permutation of updates
valSetCopy := valSet.Copy()
valList := createNewValidatorList(permutation(tt.updateVals))
// check there was no error and the set is properly scaled and centered.
assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList),
"test %v failed for permutation %v", i, valList)
verifyValidatorSet(t, valSetCopy)
// verify the resulting test is same as the expected
assert.Equal(t, valSetCopy, valSetExp,
"test %v failed for permutation %v", i, valList)
}
}
}
// This tests the private function validator_set.go:applyUpdates() function, used only for additions and changes.
// Should perform a proper merge of updatedVals and startVals
func TestValSetApplyUpdatesTestsExecute(t *testing.T) {
valSetUpdatesBasicTests := []struct {
startVals []testVal
updateVals []testVal
expectedVals []testVal
}{
// additions
0: { // prepend
[]testVal{{"v4", 44}, {"v5", 55}},
[]testVal{{"v1", 11}},
[]testVal{{"v1", 11}, {"v4", 44}, {"v5", 55}}},
1: { // append
[]testVal{{"v4", 44}, {"v5", 55}},
[]testVal{{"v6", 66}},
[]testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}},
2: { // insert
[]testVal{{"v4", 44}, {"v6", 66}},
[]testVal{{"v5", 55}},
[]testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}},
3: { // insert multi
[]testVal{{"v4", 44}, {"v6", 66}, {"v9", 99}},
[]testVal{{"v5", 55}, {"v7", 77}, {"v8", 88}},
[]testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}, {"v7", 77}, {"v8", 88}, {"v9", 99}}},
// changes
4: { // head
[]testVal{{"v1", 111}, {"v2", 22}},
[]testVal{{"v1", 11}},
[]testVal{{"v1", 11}, {"v2", 22}}},
5: { // tail
[]testVal{{"v1", 11}, {"v2", 222}},
[]testVal{{"v2", 22}},
[]testVal{{"v1", 11}, {"v2", 22}}},
6: { // middle
[]testVal{{"v1", 11}, {"v2", 222}, {"v3", 33}},
[]testVal{{"v2", 22}},
[]testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}},
7: { // multi
[]testVal{{"v1", 111}, {"v2", 222}, {"v3", 333}},
[]testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}},
[]testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}},
// additions and changes
8: {
[]testVal{{"v1", 111}, {"v2", 22}},
[]testVal{{"v1", 11}, {"v3", 33}, {"v4", 44}},
[]testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, {"v4", 44}}},
}
for i, tt := range valSetUpdatesBasicTests {
// create a new validator set with the start values
valSet := createNewValidatorSet(tt.startVals)
// applyUpdates() with the update values
valList := createNewValidatorList(tt.updateVals)
valSet.applyUpdates(valList)
// check the new list of validators for proper merge
assert.Equal(t, getValidatorResults(valSet.Validators), tt.expectedVals, "test %v", i)
verifyValidatorSet(t, valSet)
}
}
func permutation(valList []testVal) []testVal {
if len(valList) == 0 {
return nil
}
permList := make([]testVal, len(valList))
perm := rand.Perm(len(valList))
for i, v := range perm {
permList[v] = valList[i]
}
return permList
}
func createNewValidatorList(testValList []testVal) []*Validator {
valList := make([]*Validator, 0, len(testValList))
for _, val := range testValList {
valList = append(valList, newValidator([]byte(val.name), val.power))
}
return valList
}
func createNewValidatorSet(testValList []testVal) *ValidatorSet {
valList := createNewValidatorList(testValList)
valSet := NewValidatorSet(valList)
return valSet
}
func verifyValidatorSet(t *testing.T, valSet *ValidatorSet) {
// verify that the vals' tvp is set to the sum of the all vals voting powers
tvp := valSet.TotalVotingPower()
assert.Equal(t, valSet.totalVotingPower, tvp,
"expected TVP %d. Got %d, valSet=%s", tvp, valSet.totalVotingPower, valSet)
// verify that validator priorities are centered
l := int64(len(valSet.Validators))
tpp := valSet.TotalVotingPower()
assert.True(t, tpp <= l || tpp >= -l,
"expected total priority in (-%d, %d). Got %d", l, l, tpp)
// verify that priorities are scaled
dist := computeMaxMinPriorityDiff(valSet)
assert.True(t, dist <= PriorityWindowSizeFactor*tvp,
"expected priority distance < %d. Got %d", PriorityWindowSizeFactor*tvp, dist)
}
func BenchmarkUpdates(b *testing.B) {
const (
n = 100
m = 2000
)
// Init with n validators
vs := make([]*Validator, n)
for j := 0; j < n; j++ {
vs[j] = newValidator([]byte(fmt.Sprintf("v%d", j)), 100)
}
valSet := NewValidatorSet(vs)
l := len(valSet.Validators)
// Make m new validators
newValList := make([]*Validator, m)
for j := 0; j < m; j++ {
newValList[j] = newValidator([]byte(fmt.Sprintf("v%d", j+l)), 1000)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
// Add m validators to valSetCopy
valSetCopy := valSet.Copy()
assert.NoError(b, valSetCopy.UpdateWithChangeSet(newValList))
}
}