mirror of https://github.com/fluencelabs/tendermint (synced 2025-07-16 12:51:59 +00:00)

Compare commits: v0.28.1...release/v0 (25 commits)
a97d6995c9, d9d4f3e629, 7a8aeff4b0, de5a6010f0, da95f4aa6d, 4f8769175e, 40c887baf7, d3e8889411, d17969e378, f5f1416a14, 4d36647eea, 87991059aa, c69dbb25ce, bc8874020f, 55d7238708, 4a037f9fe6, dcb8f88525, 73ea5effe5, 4daca1a634, bc00a032c1, 5f4d8e031e, 7b2c4bb493, ec53ce359b, 1f68318875, 1ccc0918f5
CHANGELOG.md (72 lines changed)
@@ -1,5 +1,77 @@

# Changelog

## v0.29.0

*January 21, 2019*

Special thanks to external contributors on this release:
@bradyjoestar, @kunaldhariwal, @gauthamzz, @hrharder

This release is primarily about making some breaking changes to
the Block protocol version before Cosmos launch, and about fixing more issues
in the proposer selection algorithm discovered on Cosmos testnets.

The Block protocol changes include using a standard Merkle tree format (RFC 6962),
fixing some inconsistencies between field orders in the Vote and Proposal structs,
and constraining the hash of the ConsensusParams to include only a few fields.

The proposer selection algorithm saw significant progress,
including a [formal proof by @cwgoes for the base case in Idris](https://github.com/cwgoes/tm-proposer-idris)
and a [much more detailed specification (still in progress) by
@ancazamfir](https://github.com/tendermint/tendermint/pull/3140).

Fixes to the proposer selection algorithm include normalizing the proposer
priorities to mitigate the effects of large changes to the validator set.
That said, we just discovered [another bug](https://github.com/tendermint/tendermint/issues/3181),
which will be fixed in the next breaking release.

While we are trying to stabilize the Block protocol to preserve compatibility
with old chains, there may be some final changes yet to come before Cosmos
launch as we continue to audit and test the software.

Friendly reminder: we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config

* Apps
  - [state] [\#3049](https://github.com/tendermint/tendermint/issues/3049) Total voting power of the validator set is upper bounded by
    `MaxInt64 / 8`. Apps must ensure they do not return changes to the validator
    set that cause this maximum to be exceeded.

* Go API
  - [node] [\#3082](https://github.com/tendermint/tendermint/issues/3082) MetricsProvider now requires you to pass a chain ID
  - [types] [\#2713](https://github.com/tendermint/tendermint/issues/2713) Rename `TxProof.LeafHash` to `TxProof.Leaf`
  - [crypto/merkle] [\#2713](https://github.com/tendermint/tendermint/issues/2713) `SimpleProof.Verify` takes a `leaf` instead of a
    `leafHash` and performs the hashing itself (see the sketch after this list)
* Blockchain Protocol
  * [crypto/merkle] [\#2713](https://github.com/tendermint/tendermint/issues/2713) Merkle trees now match the RFC 6962 specification
  * [types] [\#3078](https://github.com/tendermint/tendermint/issues/3078) Re-order Timestamp and BlockID in CanonicalVote so it's
    consistent with CanonicalProposal (BlockID comes first)
  * [types] [\#3165](https://github.com/tendermint/tendermint/issues/3165) Hash of ConsensusParams only includes BlockSize.MaxBytes and
    BlockSize.MaxGas

* P2P Protocol
  - [consensus] [\#3049](https://github.com/tendermint/tendermint/issues/3049) Normalize priorities to not exceed `2*TotalVotingPower` to mitigate unfair proposer selection
    heavily preferring earlier joined validators in the case of an early bonded large validator unbonding
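To illustrate the `SimpleProof.Verify` change flagged under "Go API" above, here is a hedged sketch of the new call pattern (the items are illustrative; `SimpleProofsFromByteSlices` is the package's existing proof constructor):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	items := [][]byte{[]byte("a"), []byte("b"), []byte("c")}
	root, proofs := merkle.SimpleProofsFromByteSlices(items)
	// As of v0.29.0, Verify hashes the leaf internally; previously callers
	// passed tmhash.Sum(items[0]) here instead of the raw leaf.
	fmt.Println(proofs[0].Verify(root, items[0])) // <nil> on success
}
```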
### FEATURES:

### IMPROVEMENTS:
- [rpc] [\#3065](https://github.com/tendermint/tendermint/issues/3065) Return maxPerPage (100), not defaultPerPage (30), if `per_page` is greater than the max 100.
- [instrumentation] [\#3082](https://github.com/tendermint/tendermint/issues/3082) Add `chain_id` label for all metrics

### BUG FIXES:
- [crypto] [\#3164](https://github.com/tendermint/tendermint/issues/3164) Update `btcd` fork for rare signRFC6979 bug
- [lite] [\#3171](https://github.com/tendermint/tendermint/issues/3171) Fix verifying large validator set changes
- [log] [\#3125](https://github.com/tendermint/tendermint/issues/3125) Fix year format
- [mempool] [\#3168](https://github.com/tendermint/tendermint/issues/3168) Limit tx size to fit in the max reactor msg size
- [scripts] [\#3147](https://github.com/tendermint/tendermint/issues/3147) Fix json2wal for large block parts (@bradyjoestar)

## v0.28.1

*January 18th, 2019*
@@ -1,4 +1,4 @@
-## v0.29.0
+## v0.30.0

*TBD*
Gopkg.lock (generated; 5 lines changed)
@@ -361,11 +361,12 @@
   revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43"

 [[projects]]
-  digest = "1:605b6546f3f43745695298ec2d342d3e952b6d91cdf9f349bea9315f677d759f"
+  digest = "1:83f5e189eea2baad419a6a410984514266ff690075759c87e9ede596809bd0b8"
   name = "github.com/tendermint/btcd"
   packages = ["btcec"]
   pruneopts = "UT"
-  revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"
+  revision = "80daadac05d1cd29571fccf27002d79667a88b58"
+  version = "v0.1.1"

 [[projects]]
   digest = "1:ad9c4c1a4e7875330b1f62906f2830f043a23edb5db997e3a5ac5d3e6eadf80a"
Gopkg.toml

@@ -75,6 +75,10 @@
   name = "github.com/prometheus/client_golang"
   version = "^0.9.1"

+[[constraint]]
+  name = "github.com/tendermint/btcd"
+  version = "v0.1.1"
+
 ###################################
 ## Some repos dont have releases.
 ## Pin to revision

@@ -92,9 +96,6 @@
   name = "github.com/btcsuite/btcutil"
   revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"

-[[constraint]]
-  name = "github.com/tendermint/btcd"
-  revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"
-
 [[constraint]]
   name = "github.com/rcrowley/go-metrics"
Makefile (4 lines changed)
@@ -292,9 +292,7 @@ build-linux:
 	GOOS=linux GOARCH=amd64 $(MAKE) build

 build-docker-localnode:
-	cd networks/local
-	make
-	cd -
+	@cd networks/local && make

 # Run a 4-node testnet locally
 localnet-start: localnet-stop
UPGRADING.md (22 lines changed)
@@ -3,6 +3,28 @@

This guide provides steps to be followed when you upgrade your applications to
a newer version of Tendermint Core.

## v0.29.0

This release contains some breaking changes to the block and p2p protocols,
and will not be compatible with any previous versions of the software, primarily
due to changes in how various data structures are hashed.

Any implementations of Tendermint blockchain verification, including lite clients,
will need to be updated. For specific details:

- [Merkle tree](./docs/spec/blockchain/encoding.md#merkle-trees)
- [ConsensusParams](./docs/spec/blockchain/state.md#consensusparams)

There was also a small change to field ordering in the vote struct. Any
implementations of an out-of-process validator (like a Key-Management Server)
will need to be updated. For specific details:

- [Vote](https://github.com/tendermint/tendermint/blob/develop/docs/spec/consensus/signing.md#votes)

Finally, the proposer selection algorithm continues to evolve. See the
[work-in-progress specification](https://github.com/tendermint/tendermint/pull/3140).

For everything else, please see the [CHANGELOG](./CHANGELOG.md#v0.29.0).

## v0.28.0

This release breaks the format for the `priv_validator.json` file
@@ -311,7 +311,7 @@ func TestLoadBlockPart(t *testing.T) {
 	gotPart, _, panicErr := doFn(loadPart)
 	require.Nil(t, panicErr, "an existent and proper block should not panic")
 	require.Nil(t, res, "a properly saved block should return a proper block")
-	require.Equal(t, gotPart.(*types.Part).Hash(), part1.Hash(),
+	require.Equal(t, gotPart.(*types.Part), part1,
 		"expecting successful retrieval of previously saved block")
 }
@@ -8,7 +8,11 @@ import (
 	stdprometheus "github.com/prometheus/client_golang/prometheus"
 )

-const MetricsSubsystem = "consensus"
+const (
+	// MetricsSubsystem is a subsystem shared by all metrics exposed by this
+	// package.
+	MetricsSubsystem = "consensus"
+)

 // Metrics contains metrics exposed by this package.
 type Metrics struct {

@@ -50,101 +54,107 @@ type Metrics struct {
 }

-// PrometheusMetrics returns Metrics build using Prometheus client library.
-func PrometheusMetrics(namespace string) *Metrics {
+// PrometheusMetrics returns Metrics built using the Prometheus client library.
+// Optionally, labels can be provided along with their values ("foo",
+// "fooValue").
+func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
+	labels := []string{}
+	for i := 0; i < len(labelsAndValues); i += 2 {
+		labels = append(labels, labelsAndValues[i])
+	}
 	return &Metrics{
 		Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "height",
 			Help:      "Height of the chain.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),
 		Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "rounds",
 			Help:      "Number of rounds.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),

 		Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "validators",
 			Help:      "Number of validators.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),
 		ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "validators_power",
 			Help:      "Total power of all validators.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),
 		MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "missing_validators",
 			Help:      "Number of validators who did not sign.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),
 		MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "missing_validators_power",
 			Help:      "Total power of the missing validators.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),
 		ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "byzantine_validators",
 			Help:      "Number of validators who tried to double sign.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),
 		ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "byzantine_validators_power",
 			Help:      "Total power of the byzantine validators.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),

 		BlockIntervalSeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "block_interval_seconds",
 			Help:      "Time between this and the last block.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),

 		NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "num_txs",
 			Help:      "Number of transactions.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),
 		BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "block_size_bytes",
 			Help:      "Size of the block.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),
 		TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "total_txs",
 			Help:      "Total number of transactions.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),
 		CommittedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "latest_block_height",
 			Help:      "The latest block height.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),
 		FastSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "fast_syncing",
 			Help:      "Whether or not a node is fast syncing. 1 if yes, 0 if no.",
-		}, []string{}),
+		}, labels).With(labelsAndValues...),
 		BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "block_parts",
 			Help:      "Number of blockparts transmitted by peer.",
-		}, []string{"peer_id"}),
+		}, append(labels, "peer_id")).With(labelsAndValues...),
	}
}
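As a usage note (not part of the diff): with this change a node can curry a `chain_id` label into every consensus collector. A minimal, hypothetical sketch of the new call pattern:

```go
package main

import (
	"github.com/tendermint/tendermint/consensus"
)

func main() {
	// Label name/value pairs follow the namespace; each collector is
	// registered with the label names, and the values are curried in.
	m := consensus.PrometheusMetrics("tendermint", "chain_id", "my-chain")
	m.Height.Set(1) // exported as tendermint_consensus_height{chain_id="my-chain"}
}
```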
crypto/merkle/hash.go (new file, 21 lines)
```go
package merkle

import (
	"github.com/tendermint/tendermint/crypto/tmhash"
)

// TODO: make these have a large predefined capacity
var (
	leafPrefix  = []byte{0}
	innerPrefix = []byte{1}
)

// returns tmhash(0x00 || leaf)
func leafHash(leaf []byte) []byte {
	return tmhash.Sum(append(leafPrefix, leaf...))
}

// returns tmhash(0x01 || left || right)
func innerHash(left []byte, right []byte) []byte {
	return tmhash.Sum(append(innerPrefix, append(left, right...)...))
}
```
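A brief aside on why the prefixes matter (a sketch, not part of the diff, assuming `bytes` is imported in the same package): leaf and inner hashes live in separate domains, so an inner node cannot be forged as a leaf whose bytes are the concatenation of its children.

```go
// Sketch, within package merkle: demonstrates second-preimage resistance.
func secondPreimageSketch() bool {
	left, right := leafHash([]byte("a")), leafHash([]byte("b"))
	root := innerHash(left, right)             // SHA256(0x01 || left || right)
	forged := leafHash(append(left, right...)) // SHA256(0x00 || left || right)
	return bytes.Equal(root, forged)           // false: the domain prefixes differ
}
```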
@@ -71,11 +71,11 @@ func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) {
 	hasher.Write(value) // does not error
 	vhash := hasher.Sum(nil)

+	bz := new(bytes.Buffer)
 	// Wrap <op.Key, vhash> to hash the KVPair.
-	hasher = tmhash.New()
-	encodeByteSlice(hasher, []byte(op.key)) // does not error
-	encodeByteSlice(hasher, []byte(vhash)) // does not error
-	kvhash := hasher.Sum(nil)
+	encodeByteSlice(bz, []byte(op.key)) // does not error
+	encodeByteSlice(bz, []byte(vhash))  // does not error
+	kvhash := leafHash(bz.Bytes())

 	if !bytes.Equal(kvhash, op.Proof.LeafHash) {
 		return nil, cmn.NewError("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash)
crypto/merkle/rfc6962_test.go (new file, 97 lines)
```go
package merkle

// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// These tests were taken from https://github.com/google/trillian/blob/master/merkle/rfc6962/rfc6962_test.go,
// and consequently fall under the above license.

import (
	"bytes"
	"encoding/hex"
	"testing"

	"github.com/tendermint/tendermint/crypto/tmhash"
)

func TestRFC6962Hasher(t *testing.T) {
	_, leafHashTrail := trailsFromByteSlices([][]byte{[]byte("L123456")})
	leafHash := leafHashTrail.Hash
	_, leafHashTrail = trailsFromByteSlices([][]byte{[]byte{}})
	emptyLeafHash := leafHashTrail.Hash
	for _, tc := range []struct {
		desc string
		got  []byte
		want string
	}{
		// Since creating a merkle tree of no leaves is unsupported here, we skip
		// the corresponding trillian test vector.

		// Check that the empty hash is not the same as the hash of an empty leaf.
		// echo -n 00 | xxd -r -p | sha256sum
		{
			desc: "RFC6962 Empty Leaf",
			want: "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"[:tmhash.Size*2],
			got:  emptyLeafHash,
		},
		// echo -n 004C313233343536 | xxd -r -p | sha256sum
		{
			desc: "RFC6962 Leaf",
			want: "395aa064aa4c29f7010acfe3f25db9485bbd4b91897b6ad7ad547639252b4d56"[:tmhash.Size*2],
			got:  leafHash,
		},
		// echo -n 014E3132334E343536 | xxd -r -p | sha256sum
		{
			desc: "RFC6962 Node",
			want: "aa217fe888e47007fa15edab33c2b492a722cb106c64667fc2b044444de66bbb"[:tmhash.Size*2],
			got:  innerHash([]byte("N123"), []byte("N456")),
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			wantBytes, err := hex.DecodeString(tc.want)
			if err != nil {
				t.Fatalf("hex.DecodeString(%x): %v", tc.want, err)
			}
			if got, want := tc.got, wantBytes; !bytes.Equal(got, want) {
				t.Errorf("got %x, want %x", got, want)
			}
		})
	}
}

func TestRFC6962HasherCollisions(t *testing.T) {
	// Check that different leaves have different hashes.
	leaf1, leaf2 := []byte("Hello"), []byte("World")
	_, leafHashTrail := trailsFromByteSlices([][]byte{leaf1})
	hash1 := leafHashTrail.Hash
	_, leafHashTrail = trailsFromByteSlices([][]byte{leaf2})
	hash2 := leafHashTrail.Hash
	if bytes.Equal(hash1, hash2) {
		t.Errorf("Leaf hashes should differ, but both are %x", hash1)
	}
	// Compute an intermediate subtree hash.
	_, subHash1Trail := trailsFromByteSlices([][]byte{hash1, hash2})
	subHash1 := subHash1Trail.Hash
	// Check that this is not the same as a leaf hash of their concatenation.
	preimage := append(hash1, hash2...)
	_, forgedHashTrail := trailsFromByteSlices([][]byte{preimage})
	forgedHash := forgedHashTrail.Hash
	if bytes.Equal(subHash1, forgedHash) {
		t.Errorf("Hasher is not second-preimage resistant")
	}
	// Swap the order of nodes and check that the hash is different.
	_, subHash2Trail := trailsFromByteSlices([][]byte{hash2, hash1})
	subHash2 := subHash2Trail.Hash
	if bytes.Equal(subHash1, subHash2) {
		t.Errorf("Subtree hash does not depend on the order of leaves")
	}
}
```
@@ -13,14 +13,14 @@ func TestSimpleMap(t *testing.T) {
 		values []string // each string gets converted to []byte in test
 		want   string
 	}{
-		{[]string{"key1"}, []string{"value1"}, "321d150de16dceb51c72981b432b115045383259b1a550adf8dc80f927508967"},
-		{[]string{"key1"}, []string{"value2"}, "2a9e4baf321eac99f6eecc3406603c14bc5e85bb7b80483cbfc75b3382d24a2f"},
+		{[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"},
+		{[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"},
 		// swap order with 2 keys
-		{[]string{"key1", "key2"}, []string{"value1", "value2"}, "c4d8913ab543ba26aa970646d4c99a150fd641298e3367cf68ca45fb45a49881"},
-		{[]string{"key2", "key1"}, []string{"value2", "value1"}, "c4d8913ab543ba26aa970646d4c99a150fd641298e3367cf68ca45fb45a49881"},
+		{[]string{"key1", "key2"}, []string{"value1", "value2"}, "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3"},
+		{[]string{"key2", "key1"}, []string{"value2", "value1"}, "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3"},
 		// swap order with 3 keys
-		{[]string{"key1", "key2", "key3"}, []string{"value1", "value2", "value3"}, "b23cef00eda5af4548a213a43793f2752d8d9013b3f2b64bc0523a4791196268"},
-		{[]string{"key1", "key3", "key2"}, []string{"value1", "value3", "value2"}, "b23cef00eda5af4548a213a43793f2752d8d9013b3f2b64bc0523a4791196268"},
+		{[]string{"key1", "key2", "key3"}, []string{"value1", "value2", "value3"}, "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc"},
+		{[]string{"key1", "key3", "key2"}, []string{"value1", "value3", "value2"}, "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc"},
 	}
 	for i, tc := range tests {
 		db := newSimpleMap()
@@ -5,7 +5,6 @@ import (
 	"errors"
 	"fmt"

-	"github.com/tendermint/tendermint/crypto/tmhash"
 	cmn "github.com/tendermint/tendermint/libs/common"
 )

@@ -67,7 +66,8 @@ func SimpleProofsFromMap(m map[string][]byte) (rootHash []byte, proofs map[strin

 // Verify that the SimpleProof proves the root hash.
 // Check sp.Index/sp.Total manually if needed
-func (sp *SimpleProof) Verify(rootHash []byte, leafHash []byte) error {
+func (sp *SimpleProof) Verify(rootHash []byte, leaf []byte) error {
+	leafHash := leafHash(leaf)
 	if sp.Total < 0 {
 		return errors.New("Proof total must be positive")
 	}

@@ -128,19 +128,19 @@ func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][
 	if len(innerHashes) == 0 {
 		return nil
 	}
-	numLeft := (total + 1) / 2
+	numLeft := getSplitPoint(total)
 	if index < numLeft {
 		leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
 		if leftHash == nil {
 			return nil
 		}
-		return simpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1])
+		return innerHash(leftHash, innerHashes[len(innerHashes)-1])
 	}
 	rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
 	if rightHash == nil {
 		return nil
 	}
-	return simpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash)
+	return innerHash(innerHashes[len(innerHashes)-1], rightHash)
 }

@@ -182,12 +182,13 @@ func trailsFromByteSlices(items [][]byte) (trails []*SimpleProofNode, root *Simp
 	case 0:
 		return nil, nil
 	case 1:
-		trail := &SimpleProofNode{tmhash.Sum(items[0]), nil, nil, nil}
+		trail := &SimpleProofNode{leafHash(items[0]), nil, nil, nil}
 		return []*SimpleProofNode{trail}, trail
 	default:
-		lefts, leftRoot := trailsFromByteSlices(items[:(len(items)+1)/2])
-		rights, rightRoot := trailsFromByteSlices(items[(len(items)+1)/2:])
-		rootHash := simpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash)
+		k := getSplitPoint(len(items))
+		lefts, leftRoot := trailsFromByteSlices(items[:k])
+		rights, rightRoot := trailsFromByteSlices(items[k:])
+		rootHash := innerHash(leftRoot.Hash, rightRoot.Hash)
 		root := &SimpleProofNode{rootHash, nil, nil, nil}
 		leftRoot.Parent = root
 		leftRoot.Right = rightRoot
@@ -1,23 +1,9 @@
 package merkle

 import (
-	"github.com/tendermint/tendermint/crypto/tmhash"
+	"math/bits"
 )

-// simpleHashFromTwoHashes is the basic operation of the Merkle tree: Hash(left | right).
-func simpleHashFromTwoHashes(left, right []byte) []byte {
-	var hasher = tmhash.New()
-	err := encodeByteSlice(hasher, left)
-	if err != nil {
-		panic(err)
-	}
-	err = encodeByteSlice(hasher, right)
-	if err != nil {
-		panic(err)
-	}
-	return hasher.Sum(nil)
-}
-
 // SimpleHashFromByteSlices computes a Merkle tree where the leaves are the byte slice,
 // in the provided order.
 func SimpleHashFromByteSlices(items [][]byte) []byte {

@@ -25,11 +11,12 @@ func SimpleHashFromByteSlices(items [][]byte) []byte {
 	case 0:
 		return nil
 	case 1:
-		return tmhash.Sum(items[0])
+		return leafHash(items[0])
 	default:
-		left := SimpleHashFromByteSlices(items[:(len(items)+1)/2])
-		right := SimpleHashFromByteSlices(items[(len(items)+1)/2:])
-		return simpleHashFromTwoHashes(left, right)
+		k := getSplitPoint(len(items))
+		left := SimpleHashFromByteSlices(items[:k])
+		right := SimpleHashFromByteSlices(items[k:])
+		return innerHash(left, right)
 	}
 }

@@ -44,3 +31,17 @@ func SimpleHashFromMap(m map[string][]byte) []byte {
 	}
 	return sm.Hash()
 }
+
+// getSplitPoint returns the largest power of 2 less than length
+func getSplitPoint(length int) int {
+	if length < 1 {
+		panic("Trying to split a tree with size < 1")
+	}
+	uLength := uint(length)
+	bitlen := bits.Len(uLength)
+	k := 1 << uint(bitlen-1)
+	if k == length {
+		k >>= 1
+	}
+	return k
+}
@@ -34,7 +34,6 @@ func TestSimpleProof(t *testing.T) {

 	// For each item, check the trail.
 	for i, item := range items {
-		itemHash := tmhash.Sum(item)
 		proof := proofs[i]

 		// Check total/index

@@ -43,30 +42,53 @@ func TestSimpleProof(t *testing.T) {
 		require.Equal(t, proof.Total, total, "Unmatched totals: %d vs %d", proof.Total, total)

 		// Verify success
-		err := proof.Verify(rootHash, itemHash)
-		require.NoError(t, err, "Verificatior failed: %v.", err)
+		err := proof.Verify(rootHash, item)
+		require.NoError(t, err, "Verification failed: %v.", err)

 		// Trail too long should make it fail
 		origAunts := proof.Aunts
 		proof.Aunts = append(proof.Aunts, cmn.RandBytes(32))
-		err = proof.Verify(rootHash, itemHash)
+		err = proof.Verify(rootHash, item)
 		require.Error(t, err, "Expected verification to fail for wrong trail length")

 		proof.Aunts = origAunts

 		// Trail too short should make it fail
 		proof.Aunts = proof.Aunts[0 : len(proof.Aunts)-1]
-		err = proof.Verify(rootHash, itemHash)
+		err = proof.Verify(rootHash, item)
 		require.Error(t, err, "Expected verification to fail for wrong trail length")

 		proof.Aunts = origAunts

 		// Mutating the itemHash should make it fail.
-		err = proof.Verify(rootHash, MutateByteSlice(itemHash))
+		err = proof.Verify(rootHash, MutateByteSlice(item))
 		require.Error(t, err, "Expected verification to fail for mutated leaf hash")

 		// Mutating the rootHash should make it fail.
-		err = proof.Verify(MutateByteSlice(rootHash), itemHash)
+		err = proof.Verify(MutateByteSlice(rootHash), item)
 		require.Error(t, err, "Expected verification to fail for mutated root hash")
 	}
 }

+func Test_getSplitPoint(t *testing.T) {
+	tests := []struct {
+		length int
+		want   int
+	}{
+		{1, 0},
+		{2, 1},
+		{3, 2},
+		{4, 2},
+		{5, 4},
+		{10, 8},
+		{20, 16},
+		{100, 64},
+		{255, 128},
+		{256, 128},
+		{257, 256},
+	}
+	for _, tt := range tests {
+		got := getSplitPoint(tt.length)
+		require.Equal(t, tt.want, got, "getSplitPoint(%d) = %v, want %v", tt.length, got, tt.want)
+	}
+}
@@ -45,6 +45,6 @@ Tendermint.

 See the following for more extensive documentation:

 - [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1028)
-- [Tendermint RPC Docs](https://tendermint.github.io/slate/)
+- [Tendermint RPC Docs](https://tendermint.com/rpc/)
 - [Tendermint in Production](../tendermint-core/running-in-production.md)
 - [ABCI spec](./abci-spec.md)
@@ -63,6 +63,13 @@
 		"author": "Zach Balder",
 		"description": "Public service reporting and tracking"
 	},
+	{
+		"name": "ParadigmCore",
+		"url": "https://github.com/ParadigmFoundation/ParadigmCore",
+		"language": "TypeScript",
+		"author": "Paradigm Labs",
+		"description": "Reference implementation of the Paradigm Protocol, and OrderStream network client."
+	},
 	{
 		"name": "Passchain",
 		"url": "https://github.com/trusch/passchain",
@@ -78,7 +78,7 @@ endpoint:

 curl "localhost:26657/tx_search?query=\"account.name='igor'\"&prove=true"
 ```

-Check out [API docs](https://tendermint.github.io/slate/?shell#txsearch)
+Check out [API docs](https://tendermint.com/rpc/#txsearch)
 for more information on query syntax and other options.

 ## Subscribing to transactions

@@ -97,5 +97,5 @@ by providing a query to `/subscribe` RPC endpoint.
 }
 ```

-Check out [API docs](https://tendermint.github.io/slate/#subscribe) for
+Check out [API docs](https://tendermint.com/rpc/#subscribe) for
 more information on query syntax and other options.
@@ -7,6 +7,7 @@

28-08-2018: Third version after Ethan's comments
30-08-2018: AminoOverheadForBlock => MaxAminoOverheadForBlock
31-08-2018: Bounding evidence and chain ID
13-01-2019: Add section on MaxBytes vs MaxDataBytes

## Context

@@ -20,6 +21,32 @@ We should just remove MaxTxs altogether and stick with MaxBytes, and have a

But we can't just reap BlockSize.MaxBytes, since MaxBytes is for the entire block,
not for the txs inside the block. There's extra amino overhead + the actual
headers on top of the actual transactions + evidence + last commit.
We could also consider using a MaxDataBytes instead of, or in addition to, MaxBytes.
## MaxBytes vs MaxDataBytes

[PR #3045](https://github.com/tendermint/tendermint/pull/3045) suggested that
additional clarity/justification was necessary here, with respect to the use
of MaxDataBytes in addition to, or instead of, MaxBytes.

MaxBytes provides a clear limit on the total size of a block that requires no
additional calculation if you want to use it to bound resource usage, and there
has been considerable discussion about optimizing tendermint around 1MB blocks.
Regardless, we need some maximum on the size of a block so we can avoid
unmarshalling blocks that are too big during the consensus, and it seems more
straightforward to provide a single fixed number for this rather than a
computation of "MaxDataBytes + everything else you need to make room for
(signatures, evidence, header)". MaxBytes provides a simple bound so we can
always say "blocks are less than X MB".

Having both MaxBytes and MaxDataBytes feels like unnecessary complexity. It's
not particularly surprising for MaxBytes to imply the maximum size of the
entire block (not just txs); one just has to know that a block includes the header,
txs, evidence, and votes. For more fine-grained control over the txs included in the
block, there is MaxGas. In practice, MaxGas may be expected to do most of
the tx throttling, and MaxBytes to just serve as an upper bound on the total
size. Applications can use MaxGas as a MaxDataBytes by just taking the gas for
every tx to be its size in bytes.
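A hedged sketch of that last idea, not from the ADR itself: an ABCI app can report each tx's size in bytes as its gas cost, so the block's MaxGas effectively becomes a MaxDataBytes limit (the `App` type here is illustrative).

```go
package main

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

type App struct {
	abci.BaseApplication
}

// CheckTx reports the tx's size in bytes as its gas cost, so the block's
// MaxGas limit doubles as a limit on total tx bytes per block.
func (app *App) CheckTx(tx []byte) abci.ResponseCheckTx {
	return abci.ResponseCheckTx{
		Code:      0,
		GasWanted: int64(len(tx)),
	}
}
```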
## Proposed solution

@@ -61,7 +88,7 @@ MaxXXX stayed the same.

## Status

-Proposed.
+Accepted.

## Consequences
@@ -4,7 +4,7 @@ With Docker Compose, you can spin up local testnets with a single command.

 ## Requirements

-1. [Install tendermint](/docs/install.md)
+1. [Install tendermint](/docs/introduction/install.md)
 2. [Install docker](https://docs.docker.com/engine/installation/)
 3. [Install docker-compose](https://docs.docker.com/compose/install/)
@@ -166,6 +166,11 @@ the tags will be hashed into the next block header.

The application may set the validator set during InitChain, and update it during
EndBlock.

Note that the maximum total power of the validator set is bounded by
`MaxTotalVotingPower = MaxInt64 / 8`. Applications are responsible for ensuring
they do not make changes to the validator set that cause it to exceed this
limit.

### InitChain

ResponseInitChain can return a list of validators.

@@ -206,6 +211,7 @@ following rules:

- if the validator does not already exist, it will be added to the validator
  set with the given power
- if the validator does already exist, its power will be adjusted to the given power
- the total power of the new validator set must not exceed MaxTotalVotingPower

Note the updates returned in block `H` will only take effect at block `H+2`.
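A minimal sketch of the kind of guard an app might add before returning updates (the helper name is hypothetical, not from the docs; it assumes individual powers are already small enough that the sum does not overflow):

```go
package main

import "math"

const maxTotalVotingPower = math.MaxInt64 / 8

// validatorUpdatesOK checks the cap Tendermint enforces as of v0.29.0:
// the powers of the validator set that would result from the app's
// EndBlock updates must not exceed MaxInt64 / 8 in total.
func validatorUpdatesOK(resultingPowers []int64) bool {
	var total int64
	for _, p := range resultingPowers {
		total += p
	}
	return total <= maxTotalVotingPower
}
```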
@@ -51,7 +51,7 @@ type Header struct {

 	// hashes of block data
 	LastCommitHash []byte // commit from validators from the last block
-	DataHash       []byte // Merkle root of transactions
+	DataHash       []byte // MerkleRoot of transaction hashes

 	// hashes from the app output from the prev block
 	ValidatorsHash []byte // validators for the current block

@@ -83,25 +83,27 @@ type Version struct {

## BlockID

 The `BlockID` contains two distinct Merkle roots of the block.
-The first, used as the block's main hash, is the Merkle root
-of all the fields in the header. The second, used for secure gossipping of
-the block during consensus, is the Merkle root of the complete serialized block
-cut into parts. The `BlockID` includes these two hashes, as well as the number of
-parts.
+The first, used as the block's main hash, is the MerkleRoot
+of all the fields in the header (ie. `MerkleRoot(header)`).
+The second, used for secure gossipping of the block during consensus,
+is the MerkleRoot of the complete serialized block
+cut into parts (ie. `MerkleRoot(MakeParts(block))`).
+The `BlockID` includes these two hashes, as well as the number of
+parts (ie. `len(MakeParts(block))`).

```go
type BlockID struct {
	Hash        []byte
-	Parts       PartsHeader
+	PartsHeader PartSetHeader
}

-type PartsHeader struct {
-	Hash  []byte
+type PartSetHeader struct {
	Total int32
	Hash  []byte
}
```

-TODO: link to details of merkle sums.
+See [MerkleRoot](/docs/spec/blockchain/encoding.md#MerkleRoot) for details.

## Time

@@ -109,10 +111,6 @@ Tendermint uses the
 [Google.Protobuf.WellKnownTypes.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/timestamp)
 format, which uses two integers, one for Seconds and one for Nanoseconds.

-NOTE: there is currently a small divergence between Tendermint and the
-Google.Protobuf.WellKnownTypes.Timestamp that should be resolved. See [this
-issue](https://github.com/tendermint/go-amino/issues/223) for details.
-
## Data

Data is just a wrapper for a list of transactions, where transactions are

@@ -146,12 +144,12 @@ The vote includes information about the validator signing it.

```go
type Vote struct {
-	Type             SignedMsgType // byte
+	Type             byte
	Height           int64
	Round            int
-	Timestamp        time.Time
	BlockID          BlockID
+	Timestamp        Time
-	ValidatorAddress Address
+	ValidatorAddress []byte
	ValidatorIndex   int
	Signature        []byte
}
```

@@ -164,8 +162,8 @@ a _precommit_ has `vote.Type == 2`.

## Signature

 Signatures in Tendermint are raw bytes representing the underlying signature.
-The only signature scheme currently supported for Tendermint validators is
-ED25519. The signature is the raw 64-byte ED25519 signature.
+
+See the [signature spec](/docs/spec/blockchain/encoding.md#key-types) for more.

## EvidenceData

@@ -192,6 +190,8 @@ type DuplicateVoteEvidence struct {
 }
 ```

+See the [pubkey spec](/docs/spec/blockchain/encoding.md#key-types) for more.
+
## Validation

Here we describe the validation rules for every element in a block.

@@ -209,7 +209,7 @@ the current version of the `state` corresponds to the state
 after executing transactions from the `prevBlock`.
 Elements of an object are accessed as expected,
 ie. `block.Header`.
-See [here](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md) for the definition of `state`.
+See the [definition of `State`](/docs/spec/blockchain/state.md).

### Header

@@ -288,28 +288,25 @@ The first block has `block.Header.TotalTxs = block.Header.NumberTxs`.

LastBlockID is the previous block's BlockID:

```go
-prevBlockParts := MakeParts(prevBlock, state.LastConsensusParams.BlockGossip.BlockPartSize)
+prevBlockParts := MakeParts(prevBlock)
block.Header.LastBlockID == BlockID {
-	Hash: SimpleMerkleRoot(prevBlock.Header),
+	Hash: MerkleRoot(prevBlock.Header),
	PartsHeader{
-		Hash: SimpleMerkleRoot(prevBlockParts),
+		Hash: MerkleRoot(prevBlockParts),
		Total: len(prevBlockParts),
	},
}
```

-Note: it depends on the ConsensusParams,
-which are held in the `state` and may be updated by the application.
-
The first block has `block.Header.LastBlockID == BlockID{}`.

### LastCommitHash

```go
-block.Header.LastCommitHash == SimpleMerkleRoot(block.LastCommit)
+block.Header.LastCommitHash == MerkleRoot(block.LastCommit.Precommits)
```

-Simple Merkle root of the votes included in the block.
+MerkleRoot of the votes included in the block.
These are the votes that committed the previous block.

The first block has `block.Header.LastCommitHash == []byte{}`

### DataHash

```go
-block.Header.DataHash == SimpleMerkleRoot(block.Txs.Txs)
+block.Header.DataHash == MerkleRoot(Hashes(block.Txs.Txs))
```

-Simple Merkle root of the transactions included in the block.
+MerkleRoot of the hashes of transactions included in the block.
+
+Note the transactions are hashed before being included in the Merkle tree,
+so the leaves of the Merkle tree are the hashes, not the transactions
+themselves. This is because transaction hashes are regularly used as identifiers for
+transactions.

### ValidatorsHash

```go
-block.ValidatorsHash == SimpleMerkleRoot(state.Validators)
+block.ValidatorsHash == MerkleRoot(state.Validators)
```

-Simple Merkle root of the current validator set that is committing the block.
+MerkleRoot of the current validator set that is committing the block.
This can be used to validate the `LastCommit` included in the next block.

### NextValidatorsHash

```go
-block.NextValidatorsHash == SimpleMerkleRoot(state.NextValidators)
+block.NextValidatorsHash == MerkleRoot(state.NextValidators)
```

-Simple Merkle root of the next validator set that will be the validator set that commits the next block.
+MerkleRoot of the next validator set that will be the validator set that commits the next block.
This is included so that the current validator set gets a chance to sign the
next validator set's Merkle root.

-### ConsensusParamsHash
+### ConsensusHash

```go
-block.ConsensusParamsHash == TMHASH(amino(state.ConsensusParams))
+block.ConsensusHash == state.ConsensusParams.Hash()
```

-Hash of the amino-encoded consensus parameters.
+Hash of the amino-encoding of a subset of the consensus parameters.

### AppHash

@@ -362,20 +364,20 @@ The first block has `block.Header.AppHash == []byte{}`.

### LastResultsHash

```go
-block.ResultsHash == SimpleMerkleRoot(state.LastResults)
+block.ResultsHash == MerkleRoot(state.LastResults)
```

-Simple Merkle root of the results of the transactions in the previous block.
+MerkleRoot of the results of the transactions in the previous block.

The first block has `block.Header.ResultsHash == []byte{}`.

### EvidenceHash

```go
-block.EvidenceHash == SimpleMerkleRoot(block.Evidence)
+block.EvidenceHash == MerkleRoot(block.Evidence)
```

-Simple Merkle root of the evidence of Byzantine behaviour included in this block.
+MerkleRoot of the evidence of Byzantine behaviour included in this block.

### ProposerAddress
@@ -30,6 +30,12 @@ For example, the byte-array `[0xA, 0xB]` would be encoded as `0x020A0B`,

while a byte-array containing 300 entries beginning with `[0xA, 0xB, ...]` would
be encoded as `0xAC020A0B...` where `0xAC02` is the UVarint encoding of 300.

## Hashing

Tendermint uses `SHA256` as its hash function.
Objects are always Amino encoded before being hashed.
So `SHA256(obj)` is short for `SHA256(AminoEncode(obj))`.
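A sketch of that convention in Go, assuming the go-amino codec of this era (`obj` here is an illustrative type, not from the spec):

```go
package main

import (
	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/crypto/tmhash"
)

type obj struct {
	Field string
}

func main() {
	cdc := amino.NewCodec()
	// "SHA256(obj)" in the spec means: amino-encode first, then hash.
	bz := cdc.MustMarshalBinaryBare(obj{Field: "x"})
	_ = tmhash.Sum(bz) // 32-byte SHA256 digest
}
```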
## Public Key Cryptography

Tendermint uses Amino to distinguish between different types of private keys,

@@ -68,23 +74,27 @@ For example, the 33-byte (or 0x21-byte in hex) Secp256k1 pubkey

would be encoded as
`EB5AE98721020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9`

-### Addresses
+### Key Types

-Addresses for each public key type are computed as follows:
+Each type specifies its own pubkey, address, and signature format.

#### Ed25519

TODO: pubkey

-First 20 bytes of the SHA256 hash of the raw 32-byte public key:
+The address is the first 20 bytes of the SHA256 hash of the raw 32-byte public key:

```
address = SHA256(pubkey)[:20]
```

-NOTE: before v0.22.0, this was the RIPEMD160 of the Amino encoded public key.
+The signature is the raw 64-byte ED25519 signature.
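A sketch of that address derivation in plain Go (the zeroed key bytes are illustrative):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	pubkey := make([]byte, 32) // raw 32-byte Ed25519 public key (illustrative)
	sum := sha256.Sum256(pubkey)
	address := sum[:20] // address = SHA256(pubkey)[:20]
	fmt.Printf("%X\n", address)
}
```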
#### Secp256k1

TODO: pubkey

-RIPEMD160 hash of the SHA256 hash of the OpenSSL compressed public key:
+The address is the RIPEMD160 hash of the SHA256 hash of the OpenSSL compressed public key:

```
address = RIPEMD160(SHA256(pubkey))
```

@@ -92,12 +102,21 @@ address = RIPEMD160(SHA256(pubkey))

This is the same as Bitcoin.

+The signature is the 64-byte concatenation of ECDSA `r` and `s` (ie. `r || s`),
+where `s` is lexicographically less than its inverse, to prevent malleability.
+This is like Ethereum, but without the extra byte for pubkey recovery, since
+Tendermint assumes the pubkey is always provided anyway.
+
+#### Multisig
+
+TODO
+
## Other Common Types

### BitArray

-The BitArray is used in block headers and some consensus messages to signal
-whether or not something was done by each validator. BitArray is represented
+The BitArray is used in some consensus messages to represent votes received from
+validators, or parts received in a block. It is represented
with a struct containing the number of bits (`Bits`) and the bit-array itself
encoded in base64 (`Elems`).

@@ -119,24 +138,27 @@ representing `1` and `0`. Ie. the BitArray `10110` would be JSON encoded as

Part is used to break up blocks into pieces that can be gossiped in parallel
and securely verified using a Merkle tree of the parts.

-Part contains the index of the part in the larger set (`Index`), the actual
-underlying data of the part (`Bytes`), and a simple Merkle proof that the part is contained in
-the larger set (`Proof`).
+Part contains the index of the part (`Index`), the actual
+underlying data of the part (`Bytes`), and a Merkle proof that the part is contained in
+the set (`Proof`).

```go
type Part struct {
	Index int
-	Bytes byte[]
-	Proof byte[]
+	Bytes []byte
+	Proof SimpleProof
}
```

+See details of SimpleProof, below.
+
### MakeParts

Encode an object using Amino and slice it into parts.
+Tendermint uses a part size of 65536 bytes.

```go
-func MakeParts(obj interface{}, partSize int) []Part
+func MakeParts(block Block) []Part
```
## Merkle Trees

@@ -144,12 +166,17 @@ func MakeParts(obj interface{}, partSize int) []Part

For an overview of Merkle trees, see
[wikipedia](https://en.wikipedia.org/wiki/Merkle_tree)

-A Simple Tree is a simple compact binary tree for a static list of items. Simple Merkle trees are used in numerous places in Tendermint to compute a cryptographic digest of a data structure. In a Simple Tree, the transactions and validation signatures of a block are hashed using this simple merkle tree logic.
+We use the RFC 6962 specification of a merkle tree, with sha256 as the hash function.
+Merkle trees are used throughout Tendermint to compute a cryptographic digest of a data structure.
+The differences between RFC 6962 and the simplest form of a merkle tree are that:

-If the number of items is not a power of two, the tree will not be full
-and some leaf nodes will be at different levels. Simple Tree tries to
-keep both sides of the tree the same size, but the left side may be one
-greater, for example:
+1) Leaf nodes and inner nodes have different hashes.
+   This is for "second pre-image resistance", to prevent the proof of an inner node from being valid as the proof of a leaf.
+   The leaf nodes are `SHA256(0x00 || leaf_data)`, and inner nodes are `SHA256(0x01 || left_hash || right_hash)`.
+
+2) When the number of items isn't a power of two, the left half of the tree is as big as it can be
+   (the largest power of two less than the number of items). This allows new leaves to be added with less
+   recomputation. For example:

```
 Simple Tree with 6 items       Simple Tree with 7 items

              *                              *
             / \                            / \
           /     \                        /     \
         /         \                    /         \
       /             \                /             \
      *               *              *               *
     / \             / \            / \             / \
    /   \           /   \          /   \           /   \
   /     \         /     \        /     \         /     \
  *       *       h4     h5      *       *       *       h6
 / \     / \                    / \     / \     / \
h0  h1  h2  h3                 h0  h1  h2  h3  h4  h5
```

-Tendermint always uses the `TMHASH` hash function, which is equivalent to
-SHA256:
-
-```
-func TMHASH(bz []byte) []byte {
-	return SHA256(bz)
-}
-```
-
-### Simple Merkle Root
+### MerkleRoot

-The function `SimpleMerkleRoot` is a simple recursive function defined as follows:
+The function `MerkleRoot` is a simple recursive function defined as follows:

```go
-func SimpleMerkleRoot(hashes [][]byte) []byte{
-	switch len(hashes) {
-	case 0:
-		return nil
-	case 1:
-		return hashes[0]
-	default:
-		left := SimpleMerkleRoot(hashes[:(len(hashes)+1)/2])
-		right := SimpleMerkleRoot(hashes[(len(hashes)+1)/2:])
-		return SimpleConcatHash(left, right)
-	}
-}
-
-func SimpleConcatHash(left, right []byte) []byte{
-	left = encodeByteSlice(left)
-	right = encodeByteSlice(right)
-	return TMHASH(append(left, right))
-}
+// SHA256(0x00 || leaf)
+func leafHash(leaf []byte) []byte {
+	return tmhash.Sum(append(0x00, leaf...))
+}
+
+// SHA256(0x01 || left || right)
+func innerHash(left []byte, right []byte) []byte {
+	return tmhash.Sum(append(0x01, append(left, right...)...))
+}
+
+// largest power of 2 less than k
+func getSplitPoint(k int) { ... }
+
+func MerkleRoot(items [][]byte) []byte{
+	switch len(items) {
+	case 0:
+		return nil
+	case 1:
+		return leafHash(items[0])
+	default:
+		k := getSplitPoint(len(items))
+		left := MerkleRoot(items[:k])
+		right := MerkleRoot(items[k:])
+		return innerHash(left, right)
+	}
+}
```

-Note that the leaves are Amino encoded as byte-arrays (ie. simple Uvarint length
-prefix) before being concatenated together and hashed.
+Note: `MerkleRoot` operates on items which are arbitrary byte arrays, not
+necessarily hashes. For items which need to be hashed first, we introduce the
+`Hashes` function:
+
+```
+func Hashes(items [][]byte) [][]byte {
+	return SHA256 of each item
+}
+```

-Note: we will abuse notation and invoke `SimpleMerkleRoot` with arguments of type `struct` or type `[]struct`.
-For `struct` arguments, we compute a `[][]byte` containing the hash of each
+Note: we will abuse notation and invoke `MerkleRoot` with arguments of type `struct` or type `[]struct`.
+For `struct` arguments, we compute a `[][]byte` containing the amino encoding of each
 field in the struct, in the same order the fields appear in the struct.
-For `[]struct` arguments, we compute a `[][]byte` by hashing the individual `struct` elements.
+For `[]struct` arguments, we compute a `[][]byte` by amino encoding the individual `struct` elements.

### Simple Merkle Proof

-Proof that a leaf is in a Merkle tree consists of a simple structure:
+Proof that a leaf is in a Merkle tree is composed as follows:

-```
+```golang
type SimpleProof struct {
	Total    int
	Index    int
	LeafHash []byte
	Aunts    [][]byte
}
```

-Which is verified using the following:
+Which is verified as follows:

-```
+```golang
-func (proof SimpleProof) Verify(index, total int, leafHash, rootHash []byte) bool {
-	computedHash := computeHashFromAunts(index, total, leafHash, proof.Aunts)
+func (proof SimpleProof) Verify(rootHash []byte, leaf []byte) bool {
+	assert(proof.LeafHash, leafHash(leaf))
+
+	computedHash := computeHashFromAunts(proof.Index, proof.Total, proof.LeafHash, proof.Aunts)
	return computedHash == rootHash
}

@@ -238,26 +276,18 @@ func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byt

	assert(len(innerHashes) > 0)

-	numLeft := (total + 1) / 2
+	numLeft := getSplitPoint(total) // largest power of 2 less than total
	if index < numLeft {
		leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
		assert(leftHash != nil)
-		return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1])
+		return innerHash(leftHash, innerHashes[len(innerHashes)-1])
	}
	rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
	assert(rightHash != nil)
-	return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash)
+	return innerHash(innerHashes[len(innerHashes)-1], rightHash)
}
```

### Simple Tree with Dictionaries

The Simple Tree is used to merkelize a list of items, so to merkelize a
(short) dictionary of key-value pairs, encode the dictionary as an
ordered list of `KVPair` structs. The block hash is such a hash
derived from all the fields of the block `Header`. The state hash is
similarly derived.

### IAVL+ Tree

Because Tendermint only uses a Simple Merkle Tree, application developers are expected to use their own Merkle tree in their applications. For example, the IAVL+ Tree, an immutable self-balancing binary tree for persisting application state, is used by the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/blob/develop/docs/sdk/core/multistore.md).

@@ -301,12 +331,14 @@ type CanonicalVote struct {
 	Type      byte
 	Height    int64 `binary:"fixed64"`
 	Round     int64 `binary:"fixed64"`
-	Timestamp time.Time
 	BlockID   CanonicalBlockID
+	Timestamp time.Time
 	ChainID   string
 }
 ```

 The field ordering and the fixed-size encoding for the first three fields is optimized to ease parsing of SignBytes
 in HSMs. It creates fixed offsets for relevant fields that need to be read in this context.
-See [#1622](https://github.com/tendermint/tendermint/issues/1622) for more details.
+For more details, see the [signing spec](/docs/spec/consensus/signing.md).
+Also, see the motivating discussion in
+[#1622](https://github.com/tendermint/tendermint/issues/1622).
@@ -60,7 +60,7 @@ When hashing the Validator struct, the address is not included,

 because it is redundant with the pubkey.

 The `state.Validators`, `state.LastValidators`, and `state.NextValidators` must always be sorted by validator address,
-so that there is a canonical order for computing the SimpleMerkleRoot.
+so that there is a canonical order for computing the MerkleRoot.

 We also define a `TotalVotingPower` function, to return the total voting power:

@@ -78,6 +78,8 @@ func TotalVotingPower(vals []Validators) int64{

 ConsensusParams define various limits for blockchain data structures.
 Like validator sets, they are set during genesis and can be updated by the application through ABCI.
+When hashed, only a subset of the params are included, to allow the params to
+evolve without breaking the header.

 ```go
 type ConsensusParams struct {

@@ -86,6 +88,18 @@ type ConsensusParams struct {
 	Validator
 }

+type hashedParams struct {
+	BlockMaxBytes int64
+	BlockMaxGas   int64
+}
+
+func (params ConsensusParams) Hash() []byte {
+	SHA256(hashedParams{
+		BlockMaxBytes: params.BlockSize.MaxBytes,
+		BlockMaxGas:   params.BlockSize.MaxGas,
+	})
+}
+
 type BlockSize struct {
 	MaxBytes int64
 	MaxGas   int64
@@ -59,9 +59,9 @@ type PartSetHeader struct {
|
||||
```
|
||||
|
||||
To be included in a valid vote or proposal, BlockID must either represent a `nil` block, or a complete one.
|
||||
We introduce two methods, `BlockID.IsNil()` and `BlockID.IsComplete()` for these cases, respectively.
|
||||
We introduce two methods, `BlockID.IsZero()` and `BlockID.IsComplete()` for these cases, respectively.
|
||||
|
||||
`BlockID.IsNil()` returns true for BlockID `b` if each of the following
|
||||
`BlockID.IsZero()` returns true for BlockID `b` if each of the following
|
||||
are true:
|
||||
|
||||
```
|
||||
@@ -81,7 +81,7 @@ len(b.PartsHeader.Hash) == 32
|
||||
|
||||
## Proposals

- The structure of a propsal for signing looks like:
+ The structure of a proposal for signing looks like:

```
type CanonicalProposal struct {
@@ -118,8 +118,8 @@ type CanonicalVote struct {
  Type      SignedMsgType // type alias for byte
  Height    int64 `binary:"fixed64"`
  Round     int64 `binary:"fixed64"`
- Timestamp time.Time
  BlockID   BlockID
+ Timestamp time.Time
  ChainID   string
}
```

@@ -130,7 +130,7 @@ A vote is valid if each of the following lines evaluates to true for vote `v`:

```
v.Type == 0x1 || v.Type == 0x2
v.Height > 0
v.Round >= 0
- v.BlockID.IsNil() || v.BlockID.IsValid()
+ v.BlockID.IsZero() || v.BlockID.IsComplete()
```

In other words, a vote is valid for signing if it contains the type of a Prevote

@@ -90,7 +90,7 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error {
  // D - first character of the level, uppercase (ASCII only)
  // [2016-05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go)
  // Stopping ... - message
- enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().Format("2016-01-02|15:04:05.000"), msg))
+ enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().Format("2006-01-02|15:04:05.000"), msg))

  if module != unknown {
    enc.buf.WriteString("module=" + module + " ")

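The fix above swaps a literal year for Go's reference time: `Format` layouts must be written against the magic timestamp `Mon Jan 2 15:04:05 MST 2006`, so the old `"2016-01-02"` layout silently produced mangled timestamps. A quick illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// "2006", "01", "02", etc. are layout tokens; "2016" is not.
	ts := time.Date(2019, time.January, 21, 11, 6, 44, 0, time.UTC)
	fmt.Println(ts.Format("2006-01-02|15:04:05.000")) // 2019-01-21|11:06:44.000
}
```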
@@ -35,34 +35,40 @@ func NewBaseVerifier(chainID string, height int64, valset *types.ValidatorSet) *
}

// Implements Verifier.
- func (bc *BaseVerifier) ChainID() string {
-   return bc.chainID
+ func (bv *BaseVerifier) ChainID() string {
+   return bv.chainID
}

// Implements Verifier.
- func (bc *BaseVerifier) Verify(signedHeader types.SignedHeader) error {
+ func (bv *BaseVerifier) Verify(signedHeader types.SignedHeader) error {

-   // We can't verify commits older than bc.height.
-   if signedHeader.Height < bc.height {
+   // We can't verify commits for a different chain.
+   if signedHeader.ChainID != bv.chainID {
+     return cmn.NewError("BaseVerifier chainID is %v, cannot verify chainID %v",
+       bv.chainID, signedHeader.ChainID)
+   }

+   // We can't verify commits older than bv.height.
+   if signedHeader.Height < bv.height {
      return cmn.NewError("BaseVerifier height is %v, cannot verify height %v",
-       bc.height, signedHeader.Height)
+       bv.height, signedHeader.Height)
    }

    // We can't verify with the wrong validator set.
    if !bytes.Equal(signedHeader.ValidatorsHash,
-     bc.valset.Hash()) {
-     return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bc.valset.Hash())
+     bv.valset.Hash()) {
+     return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bv.valset.Hash())
    }

    // Do basic sanity checks.
-   err := signedHeader.ValidateBasic(bc.chainID)
+   err := signedHeader.ValidateBasic(bv.chainID)
    if err != nil {
      return cmn.ErrorWrap(err, "in verify")
    }

    // Check commit signatures.
-   err = bc.valset.VerifyCommit(
-     bc.chainID, signedHeader.Commit.BlockID,
+   err = bv.valset.VerifyCommit(
+     bv.chainID, signedHeader.Commit.BlockID,
      signedHeader.Height, signedHeader.Commit)
    if err != nil {
      return cmn.ErrorWrap(err, "in verify")

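A minimal sketch of the call pattern after this change, as if written inside the lite package; `valset` and `shdr` would come from elsewhere (e.g. a FullCommit), and the constructor signature is the one in the hunk header above.

```go
func verifyWithBase(chainID string, height int64, valset *types.ValidatorSet, shdr types.SignedHeader) error {
	bv := NewBaseVerifier(chainID, height, valset)
	// Fails if shdr is for another chain, is older than height, was not
	// signed by exactly valset, or carries an invalid commit.
	return bv.Verify(shdr)
}
```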
@@ -8,7 +8,7 @@ import (
  "github.com/tendermint/tendermint/types"
)

- // FullCommit is a signed header (the block header and a commit that signs it),
+ // FullCommit contains a SignedHeader (the block header and a commit that signs it),
// the validator set which signed the commit, and the next validator set. The
// next validator set (which is proven from the block header) allows us to
// revert to block-by-block updating of lite Verifier's latest validator set,

@@ -13,6 +13,9 @@ import (
  "github.com/tendermint/tendermint/types"
)

+ var _ PersistentProvider = (*DBProvider)(nil)

// DBProvider stores commits and validator sets in a DB.
type DBProvider struct {
  logger log.Logger
  label  string

@@ -53,10 +53,6 @@ SignedHeader, and that the SignedHeader was to be signed by the exact given
validator set, and that the height of the commit is at least height (or
greater).

- SignedHeader.Commit may be signed by a different validator set, it can get
- verified with a BaseVerifier as long as sufficient signatures from the
- previous validator set are present in the commit.

DynamicVerifier - this Verifier implements an auto-update and persistence
strategy to verify any SignedHeader of the blockchain.

@@ -18,12 +18,17 @@ var _ Verifier = (*DynamicVerifier)(nil)
// "source" provider to obtain the needed FullCommits to securely sync with
// validator set changes. It stores properly validated data on the
// "trusted" local system.
+ // TODO: make this single threaded and create a new
+ // ConcurrentDynamicVerifier that wraps it with concurrency.
+ // see https://github.com/tendermint/tendermint/issues/3170
type DynamicVerifier struct {
- logger log.Logger
  chainID string
- // These are only properly validated data, from local system.
+ logger  log.Logger
+
+ // Already validated, stored locally
  trusted PersistentProvider
- // This is a source of new info, like a node rpc, or other import method.
+
+ // New info, like a node rpc, or other import method.
  source Provider

  // pending map to synchronize concurrent verification requests
@@ -35,8 +40,8 @@ type DynamicVerifier struct {
// trusted provider to store validated data and the source provider to
// obtain missing data (e.g. FullCommits).
//
- // The trusted provider should a CacheProvider, MemProvider or
- // files.Provider. The source provider should be a client.HTTPProvider.
+ // The trusted provider should be a DBProvider.
+ // The source provider should be a client.HTTPProvider.
func NewDynamicVerifier(chainID string, trusted PersistentProvider, source Provider) *DynamicVerifier {
  return &DynamicVerifier{
    logger: log.NewNopLogger(),

@@ -47,68 +52,71 @@ func NewDynamicVerifier(chainID string, trusted PersistentProvider, source Provi
  }
}

- func (ic *DynamicVerifier) SetLogger(logger log.Logger) {
+ func (dv *DynamicVerifier) SetLogger(logger log.Logger) {
  logger = logger.With("module", "lite")
- ic.logger = logger
- ic.trusted.SetLogger(logger)
- ic.source.SetLogger(logger)
+ dv.logger = logger
+ dv.trusted.SetLogger(logger)
+ dv.source.SetLogger(logger)
}

// Implements Verifier.
- func (ic *DynamicVerifier) ChainID() string {
-   return ic.chainID
+ func (dv *DynamicVerifier) ChainID() string {
+   return dv.chainID
}

// Implements Verifier.
//
// If the validators have changed since the last known time, it looks to
- // ic.trusted and ic.source to prove the new validators. On success, it will
- // try to store the SignedHeader in ic.trusted if the next
+ // dv.trusted and dv.source to prove the new validators. On success, it will
+ // try to store the SignedHeader in dv.trusted if the next
// validator can be sourced.
- func (ic *DynamicVerifier) Verify(shdr types.SignedHeader) error {
+ func (dv *DynamicVerifier) Verify(shdr types.SignedHeader) error {

  // Performs synchronization for multi-threads verification at the same height.
- ic.mtx.Lock()
- if pending := ic.pendingVerifications[shdr.Height]; pending != nil {
-   ic.mtx.Unlock()
+ dv.mtx.Lock()
+ if pending := dv.pendingVerifications[shdr.Height]; pending != nil {
+   dv.mtx.Unlock()
    <-pending // pending is chan struct{}
  } else {
    pending := make(chan struct{})
-   ic.pendingVerifications[shdr.Height] = pending
+   dv.pendingVerifications[shdr.Height] = pending
    defer func() {
      close(pending)
-     ic.mtx.Lock()
-     delete(ic.pendingVerifications, shdr.Height)
-     ic.mtx.Unlock()
+     dv.mtx.Lock()
+     delete(dv.pendingVerifications, shdr.Height)
+     dv.mtx.Unlock()
    }()
-   ic.mtx.Unlock()
+   dv.mtx.Unlock()
  }

  // Get the exact trusted commit for h, and if it is
- // equal to shdr, then don't even verify it,
- // and just return nil.
- trustedFCSameHeight, err := ic.trusted.LatestFullCommit(ic.chainID, shdr.Height, shdr.Height)
+ // equal to shdr, then it's already trusted, so
+ // just return nil.
+ trustedFCSameHeight, err := dv.trusted.LatestFullCommit(dv.chainID, shdr.Height, shdr.Height)
  if err == nil {
    // If loading trust commit successfully, and trust commit equal to shdr, then don't verify it,
    // just return nil.
    if bytes.Equal(trustedFCSameHeight.SignedHeader.Hash(), shdr.Hash()) {
-     ic.logger.Info(fmt.Sprintf("Load full commit at height %d from cache, there is not need to verify.", shdr.Height))
+     dv.logger.Info(fmt.Sprintf("Load full commit at height %d from cache, there is not need to verify.", shdr.Height))
      return nil
    }
  } else if !lerr.IsErrCommitNotFound(err) {
    // Return error if it is not CommitNotFound error
-   ic.logger.Info(fmt.Sprintf("Encountered unknown error in loading full commit at height %d.", shdr.Height))
+   dv.logger.Info(fmt.Sprintf("Encountered unknown error in loading full commit at height %d.", shdr.Height))
    return err
  }

  // Get the latest known full commit <= h-1 from our trusted providers.
  // The full commit at h-1 contains the valset to sign for h.
- h := shdr.Height - 1
- trustedFC, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h)
+ prevHeight := shdr.Height - 1
+ trustedFC, err := dv.trusted.LatestFullCommit(dv.chainID, 1, prevHeight)
  if err != nil {
    return err
  }

- if trustedFC.Height() == h {
+ // sync up to the prevHeight and assert our latest NextValidatorSet
+ // is the ValidatorSet for the SignedHeader
+ if trustedFC.Height() == prevHeight {
    // Return error if valset doesn't match.
    if !bytes.Equal(
      trustedFC.NextValidators.Hash(),
@@ -118,11 +126,12 @@ func (ic *DynamicVerifier) Verify(shdr types.SignedHeader) error {
        shdr.Header.ValidatorsHash)
    }
  } else {
-   // If valset doesn't match...
-   if !bytes.Equal(trustedFC.NextValidators.Hash(),
+   // If valset doesn't match, try to update
+   if !bytes.Equal(
+     trustedFC.NextValidators.Hash(),
      shdr.Header.ValidatorsHash) {
-     // ... update.
-     trustedFC, err = ic.updateToHeight(h)
+     trustedFC, err = dv.updateToHeight(prevHeight)
      if err != nil {
        return err
      }

@@ -137,14 +146,21 @@ func (ic *DynamicVerifier) Verify(shdr types.SignedHeader) error {
  }

  // Verify the signed header using the matching valset.
- cert := NewBaseVerifier(ic.chainID, trustedFC.Height()+1, trustedFC.NextValidators)
+ cert := NewBaseVerifier(dv.chainID, trustedFC.Height()+1, trustedFC.NextValidators)
  err = cert.Verify(shdr)
  if err != nil {
    return err
  }

+ // By now, the SignedHeader is fully validated and we're synced up to
+ // SignedHeader.Height - 1. To sync to SignedHeader.Height, we need
+ // the validator set at SignedHeader.Height + 1 so we can verify the
+ // SignedHeader.NextValidatorSet.
+ // TODO: is the ValidateFull below mostly redundant with the BaseVerifier.Verify above?
+ // See https://github.com/tendermint/tendermint/issues/3174.

  // Get the next validator set.
- nextValset, err := ic.source.ValidatorSet(ic.chainID, shdr.Height+1)
+ nextValset, err := dv.source.ValidatorSet(dv.chainID, shdr.Height+1)
  if lerr.IsErrUnknownValidators(err) {
    // Ignore this error.
    return nil
@@ -160,31 +176,31 @@ func (ic *DynamicVerifier) Verify(shdr types.SignedHeader) error {
  }
  // Validate the full commit. This checks the cryptographic
  // signatures of Commit against Validators.
- if err := nfc.ValidateFull(ic.chainID); err != nil {
+ if err := nfc.ValidateFull(dv.chainID); err != nil {
    return err
  }
  // Trust it.
- return ic.trusted.SaveFullCommit(nfc)
+ return dv.trusted.SaveFullCommit(nfc)
}

// verifyAndSave will verify if this is a valid source full commit given the
- // best match trusted full commit, and if good, persist to ic.trusted.
+ // best match trusted full commit, and if good, persist to dv.trusted.
// Returns ErrTooMuchChange when >2/3 of trustedFC did not sign sourceFC.
// Panics if trustedFC.Height() >= sourceFC.Height().
- func (ic *DynamicVerifier) verifyAndSave(trustedFC, sourceFC FullCommit) error {
+ func (dv *DynamicVerifier) verifyAndSave(trustedFC, sourceFC FullCommit) error {
  if trustedFC.Height() >= sourceFC.Height() {
    panic("should not happen")
  }
  err := trustedFC.NextValidators.VerifyFutureCommit(
    sourceFC.Validators,
-   ic.chainID, sourceFC.SignedHeader.Commit.BlockID,
+   dv.chainID, sourceFC.SignedHeader.Commit.BlockID,
    sourceFC.SignedHeader.Height, sourceFC.SignedHeader.Commit,
  )
  if err != nil {
    return err
  }

- return ic.trusted.SaveFullCommit(sourceFC)
+ return dv.trusted.SaveFullCommit(sourceFC)
}

// updateToHeight will use divide-and-conquer to find a path to h.
@@ -192,29 +208,30 @@ func (ic *DynamicVerifier) verifyAndSave(trustedFC, sourceFC FullCommit) error {
// for height h, using repeated applications of bisection if necessary.
//
// Returns ErrCommitNotFound if source provider doesn't have the commit for h.
- func (ic *DynamicVerifier) updateToHeight(h int64) (FullCommit, error) {
+ func (dv *DynamicVerifier) updateToHeight(h int64) (FullCommit, error) {

  // Fetch latest full commit from source.
- sourceFC, err := ic.source.LatestFullCommit(ic.chainID, h, h)
+ sourceFC, err := dv.source.LatestFullCommit(dv.chainID, h, h)
  if err != nil {
    return FullCommit{}, err
  }

- // Validate the full commit. This checks the cryptographic
- // signatures of Commit against Validators.
- if err := sourceFC.ValidateFull(ic.chainID); err != nil {
-   return FullCommit{}, err
- }

  // If sourceFC.Height() != h, we can't do it.
  if sourceFC.Height() != h {
    return FullCommit{}, lerr.ErrCommitNotFound()
  }

+ // Validate the full commit. This checks the cryptographic
+ // signatures of Commit against Validators.
+ if err := sourceFC.ValidateFull(dv.chainID); err != nil {
+   return FullCommit{}, err
+ }

+ // Verify latest FullCommit against trusted FullCommits
FOR_LOOP:
  for {
    // Fetch latest full commit from trusted.
-   trustedFC, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h)
+   trustedFC, err := dv.trusted.LatestFullCommit(dv.chainID, 1, h)
    if err != nil {
      return FullCommit{}, err
    }
@@ -224,21 +241,21 @@ FOR_LOOP:
    }

    // Try to update to full commit with checks.
-   err = ic.verifyAndSave(trustedFC, sourceFC)
+   err = dv.verifyAndSave(trustedFC, sourceFC)
    if err == nil {
      // All good!
      return sourceFC, nil
    }

    // Handle special case when err is ErrTooMuchChange.
-   if lerr.IsErrTooMuchChange(err) {
+   if types.IsErrTooMuchChange(err) {
      // Divide and conquer.
      start, end := trustedFC.Height(), sourceFC.Height()
      if !(start < end) {
        panic("should not happen")
      }
      mid := (start + end) / 2
-     _, err = ic.updateToHeight(mid)
+     _, err = dv.updateToHeight(mid)
      if err != nil {
        return FullCommit{}, err
      }

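Concretely: if the latest trusted commit is at height 4 and the source commit is at height 20, a failed one-step verification recurses to the midpoint 12; once height 12 is trusted, the outer loop retries 20, bisecting again whenever the validator-set change across the remaining gap is still too large.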
@@ -249,8 +266,8 @@ FOR_LOOP:
  }
}

- func (ic *DynamicVerifier) LastTrustedHeight() int64 {
-   fc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, 1<<63-1)
+ func (dv *DynamicVerifier) LastTrustedHeight() int64 {
+   fc, err := dv.trusted.LatestFullCommit(dv.chainID, 1, 1<<63-1)
    if err != nil {
      panic("should not happen")
    }

@@ -10,6 +10,7 @@ import (

  dbm "github.com/tendermint/tendermint/libs/db"
+ log "github.com/tendermint/tendermint/libs/log"
  "github.com/tendermint/tendermint/types"
)

func TestInquirerValidPath(t *testing.T) {
@@ -70,6 +71,70 @@ func TestInquirerValidPath(t *testing.T) {
  assert.Nil(err, "%+v", err)
}

func TestDynamicVerify(t *testing.T) {
  trust := NewDBProvider("trust", dbm.NewMemDB())
  source := NewDBProvider("source", dbm.NewMemDB())

  // 10 commits with one valset, 1 to change,
  // 10 commits with the next one
  n1, n2 := 10, 10
  nCommits := n1 + n2 + 1
  maxHeight := int64(nCommits)
  fcz := make([]FullCommit, nCommits)

  // gen the 2 val sets
  chainID := "dynamic-verifier"
  power := int64(10)
  keys1 := genPrivKeys(5)
  vals1 := keys1.ToValidators(power, 0)
  keys2 := genPrivKeys(5)
  vals2 := keys2.ToValidators(power, 0)

  // make some commits with the first
  for i := 0; i < n1; i++ {
    fcz[i] = makeFullCommit(int64(i), keys1, vals1, vals1, chainID)
  }

  // update the val set
  fcz[n1] = makeFullCommit(int64(n1), keys1, vals1, vals2, chainID)

  // make some commits with the new one
  for i := n1 + 1; i < nCommits; i++ {
    fcz[i] = makeFullCommit(int64(i), keys2, vals2, vals2, chainID)
  }

  // Save everything in the source
  for _, fc := range fcz {
    source.SaveFullCommit(fc)
  }

  // Initialize a Verifier with the initial state.
  err := trust.SaveFullCommit(fcz[0])
  require.Nil(t, err)
  ver := NewDynamicVerifier(chainID, trust, source)
  ver.SetLogger(log.TestingLogger())

  // fetch the latest from the source
  latestFC, err := source.LatestFullCommit(chainID, 1, maxHeight)
  require.NoError(t, err)

  // try to update to the latest
  err = ver.Verify(latestFC.SignedHeader)
  require.NoError(t, err)
}

func makeFullCommit(height int64, keys privKeys, vals, nextVals *types.ValidatorSet, chainID string) FullCommit {
  height += 1
  consHash := []byte("special-params")
  appHash := []byte(fmt.Sprintf("h=%d", height))
  resHash := []byte(fmt.Sprintf("res=%d", height))
  return keys.GenFullCommit(
    chainID, height, nil,
    vals, nextVals,
    appHash, consHash, resHash, 0, len(keys))
}

func TestInquirerVerifyHistorical(t *testing.T) {
  assert, require := assert.New(t), require.New(t)
  trust := NewDBProvider("trust", dbm.NewMemDB())

@@ -25,12 +25,6 @@ func (e errUnexpectedValidators) Error() string {
    e.got, e.want)
}

- type errTooMuchChange struct{}

- func (e errTooMuchChange) Error() string {
-   return "Insufficient signatures to validate due to valset changes"
- }

type errUnknownValidators struct {
  chainID string
  height  int64
@@ -85,22 +79,6 @@ func IsErrUnexpectedValidators(err error) bool {
  return false
}

- //-----------------
- // ErrTooMuchChange

- // ErrTooMuchChange indicates that the underlying validator set was changed by >1/3.
- func ErrTooMuchChange() error {
-   return cmn.ErrorWrap(errTooMuchChange{}, "")
- }

- func IsErrTooMuchChange(err error) bool {
-   if err_, ok := err.(cmn.Error); ok {
-     _, ok := err_.Data().(errTooMuchChange)
-     return ok
-   }
-   return false
- }

//-----------------
// ErrUnknownValidators

@@ -6,6 +6,8 @@ import (
  "github.com/tendermint/tendermint/types"
)

+ var _ PersistentProvider = (*multiProvider)(nil)

// multiProvider allows you to place one or more caches in front of a source
// Provider. It runs through them in order until a match is found.
type multiProvider struct {

@@ -1,7 +1,7 @@
package lite

import (
- log "github.com/tendermint/tendermint/libs/log"
+ "github.com/tendermint/tendermint/libs/log"
  "github.com/tendermint/tendermint/types"
)

@@ -10,6 +10,7 @@ import (
  "github.com/stretchr/testify/require"

  "github.com/tendermint/tendermint/abci/example/kvstore"
+ "github.com/tendermint/tendermint/crypto/merkle"
  "github.com/tendermint/tendermint/lite"
  certclient "github.com/tendermint/tendermint/lite/client"
  nm "github.com/tendermint/tendermint/node"
@@ -143,12 +144,13 @@ func TestTxProofs(t *testing.T) {
  require.NotNil(err)
  require.Contains(err.Error(), "not found")

- // Now let's check with the real tx hash.
+ // Now let's check with the real tx root hash.
  key = types.Tx(tx).Hash()
  res, err = cl.Tx(key, true)
  require.NoError(err, "%#v", err)
  require.NotNil(res)
- err = res.Proof.Validate(key)
+ keyHash := merkle.SimpleHashFromByteSlices([][]byte{key})
+ err = res.Proof.Validate(keyHash)
  assert.NoError(err, "%#v", err)

  commit, err := GetCertifiedCommit(br.Height, cl, cert)

@@ -65,6 +65,9 @@ var (

  // ErrMempoolIsFull means Tendermint & an application can't handle that much load
  ErrMempoolIsFull = errors.New("Mempool is full")

+ // ErrTxTooLarge means the tx is too big to be sent in a message to other peers
+ ErrTxTooLarge = fmt.Errorf("Tx too large. Max size is %d", maxTxSize)
)

// ErrPreCheck is returned when tx is too big
@@ -309,6 +312,13 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
    return ErrMempoolIsFull
  }

+ // The size of the corresponding amino-encoded TxMessage
+ // can't be larger than the maxMsgSize, otherwise we can't
+ // relay it to peers.
+ if len(tx) > maxTxSize {
+   return ErrTxTooLarge
+ }

  if mem.preCheck != nil {
    if err := mem.preCheck(tx); err != nil {
      return ErrPreCheck{err}

@@ -14,10 +14,12 @@ import (
  "github.com/stretchr/testify/assert"
  "github.com/stretchr/testify/require"
+ amino "github.com/tendermint/go-amino"
  "github.com/tendermint/tendermint/abci/example/counter"
  "github.com/tendermint/tendermint/abci/example/kvstore"
  abci "github.com/tendermint/tendermint/abci/types"
  cfg "github.com/tendermint/tendermint/config"
+ cmn "github.com/tendermint/tendermint/libs/common"
  "github.com/tendermint/tendermint/libs/log"
  "github.com/tendermint/tendermint/proxy"
  "github.com/tendermint/tendermint/types"
@@ -394,6 +396,60 @@ func TestMempoolCloseWAL(t *testing.T) {
  require.Equal(t, 1, len(m3), "expecting the wal match in")
}

// Size of the amino encoded TxMessage is the length of the
// encoded byte array, plus 1 for the struct field, plus 4
// for the amino prefix.
func txMessageSize(tx types.Tx) int {
  return amino.ByteSliceSize(tx) + 1 + 4
}

func TestMempoolMaxMsgSize(t *testing.T) {
  app := kvstore.NewKVStoreApplication()
  cc := proxy.NewLocalClientCreator(app)
  mempl := newMempoolWithApp(cc)

  testCases := []struct {
    len int
    err bool
  }{
    // check small txs. no error
    {10, false},
    {1000, false},
    {1000000, false},

    // check around maxTxSize
    // changes from no error to error
    {maxTxSize - 2, false},
    {maxTxSize - 1, false},
    {maxTxSize, false},
    {maxTxSize + 1, true},
    {maxTxSize + 2, true},

    // check around maxMsgSize. all error
    {maxMsgSize - 1, true},
    {maxMsgSize, true},
    {maxMsgSize + 1, true},
  }

  for i, testCase := range testCases {
    caseString := fmt.Sprintf("case %d, len %d", i, testCase.len)

    tx := cmn.RandBytes(testCase.len)
    err := mempl.CheckTx(tx, nil)
    msg := &TxMessage{tx}
    encoded := cdc.MustMarshalBinaryBare(msg)
    require.Equal(t, len(encoded), txMessageSize(tx), caseString)
    if !testCase.err {
      require.True(t, len(encoded) <= maxMsgSize, caseString)
      require.NoError(t, err, caseString)
    } else {
      require.True(t, len(encoded) > maxMsgSize, caseString)
      require.Equal(t, err, ErrTxTooLarge, caseString)
    }
  }
}

func checksumIt(data []byte) string {
  h := md5.New()
  h.Write(data)

@@ -7,7 +7,11 @@ import (
  stdprometheus "github.com/prometheus/client_golang/prometheus"
)

- const MetricsSubsytem = "mempool"
+ const (
+   // MetricsSubsystem is a subsystem shared by all metrics exposed by this
+   // package.
+   MetricsSubsystem = "mempool"
+ )

// Metrics contains metrics exposed by this package.
// see MetricsProvider for descriptions.
@@ -23,33 +27,39 @@ type Metrics struct {
}

// PrometheusMetrics returns Metrics build using Prometheus client library.
- func PrometheusMetrics(namespace string) *Metrics {
+ // Optionally, labels can be provided along with their values ("foo",
+ // "fooValue").
+ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
+   labels := []string{}
+   for i := 0; i < len(labelsAndValues); i += 2 {
+     labels = append(labels, labelsAndValues[i])
+   }
  return &Metrics{
    Size: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
      Namespace: namespace,
-     Subsystem: MetricsSubsytem,
+     Subsystem: MetricsSubsystem,
      Name:      "size",
      Help:      "Size of the mempool (number of uncommitted transactions).",
-   }, []string{}),
+   }, labels).With(labelsAndValues...),
    TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
      Namespace: namespace,
-     Subsystem: MetricsSubsytem,
+     Subsystem: MetricsSubsystem,
      Name:      "tx_size_bytes",
      Help:      "Transaction sizes in bytes.",
      Buckets:   stdprometheus.ExponentialBuckets(1, 3, 17),
-   }, []string{}),
+   }, labels).With(labelsAndValues...),
    FailedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
      Namespace: namespace,
-     Subsystem: MetricsSubsytem,
+     Subsystem: MetricsSubsystem,
      Name:      "failed_txs",
      Help:      "Number of failed transactions.",
-   }, []string{}),
+   }, labels).With(labelsAndValues...),
    RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
      Namespace: namespace,
-     Subsystem: MetricsSubsytem,
+     Subsystem: MetricsSubsystem,
      Name:      "recheck_times",
      Help:      "Number of times transactions are rechecked in the mempool.",
-   }, []string{}),
+   }, labels).With(labelsAndValues...),
  }
}

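With the variadic labels, callers can now stamp every metric with a constant label; node.go (below) uses exactly this mechanism to tag metrics with the chain ID. A minimal sketch of the call, as made from inside the mempool package ("test-chain" is a placeholder value):

```go
// Every mempool metric now carries chain_id="test-chain", e.g. the
// gauge is exported as tendermint_mempool_size{chain_id="test-chain"}.
metrics := PrometheusMetrics("tendermint", "chain_id", "test-chain")
metrics.Size.Set(0)
```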
@@ -6,7 +6,6 @@ import (
  "time"

- amino "github.com/tendermint/go-amino"
  abci "github.com/tendermint/tendermint/abci/types"
  "github.com/tendermint/tendermint/libs/clist"
  "github.com/tendermint/tendermint/libs/log"

@@ -18,8 +17,10 @@ import (
const (
  MempoolChannel = byte(0x30)

- maxMsgSize                 = 1048576 // 1MB TODO make it configurable
- peerCatchupSleepIntervalMS = 100     // If peer is behind, sleep this amount
+ maxMsgSize = 1048576          // 1MB TODO make it configurable
+ maxTxSize  = maxMsgSize - 8   // account for amino overhead of TxMessage

+ peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
)

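With `maxMsgSize` at 1048576, `maxTxSize` works out to 1048568 bytes. The 8 bytes of overhead presumably decompose as 1 byte for the `TxMessage` struct field key, 4 bytes for the registered amino type prefix, and up to 3 bytes for the varint length of a ~1 MB byte slice, which matches the `txMessageSize` helper in the mempool tests above (`amino.ByteSliceSize` includes the length varint).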
// MempoolReactor handles mempool tx broadcasting amongst peers.
@@ -98,11 +99,6 @@ func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
  }
}

- // BroadcastTx is an alias for Mempool.CheckTx. Broadcasting itself happens in peer routines.
- func (memR *MempoolReactor) BroadcastTx(tx types.Tx, cb func(*abci.Response)) error {
-   return memR.Mempool.CheckTx(tx, cb)
- }

// PeerState describes the state of a peer.
type PeerState interface {
  GetHeight() int64

node/node.go
@@ -117,15 +117,17 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
}

// MetricsProvider returns a consensus, p2p and mempool Metrics.
- type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)
+ type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)

// DefaultMetricsProvider returns Metrics build using Prometheus client library
// if Prometheus is enabled. Otherwise, it returns no-op Metrics.
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
- return func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
+ return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
    if config.Prometheus {
-     return cs.PrometheusMetrics(config.Namespace), p2p.PrometheusMetrics(config.Namespace),
-       mempl.PrometheusMetrics(config.Namespace), sm.PrometheusMetrics(config.Namespace)
+     return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
+       p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
+       mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
+       sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
    }
    return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
  }
@@ -274,7 +276,7 @@ func NewNode(config *cfg.Config,
  consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
}

- csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider()
+ csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)

// Make MempoolReactor
mempool := mempl.NewMempool(

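A sketch of the wiring this enables: `DefaultMetricsProvider` closes over the instrumentation config, and `NewNode` later invokes the provider with the genesis doc's chain ID, as the hunks above show. The `"my-chain"` value is a placeholder.

```go
provider := DefaultMetricsProvider(config.Instrumentation)
// Each returned Metrics struct now carries chain_id="my-chain".
csMetrics, p2pMetrics, memplMetrics, smMetrics := provider("my-chain")
_, _, _, _ = csMetrics, p2pMetrics, memplMetrics, smMetrics
```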
@@ -7,7 +7,11 @@ import (
  stdprometheus "github.com/prometheus/client_golang/prometheus"
)

- const MetricsSubsystem = "p2p"
+ const (
+   // MetricsSubsystem is a subsystem shared by all metrics exposed by this
+   // package.
+   MetricsSubsystem = "p2p"
+ )

// Metrics contains metrics exposed by this package.
type Metrics struct {
@@ -24,45 +28,51 @@ type Metrics struct {
}

// PrometheusMetrics returns Metrics build using Prometheus client library.
- func PrometheusMetrics(namespace string) *Metrics {
+ // Optionally, labels can be provided along with their values ("foo",
+ // "fooValue").
+ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
+   labels := []string{}
+   for i := 0; i < len(labelsAndValues); i += 2 {
+     labels = append(labels, labelsAndValues[i])
+   }
  return &Metrics{
    Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
      Namespace: namespace,
      Subsystem: MetricsSubsystem,
      Name:      "peers",
      Help:      "Number of peers.",
-   }, []string{}),
+   }, labels).With(labelsAndValues...),
    PeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
      Namespace: namespace,
      Subsystem: MetricsSubsystem,
      Name:      "peer_receive_bytes_total",
      Help:      "Number of bytes received from a given peer.",
-   }, []string{"peer_id"}),
+   }, append(labels, "peer_id")).With(labelsAndValues...),
    PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
      Namespace: namespace,
      Subsystem: MetricsSubsystem,
      Name:      "peer_send_bytes_total",
      Help:      "Number of bytes sent to a given peer.",
-   }, []string{"peer_id"}),
+   }, append(labels, "peer_id")).With(labelsAndValues...),
    PeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
      Namespace: namespace,
      Subsystem: MetricsSubsystem,
      Name:      "peer_pending_send_bytes",
      Help:      "Number of pending bytes to be sent to a given peer.",
-   }, []string{"peer_id"}),
+   }, append(labels, "peer_id")).With(labelsAndValues...),
    NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
      Namespace: namespace,
      Subsystem: MetricsSubsystem,
      Name:      "num_txs",
      Help:      "Number of transactions submitted by each peer.",
-   }, []string{"peer_id"}),
+   }, append(labels, "peer_id")).With(labelsAndValues...),
  }
}

// NopMetrics returns no-op Metrics.
func NopMetrics() *Metrics {
  return &Metrics{
-   Peers: discard.NewGauge(),
+   Peers:                 discard.NewGauge(),
    PeerReceiveBytesTotal: discard.NewCounter(),
    PeerSendBytesTotal:    discard.NewCounter(),
    PeerPendingSendBytes:  discard.NewGauge(),

@@ -13,7 +13,6 @@ import (
  cmn "github.com/tendermint/tendermint/libs/common"
  "github.com/tendermint/tendermint/libs/log"

- p2pconn "github.com/tendermint/tendermint/p2p/conn"
  "github.com/tendermint/tendermint/types"
)

@@ -186,40 +185,36 @@ func TestSocketPVVoteKeepalive(t *testing.T) {
  }
}

- // TestSocketPVDeadlineTCPOnly is not relevant to Unix domain sockets, since the
- // OS knows instantaneously the state of both sides of the connection.
- func TestSocketPVDeadlineTCPOnly(t *testing.T) {
-   var (
-     addr            = testFreeTCPAddr(t)
-     listenc         = make(chan struct{})
-     thisConnTimeout = 100 * time.Millisecond
-     sc              = newSocketVal(log.TestingLogger(), addr, thisConnTimeout)
-   )
+ func TestSocketPVDeadline(t *testing.T) {
+   for _, tc := range socketTestCases(t) {
+     func() {
+       var (
+         listenc         = make(chan struct{})
+         thisConnTimeout = 100 * time.Millisecond
+         sc              = newSocketVal(log.TestingLogger(), tc.addr, thisConnTimeout)
+       )

-   go func(sc *SocketVal) {
-     defer close(listenc)
+       go func(sc *SocketVal) {
+         defer close(listenc)

-     assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnTimeout)
+         // Note: the TCP connection times out at the accept() phase,
+         // whereas the Unix domain sockets connection times out while
+         // attempting to fetch the remote signer's public key.
+         assert.True(t, IsConnTimeout(sc.Start()))

-     assert.False(t, sc.IsRunning())
-   }(sc)
+         assert.False(t, sc.IsRunning())
+       }(sc)

-   for {
-     conn, err := cmn.Connect(addr)
-     if err != nil {
-       continue
-     }
+       for {
+         _, err := cmn.Connect(tc.addr)
+         if err == nil {
+           break
+         }
+       }

-     _, err = p2pconn.MakeSecretConnection(
-       conn,
-       ed25519.GenPrivKey(),
-     )
-     if err == nil {
-       break
-     }
+       <-listenc
+     }()
  }

- <-listenc
}

func TestRemoteSignVoteErrors(t *testing.T) {

@@ -258,3 +258,18 @@ func handleRequest(req RemoteSignerMsg, chainID string, privVal types.PrivValida

  return res, err
}

+ // IsConnTimeout returns a boolean indicating whether the error is known to
+ // report that a connection timeout occurred. This detects both fundamental
+ // network timeouts, as well as ErrConnTimeout errors.
+ func IsConnTimeout(err error) bool {
+   if cmnErr, ok := err.(cmn.Error); ok {
+     if cmnErr.Data() == ErrConnTimeout {
+       return true
+     }
+   }
+   if _, ok := err.(timeoutError); ok {
+     return true
+   }
+   return false
+ }

@@ -5,6 +5,7 @@ import (
  "testing"
  "time"

+ "github.com/pkg/errors"
  "github.com/stretchr/testify/assert"
  "github.com/stretchr/testify/require"
  "github.com/tendermint/tendermint/crypto/ed25519"
@@ -66,3 +67,24 @@ func TestRemoteSignerRetryTCPOnly(t *testing.T) {
    t.Error("expected remote to observe connection attempts")
  }
}

func TestIsConnTimeoutForFundamentalTimeouts(t *testing.T) {
  // Generate a networking timeout
  dialer := DialTCPFn(testFreeTCPAddr(t), time.Millisecond, ed25519.GenPrivKey())
  _, err := dialer()
  assert.Error(t, err)
  assert.True(t, IsConnTimeout(err))
}

func TestIsConnTimeoutForWrappedConnTimeouts(t *testing.T) {
  dialer := DialTCPFn(testFreeTCPAddr(t), time.Millisecond, ed25519.GenPrivKey())
  _, err := dialer()
  assert.Error(t, err)
  err = cmn.ErrorWrap(ErrConnTimeout, err.Error())
  assert.True(t, IsConnTimeout(err))
}

func TestIsConnTimeoutForNonTimeoutErrors(t *testing.T) {
  assert.False(t, IsConnTimeout(cmn.ErrorWrap(ErrDialRetryMax, "max retries exceeded")))
  assert.False(t, IsConnTimeout(errors.New("completely irrelevant error")))
}

@@ -149,8 +149,10 @@ func validatePage(page, perPage, totalCount int) int {
}

func validatePerPage(perPage int) int {
- if perPage < 1 || perPage > maxPerPage {
+ if perPage < 1 {
    return defaultPerPage
+ } else if perPage > maxPerPage {
+   return maxPerPage
  }
  return perPage
}

@@ -47,7 +47,6 @@ func TestPaginationPage(t *testing.T) {
}

func TestPaginationPerPage(t *testing.T) {

  cases := []struct {
    totalCount int
    perPage    int
@@ -59,7 +58,7 @@ func TestPaginationPerPage(t *testing.T) {
    {5, defaultPerPage, defaultPerPage},
    {5, maxPerPage - 1, maxPerPage - 1},
    {5, maxPerPage, maxPerPage},
-   {5, maxPerPage + 1, defaultPerPage},
+   {5, maxPerPage + 1, maxPerPage},
  }

  for _, c := range cases {

@@ -45,7 +45,10 @@ func main() {
  }
  defer walFile.Close()

- br := bufio.NewReader(f)
+ // the length of tendermint/wal/MsgInfo in the wal.json may exceed the defaultBufSize(4096) of bufio
+ // because of the byte array in BlockPart
+ // leading to unmarshal error: unexpected end of JSON input
+ br := bufio.NewReaderSize(f, 2*types.BlockPartSizeBytes)
  dec := cs.NewWALEncoder(walFile)

  for {

@@ -7,14 +7,26 @@ import (
  stdprometheus "github.com/prometheus/client_golang/prometheus"
)

- const MetricsSubsystem = "state"
+ const (
+   // MetricsSubsystem is a subsystem shared by all metrics exposed by this
+   // package.
+   MetricsSubsystem = "state"
+ )

// Metrics contains metrics exposed by this package.
type Metrics struct {
  // Time between BeginBlock and EndBlock.
  BlockProcessingTime metrics.Histogram
}

- func PrometheusMetrics(namespace string) *Metrics {
+ // PrometheusMetrics returns Metrics build using Prometheus client library.
+ // Optionally, labels can be provided along with their values ("foo",
+ // "fooValue").
+ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
+   labels := []string{}
+   for i := 0; i < len(labelsAndValues); i += 2 {
+     labels = append(labels, labelsAndValues[i])
+   }
  return &Metrics{
    BlockProcessingTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
      Namespace: namespace,
@@ -22,10 +34,11 @@ func PrometheusMetrics(namespace string) *Metrics {
      Name:    "block_processing_time",
      Help:    "Time between BeginBlock and EndBlock in ms.",
      Buckets: stdprometheus.LinearBuckets(1, 10, 10),
-   }, []string{}),
+   }, labels).With(labelsAndValues...),
  }
}

// NopMetrics returns no-op Metrics.
func NopMetrics() *Metrics {
  return &Metrics{
    BlockProcessingTime: discard.NewHistogram(),

@@ -3,6 +3,8 @@ package state
import (
  "bytes"
  "fmt"
+ "math"
+ "math/big"
  "testing"

  "github.com/stretchr/testify/assert"
@@ -263,6 +265,511 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) {
  }
}

func TestProposerFrequency(t *testing.T) {

  // some explicit test cases
  testCases := []struct {
    powers []int64
  }{
    // 2 vals
    {[]int64{1, 1}},
    {[]int64{1, 2}},
    {[]int64{1, 100}},
    {[]int64{5, 5}},
    {[]int64{5, 100}},
    {[]int64{50, 50}},
    {[]int64{50, 100}},
    {[]int64{1, 1000}},

    // 3 vals
    {[]int64{1, 1, 1}},
    {[]int64{1, 2, 3}},
    {[]int64{1, 2, 3}},
    {[]int64{1, 1, 10}},
    {[]int64{1, 1, 100}},
    {[]int64{1, 10, 100}},
    {[]int64{1, 1, 1000}},
    {[]int64{1, 10, 1000}},
    {[]int64{1, 100, 1000}},

    // 4 vals
    {[]int64{1, 1, 1, 1}},
    {[]int64{1, 2, 3, 4}},
    {[]int64{1, 1, 1, 10}},
    {[]int64{1, 1, 1, 100}},
    {[]int64{1, 1, 1, 1000}},
    {[]int64{1, 1, 10, 100}},
    {[]int64{1, 1, 10, 1000}},
    {[]int64{1, 1, 100, 1000}},
    {[]int64{1, 10, 100, 1000}},
  }

  for caseNum, testCase := range testCases {
    // run each case 5 times to sample different
    // initial priorities
    for i := 0; i < 5; i++ {
      valSet := genValSetWithPowers(testCase.powers)
      testProposerFreq(t, caseNum, valSet)
    }
  }

  // some random test cases with up to 300 validators
  maxVals := 100
  maxPower := 1000
  nTestCases := 5
  for i := 0; i < nTestCases; i++ {
    N := cmn.RandInt() % maxVals
    vals := make([]*types.Validator, N)
    totalVotePower := int64(0)
    for j := 0; j < N; j++ {
      votePower := int64(cmn.RandInt() % maxPower)
      totalVotePower += votePower
      privVal := types.NewMockPV()
      pubKey := privVal.GetPubKey()
      val := types.NewValidator(pubKey, votePower)
      val.ProposerPriority = cmn.RandInt64()
      vals[j] = val
    }
    valSet := types.NewValidatorSet(vals)
    valSet.RescalePriorities(totalVotePower)
    testProposerFreq(t, i, valSet)
  }
}

// new val set with given powers and random initial priorities
func genValSetWithPowers(powers []int64) *types.ValidatorSet {
  size := len(powers)
  vals := make([]*types.Validator, size)
  totalVotePower := int64(0)
  for i := 0; i < size; i++ {
    totalVotePower += powers[i]
    val := types.NewValidator(ed25519.GenPrivKey().PubKey(), powers[i])
    val.ProposerPriority = cmn.RandInt64()
    vals[i] = val
  }
  valSet := types.NewValidatorSet(vals)
  valSet.RescalePriorities(totalVotePower)
  return valSet
}

// test a proposer appears as frequently as expected
func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) {
  N := valSet.Size()
  totalPower := valSet.TotalVotingPower()

  // run the proposer selection and track frequencies
  runMult := 1
  runs := int(totalPower) * runMult
  freqs := make([]int, N)
  for i := 0; i < runs; i++ {
    prop := valSet.GetProposer()
    idx, _ := valSet.GetByAddress(prop.Address)
    freqs[idx] += 1
    valSet.IncrementProposerPriority(1)
  }

  // assert frequencies match expected (max off by 1)
  for i, freq := range freqs {
    _, val := valSet.GetByIndex(i)
    expectFreq := int(val.VotingPower) * runMult
    gotFreq := freq
    abs := int(math.Abs(float64(expectFreq - gotFreq)))

    // max bound on expected vs seen freq was proven
    // to be 1 for the 2 validator case in
    // https://github.com/cwgoes/tm-proposer-idris
    // and inferred to generalize to N-1
    bound := N - 1
    require.True(t, abs <= bound, fmt.Sprintf("Case %d val %d (%d): got %d, expected %d", caseNum, i, N, gotFreq, expectFreq))
  }
}

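As a concrete instance of the property being tested: with powers {1, 2, 3}, total power is 6, so a run of 6 rounds (`runMult = 1`) should elect the validators roughly 1, 2, and 3 times respectively, with each observed count allowed to deviate from its expectation by at most `N - 1 = 2`.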
// TestProposerPriorityDoesNotGetResetToZero assert that we preserve accum when calling updateState
|
||||
// see https://github.com/tendermint/tendermint/issues/2718
|
||||
func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) {
|
||||
tearDown, _, state := setupTestCase(t)
|
||||
defer tearDown(t)
|
||||
val1VotingPower := int64(10)
|
||||
val1PubKey := ed25519.GenPrivKey().PubKey()
|
||||
val1 := &types.Validator{Address: val1PubKey.Address(), PubKey: val1PubKey, VotingPower: val1VotingPower}
|
||||
|
||||
state.Validators = types.NewValidatorSet([]*types.Validator{val1})
|
||||
state.NextValidators = state.Validators
|
||||
|
||||
// NewValidatorSet calls IncrementProposerPriority but uses on a copy of val1
|
||||
assert.EqualValues(t, 0, val1.ProposerPriority)
|
||||
|
||||
block := makeBlock(state, state.LastBlockHeight+1)
|
||||
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
|
||||
abciResponses := &ABCIResponses{
|
||||
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
|
||||
}
|
||||
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
|
||||
require.NoError(t, err)
|
||||
updatedState, err := updateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
|
||||
assert.NoError(t, err)
|
||||
curTotal := val1VotingPower
|
||||
// one increment step and one validator: 0 + power - total_power == 0
|
||||
assert.Equal(t, 0+val1VotingPower-curTotal, updatedState.NextValidators.Validators[0].ProposerPriority)
|
||||
|
||||
// add a validator
|
||||
val2PubKey := ed25519.GenPrivKey().PubKey()
|
||||
val2VotingPower := int64(100)
|
||||
updateAddVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: val2VotingPower}
|
||||
validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal})
|
||||
assert.NoError(t, err)
|
||||
updatedState2, err := updateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates)
|
||||
assert.NoError(t, err)
|
||||
|
||||
require.Equal(t, len(updatedState2.NextValidators.Validators), 2)
|
||||
_, updatedVal1 := updatedState2.NextValidators.GetByAddress(val1PubKey.Address())
|
||||
_, addedVal2 := updatedState2.NextValidators.GetByAddress(val2PubKey.Address())
|
||||
// adding a validator should not lead to a ProposerPriority equal to zero (unless the combination of averaging and
|
||||
// incrementing would cause so; which is not the case here)
|
||||
totalPowerBefore2 := curTotal
|
||||
// while adding we compute prio == -1.125 * total:
|
||||
wantVal2ProposerPrio := -(totalPowerBefore2 + (totalPowerBefore2 >> 3))
|
||||
wantVal2ProposerPrio = wantVal2ProposerPrio + val2VotingPower
|
||||
// then increment:
|
||||
totalPowerAfter := val1VotingPower + val2VotingPower
|
||||
// mostest:
|
||||
wantVal2ProposerPrio = wantVal2ProposerPrio - totalPowerAfter
|
||||
avg := big.NewInt(0).Add(big.NewInt(val1VotingPower), big.NewInt(wantVal2ProposerPrio))
|
||||
avg.Div(avg, big.NewInt(2))
|
||||
wantVal2ProposerPrio = wantVal2ProposerPrio - avg.Int64()
|
||||
wantVal1Prio := 0 + val1VotingPower - avg.Int64()
|
||||
assert.Equal(t, wantVal1Prio, updatedVal1.ProposerPriority)
|
||||
assert.Equal(t, wantVal2ProposerPrio, addedVal2.ProposerPriority)
|
||||
|
||||
// Updating a validator does not reset the ProposerPriority to zero:
|
||||
updatedVotingPowVal2 := int64(1)
|
||||
updateVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: updatedVotingPowVal2}
|
||||
validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateVal})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// this will cause the diff of priorities (31)
|
||||
// to be larger than threshold == 2*totalVotingPower (22):
|
||||
updatedState3, err := updateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates)
|
||||
assert.NoError(t, err)
|
||||
|
||||
require.Equal(t, len(updatedState3.NextValidators.Validators), 2)
|
||||
_, prevVal1 := updatedState3.Validators.GetByAddress(val1PubKey.Address())
|
||||
_, updatedVal2 := updatedState3.NextValidators.GetByAddress(val2PubKey.Address())
|
||||
|
||||
// divide previous priorities by 2 == CEIL(31/22) as diff > threshold:
|
||||
expectedVal1PrioBeforeAvg := prevVal1.ProposerPriority/2 + prevVal1.VotingPower
|
||||
wantVal2ProposerPrio = wantVal2ProposerPrio/2 + updatedVotingPowVal2
|
||||
// val1 will be proposer:
|
||||
total := val1VotingPower + updatedVotingPowVal2
|
||||
expectedVal1PrioBeforeAvg = expectedVal1PrioBeforeAvg - total
|
||||
avgI64 := (wantVal2ProposerPrio + expectedVal1PrioBeforeAvg) / 2
|
||||
wantVal2ProposerPrio = wantVal2ProposerPrio - avgI64
|
||||
wantVal1Prio = expectedVal1PrioBeforeAvg - avgI64
|
||||
assert.Equal(t, wantVal2ProposerPrio, updatedVal2.ProposerPriority)
|
||||
_, updatedVal1 = updatedState3.NextValidators.GetByAddress(val1PubKey.Address())
|
||||
assert.Equal(t, wantVal1Prio, updatedVal1.ProposerPriority)
|
||||
}
|
||||
|
||||
func TestProposerPriorityProposerAlternates(t *testing.T) {
|
||||
// Regression test that would fail if the inner workings of
|
||||
// IncrementProposerPriority change.
|
||||
// Additionally, make sure that same power validators alternate if both
|
||||
// have the same voting power (and the 2nd was added later).
|
||||
tearDown, _, state := setupTestCase(t)
|
||||
defer tearDown(t)
|
||||
val1VotingPower := int64(10)
|
||||
val1PubKey := ed25519.GenPrivKey().PubKey()
|
||||
val1 := &types.Validator{Address: val1PubKey.Address(), PubKey: val1PubKey, VotingPower: val1VotingPower}
|
||||
|
||||
// reset state validators to above validator
|
||||
state.Validators = types.NewValidatorSet([]*types.Validator{val1})
|
||||
state.NextValidators = state.Validators
|
||||
// we only have one validator:
|
||||
assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address)
|
||||
|
||||
block := makeBlock(state, state.LastBlockHeight+1)
|
||||
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
|
||||
// no updates:
|
||||
abciResponses := &ABCIResponses{
|
||||
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
|
||||
}
|
||||
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
|
||||
require.NoError(t, err)
|
||||
|
||||
updatedState, err := updateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// 0 + 10 (initial prio) - 10 (avg) - 10 (mostest - total) = -10
|
||||
totalPower := val1VotingPower
|
||||
wantVal1Prio := 0 + val1VotingPower - totalPower
|
||||
assert.Equal(t, wantVal1Prio, updatedState.NextValidators.Validators[0].ProposerPriority)
|
||||
assert.Equal(t, val1PubKey.Address(), updatedState.NextValidators.Proposer.Address)
|
||||
|
||||
// add a validator with the same voting power as the first
|
||||
val2PubKey := ed25519.GenPrivKey().PubKey()
|
||||
updateAddVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: val1VotingPower}
|
||||
validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal})
|
||||
assert.NoError(t, err)
|
||||
|
||||
updatedState2, err := updateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates)
|
||||
assert.NoError(t, err)
|
||||
|
||||
require.Equal(t, len(updatedState2.NextValidators.Validators), 2)
|
||||
assert.Equal(t, updatedState2.Validators, updatedState.NextValidators)
|
||||
|
||||
// val1 will still be proposer as val2 just got added:
|
||||
assert.Equal(t, val1PubKey.Address(), updatedState.NextValidators.Proposer.Address)
|
||||
assert.Equal(t, updatedState2.Validators.Proposer.Address, updatedState2.NextValidators.Proposer.Address)
|
||||
assert.Equal(t, updatedState2.Validators.Proposer.Address, val1PubKey.Address())
|
||||
assert.Equal(t, updatedState2.NextValidators.Proposer.Address, val1PubKey.Address())
	_, updatedVal1 := updatedState2.NextValidators.GetByAddress(val1PubKey.Address())
	_, oldVal1 := updatedState2.Validators.GetByAddress(val1PubKey.Address())
	_, updatedVal2 := updatedState2.NextValidators.GetByAddress(val2PubKey.Address())

	totalPower = val1VotingPower // no update
	v2PrioWhenAddedVal2 := -(totalPower + (totalPower >> 3))
	v2PrioWhenAddedVal2 = v2PrioWhenAddedVal2 + val1VotingPower       // -11 + 10 == -1
	v1PrioWhenAddedVal2 := oldVal1.ProposerPriority + val1VotingPower // -10 + 10 == 0
	totalPower = 2 * val1VotingPower                                  // now we have two validators with that power
	v1PrioWhenAddedVal2 = v1PrioWhenAddedVal2 - totalPower            // mostest
	// have to express the AVG in big.Ints as -1/2 == -1 in big.Int while -1/2 == 0 in int64
	avgSum := big.NewInt(0).Add(big.NewInt(v2PrioWhenAddedVal2), big.NewInt(v1PrioWhenAddedVal2))
	avg := avgSum.Div(avgSum, big.NewInt(2))
	expectedVal2Prio := v2PrioWhenAddedVal2 - avg.Int64()
	totalPower = 2 * val1VotingPower // 10 + 10
	expectedVal1Prio := oldVal1.ProposerPriority + val1VotingPower - avg.Int64() - totalPower
	// val1's ProposerPriority story: -10 (see above) + 10 (voting pow) - (-1) (avg) - 20 (total) == -19
	assert.EqualValues(t, expectedVal1Prio, updatedVal1.ProposerPriority)
	// val2 prio when added: -(totalVotingPower + (totalVotingPower >> 3)) == -11
	// -> -11 + 10 (voting power) - (-1) (avg) == 0
	assert.EqualValues(t, expectedVal2Prio, updatedVal2.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2)

	validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
	require.NoError(t, err)

	updatedState3, err := updateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates)
	assert.NoError(t, err)

	// proposer changes from now on (every iteration) as long as there are no changes in the validator set:
	assert.NotEqual(t, updatedState3.Validators.Proposer.Address, updatedState3.NextValidators.Proposer.Address)

	assert.Equal(t, updatedState3.Validators, updatedState2.NextValidators)
	_, updatedVal1 = updatedState3.NextValidators.GetByAddress(val1PubKey.Address())
	_, oldVal1 = updatedState3.Validators.GetByAddress(val1PubKey.Address())
	_, updatedVal2 = updatedState3.NextValidators.GetByAddress(val2PubKey.Address())
	_, oldVal2 := updatedState3.Validators.GetByAddress(val2PubKey.Address())

	// val2 will be proposer:
	assert.Equal(t, val2PubKey.Address(), updatedState3.NextValidators.Proposer.Address)
	// check if expected proposer prio is matched:

	expectedVal1Prio2 := oldVal1.ProposerPriority + val1VotingPower
	expectedVal2Prio2 := oldVal2.ProposerPriority + val1VotingPower - totalPower
	avgSum = big.NewInt(expectedVal1Prio + expectedVal2Prio)
	avg = avgSum.Div(avgSum, big.NewInt(2))
	expectedVal1Prio -= avg.Int64()
	expectedVal2Prio -= avg.Int64()

	// -19 + 10 - 0 (avg) == -9
	assert.EqualValues(t, expectedVal1Prio2, updatedVal1.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2)
	// 0 + 10 - 0 (avg) - 20 (total) == -10
	assert.EqualValues(t, expectedVal2Prio2, updatedVal2.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2)

	// no changes in voting power and both validators have same voting power
	// -> proposers should alternate:
	oldState := updatedState3
	for i := 0; i < 1000; i++ {
		// no validator updates:
		abciResponses := &ABCIResponses{
			EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
		}
		validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
		require.NoError(t, err)

		updatedState, err := updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
		assert.NoError(t, err)
		// alternate (and cyclic priorities):
		assert.NotEqual(t, updatedState.Validators.Proposer.Address, updatedState.NextValidators.Proposer.Address, "iter: %v", i)
		assert.Equal(t, oldState.Validators.Proposer.Address, updatedState.NextValidators.Proposer.Address, "iter: %v", i)

		_, updatedVal1 = updatedState.NextValidators.GetByAddress(val1PubKey.Address())
		_, updatedVal2 = updatedState.NextValidators.GetByAddress(val2PubKey.Address())

		if i%2 == 0 {
			assert.Equal(t, updatedState.Validators.Proposer.Address, val2PubKey.Address())
			assert.Equal(t, expectedVal1Prio, updatedVal1.ProposerPriority) // -19
			assert.Equal(t, expectedVal2Prio, updatedVal2.ProposerPriority) // 0
		} else {
			assert.Equal(t, updatedState.Validators.Proposer.Address, val1PubKey.Address())
			assert.Equal(t, expectedVal1Prio2, updatedVal1.ProposerPriority) // -9
			assert.Equal(t, expectedVal2Prio2, updatedVal2.ProposerPriority) // -10
		}
		// update for next iteration:
		oldState = updatedState
	}
}
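
// A minimal, standalone sketch (not part of the test file) of why the test
// above routes the average through math/big: int64 division truncates toward
// zero, while big.Int's Div rounds toward negative infinity for a positive
// divisor, so -1/2 == 0 as int64 but -1/2 == -1 as big.Int.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	fmt.Println(int64(-1) / 2) // 0: int64 division truncates toward zero

	q := new(big.Int).Div(big.NewInt(-1), big.NewInt(2))
	fmt.Println(q) // -1: big.Int.Div floors when the divisor is positive
}
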
func TestLargeGenesisValidator(t *testing.T) {
	tearDown, _, state := setupTestCase(t)
	defer tearDown(t)

	genesisVotingPower := int64(types.MaxTotalVotingPower / 1000)
	genesisPubKey := ed25519.GenPrivKey().PubKey()
	// fmt.Println("genesis addr: ", genesisPubKey.Address())
	genesisVal := &types.Validator{Address: genesisPubKey.Address(), PubKey: genesisPubKey, VotingPower: genesisVotingPower}
	// reset state validators to above validator
	state.Validators = types.NewValidatorSet([]*types.Validator{genesisVal})
	state.NextValidators = state.Validators
	require.True(t, len(state.Validators.Validators) == 1)

	// update state a few times with no validator updates
	// asserts that the single validator's ProposerPrio stays the same
	oldState := state
	for i := 0; i < 10; i++ {
		// no updates:
		abciResponses := &ABCIResponses{
			EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
		}
		validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
		require.NoError(t, err)

		block := makeBlock(oldState, oldState.LastBlockHeight+1)
		blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}

		updatedState, err := updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
		// no changes in voting power (ProposerPrio += VotingPower == VotingPower in 1st round; then shiftByAvg == 0,
		// then -Total == -VotingPower)
		// -> no change in ProposerPrio (stays zero):
		assert.EqualValues(t, oldState.NextValidators, updatedState.NextValidators)
		assert.EqualValues(t, 0, updatedState.NextValidators.Proposer.ProposerPriority)

		oldState = updatedState
	}
	// add another validator, do a few iterations (create blocks),
	// add more validators with same voting power as the 2nd,
	// let the genesis validator "unbond",
	// see how long it takes until the effect wears off and both begin to alternate
	// see: https://github.com/tendermint/tendermint/issues/2960
	firstAddedValPubKey := ed25519.GenPrivKey().PubKey()
	firstAddedValVotingPower := int64(10)
	firstAddedVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(firstAddedValPubKey), Power: firstAddedValVotingPower}
	validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal})
	assert.NoError(t, err)
	abciResponses := &ABCIResponses{
		EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}},
	}
	block := makeBlock(oldState, oldState.LastBlockHeight+1)
	blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
	updatedState, err := updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)

	lastState := updatedState
	for i := 0; i < 200; i++ {
		// no updates:
		abciResponses := &ABCIResponses{
			EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
		}
		validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
		require.NoError(t, err)

		block := makeBlock(lastState, lastState.LastBlockHeight+1)
		blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}

		updatedStateInner, err := updateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates)
		lastState = updatedStateInner
	}
	// set state to last state of above iteration
	state = lastState

	// set oldState to state before above iteration
	oldState = updatedState
	_, oldGenesisVal := oldState.NextValidators.GetByAddress(genesisVal.Address)
	_, newGenesisVal := state.NextValidators.GetByAddress(genesisVal.Address)
	_, addedOldVal := oldState.NextValidators.GetByAddress(firstAddedValPubKey.Address())
	_, addedNewVal := state.NextValidators.GetByAddress(firstAddedValPubKey.Address())
	// expect large negative proposer priority for both (genesis validator decreased, 2nd validator increased):
	assert.True(t, oldGenesisVal.ProposerPriority > newGenesisVal.ProposerPriority)
	assert.True(t, addedOldVal.ProposerPriority < addedNewVal.ProposerPriority)

	// add 10 validators with the same voting power as the one added directly after genesis:
	for i := 0; i < 10; i++ {
		addedPubKey := ed25519.GenPrivKey().PubKey()

		addedVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(addedPubKey), Power: firstAddedValVotingPower}
		validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal})
		assert.NoError(t, err)

		abciResponses := &ABCIResponses{
			EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}},
		}
		block := makeBlock(oldState, oldState.LastBlockHeight+1)
		blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
		state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
	}
	require.Equal(t, 10+2, len(state.NextValidators.Validators))

	// remove genesis validator:
	removeGenesisVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(genesisPubKey), Power: 0}
	abciResponses = &ABCIResponses{
		EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}},
	}
	block = makeBlock(oldState, oldState.LastBlockHeight+1)
	blockID = types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
	validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
	require.NoError(t, err)
	updatedState, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
	require.NoError(t, err)
	// the added validators (not the genesis val) should be left:
	assert.Equal(t, 11, len(updatedState.NextValidators.Validators))

	// call update state until the effect for the 3rd added validator
	// being proposer for a long time after the genesis validator left wears off:
	curState := updatedState
	count := 0
	isProposerUnchanged := true
	for isProposerUnchanged {
		abciResponses := &ABCIResponses{
			EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
		}
		validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
		require.NoError(t, err)
		block = makeBlock(curState, curState.LastBlockHeight+1)
		blockID = types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
		curState, err = updateState(curState, blockID, &block.Header, abciResponses, validatorUpdates)
		if !bytes.Equal(curState.Validators.Proposer.Address, curState.NextValidators.Proposer.Address) {
			isProposerUnchanged = false
		}
		count++
	}
	updatedState = curState
	// the proposer changes after this number of blocks
	firstProposerChangeExpectedAfter := 1
	assert.Equal(t, firstProposerChangeExpectedAfter, count)
	// store proposers here to see if we see them again in the same order:
	numVals := len(updatedState.Validators.Validators)
	proposers := make([]*types.Validator, numVals)
	for i := 0; i < 100; i++ {
		// no updates:
		abciResponses := &ABCIResponses{
			EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
		}
		validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
		require.NoError(t, err)

		block := makeBlock(updatedState, updatedState.LastBlockHeight+1)
		blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}

		updatedState, err = updateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates)
		if i > numVals { // expect proposers to cycle through after the first iteration (of numVals blocks):
			if proposers[i%numVals] == nil {
				proposers[i%numVals] = updatedState.NextValidators.Proposer
			} else {
				assert.Equal(t, proposers[i%numVals], updatedState.NextValidators.Proposer)
			}
		}
	}
}
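
// A standalone sketch (hypothetical helper, not part of the test file) of the
// initial priority used for freshly added validators in the tests above:
// they start at -1.125*totalVotingPower, computed as -(total + total>>3)
// since x>>3 == x/8.
package main

import "fmt"

func initialProposerPriority(totalVotingPower int64) int64 {
	return -(totalVotingPower + (totalVotingPower >> 3))
}

func main() {
	// With one existing validator of power 10, a newly added validator
	// starts at -11, matching the "-11" in the comments above.
	fmt.Println(initialProposerPriority(10)) // -11
}
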

func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
	const valSetSize = 2
	tearDown, stateDB, state := setupTestCase(t)
@@ -11,6 +11,7 @@ import (
 
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/merkle"
+	"github.com/tendermint/tendermint/crypto/tmhash"
 	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/version"
 )

@@ -637,6 +638,7 @@ func (commit *Commit) StringIndented(indent string) string {
 //-----------------------------------------------------------------------------
 
 // SignedHeader is a header along with the commits that prove it.
+// It is the basis of the lite client.
 type SignedHeader struct {
 	*Header `json:"header"`
 	Commit  *Commit `json:"commit"`

@@ -789,11 +791,6 @@ type BlockID struct {
 	PartsHeader PartSetHeader `json:"parts"`
 }
 
-// IsZero returns true if this is the BlockID for a nil-block
-func (blockID BlockID) IsZero() bool {
-	return len(blockID.Hash) == 0 && blockID.PartsHeader.IsZero()
-}
-
 // Equals returns true if the BlockID matches the given BlockID
 func (blockID BlockID) Equals(other BlockID) bool {
 	return bytes.Equal(blockID.Hash, other.Hash) &&

@@ -821,6 +818,19 @@ func (blockID BlockID) ValidateBasic() error {
 	return nil
 }
 
+// IsZero returns true if this is the BlockID of a nil block.
+func (blockID BlockID) IsZero() bool {
+	return len(blockID.Hash) == 0 &&
+		blockID.PartsHeader.IsZero()
+}
+
+// IsComplete returns true if this is a valid BlockID of a non-nil block.
+func (blockID BlockID) IsComplete() bool {
+	return len(blockID.Hash) == tmhash.Size &&
+		blockID.PartsHeader.Total > 0 &&
+		len(blockID.PartsHeader.Hash) == tmhash.Size
+}
+
 // String returns a human readable string representation of the BlockID
 func (blockID BlockID) String() string {
 	return fmt.Sprintf(`%v:%v`, blockID.Hash, blockID.PartsHeader)
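
// A standalone sketch (hypothetical helper, not part of the diff) showing the
// rule the two new predicates support together: a BlockID should be either
// completely empty (nil block) or complete, never half-filled.
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

func checkBlockID(bid types.BlockID) error {
	if !bid.IsZero() && !bid.IsComplete() {
		return fmt.Errorf("BlockID must be either empty or complete, got: %v", bid)
	}
	return nil
}

func main() {
	fmt.Println(checkBlockID(types.BlockID{})) // <nil>: a zero BlockID is valid (nil block)
}
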
@@ -36,8 +36,8 @@ type CanonicalVote struct {
 	Type      SignedMsgType // type alias for byte
 	Height    int64         `binary:"fixed64"`
 	Round     int64         `binary:"fixed64"`
-	Timestamp time.Time
 	BlockID   CanonicalBlockID
+	Timestamp time.Time
 	ChainID   string
 }

@@ -75,8 +75,8 @@ func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote {
 		Type:      vote.Type,
 		Height:    vote.Height,
 		Round:     int64(vote.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int)
-		Timestamp: vote.Timestamp,
 		BlockID:   CanonicalizeBlockID(vote.BlockID),
+		Timestamp: vote.Timestamp,
 		ChainID:   chainID,
 	}
 }

@@ -22,6 +22,14 @@ type ConsensusParams struct {
 	Validator ValidatorParams `json:"validator"`
 }
 
+// HashedParams is a subset of ConsensusParams.
+// It is amino encoded and hashed into
+// the Header.ConsensusHash.
+type HashedParams struct {
+	BlockMaxBytes int64
+	BlockMaxGas   int64
+}
+
 // BlockSizeParams define limits on the block size.
 type BlockSizeParams struct {
 	MaxBytes int64 `json:"max_bytes"`

@@ -116,13 +124,16 @@ func (params *ConsensusParams) Validate() error {
 	return nil
 }
 
-// Hash returns a hash of the parameters to store in the block header
-// No Merkle tree here, only three values are hashed here
-// thus benefit from saving space < drawbacks from proofs' overhead
-// Revisit this function if new fields are added to ConsensusParams
+// Hash returns a hash of a subset of the parameters to store in the block header.
+// Only the Block.MaxBytes and Block.MaxGas are included in the hash.
+// This allows the ConsensusParams to evolve more without breaking the block
+// protocol. No need for a Merkle tree here, just a small struct to hash.
 func (params *ConsensusParams) Hash() []byte {
 	hasher := tmhash.New()
-	bz := cdcEncode(params)
+	bz := cdcEncode(HashedParams{
+		params.BlockSize.MaxBytes,
+		params.BlockSize.MaxGas,
+	})
 	if bz == nil {
 		panic("cannot fail to encode ConsensusParams")
 	}
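
// A standalone sketch (assuming the exported API above) of what constraining
// the hash to HashedParams buys: fields outside Block.MaxBytes/Block.MaxGas
// can change without changing Header.ConsensusHash.
package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/types"
)

func main() {
	params := types.DefaultConsensusParams()
	h1 := params.Hash()

	params.Evidence.MaxAge++ // not part of HashedParams
	h2 := params.Hash()
	fmt.Println(bytes.Equal(h1, h2)) // true: the hash only commits to MaxBytes and MaxGas
}
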
@@ -9,7 +9,6 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/tendermint/tendermint/crypto/merkle"
-	"github.com/tendermint/tendermint/crypto/tmhash"
 	cmn "github.com/tendermint/tendermint/libs/common"
 )

@@ -27,16 +26,6 @@ type Part struct {
 	hash []byte
 }
 
-func (part *Part) Hash() []byte {
-	if part.hash != nil {
-		return part.hash
-	}
-	hasher := tmhash.New()
-	hasher.Write(part.Bytes) // nolint: errcheck, gas
-	part.hash = hasher.Sum(nil)
-	return part.hash
-}
-
 // ValidateBasic performs basic validation.
 func (part *Part) ValidateBasic() error {
 	if part.Index < 0 {

@@ -75,7 +64,7 @@ func (psh PartSetHeader) String() string {
 }
 
 func (psh PartSetHeader) IsZero() bool {
-	return psh.Total == 0
+	return psh.Total == 0 && len(psh.Hash) == 0
 }
 
 func (psh PartSetHeader) Equals(other PartSetHeader) bool {

@@ -217,7 +206,7 @@ func (ps *PartSet) AddPart(part *Part) (bool, error) {
 	}
 
 	// Check hash proof
-	if part.Proof.Verify(ps.Hash(), part.Hash()) != nil {
+	if part.Proof.Verify(ps.Hash(), part.Bytes) != nil {
 		return false, ErrPartSetInvalidProof
 	}
 
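
// A standalone sketch of the RFC 6962 proof convention the change above relies
// on: SimpleProof.Verify now takes the raw leaf (here, part.Bytes) and applies
// the domain-separated leaf hash itself.
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	leaves := [][]byte{[]byte("part0"), []byte("part1")}
	root, proofs := merkle.SimpleProofsFromByteSlices(leaves)

	// Verify against the raw leaf, not its hash:
	fmt.Println(proofs[0].Verify(root, leaves[0])) // <nil>
}
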
@@ -60,6 +60,10 @@ func (p *Proposal) ValidateBasic() error {
 	if err := p.BlockID.ValidateBasic(); err != nil {
 		return fmt.Errorf("Wrong BlockID: %v", err)
 	}
+	// ValidateBasic above would pass even if the BlockID was empty:
+	if !p.BlockID.IsComplete() {
+		return fmt.Errorf("Expected a complete, non-empty BlockID, got: %v", p.BlockID)
+	}
 
 	// NOTE: Timestamp validation is subtle and handled elsewhere.
 
@@ -3,25 +3,20 @@ package types
 import (
 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/crypto/merkle"
-	"github.com/tendermint/tendermint/crypto/tmhash"
 	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //-----------------------------------------------------------------------------
 
 // ABCIResult is the deterministic component of a ResponseDeliverTx.
-// TODO: add Tags
+// TODO: add tags and other fields
+// https://github.com/tendermint/tendermint/issues/1007
 type ABCIResult struct {
 	Code uint32       `json:"code"`
 	Data cmn.HexBytes `json:"data"`
 }
 
-// Hash returns the canonical hash of the ABCIResult
-func (a ABCIResult) Hash() []byte {
-	bz := tmhash.Sum(cdcEncode(a))
-	return bz
-}
-
 // Bytes returns the amino encoded ABCIResult
 func (a ABCIResult) Bytes() []byte {
 	return cdcEncode(a)
 }

@@ -16,20 +16,21 @@ func TestABCIResults(t *testing.T) {
 	e := ABCIResult{Code: 14, Data: []byte("foo")}
 	f := ABCIResult{Code: 14, Data: []byte("bar")}
 
-	// Nil and []byte{} should produce the same hash.
-	require.Equal(t, a.Hash(), a.Hash())
-	require.Equal(t, b.Hash(), b.Hash())
-	require.Equal(t, a.Hash(), b.Hash())
+	// Nil and []byte{} should produce the same bytes
+	require.Equal(t, a.Bytes(), a.Bytes())
+	require.Equal(t, b.Bytes(), b.Bytes())
+	require.Equal(t, a.Bytes(), b.Bytes())
 
 	// a and b should be the same, don't go in results.
 	results := ABCIResults{a, c, d, e, f}
 
-	// Make sure each result hashes properly.
+	// Make sure each result serializes differently
 	var last []byte
-	for i, res := range results {
-		h := res.Hash()
-		assert.NotEqual(t, last, h, "%d", i)
-		last = h
+	assert.Equal(t, last, a.Bytes()) // first one is empty
+	for i, res := range results[1:] {
+		bz := res.Bytes()
+		assert.NotEqual(t, last, bz, "%d", i)
+		last = bz
 	}
 
 	// Make sure that we can get a root hash from results and verify proofs.

@@ -38,7 +39,7 @@ func TestABCIResults(t *testing.T) {
 
 	for i, res := range results {
 		proof := results.ProveResult(i)
-		valid := proof.Verify(root, res.Hash())
+		valid := proof.Verify(root, res.Bytes())
 		assert.NoError(t, valid, "%d", i)
 	}
 }

types/tx.go
@@ -31,13 +31,14 @@ func (tx Tx) String() string {
 // Txs is a slice of Tx.
 type Txs []Tx
 
-// Hash returns the simple Merkle root hash of the transactions.
+// Hash returns the Merkle root hash of the transaction hashes.
+// i.e. the leaves of the tree are the hashes of the txs.
 func (txs Txs) Hash() []byte {
 	// These allocations will be removed once Txs is switched to [][]byte,
 	// ref #2603. This is because golang does not allow type casting slices without unsafe
 	txBzs := make([][]byte, len(txs))
 	for i := 0; i < len(txs); i++ {
-		txBzs[i] = txs[i]
+		txBzs[i] = txs[i].Hash()
 	}
 	return merkle.SimpleHashFromByteSlices(txBzs)
 }

@@ -69,7 +70,7 @@ func (txs Txs) Proof(i int) TxProof {
 	l := len(txs)
 	bzs := make([][]byte, l)
 	for i := 0; i < l; i++ {
-		bzs[i] = txs[i]
+		bzs[i] = txs[i].Hash()
 	}
 	root, proofs := merkle.SimpleProofsFromByteSlices(bzs)
 
@@ -87,8 +88,8 @@ type TxProof struct {
 	Proof    merkle.SimpleProof
 }
 
-// LeadHash returns the hash of the this proof refers to.
-func (tp TxProof) LeafHash() []byte {
+// Leaf returns the hash(tx), which is the leaf in the merkle tree which this proof refers to.
+func (tp TxProof) Leaf() []byte {
 	return tp.Data.Hash()
 }

@@ -104,7 +105,7 @@ func (tp TxProof) Validate(dataHash []byte) error {
 	if tp.Proof.Total <= 0 {
 		return errors.New("Proof total must be positive")
 	}
-	valid := tp.Proof.Verify(tp.RootHash, tp.LeafHash())
+	valid := tp.Proof.Verify(tp.RootHash, tp.Leaf())
 	if valid != nil {
 		return errors.New("Proof is not internally consistent")
 	}

@@ -66,14 +66,13 @@ func TestValidTxProof(t *testing.T) {
 	root := txs.Hash()
 	// make sure valid proof for every tx
 	for i := range txs {
-		leaf := txs[i]
-		leafHash := leaf.Hash()
+		tx := []byte(txs[i])
 		proof := txs.Proof(i)
 		assert.Equal(t, i, proof.Proof.Index, "%d: %d", h, i)
 		assert.Equal(t, len(txs), proof.Proof.Total, "%d: %d", h, i)
 		assert.EqualValues(t, root, proof.RootHash, "%d: %d", h, i)
-		assert.EqualValues(t, leaf, proof.Data, "%d: %d", h, i)
-		assert.EqualValues(t, leafHash, proof.LeafHash(), "%d: %d", h, i)
+		assert.EqualValues(t, tx, proof.Data, "%d: %d", h, i)
+		assert.EqualValues(t, txs[i].Hash(), proof.Leaf(), "%d: %d", h, i)
 		assert.Nil(t, proof.Validate(root), "%d: %d", h, i)
 		assert.NotNil(t, proof.Validate([]byte("foobar")), "%d: %d", h, i)
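
// A standalone sketch confirming the new leaf convention for transactions:
// Txs.Hash() is the simple Merkle root over the per-transaction hashes.
package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
	"github.com/tendermint/tendermint/types"
)

func main() {
	txs := types.Txs{types.Tx("foo"), types.Tx("bar")}

	leaves := make([][]byte, len(txs))
	for i, tx := range txs {
		leaves[i] = tx.Hash() // leaves are hashes of the txs
	}
	fmt.Println(bytes.Equal(txs.Hash(), merkle.SimpleHashFromByteSlices(leaves))) // true
}
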
@@ -4,8 +4,6 @@ import (
 	"bytes"
 	"fmt"
 
-	"github.com/tendermint/tendermint/crypto/tmhash"
-
 	"github.com/tendermint/tendermint/crypto"
 	cmn "github.com/tendermint/tendermint/libs/common"
 )

@@ -70,12 +68,6 @@ func (v *Validator) String() string {
 		v.ProposerPriority)
 }
 
-// Hash computes the unique ID of a validator with a given voting power.
-// It excludes the ProposerPriority value, which changes with every round.
-func (v *Validator) Hash() []byte {
-	return tmhash.Sum(v.Bytes())
-}
-
 // Bytes computes the unique encoding of a validator with a given voting power.
 // These are the bytes that gets hashed in consensus. It excludes address
 // as its redundant with the pubkey. This also excludes ProposerPriority

@@ -13,13 +13,13 @@ import (
 )
 
 // The maximum allowed total voting power.
-// We set the ProposerPriority of freshly added validators to -1.125*totalVotingPower.
-// To compute 1.125*totalVotingPower efficiently, we do:
-// totalVotingPower + (totalVotingPower >> 3) because
-// x + (x >> 3) = x + x/8 = x * (1 + 0.125).
-// MaxTotalVotingPower is the largest int64 `x` with the property that `x + (x >> 3)` is
-// still in the bounds of int64.
-const MaxTotalVotingPower = int64(8198552921648689607)
+// It needs to be sufficiently small to, in all cases:
+// 1. prevent clipping in incrementProposerPriority()
+// 2. let (diff+diffMax-1) not overflow in IncrementProposerPriority()
+// (Proof of 1 is tricky, left to the reader).
+// It could be higher, but this is sufficiently large for our purposes,
+// and leaves room for defensive purposes.
+const MaxTotalVotingPower = int64(math.MaxInt64) / 8
 
 // ValidatorSet represent a set of *Validator at a given height.
 // The validators can be fetched by address or index.
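
// A standalone sketch of the bound: with x = MaxInt64/8, the expression
// x + (x >> 3) == 1.125*x still fits in an int64 (a signed overflow would
// wrap to a negative value).
package main

import (
	"fmt"
	"math"
)

func main() {
	x := int64(math.MaxInt64) / 8
	sum := x + (x >> 3)
	fmt.Println(sum > 0) // true: no wraparound
	fmt.Println(sum)     // 1297036692682702846, well below MaxInt64
}
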
@@ -78,44 +78,57 @@ func (vals *ValidatorSet) IncrementProposerPriority(times int) {
 		panic("Cannot call IncrementProposerPriority with non-positive times")
 	}
 
-	const shiftEveryNthIter = 10
+	// Cap the difference between priorities to be proportional to 2*totalPower by
+	// re-normalizing priorities, i.e., rescale all priorities by multiplying with:
+	//  2*totalVotingPower/(maxPriority - minPriority)
+	diffMax := 2 * vals.TotalVotingPower()
+	vals.RescalePriorities(diffMax)
+
 	var proposer *Validator
 	// call IncrementProposerPriority(1) times times:
 	for i := 0; i < times; i++ {
-		shiftByAvgProposerPriority := i%shiftEveryNthIter == 0
-		proposer = vals.incrementProposerPriority(shiftByAvgProposerPriority)
-	}
-	isShiftedAvgOnLastIter := (times-1)%shiftEveryNthIter == 0
-	if !isShiftedAvgOnLastIter {
-		validatorsHeap := cmn.NewHeap()
-		vals.shiftByAvgProposerPriority(validatorsHeap)
+		proposer = vals.incrementProposerPriority()
 	}
+	vals.shiftByAvgProposerPriority()
 
 	vals.Proposer = proposer
 }
 
-func (vals *ValidatorSet) incrementProposerPriority(subAvg bool) *Validator {
-	for _, val := range vals.Validators {
-		// Check for overflow for sum.
-		val.ProposerPriority = safeAddClip(val.ProposerPriority, val.VotingPower)
+func (vals *ValidatorSet) RescalePriorities(diffMax int64) {
+	// NOTE: This check is merely a sanity check which could be
+	// removed if all tests would init. voting power appropriately;
+	// i.e. diffMax should always be > 0
+	if diffMax <= 0 {
+		return
 	}
 
-	validatorsHeap := cmn.NewHeap()
-	if subAvg { // shift by avg ProposerPriority
-		vals.shiftByAvgProposerPriority(validatorsHeap)
-	} else { // just update the heap
-		for _, val := range vals.Validators {
-			validatorsHeap.PushComparable(val, proposerPriorityComparable{val})
+	// Calculating ceil(diff/diffMax):
+	// Re-normalization is performed by dividing by an integer for simplicity.
+	// NOTE: This may make debugging priority issues easier as well.
+	diff := computeMaxMinPriorityDiff(vals)
+	ratio := (diff + diffMax - 1) / diffMax
+	if ratio > 1 {
+		for _, val := range vals.Validators {
+			val.ProposerPriority /= ratio
 		}
 	}
+}
 
+func (vals *ValidatorSet) incrementProposerPriority() *Validator {
+	for _, val := range vals.Validators {
+		// Check for overflow for sum.
+		newPrio := safeAddClip(val.ProposerPriority, val.VotingPower)
+		val.ProposerPriority = newPrio
+	}
 	// Decrement the validator with most ProposerPriority:
-	mostest := validatorsHeap.Peek().(*Validator)
+	mostest := vals.getValWithMostPriority()
 	// mind underflow
 	mostest.ProposerPriority = safeSubClip(mostest.ProposerPriority, vals.TotalVotingPower())
 
 	return mostest
 }
 
 // should not be called on an empty validator set
 func (vals *ValidatorSet) computeAvgProposerPriority() int64 {
 	n := int64(len(vals.Validators))
 	sum := big.NewInt(0)
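
// A standalone sketch (hypothetical helper) of the integer ceiling used by
// RescalePriorities above: (diff + diffMax - 1) / diffMax == ceil(diff/diffMax)
// for positive diffMax.
package main

import "fmt"

func ceilDiv(diff, diffMax int64) int64 {
	return (diff + diffMax - 1) / diffMax
}

func main() {
	fmt.Println(ceilDiv(25, 10)) // 3: priorities get divided by 3
	fmt.Println(ceilDiv(7, 10))  // 1: already within the cap, no rescaling
}
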
@@ -131,11 +144,38 @@ func (vals *ValidatorSet) computeAvgProposerPriority() int64 {
 		panic(fmt.Sprintf("Cannot represent avg ProposerPriority as an int64 %v", avg))
 	}
 
-func (vals *ValidatorSet) shiftByAvgProposerPriority(validatorsHeap *cmn.Heap) {
+// compute the difference between the max and min ProposerPriority of that set
+func computeMaxMinPriorityDiff(vals *ValidatorSet) int64 {
+	max := int64(math.MinInt64)
+	min := int64(math.MaxInt64)
+	for _, v := range vals.Validators {
+		if v.ProposerPriority < min {
+			min = v.ProposerPriority
+		}
+		if v.ProposerPriority > max {
+			max = v.ProposerPriority
+		}
+	}
+	diff := max - min
+	if diff < 0 {
+		return -1 * diff
+	} else {
+		return diff
+	}
+}
+
+func (vals *ValidatorSet) getValWithMostPriority() *Validator {
+	var res *Validator
+	for _, val := range vals.Validators {
+		res = res.CompareProposerPriority(val)
+	}
+	return res
+}
+
+func (vals *ValidatorSet) shiftByAvgProposerPriority() {
 	avgProposerPriority := vals.computeAvgProposerPriority()
 	for _, val := range vals.Validators {
 		val.ProposerPriority = safeSubClip(val.ProposerPriority, avgProposerPriority)
-		validatorsHeap.PushComparable(val, proposerPriorityComparable{val})
 	}
 }

@@ -373,8 +413,7 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i
 	if talliedVotingPower > vals.TotalVotingPower()*2/3 {
 		return nil
 	}
-	return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v",
-		talliedVotingPower, vals.TotalVotingPower()*2/3+1)
+	return errTooMuchChange{talliedVotingPower, vals.TotalVotingPower()*2/3 + 1}
 }
 
 // VerifyFutureCommit will check to see if the set would be valid with a different

@@ -456,12 +495,37 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin
 	}
 
 	if oldVotingPower <= oldVals.TotalVotingPower()*2/3 {
-		return cmn.NewError("Invalid commit -- insufficient old voting power: got %v, needed %v",
-			oldVotingPower, oldVals.TotalVotingPower()*2/3+1)
+		return errTooMuchChange{oldVotingPower, oldVals.TotalVotingPower()*2/3 + 1}
 	}
 	return nil
 }
 
+//-----------------
+// ErrTooMuchChange
+
+func IsErrTooMuchChange(err error) bool {
+	switch err_ := err.(type) {
+	case cmn.Error:
+		_, ok := err_.Data().(errTooMuchChange)
+		return ok
+	case errTooMuchChange:
+		return true
+	default:
+		return false
+	}
+}
+
+type errTooMuchChange struct {
+	got    int64
+	needed int64
+}
+
+func (e errTooMuchChange) Error() string {
+	return fmt.Sprintf("Invalid commit -- insufficient old voting power: got %v, needed %v", e.got, e.needed)
+}
+
+//----------------
+
 func (vals *ValidatorSet) String() string {
 	return vals.StringIndented("")
 }
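
// A standalone sketch (hypothetical wrapper, not part of the diff) of how a
// lite-client caller might branch on the new typed error instead of matching
// error strings.
package main

import (
	"errors"
	"fmt"

	"github.com/tendermint/tendermint/types"
)

func classify(err error) string {
	if types.IsErrTooMuchChange(err) {
		return "validator set moved by more than 1/3: fetch intermediate headers"
	}
	return "unrecoverable: " + err.Error()
}

func main() {
	fmt.Println(classify(errors.New("insufficient voting power")))
}
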
@@ -508,20 +572,6 @@ func (valz ValidatorsByAddress) Swap(i, j int) {
 	valz[j] = it
 }
 
-//-------------------------------------
-// Use with Heap for sorting validators by ProposerPriority
-
-type proposerPriorityComparable struct {
-	*Validator
-}
-
-// We want to find the validator with the greatest ProposerPriority.
-func (ac proposerPriorityComparable) Less(o interface{}) bool {
-	other := o.(proposerPriorityComparable).Validator
-	larger := ac.CompareProposerPriority(other)
-	return bytes.Equal(larger.Address, ac.Address)
-}
-
 //----------------------------------------
 // For testing
 
@@ -392,10 +392,16 @@ func TestAveragingInIncrementProposerPriority(t *testing.T) {
 func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) {
 	// Other than TestAveragingInIncrementProposerPriority this is a more complete test showing
 	// how each ProposerPriority changes in relation to the validator's voting power respectively.
+	// average is zero in each round:
+	vp0 := int64(10)
+	vp1 := int64(1)
+	vp2 := int64(1)
+	total := vp0 + vp1 + vp2
+	avg := (vp0 + vp1 + vp2 - total) / 3
 	vals := ValidatorSet{Validators: []*Validator{
-		{Address: []byte{0}, ProposerPriority: 0, VotingPower: 10},
-		{Address: []byte{1}, ProposerPriority: 0, VotingPower: 1},
-		{Address: []byte{2}, ProposerPriority: 0, VotingPower: 1}}}
+		{Address: []byte{0}, ProposerPriority: 0, VotingPower: vp0},
+		{Address: []byte{1}, ProposerPriority: 0, VotingPower: vp1},
+		{Address: []byte{2}, ProposerPriority: 0, VotingPower: vp2}}}
 	tcs := []struct {
 		vals                  *ValidatorSet
 		wantProposerPrioritys []int64

@@ -407,95 +413,89 @@ func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) {
 			vals.Copy(),
 			[]int64{
 				// Acumm+VotingPower-Avg:
-				0 + 10 - 12 - 4, // mostest will be subtracted by total voting power (12)
-				0 + 1 - 4,
-				0 + 1 - 4},
+				0 + vp0 - total - avg, // mostest will be subtracted by total voting power (12)
+				0 + vp1,
+				0 + vp2},
 			1,
 			vals.Validators[0]},
 		1: {
 			vals.Copy(),
 			[]int64{
-				(0 + 10 - 12 - 4) + 10 - 12 + 4, // this will be mostest on 2nd iter, too
-				(0 + 1 - 4) + 1 + 4,
-				(0 + 1 - 4) + 1 + 4},
+				(0 + vp0 - total) + vp0 - total - avg, // this will be mostest on 2nd iter, too
+				(0 + vp1) + vp1,
+				(0 + vp2) + vp2},
 			2,
 			vals.Validators[0]}, // increment twice -> expect average to be subtracted twice
 		2: {
 			vals.Copy(),
 			[]int64{
-				((0 + 10 - 12 - 4) + 10 - 12) + 10 - 12 + 4, // still mostest
-				((0 + 1 - 4) + 1) + 1 + 4,
-				((0 + 1 - 4) + 1) + 1 + 4},
+				0 + 3*(vp0-total) - avg, // still mostest
+				0 + 3*vp1,
+				0 + 3*vp2},
 			3,
 			vals.Validators[0]},
 		3: {
 			vals.Copy(),
 			[]int64{
-				0 + 4*(10-12) + 4 - 4, // still mostest
-				0 + 4*1 + 4 - 4,
-				0 + 4*1 + 4 - 4},
+				0 + 4*(vp0-total), // still mostest
+				0 + 4*vp1,
+				0 + 4*vp2},
 			4,
 			vals.Validators[0]},
 		4: {
 			vals.Copy(),
 			[]int64{
-				0 + 4*(10-12) + 10 + 4 - 4, // 4 iters was mostest
-				0 + 5*1 - 12 + 4 - 4,       // now this val is mostest for the 1st time (hence -12==totalVotingPower)
-				0 + 5*1 + 4 - 4},
+				0 + 4*(vp0-total) + vp0, // 4 iters was mostest
+				0 + 5*vp1 - total,       // now this val is mostest for the 1st time (hence -12==totalVotingPower)
+				0 + 5*vp2},
 			5,
 			vals.Validators[1]},
 		5: {
 			vals.Copy(),
 			[]int64{
-				0 + 6*10 - 5*12 + 4 - 4, // mostest again
-				0 + 6*1 - 12 + 4 - 4,    // mostest once up to here
-				0 + 6*1 + 4 - 4},
+				0 + 6*vp0 - 5*total, // mostest again
+				0 + 6*vp1 - total,   // mostest once up to here
+				0 + 6*vp2},
 			6,
 			vals.Validators[0]},
 		6: {
 			vals.Copy(),
 			[]int64{
-				0 + 7*10 - 6*12 + 4 - 4, // in 7 iters this val is mostest 6 times
-				0 + 7*1 - 12 + 4 - 4,    // in 7 iters this val is mostest 1 time
-				0 + 7*1 + 4 - 4},
+				0 + 7*vp0 - 6*total, // in 7 iters this val is mostest 6 times
+				0 + 7*vp1 - total,   // in 7 iters this val is mostest 1 time
+				0 + 7*vp2},
 			7,
 			vals.Validators[0]},
 		7: {
 			vals.Copy(),
 			[]int64{
-				0 + 8*10 - 7*12 + 4 - 4, // mostest
-				0 + 8*1 - 12 + 4 - 4,
-				0 + 8*1 + 4 - 4},
+				0 + 8*vp0 - 7*total, // mostest again
+				0 + 8*vp1 - total,
+				0 + 8*vp2},
 			8,
 			vals.Validators[0]},
 		8: {
 			vals.Copy(),
 			[]int64{
-				0 + 9*10 - 7*12 + 4 - 4,
-				0 + 9*1 - 12 + 4 - 4,
-				0 + 9*1 - 12 + 4 - 4}, // mostest
+				0 + 9*vp0 - 7*total,
+				0 + 9*vp1 - total,
+				0 + 9*vp2 - total}, // mostest
 			9,
 			vals.Validators[2]},
 		9: {
 			vals.Copy(),
 			[]int64{
-				0 + 10*10 - 8*12 + 4 - 4, // after 10 iters this is mostest again
-				0 + 10*1 - 12 + 4 - 4,    // after 6 iters this val is "mostest" once and not in between
-				0 + 10*1 - 12 + 4 - 4},   // in between 10 iters this val is "mostest" once
+				0 + 10*vp0 - 8*total, // after 10 iters this is mostest again
+				0 + 10*vp1 - total,   // after 6 iters this val is "mostest" once and not in between
+				0 + 10*vp2 - total},  // in between 10 iters this val is "mostest" once
 			10,
 			vals.Validators[0]},
 		10: {
 			vals.Copy(),
 			[]int64{
-				// shift twice inside incrementProposerPriority (shift every 10th iter);
-				// don't shift at the end of IncremenctProposerPriority
-				// last avg should be zero because
-				// ProposerPriority of validator 0: (0 + 11*10 - 8*12 - 4) == 10
-				// ProposerPriority of validator 1 and 2: (0 + 11*1 - 12 - 4) == -5
-				// and (10 + 5 - 5) / 3 == 0
-				0 + 11*10 - 8*12 - 4 - 12 - 0,
-				0 + 11*1 - 12 - 4 - 0,  // after 6 iters this val is "mostest" once and not in between
-				0 + 11*1 - 12 - 4 - 0}, // after 10 iters this val is "mostest" once
+				0 + 11*vp0 - 9*total,
+				0 + 11*vp1 - total,  // after 6 iters this val is "mostest" once and not in between
+				0 + 11*vp2 - total}, // after 10 iters this val is "mostest" once
 			11,
 			vals.Validators[0]},
 	}
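
// A standalone toy re-run (assumptions: the average stays zero here, so the
// shift step is a no-op, and ties resolve to the lower address, which is the
// scan order below) of the {10, 1, 1} example driving the table above.
package main

import "fmt"

func main() {
	vp := []int64{10, 1, 1}
	prio := []int64{0, 0, 0}
	total := vp[0] + vp[1] + vp[2]

	for iter := 1; iter <= 11; iter++ {
		// every validator gains its voting power...
		max := 0
		for i := range prio {
			prio[i] += vp[i]
			if prio[i] > prio[max] {
				max = i
			}
		}
		// ...and the proposer ("mostest") pays back the total:
		prio[max] -= total
		fmt.Printf("iter %2d: proposer=val%d prios=%v\n", iter, max, prio)
	}
}
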
@@ -52,8 +52,8 @@ type Vote struct {
 	Type             SignedMsgType `json:"type"`
 	Height           int64         `json:"height"`
 	Round            int           `json:"round"`
-	Timestamp        time.Time     `json:"timestamp"`
 	BlockID          BlockID       `json:"block_id"` // zero if vote is nil.
+	Timestamp        time.Time     `json:"timestamp"`
 	ValidatorAddress Address       `json:"validator_address"`
 	ValidatorIndex   int           `json:"validator_index"`
 	Signature        []byte        `json:"signature"`

@@ -127,6 +127,11 @@ func (vote *Vote) ValidateBasic() error {
 	if err := vote.BlockID.ValidateBasic(); err != nil {
 		return fmt.Errorf("Wrong BlockID: %v", err)
 	}
+	// BlockID.ValidateBasic would not err if we for instance have an empty hash but a
+	// non-empty PartsSetHeader:
+	if !vote.BlockID.IsZero() && !vote.BlockID.IsComplete() {
+		return fmt.Errorf("BlockID must be either empty or complete, got: %v", vote.BlockID)
+	}
 	if len(vote.ValidatorAddress) != crypto.AddressSize {
 		return fmt.Errorf("Expected ValidatorAddress size to be %d bytes, got %d bytes",
 			crypto.AddressSize,

@@ -63,8 +63,8 @@ func TestVoteSignableTestVectors(t *testing.T) {
 		{
 			CanonicalizeVote("", &Vote{}),
 			// NOTE: Height and Round are skipped here. This case needs to be considered while parsing.
-			// []byte{0x22, 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff},
-			[]byte{0x22, 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
+			// []byte{0x2a, 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff},
+			[]byte{0x2a, 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
 		},
 		// with proper (fixed size) height and round (PreCommit):
 		{

@@ -76,7 +76,7 @@ func TestVoteSignableTestVectors(t *testing.T) {
 			0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height
 			0x19, // (field_number << 3) | wire_type
 			0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round
-			0x22, // (field_number << 3) | wire_type
+			0x2a, // (field_number << 3) | wire_type
 			// remaining fields (timestamp):
 			0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
 		},

@@ -90,7 +90,7 @@ func TestVoteSignableTestVectors(t *testing.T) {
 			0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height
 			0x19, // (field_number << 3) | wire_type
 			0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round
-			0x22, // (field_number << 3) | wire_type
+			0x2a, // (field_number << 3) | wire_type
 			// remaining fields (timestamp):
 			0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
 		},

@@ -102,7 +102,7 @@ func TestVoteSignableTestVectors(t *testing.T) {
 			0x19, // (field_number << 3) | wire_type
 			0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round
 			// remaining fields (timestamp):
-			0x22,
+			0x2a,
 			0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
 		},
 		// containing non-empty chain_id:

@@ -114,7 +114,7 @@ func TestVoteSignableTestVectors(t *testing.T) {
 			0x19, // (field_number << 3) | wire_type
 			0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round
 			// remaining fields:
-			0x22, // (field_number << 3) | wire_type
+			0x2a, // (field_number << 3) | wire_type
 			0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, // timestamp
 			0x32, // (field_number << 3) | wire_type
 			0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64}, // chainID
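
// A standalone sketch of the 0x22 -> 0x2a flips in the vectors above: the
// amino/proto key byte is (field_number << 3) | wire_type, and Timestamp moved
// from field 4 to field 5 when BlockID was reordered before it.
package main

import "fmt"

func key(fieldNumber, wireType byte) byte {
	return fieldNumber<<3 | wireType
}

func main() {
	fmt.Printf("%#x\n", key(4, 2)) // 0x22: old Timestamp key (field 4, length-delimited)
	fmt.Printf("%#x\n", key(5, 2)) // 0x2a: new Timestamp key (field 5, length-delimited)
}
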
@@ -18,7 +18,7 @@ const (
 	// TMCoreSemVer is the current version of Tendermint Core.
 	// It's the Semantic Version of the software.
 	// Must be a string because scripts like dist.sh read this file.
-	TMCoreSemVer = "0.28.1"
+	TMCoreSemVer = "0.29.0"
 
 	// ABCISemVer is the semantic version of the ABCI library
 	ABCISemVer = "0.15.0"

@@ -36,10 +36,10 @@ func (p Protocol) Uint64() uint64 {
 
 var (
 	// P2PProtocol versions all p2p behaviour and msgs.
-	P2PProtocol Protocol = 5
+	P2PProtocol Protocol = 6
 
 	// BlockProtocol versions all block data structures and processing.
-	BlockProtocol Protocol = 8
+	BlockProtocol Protocol = 9
 )
 
 //------------------------------------------------------------------------