Compare commits

...

13 Commits

Author SHA1 Message Date
3e7752c29d cs: exit if SwitchToConsensus fails (#3706)
Refs #3656
2019-06-22 11:48:01 -04:00
1b77bf6f20 rpc/lib: write a test for TLS server (#3703)
* rpc/lib: write a test for TLS server

Refs #3700

* do not regenerate certificates

* add nolint
2019-06-22 11:44:12 -04:00
8fc8368438 node: run whole func in goroutine, not just logger.Error fn (#3743)
* node: run whole func in goroutine, not just logger.Error fn

Fixes #3741

* add a changelog entry
2019-06-22 10:30:23 +04:00
228bba799d state: add more tests for block validation (#3674)
* Expose priv validators for use in testing

* Generalize block header validation test past height 1

* Remove ineffectual assignment

* Remove redundant SaveState call

* Reorder comment for clarity

* Use the block executor ApplyBlock function instead of implementing a stripped-down version of it

* Remove commented-out code

* Remove unnecessary test

The required tests already appear to be implemented (implicitly) through
the TestValidateBlockHeader test.

* Allow for catching of specific error types during TestValidateBlockCommit

* Make return error testable

* Clean up and add TestValidateBlockCommit code

* Fix formatting

* Extract function to create a new mock test app

* Update comment for clarity

* Fix comment

* Add skeleton code for evidence-related test

* Allow for addressing priv val by address

* Generalize test beyond a single validator

* Generalize TestValidateBlockEvidence past first height

* Reorder code to clearly separate tests and utility code

* Use a common constant for stop height for testing in state/validation_test.go

* Refactor errors to resemble existing conventions

* Fix formatting

* Extract common helper functions

Having the tests littered with helper functions makes them less easily
readable imho, so I've pulled them out into a separate file. This also
makes it easier to see what helper functions are available during
testing, so we minimize the chance of duplication when writing new
tests.

* Remove unused parameter

* Remove unused parameters

* Add field keys

* Remove unused height constant

* Fix typo

* Fix incorrect return error

* Add field keys

* Use separate package for tests

This refactors all of the state package's tests into a state_test
package, so as to keep any usage of the state package's internal methods
explicit.

Any internal methods/constants used by tests are now explicitly exported
in state/export_test.go

* Refactor: extract helper function to make, validate, execute and commit a block

* Rename state function to makeState

* Remove redundant constant for number of validators

* Refactor mock evidence registration into TestMain

* Remove extraneous nVals variable

* Replace function-level TODOs with file-level TODO and explanation

* Remove extraneous comment

* Fix linting issues brought up by GolangCI (pulled in from latest merge from develop)
2019-06-21 17:29:29 -04:00
59497c362b Prepare nuking develop (#3726)
* Update Readme and contrib. guidelines: remove traces of master

Signed-off-by: Ismail Khoffi <Ismail.Khoffi@gmail.com>

* add simple example

Signed-off-by: Ismail Khoffi <Ismail.Khoffi@gmail.com>

* update contributing.md

* add link

* Update CONTRIBUTING.md

* add a note on master and releases

Signed-off-by: Ismail Khoffi <Ismail.Khoffi@gmail.com>

* update readme
2019-06-21 16:29:16 -04:00
2e5b2a9537 abci/examples: switch from hex to base64 pubkey in kvstore (#3641)
* abci/example: use base64 for update validator set

* update kvstore/README.md

* update CHANGELOG_PENDING.md and abci/example/kvstore/README.md
2019-06-21 15:18:49 +04:00
866b343c0c Changes to files that had linting issue (#3731)
- Govet issues fixed
- 1 gosec issue solved using nolint

Signed-off-by: Marko Baricevic <marbar3778@yahoo.com>
2019-06-21 09:58:32 +04:00
9d5ba576ee abci: Refactor ABCI CheckTx and DeliverTx signatures (#3735)
* Refactor signature of Application.CheckTx

* Refactor signature of Application.DeliverTx

* Refactor example variable names for clarity and consistency

* Rename method variables for consistency

* Rename method variables for consistency

* add a changelog entry

* update docs
2019-06-21 09:56:27 +04:00
1b5110e91f node: fix a bug where nil is recorded as node's address (#3740)
* node: fix a bug where `nil` is recorded as node's address

Solution

  Call AddOurAddress once we know the address.
  sw.NetAddress is nil in createAddrBookAndSetOnSwitch;
  it's set by the n.transport.Listen function, which is called during start.

Fixes #3716

* use addr instead of n.sw.NetAddress

* add both ExternalAddress and ListenAddress as our addresses
2019-06-21 09:30:32 +04:00
60827f7562 docs: fix some language issues and deprecated link (#3733) 2019-06-19 21:35:53 +03:00
ed18ffdca3 p2p: refactor Switch#OnStop (#3729) 2019-06-17 13:30:12 +02:00
0e1c492d3e docs: missing 'b' in python command (#3728)
In Python 3 the command outlined in this doc, `import codecs; codecs.decode("YWJjZA==", 'base64').decode('ascii')`, throws an error:
TypeError: decoding with 'base64' codec failed (TypeError: expected bytes-like object, not str).
Adding 'b' before the encoded string, `import codecs; codecs.decode(b"YWJjZA==", 'base64').decode('ascii')`, makes it work.
2019-06-17 12:51:12 +02:00
9010ff5f96 types: do not ignore errors returned by PublishWithEvents (#3722)
Follow up to #3643

* update changelog

* do not ignore errors returned by PublishWithEvents
2019-06-12 15:25:47 +02:00
61 changed files with 1077 additions and 692 deletions

View File

@ -20,7 +20,6 @@ linters:
- gochecknoinits
- scopelint
- stylecheck
# linters-settings:
# govet:
# check-shadowing: true

View File

@ -4,26 +4,26 @@
### BREAKING CHANGES:
- \#3613 Switch from golang/dep to Go 1.11 Modules to resolve dependencies:
- it is recommended to switch to Go Modules if your project has tendermint
as a dependency
- read more on Modules here: https://github.com/golang/go/wiki/Modules
* CLI/RPC/Config
* [rpc] \#3616 Improve `/block_results` response format (`results.DeliverTx` ->
`results.deliver_tx`). See docs for details.
- [cli] \#3613 Switch from golang/dep to Go Modules to resolve dependencies:
It is recommended to switch to Go Modules if your project has tendermint as
a dependency. Read more on Modules here:
https://github.com/golang/go/wiki/Modules
- [rpc] \#3616 Improve `/block_results` response format (`results.DeliverTx`
-> `results.deliver_tx`). See docs for details.
* Apps
* [abci] \#1859 `ResponseCheckTx`, `ResponseDeliverTx`, `ResponseBeginBlock`,
and `ResponseEndBlock` now include `Events` instead of `Tags`. Each `Event`
contains a `type` and a list of `attributes` (list of key-value pairs) allowing
for inclusion of multiple distinct events in each response.
- [abci] \#1859 `ResponseCheckTx`, `ResponseDeliverTx`, `ResponseBeginBlock`,
and `ResponseEndBlock` now include `Events` instead of `Tags`. Each `Event`
contains a `type` and a list of `attributes` (list of key-value pairs)
allowing for inclusion of multiple distinct events in each response.
* Go API
* [libs/db] Removed deprecated `LevelDBBackend` const
* If you have `db_backend` set to `leveldb` in your config file, please
- [libs/db] [\#3632](https://github.com/tendermint/tendermint/pull/3632) Removed deprecated `LevelDBBackend` const
If you have `db_backend` set to `leveldb` in your config file, please
change it to `goleveldb` or `cleveldb`.
- [p2p] \#3521 Remove NewNetAddressStringWithOptionalID
- [p2p] \#3521 Remove NewNetAddressStringWithOptionalID
- [abci] \#3193 Use RequestDeliverTx and RequestCheckTx in the ABCI interface
* Blockchain Protocol
@ -32,10 +32,13 @@
### FEATURES:
### IMPROVEMENTS:
- [p2p] \#3666 Add per channel telemtry to improve reactor observability
* [rpc] [\#3686](https://github.com/tendermint/tendermint/pull/3686) `HTTPClient#Call` returns wrapped errors, so a caller could use `errors.Cause` to retrieve an error code.
- [consensus] \#3656 Exit if SwitchToConsensus fails
- [p2p] \#3666 Add per channel telemetry to improve reactor observability
- [rpc] [\#3686](https://github.com/tendermint/tendermint/pull/3686) `HTTPClient#Call` returns wrapped errors, so a caller could use `errors.Cause` to retrieve an error code. (@wooparadog)
- [abci/examples] \#3659 Change validator update tx format (incl. expected pubkey format, which is base64 now) (@needkane)
### BUG FIXES:
- [libs/db] Fixed the BoltDB backend's Batch.Delete implementation (@Yawning)
- [libs/db] Fixed the BoltDB backend's Get and Iterator implementation (@Yawning)
- [libs/db] \#3717 Fixed the BoltDB backend's Batch.Delete implementation (@Yawning)
- [libs/db] \#3718 Fixed the BoltDB backend's Get and Iterator implementation (@Yawning)
- [node] \#3716 Fix a bug where `nil` is recorded as node's address
- [node] \#3741 Fix profiler blocking the entire node

View File

@ -2,7 +2,8 @@
Thank you for considering making contributions to Tendermint and related repositories! Start by taking a look at the [coding repo](https://github.com/tendermint/coding) for overall information on repository workflow and standards.
Please follow standard github best practices: fork the repo, branch from the tip of develop, make some commits, and submit a pull request to develop. See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!
Please follow standard github best practices: fork the repo, branch from the tip of `master`, make some commits, and submit a pull request to `master`.
See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!
Before making a pull request, please open an issue describing the
change you would like to make. If an issue for your change already exists,
@ -112,32 +113,36 @@ removed from the header in rpc responses as well.
## Branching Model and Release
We follow a variant of [git flow](http://nvie.com/posts/a-successful-git-branching-model/).
This means that all pull-requests should be made against develop. Any merge to
master constitutes a tagged release.
The main development branch is master.
Note all pull requests should be squash merged except for merging to master and
merging master back to develop. This keeps the commit history clean and makes it
Every release is maintained in a release branch named `vX.Y.Z`.
Note all pull requests should be squash merged except for merging to a release branch (named `vX.Y`). This keeps the commit history clean and makes it
easy to reference the pull request where a change was introduced.
### Development Procedure:
- the latest state of development is on `develop`
- `develop` must never fail `make test`
- never --force onto `develop` (except when reverting a broken commit, which should seldom happen)
### Development Procedure
- the latest state of development is on `master`
- `master` must never fail `make test`
- never --force onto `master` (except when reverting a broken commit, which should seldom happen)
- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`)
- make changes and update the `CHANGELOG_PENDING.md` to record your change
- before submitting a pull request, run `git rebase` on top of the latest `develop`
- before submitting a pull request, run `git rebase` on top of the latest `master`
### Pull Merge Procedure:
- ensure pull branch is based on a recent develop
### Pull Merge Procedure
- ensure pull branch is based on a recent `master`
- run `make test` to ensure that all tests pass
- squash merge pull request
- the `unstable` branch may be used to aggregate pull merges before fixing tests
### Release Procedure:
- start on `develop`
- run integration tests (see `test_integrations` in Makefile)
- prepare release in a pull request against develop (to be squash merged):
### Release Procedure
#### Major Release
1. start on `master`
2. run integration tests (see `test_integrations` in Makefile)
3. prepare release in a pull request against `master` (to be squash merged):
- copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
- run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all issues
@ -147,14 +152,28 @@ easy to reference the pull request where a change was introduced.
./scripts/authors.sh <email>`
- reset the `CHANGELOG_PENDING.md`
- bump versions
- push latest develop with prepared release details to release/vX.X.X to run the extended integration tests on the CI
- if necessary, make pull requests against release/vX.X.X and squash merge them
- merge to master (don't squash merge!)
- merge master back to develop (don't squash merge!)
4. push your changes with prepared release details to `vX.X` (this will trigger the release `vX.X.0`)
5. merge back to master (don't squash merge!)
### Hotfix Procedure:
#### Minor Release
- follow the normal development and release procedure without any differences
If there were no breaking changes and you need to create a release nonetheless,
the procedure is almost exactly like with a new release above.
The only difference is that in the end you create a pull request against the existing `X.X` branch.
The branch name should match the release number you want to create.
Merging this PR will trigger the next release.
For example, if the PR is against an existing 0.34 branch which already contains a v0.34.0 release/tag,
the patch version will be incremented and the created release will be v0.34.1.
#### Backport Release
1. start from the existing release branch you want to backport changes to (e.g. v0.30)
Branch to a release/vX.X.X branch locally (e.g. release/v0.30.7)
2. cherry pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed)
3. steps 2 and 3 from [Major Release](#major-release)
4. push changes to release/vX.X.X branch
5. open a PR against the existing vX.X branch
## Testing

View File

@ -115,24 +115,31 @@ get_deps_bin_size:
protoc_libs: libs/common/types.pb.go
# generates certificates for TLS testing in remotedb and RPC server
gen_certs: clean_certs
## Generating certificates for TLS testing...
certstrap init --common-name "tendermint.com" --passphrase ""
certstrap request-cert -ip "::" --passphrase ""
certstrap sign "::" --CA "tendermint.com" --passphrase ""
mv out/::.crt out/::.key db/remotedb
clean_certs:
## Cleaning TLS testing certificates...
certstrap request-cert --common-name "remotedb" -ip "127.0.0.1" --passphrase ""
certstrap sign "remotedb" --CA "tendermint.com" --passphrase ""
mv out/remotedb.crt libs/db/remotedb/test.crt
mv out/remotedb.key libs/db/remotedb/test.key
certstrap request-cert --common-name "server" -ip "127.0.0.1" --passphrase ""
certstrap sign "server" --CA "tendermint.com" --passphrase ""
mv out/server.crt rpc/lib/server/test.crt
mv out/server.key rpc/lib/server/test.key
rm -rf out
rm -f db/remotedb/::.crt db/remotedb/::.key
test_libs: gen_certs
# deletes generated certificates
clean_certs:
rm -f libs/db/remotedb/test.crt
rm -f libs/db/remotedb/test.key
rm -f rpc/lib/server/test.crt
rm -f rpc/lib/server/test.key
test_libs:
go test -tags clevedb boltdb $(PACKAGES)
make clean_certs
grpc_dbserver:
protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto
protoc -I libs/db/remotedb/proto/ libs/db/remotedb/proto/defs.proto --go_out=plugins=grpc:libs/db/remotedb/proto
protoc_grpc: rpc/grpc/types.pb.go

View File

@ -17,7 +17,6 @@ https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/6874
Branch | Tests | Coverage
----------|-------|----------
master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)
develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.
@ -27,13 +26,15 @@ For protocol details, see [the specification](/docs/spec).
For detailed analysis of the consensus protocol, including safety and liveness proofs,
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
## A Note on Production Readiness
## Releases
While Tendermint is being used in production in private, permissioned
environments, we are still working actively to harden and audit it in preparation
for use in public blockchains, such as the [Cosmos Network](https://cosmos.network/).
We are also still making breaking changes to the protocol and the APIs.
Thus, we tag the releases as *alpha software*.
NOTE: The master branch is now an active development branch (starting with `v0.32`). Please, do not depend on it and
use [releases](https://github.com/tendermint/tendermint/releases) instead.
Tendermint is being used in production in both private and public environments,
most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
See below for more details about [versioning](#versioning).
In any case, if you intend to run Tendermint in production,
please [contact us](mailto:partners@tendermint.com) and [join the chat](https://riot.im/app/#/room/#tendermint:matrix.org).

View File

@ -85,7 +85,7 @@ func (app *localClient) DeliverTxAsync(tx []byte) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.DeliverTx(tx)
res := app.Application.DeliverTx(types.RequestDeliverTx{Tx: tx})
return app.callback(
types.ToRequestDeliverTx(tx),
types.ToResponseDeliverTx(res),
@ -96,7 +96,7 @@ func (app *localClient) CheckTxAsync(tx []byte) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.CheckTx(tx)
res := app.Application.CheckTx(types.RequestCheckTx{Tx: tx})
return app.callback(
types.ToRequestCheckTx(tx),
types.ToResponseCheckTx(res),
@ -188,7 +188,7 @@ func (app *localClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, erro
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.DeliverTx(tx)
res := app.Application.DeliverTx(types.RequestDeliverTx{Tx: tx})
return &res, nil
}
@ -196,7 +196,7 @@ func (app *localClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.CheckTx(tx)
res := app.Application.CheckTx(types.RequestCheckTx{Tx: tx})
return &res, nil
}

View File

@ -42,15 +42,15 @@ func (app *CounterApplication) SetOption(req types.RequestSetOption) types.Respo
return types.ResponseSetOption{}
}
func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
func (app *CounterApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
if app.serial {
if len(tx) > 8 {
if len(req.Tx) > 8 {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(tx):], tx)
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue != uint64(app.txCount) {
return types.ResponseDeliverTx{
@ -62,15 +62,15 @@ func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}
func (app *CounterApplication) CheckTx(tx []byte) types.ResponseCheckTx {
func (app *CounterApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
if app.serial {
if len(tx) > 8 {
if len(req.Tx) > 8 {
return types.ResponseCheckTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(tx):], tx)
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue < uint64(app.txCount) {
return types.ResponseCheckTx{

View File

@ -22,10 +22,10 @@ and the Handshake allows any necessary blocks to be replayed.
Validator set changes are effected using the following transaction format:
```
val:pubkey1/power1,addr2/power2,addr3/power3"
"val:pubkey1!power1,pubkey2!power2,pubkey3!power3"
```
where `power1` is the new voting power for the validator with `pubkey1` (possibly a new one).
where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one).
To remove a validator from the validator set, set power to `0`.
There is no sybil protection against new validators joining.
Validators can be removed by setting their power to `0`.
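For illustration only, here is a minimal Go sketch (not part of this changeset) of how such a transaction string could be assembled from a raw 32-byte ed25519 public key. It mirrors the `MakeValSetChangeTx` helper updated elsewhere in this diff; the zero-filled key is a placeholder.

```
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Placeholder 32-byte ed25519 public key; a real validator key would be used here.
	pubkey := make([]byte, 32)

	// Base64-encode the raw key and join it with the new voting power using "!".
	tx := fmt.Sprintf("val:%s!%d", base64.StdEncoding.EncodeToString(pubkey), 10)
	fmt.Println(tx)
}
```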

View File

@ -76,13 +76,13 @@ func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.Respon
}
// tx is either "key=value" or just arbitrary bytes
func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
var key, value []byte
parts := bytes.Split(tx, []byte("="))
parts := bytes.Split(req.Tx, []byte("="))
if len(parts) == 2 {
key, value = parts[0], parts[1]
} else {
key, value = tx, tx
key, value = req.Tx, req.Tx
}
app.state.db.Set(prefixKey(key), value)
@ -101,7 +101,7 @@ func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
}
func (app *KVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
func (app *KVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}

View File

@ -19,10 +19,11 @@ import (
)
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
ar := app.DeliverTx(tx)
req := types.RequestDeliverTx{Tx: tx}
ar := app.DeliverTx(req)
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar = app.DeliverTx(tx)
ar = app.DeliverTx(req)
require.False(t, ar.IsErr(), ar)
// make sure query is fine
@ -179,7 +180,7 @@ func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
for _, tx := range txs {
if r := kvstore.DeliverTx(tx); r.IsErr() {
if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
t.Fatal(r)
}
}

View File

@ -2,7 +2,7 @@ package kvstore
import (
"bytes"
"encoding/hex"
"encoding/base64"
"fmt"
"strconv"
"strings"
@ -60,22 +60,22 @@ func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) t
return app.app.SetOption(req)
}
// tx is either "val:pubkey/power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
// if it starts with "val:", update the validator set
// format is "val:pubkey/power"
if isValidatorTx(tx) {
// format is "val:pubkey!power"
if isValidatorTx(req.Tx) {
// update validators in the merkle tree
// and in app.ValUpdates
return app.execValidatorTx(tx)
return app.execValidatorTx(req.Tx)
}
// otherwise, update the key-value store
return app.app.DeliverTx(tx)
return app.app.DeliverTx(req)
}
func (app *PersistentKVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
return app.app.CheckTx(tx)
func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
return app.app.CheckTx(req)
}
// Commit will panic if InitChain was not called
@ -129,33 +129,34 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida
}
func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte {
return []byte(fmt.Sprintf("val:%X/%d", pubkey.Data, power))
pubStr := base64.StdEncoding.EncodeToString(pubkey.Data)
return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
}
func isValidatorTx(tx []byte) bool {
return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
}
// format is "val:pubkey/power"
// pubkey is raw 32-byte ed25519 key
// format is "val:pubkey!power"
// pubkey is a base64-encoded 32-byte ed25519 key
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
tx = tx[len(ValidatorSetChangePrefix):]
//get the pubkey and power
pubKeyAndPower := strings.Split(string(tx), "/")
pubKeyAndPower := strings.Split(string(tx), "!")
if len(pubKeyAndPower) != 2 {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Expected 'pubkey/power'. Got %v", pubKeyAndPower)}
Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
}
pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
// decode the pubkey
pubkey, err := hex.DecodeString(pubkeyS)
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
if err != nil {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Pubkey (%s) is invalid hex", pubkeyS)}
Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
}
// decode the power
@ -176,9 +177,10 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
if v.Power == 0 {
// remove validator
if !app.app.state.db.Has(key) {
pubStr := base64.StdEncoding.EncodeToString(v.PubKey.Data)
return types.ResponseDeliverTx{
Code: code.CodeTypeUnauthorized,
Log: fmt.Sprintf("Cannot remove non-existent validator %X", key)}
Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
}
app.app.state.db.Delete(key)
} else {

View File

@ -178,10 +178,10 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
res := s.app.SetOption(*r.SetOption)
responses <- types.ToResponseSetOption(res)
case *types.Request_DeliverTx:
res := s.app.DeliverTx(r.DeliverTx.Tx)
res := s.app.DeliverTx(*r.DeliverTx)
responses <- types.ToResponseDeliverTx(res)
case *types.Request_CheckTx:
res := s.app.CheckTx(r.CheckTx.Tx)
res := s.app.CheckTx(*r.CheckTx)
responses <- types.ToResponseCheckTx(res)
case *types.Request_Commit:
res := s.app.Commit()

View File

@ -15,12 +15,12 @@ type Application interface {
Query(RequestQuery) ResponseQuery // Query for state
// Mempool Connection
CheckTx(tx []byte) ResponseCheckTx // Validate a tx for the mempool
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
// Consensus Connection
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing
DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
}
@ -45,11 +45,11 @@ func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption {
return ResponseSetOption{}
}
func (BaseApplication) DeliverTx(tx []byte) ResponseDeliverTx {
func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
return ResponseDeliverTx{Code: CodeTypeOK}
}
func (BaseApplication) CheckTx(tx []byte) ResponseCheckTx {
func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
return ResponseCheckTx{Code: CodeTypeOK}
}
@ -103,12 +103,12 @@ func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption
}
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
res := app.app.DeliverTx(req.Tx)
res := app.app.DeliverTx(*req)
return &res, nil
}
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
res := app.app.CheckTx(req.Tx)
res := app.app.CheckTx(*req)
return &res, nil
}
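To show what the refactored signatures look like from an application author's side, here is a hedged sketch (not part of this diff): a hypothetical app embeds `BaseApplication` and overrides only the two methods whose signatures changed, reading the transaction bytes from the new request structs. The `EchoApp` name and its behaviour are invented for illustration.

```
package myapp

import (
	"github.com/tendermint/tendermint/abci/types"
)

// EchoApp is a hypothetical example application; it accepts any non-empty tx
// and echoes the tx bytes back in DeliverTx.
type EchoApp struct {
	types.BaseApplication
}

func (app *EchoApp) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	// Transaction bytes now arrive on the request struct instead of a raw []byte.
	if len(req.Tx) == 0 {
		return types.ResponseCheckTx{Code: 1, Log: "empty tx"}
	}
	return types.ResponseCheckTx{Code: types.CodeTypeOK}
}

func (app *EchoApp) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	return types.ResponseDeliverTx{Code: types.CodeTypeOK, Data: req.Tx}
}
```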

View File

@ -14,7 +14,7 @@ import (
func testNodeInfo(id p2p.ID) p2p.DefaultNodeInfo {
return p2p.DefaultNodeInfo{
ProtocolVersion: p2p.ProtocolVersion{1, 2, 3},
ProtocolVersion: p2p.ProtocolVersion{P2P: 1, Block: 2, App: 3},
ID_: id,
Moniker: "SOMENAME",
Network: "SOMENAME",

View File

@ -111,7 +111,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
thisBlock := makeBlock(blockHeight, state, lastCommit)
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{thisBlock.Hash(), thisParts.Header()}
blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()}
state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
if err != nil {
@ -291,11 +291,11 @@ func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
return abci.ResponseEndBlock{}
}
func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
return abci.ResponseDeliverTx{Events: []abci.Event{}}
}
func (app *testApp) CheckTx(tx []byte) abci.ResponseCheckTx {
func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
return abci.ResponseCheckTx{}
}

View File

@ -177,7 +177,7 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons
// Create a new proposal block from state/txs from the mempool.
block1, blockParts1 := cs.createProposalBlock()
polRound, propBlockID := cs.ValidRound, types.BlockID{block1.Hash(), blockParts1.Header()}
polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartsHeader: blockParts1.Header()}
proposal1 := types.NewProposal(height, round, polRound, propBlockID)
if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal1); err != nil {
t.Error(err)
@ -185,7 +185,7 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons
// Create a new proposal block from state/txs from the mempool.
block2, blockParts2 := cs.createProposalBlock()
polRound, propBlockID = cs.ValidRound, types.BlockID{block2.Hash(), blockParts2.Header()}
polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartsHeader: blockParts2.Header()}
proposal2 := types.NewProposal(height, round, polRound, propBlockID)
if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal2); err != nil {
t.Error(err)

View File

@ -86,7 +86,7 @@ func (vs *validatorStub) signVote(voteType types.SignedMsgType, hash []byte, hea
Round: vs.Round,
Timestamp: tmtime.Now(),
Type: voteType,
BlockID: types.BlockID{hash, header},
BlockID: types.BlockID{Hash: hash, PartsHeader: header},
}
err := vs.PrivValidator.SignVote(config.ChainID(), vote)
return vote, err
@ -159,7 +159,7 @@ func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round
}
// Make proposal
polRound, propBlockID := validRound, types.BlockID{block.Hash(), blockParts.Header()}
polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}
proposal = types.NewProposal(height, round, polRound, propBlockID)
if err := vs.SignProposal(chainID, proposal); err != nil {
panic(err)

View File

@ -141,7 +141,7 @@ func TestMempoolRmBadTx(t *testing.T) {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
resDeliver := app.DeliverTx(txBytes)
resDeliver := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
resCommit := app.Commit()
@ -209,8 +209,8 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}
}
func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx {
txValue := txAsUint64(tx)
func (app *CounterApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
txValue := txAsUint64(req.Tx)
if txValue != uint64(app.txCount) {
return abci.ResponseDeliverTx{
Code: code.CodeTypeBadNonce,
@ -220,8 +220,8 @@ func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx {
return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
}
func (app *CounterApplication) CheckTx(tx []byte) abci.ResponseCheckTx {
txValue := txAsUint64(tx)
func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
txValue := txAsUint64(req.Tx)
if txValue != uint64(app.mempoolTxCount) {
return abci.ResponseCheckTx{
Code: code.CodeTypeBadNonce,

View File

@ -116,8 +116,13 @@ func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int
}
err := conR.conS.Start()
if err != nil {
conR.Logger.Error("Error starting conS", "err", err)
return
panic(fmt.Sprintf(`Failed to start consensus state: %v
conS:
%+v
conR:
%+v`, err, conR.conS, conR))
}
}
@ -351,10 +356,6 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
default:
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
}
if err != nil {
conR.Logger.Error("Error in Receive()", "err", err)
}
}
// SetEventBus sets event bus.

View File

@ -515,7 +515,7 @@ type mockProxyApp struct {
abciResponses *sm.ABCIResponses
}
func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
r := mock.abciResponses.DeliverTx[mock.txCount]
mock.txCount++
if r == nil { //it could be nil because of amino unMarshall, it will cause an empty ResponseDeliverTx to become nil

View File

@ -192,7 +192,7 @@ func TestStateBadProposal(t *testing.T) {
stateHash[0] = byte((stateHash[0] + 1) % 255)
propBlock.AppHash = stateHash
propBlockParts := propBlock.MakePartSet(partSize)
blockID := types.BlockID{propBlock.Hash(), propBlockParts.Header()}
blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
proposal := types.NewProposal(vs2.Height, round, -1, blockID)
if err := vs2.SignProposal(config.ChainID(), proposal); err != nil {
t.Fatal("failed to sign bad proposal", err)
@ -811,7 +811,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
_, propBlock0 := decideProposal(cs1, vss[0], height, round)
propBlockHash0 := propBlock0.Hash()
propBlockParts0 := propBlock0.MakePartSet(partSize)
propBlockID0 := types.BlockID{propBlockHash0, propBlockParts0.Header()}
propBlockID0 := types.BlockID{Hash: propBlockHash0, PartsHeader: propBlockParts0.Header()}
// the others sign a polka but we don't see it
prevotes := signVotes(types.PrevoteType, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4)

View File

@ -62,7 +62,7 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali
Round: round,
Timestamp: tmtime.Now(),
Type: types.PrecommitType,
BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}},
BlockID: types.BlockID{Hash: []byte("fakehash"), PartsHeader: types.PartSetHeader{}},
}
chainID := config.ChainID()
err := privVal.SignVote(chainID, vote)

View File

@ -44,7 +44,7 @@ module.exports = {
"/app-dev/app-development",
"/app-dev/subscribing-to-events-via-websocket",
"/app-dev/indexing-transactions",
"/app-dev/abci-spec",
"/spec/abci/abci",
"/app-dev/ecosystem"
]
},

View File

@ -101,8 +101,8 @@ mempool state (this behaviour can be turned off with
In go:
```
func (app *KVStoreApplication) CheckTx(tx []byte) types.Result {
return types.OK
func (app *KVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}
```
@ -133,8 +133,8 @@ the mempool. If Tendermint is just started or the clients sent more than
100k transactions, old transactions may be sent to the application. So
it is important CheckTx implements some logic to handle them.
There are cases where a transaction will (or may) become valid in some
future state, in which case you probably want to disable Tendermint's
If there are cases in your application where a transaction may become invalid in some
future state, you probably want to disable Tendermint's
cache. You can do that by setting `[mempool] cache_size = 0` in the
config.
@ -168,14 +168,29 @@ In go:
```
// tx is either "key=value" or just arbitrary bytes
func (app *KVStoreApplication) DeliverTx(tx []byte) types.Result {
parts := strings.Split(string(tx), "=")
if len(parts) == 2 {
app.state.Set([]byte(parts[0]), []byte(parts[1]))
} else {
app.state.Set(tx, tx)
}
return types.OK
func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
var key, value []byte
parts := bytes.Split(req.Tx, []byte("="))
if len(parts) == 2 {
key, value = parts[0], parts[1]
} else {
key, value = req.Tx, req.Tx
}
app.state.db.Set(prefixKey(key), value)
app.state.Size += 1
events := []types.Event{
{
Type: "app",
Attributes: []cmn.KVPair{
{Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko")},
{Key: []byte("key"), Value: key},
},
},
}
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
}
```
@ -205,7 +220,7 @@ Once all processing of the block is complete, Tendermint sends the
Commit request and blocks waiting for a response. While the mempool may
run concurrently with block processing (the BeginBlock, DeliverTxs, and
EndBlock), it is locked for the Commit request so that its state can be
safely reset during Commit. This means the app _MUST NOT_ do any
safely updated during Commit. This means the app _MUST NOT_ do any
blocking communication with the mempool (ie. broadcast_tx) during
Commit, or there will be deadlock. Note also that all remaining
transactions in the mempool are replayed on the mempool connection
@ -223,9 +238,14 @@ job of the [Handshake](#handshake).
In go:
```
func (app *KVStoreApplication) Commit() types.Result {
hash := app.state.Hash()
return types.NewResultOK(hash, "")
func (app *KVStoreApplication) Commit() types.ResponseCommit {
// Using a memdb - just return the big endian size of the db
appHash := make([]byte, 8)
binary.PutVarint(appHash, app.state.Size)
app.state.AppHash = appHash
app.state.Height += 1
saveState(app.state)
return types.ResponseCommit{Data: appHash}
}
```
@ -256,12 +276,10 @@ In go:
```
// Track the block hash and header information
func (app *PersistentKVStoreApplication) BeginBlock(params types.RequestBeginBlock) {
// update latest block info
app.blockHeader = params.Header
// reset valset changes
app.changes = make([]*types.Validator, 0)
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
// reset valset changes
app.ValUpdates = make([]types.ValidatorUpdate, 0)
return types.ResponseBeginBlock{}
}
```
@ -303,7 +321,7 @@ In go:
```
// Update the validator set
func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
}
```
@ -347,43 +365,29 @@ Note: these query formats are subject to change!
In go:
```
func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
if reqQuery.Prove {
value, proof, exists := app.state.GetWithProof(reqQuery.Data)
resQuery.Index = -1 // TODO make Proof return index
resQuery.Key = reqQuery.Data
resQuery.Value = value
resQuery.Proof = proof
if exists {
resQuery.Log = "exists"
} else {
resQuery.Log = "does not exist"
}
return
} else {
index, value, exists := app.state.Get(reqQuery.Data)
resQuery.Index = int64(index)
resQuery.Value = value
if exists {
resQuery.Log = "exists"
} else {
resQuery.Log = "does not exist"
}
return
}
}
func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
if reqQuery.Prove {
value := app.state.db.Get(prefixKey(reqQuery.Data))
resQuery.Index = -1 // TODO make Proof return index
resQuery.Key = reqQuery.Data
resQuery.Value = value
if value != nil {
resQuery.Log = "exists"
} else {
resQuery.Log = "does not exist"
}
return
} else {
resQuery.Key = reqQuery.Data
value := app.state.db.Get(prefixKey(reqQuery.Data))
resQuery.Value = value
if value != nil {
resQuery.Log = "exists"
} else {
resQuery.Log = "does not exist"
}
return
}
}
```
@ -439,7 +443,11 @@ In go:
```
func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
return types.ResponseInfo{Data: fmt.Sprintf("{\"size\":%v}", app.state.Size())}
return types.ResponseInfo{
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
Version: version.ABCIVersion,
AppVersion: ProtocolVersion.Uint64(),
}
}
```
@ -463,13 +471,14 @@ In go:
```
// Save the validators in the merkle tree
func (app *PersistentKVStoreApplication) InitChain(params types.RequestInitChain) {
for _, v := range params.Validators {
r := app.updateValidator(v)
if r.IsErr() {
app.logger.Error("Error updating validators", "r", r)
}
}
func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain {
for _, v := range req.Validators {
r := app.updateValidator(v)
if r.IsErr() {
app.logger.Error("Error updating validators", "r", r)
}
}
return types.ResponseInitChain{}
}
```

View File

@ -137,7 +137,7 @@ The result should look like:
Note the `value` in the result (`YWJjZA==`); this is the base64-encoding
of the ASCII of `abcd`. You can verify this in a python 2 shell by
running `"YWJjZA==".decode('base64')` or in python 3 shell by running
`import codecs; codecs.decode("YWJjZA==", 'base64').decode('ascii')`.
`import codecs; codecs.decode(b"YWJjZA==", 'base64').decode('ascii')`.
Stay tuned for a future release that [makes this output more
human-readable](https://github.com/tendermint/tendermint/issues/1794).

View File

@ -47,7 +47,7 @@ pairs of UTF-8 encoded strings (e.g. "account.owner": "Bob", "balance":
Example:
```
func (app *KVStoreApplication) DeliverTx(tx []byte) types.Result {
func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.Result {
...
tags := []cmn.KVPair{
{[]byte("account.name"), []byte("igor")},

View File

@ -141,7 +141,7 @@ on them. All other fields in the `Response*` must be strictly deterministic.
## Block Execution
The first time a new blockchain is started, Tendermint calls
`InitChain`. From then on, the follow sequence of methods is executed for each
`InitChain`. From then on, the following sequence of methods is executed for each
block:
`BeginBlock, [DeliverTx], EndBlock, Commit`
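As a rough illustration of that per-block sequence, here is a sketch only, using the refactored request types from this changeset; `executeBlock` and its arguments are invented for illustration and are not part of the spec.

```
package sketch

import "github.com/tendermint/tendermint/abci/types"

// executeBlock is a hypothetical driver showing the call order described above.
func executeBlock(app types.Application, begin types.RequestBeginBlock, txs [][]byte) {
	app.BeginBlock(begin)
	for _, tx := range txs {
		// One DeliverTx call per transaction in the block.
		app.DeliverTx(types.RequestDeliverTx{Tx: tx})
	}
	app.EndBlock(types.RequestEndBlock{Height: begin.Header.Height})
	app.Commit()
}
```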

View File

@ -218,7 +218,7 @@ func MerkleRoot(items [][]byte) []byte{
case 0:
return nil
case 1:
return leafHash(leafs[0])
return leafHash(items[0])
default:
k := getSplitPoint(len(items))
left := MerkleRoot(items[:k])

View File

@ -59,7 +59,7 @@ type Validator struct {
When hashing the Validator struct, the address is not included,
because it is redundant with the pubkey.
The `state.Validators`, `state.LastValidators`, and `state.NextValidators`, must always by sorted by validator address,
The `state.Validators`, `state.LastValidators`, and `state.NextValidators`, must always be sorted by validator address,
so that there is a canonical order for computing the MerkleRoot.
We also define a `TotalVotingPower` function, to return the total voting power:

View File

@ -202,8 +202,10 @@ Note that raw hex cannot be used in `POST` transactions.
## Reset
**WARNING: UNSAFE** Only do this in development and only if you can
::: warning
**UNSAFE** Only do this in development and only if you can
afford to lose all blockchain data!
:::
To reset a blockchain, stop the node and run:

View File

@ -60,7 +60,7 @@ func TestEvidencePool(t *testing.T) {
pool := NewEvidencePool(stateDB, evidenceDB)
goodEvidence := types.NewMockGoodEvidence(height, 0, valAddr)
badEvidence := types.MockBadEvidence{goodEvidence}
badEvidence := types.MockBadEvidence{MockGoodEvidence: goodEvidence}
// bad evidence
err := pool.AddEvidence(badEvidence)

1 go.sum
View File

@ -17,6 +17,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/etcd-io/bbolt v1.3.2 h1:RLRQ0TKLX7DlBRXAJHvbmXL17Q3KNnTBtZ9B6Qo+/Y0=
github.com/etcd-io/bbolt v1.3.2/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/fortytw2/leaktest v1.2.0 h1:cj6GCiwJDH7l3tMHLjZDo0QqPtrXJiWSI9JgpeQKw+Q=
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=

View File

@ -1,19 +1,25 @@
-----BEGIN CERTIFICATE-----
MIIDAjCCAeqgAwIBAgIJAOGCVedOwRbOMA0GCSqGSIb3DQEBBQUAMCExCzAJBgNV
BAYTAlVTMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTkwMjExMTU0NjQ5WhcNMjAw
MjExMTU0NjQ5WjAhMQswCQYDVQQGEwJVUzESMBAGA1UEAwwJbG9jYWxob3N0MIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA60S/fNUWoHm1PYI/yrlnZNtr
dRqDORHe0hPwl/lttLz7+a7HzQZFnpiXnuxbDJtpIq/h1vhAl0sFy86Ip26LhbWc
GjxJL24tVwiOwqYRzTPZ/rK3JYuNcIvcztXjMqdzPrHSZy5YZgrQB6yhTiqpBc4D
h/XgWjEt4DhpHwf/zuIK9XkJw0IaTWjFmoyKRoWW3q4bHzoKNxS9bXP117Tz7tn0
AdsQCjt1GKcIROkcOGUHqByINJ2XlBkb7SQPjQVBLDVJKdRDUt+yHkkdbn97UDhq
HRTCt5UELWs/53Gj1ffNuhjECOVjG1HkZweLgZjJRQYe8X2OOLNOyfVY1KsDnQID
AQABoz0wOzAMBgNVHRMEBTADAQH/MCsGA1UdEQQkMCKCCWxvY2FsaG9zdIIJbG9j
YWxob3N0hwQAAAAAhwR/AAABMA0GCSqGSIb3DQEBBQUAA4IBAQCe2A5gDc3jiZwT
a5TJrc2J2KouqxB/PCddw5VY8jPsZJfsr9gxHi+Xa5g8p3oqmEOIlqM5BVhrZRUG
RWHDmL+bCsuzMoA/vGHtHmUIwLeZQLWgT3kv12Dc8M9flNNjmXWxdMR9lOMwcL83
F0CdElxSmaEbNvCIJBDetJJ7vMCqS2lnTLWurbH4ZGeGwvjzNgpgGCKwbyK/gU+j
UXiTQbVvPQ3WWACDnfH6rg0TpxU9jOBkd+4/9tUrBG7UclQBfGULk3sObLO9kx4N
8RxJmtp8jljIXVPX3udExI05pz039pAgvaeZWtP17QSbYcKF1jFtKo6ckrv2GKXX
M5OXGXdw
MIIEOjCCAiKgAwIBAgIQYO+jRR0Sbs+WzU/hj2aoxzANBgkqhkiG9w0BAQsFADAZ
MRcwFQYDVQQDEw50ZW5kZXJtaW50LmNvbTAeFw0xOTA2MDIxMTAyMDdaFw0yMDEy
MDIxMTAyMDRaMBMxETAPBgNVBAMTCHJlbW90ZWRiMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAt7YkYMJ5X5X3MT1tWG1KFO3uyZl962fInl+43xVESydp
qYYHYei7b3T8c/3Ww6f3aKkkCHrvPtqHZjU6o+wp/AQMNlyUoyRN89+6Oj67u2C7
iZjzAJ+Pk87jMaStubvmZ9J+uk4op4rv5Rt4ns/Kg70RaMvqYR8tGqPcy3o8fWS+
hCbuwAS8b65yp+AgbnThDEBUnieN3OFLfDV//45qw2OlqlM/gHOVT2JMRbl14Y7x
tW3/Xe+lsB7B3+OC6NQ2Nu7DEA1X+TBNyItIGnQH6DwK2ZBRtyQEk26FAWVj8fHd
A5I4+RcGWXz4T6gJmDZN7+47WHO0ProjARbUV0GIuQIDAQABo4GDMIGAMA4GA1Ud
DwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0O
BBYEFOA8wzCYhoZmy0WHgnv/0efijUMKMB8GA1UdIwQYMBaAFNSTPe743aIx7rIp
vn5HV3gJ4z1hMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcNAQELBQADggIBAKZf
EVo0i9nMZv6ZJjbmAlMfo5FH41/oBYC8pyGAnJKl42raXKJAbl45h80iGn3vNggf
7HJjN+znAHDFYjIwK2IV2WhHPyxK6uk+FA5uBR/aAPcw+zhRfXUMYdhNHr6KBlZZ
bvD7Iq4UALg+XFQz/fQkIi7QvTBwkYyPNA2+a/TGf6myMp26hoz73DQXklqm6Zle
myPs1Vp9bTgOv/3l64BMUV37FZ2TyiisBkV1qPEoDxT7Fbi8G1K8gMDLd0wu0jvX
nz96nk30TDnZewV1fhkMJVKKGiLbaIgHcu1lWsWJZ0tdc+MF7R9bLBO5T0cTDgNy
V8/51g+Cxu5SSHKjFkT0vBBONhjPmRqzJpxOQfHjiv8mmHwwiaNNy2VkJHj5GHer
64r67fQTSqAifzgwAbXYK+ObUbx4PnHvSYSF5dbcR1Oj6UTVtGAgdmN2Y03AIc1B
CiaojcMVuMRz/SvmPWl34GBvvT5/h9VCpHEB3vV6bQxJb5U1fLyo4GABA2Ic3DHr
kV5p7CZI06UNbyQyFtnEb5XoXywRa4Df7FzDIv3HL13MtyXrYrJqC1eAbn+3jGdh
bQa510mWYAlQQmzHSf/SLKott4QKR3SmhOGqGKNvquAYJ9XLdYdsPmKKGH6iGUD8
n7yEi0KMD/BHsPQNNLatsR2SxqGDeLhbLR0w2hig
-----END CERTIFICATE-----

View File

@ -1,27 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEA60S/fNUWoHm1PYI/yrlnZNtrdRqDORHe0hPwl/lttLz7+a7H
zQZFnpiXnuxbDJtpIq/h1vhAl0sFy86Ip26LhbWcGjxJL24tVwiOwqYRzTPZ/rK3
JYuNcIvcztXjMqdzPrHSZy5YZgrQB6yhTiqpBc4Dh/XgWjEt4DhpHwf/zuIK9XkJ
w0IaTWjFmoyKRoWW3q4bHzoKNxS9bXP117Tz7tn0AdsQCjt1GKcIROkcOGUHqByI
NJ2XlBkb7SQPjQVBLDVJKdRDUt+yHkkdbn97UDhqHRTCt5UELWs/53Gj1ffNuhjE
COVjG1HkZweLgZjJRQYe8X2OOLNOyfVY1KsDnQIDAQABAoIBAAb5n8+8pZIWaags
L2X8PzN/Sd1L7u4HOJrz2mM3EuiT3ciWRPgwImpETeJ5UW27Qc+0dTahX5DcuYxE
UErefSZ2ru0cMnNEifWVnF3q/IYf7mudss5bJ9NZYi+Dqdu7mTAXp4xFlHtaALbp
iFK/8wjoBbTHNmKWKK0IHx27Z/sjK+7QnoKij+rRzvhmNyN2r3dT7EO4VePriesr
zyVaGexNPFhtd1HLJLQ5GqRAidtLM4x1ubvp3NLTCvvoQKKYFOg7WqKycZ2VllOg
ApcpZb/kB/sNTacLvum5HgMNWuWwgREISuQJR+esz/5WaSTQ04L2+vMVomGM18X+
9n4KYwECgYEA/Usajzl3tWv1IIairSk9Md7Z2sbaPVBNKv4IDJy3mLwt+2VN2mqo
fpeV5rBaFNWzJR0M0JwLbdlsvSfXgVFkUePg1UiJyFqOKmMO8Bd/nxV9NAewVg1D
KXQLsfrojBfka7HtFmfk/GA2swEMCGzUcY23bwah1JUTLhvbl19GNMECgYEA7chW
Ip/IvYBiaaD/qgklwJE8QoAVzi9zqlI1MOJJNf1r/BTeZ2R8oXlRk8PVxFglliuA
vMgwCkfuqxA8irIdHReLzqcLddPtaHo6R8zKP2cpYBo61C3CPzEAucasaOXQFpjs
DPnp4QFeboNPgiEGLVGHFvD5TwZpideBpWTwud0CgYEAy04MDGfJEQKNJ0VJr4mJ
R80iubqgk1QwDFEILu9fYiWxFrbSTX0Mr0eGlzp3o39/okt17L9DYTGCWTVwgajN
x/kLjsYBaaJdt+H4rHeABTWfYDLHs9pDTTOK65mELGZE/rg6n6BWqMelP/qYKO8J
efeRA3mkTVg2o+zSTea4GEECgYEA3DB4EvgD2/fXKhl8puhxnTDgrHQPvS8T3NTj
jLD/Oo/CP1zT1sqm3qCJelwOyBMYO0dtn2OBmQOjb6VJauYlL5tuS59EbYgigG0v
Ku3pG21cUzH26CS3i+zEz0O6xCiL2WEitaF3gnTSDWRrbAVIww6MGiJru1IkyRBX
beFbScECf1n00W9qrXnqsWefk73ucggfV0gQQmDnauMA9J7B96+MvGprE54Tx9vl
SBodgvJsCod9Y9Q7QsMcXb4CuEgTgWKDBp5cA/KUOQmK5buOrysosLnnm12LaHiF
O7IIh8Cmb9TbdldgW+8ndZ4EQ3lfIS0zN3/7rWD34bs19JDYkRY=
MIIEpQIBAAKCAQEAt7YkYMJ5X5X3MT1tWG1KFO3uyZl962fInl+43xVESydpqYYH
Yei7b3T8c/3Ww6f3aKkkCHrvPtqHZjU6o+wp/AQMNlyUoyRN89+6Oj67u2C7iZjz
AJ+Pk87jMaStubvmZ9J+uk4op4rv5Rt4ns/Kg70RaMvqYR8tGqPcy3o8fWS+hCbu
wAS8b65yp+AgbnThDEBUnieN3OFLfDV//45qw2OlqlM/gHOVT2JMRbl14Y7xtW3/
Xe+lsB7B3+OC6NQ2Nu7DEA1X+TBNyItIGnQH6DwK2ZBRtyQEk26FAWVj8fHdA5I4
+RcGWXz4T6gJmDZN7+47WHO0ProjARbUV0GIuQIDAQABAoIBAQCEVFAZ3puc7aIU
NuIXqwmMz+KMFuMr+SL6aYr6LhB2bhpfQSr6LLEu1L6wMm1LnCbLneJVtW+1/6U+
SyNFRmzrmmLNmZx7c0AvZb14DQ4fJ8uOjryje0vptUHT1YJJ4n5R1L7yJjCElsC8
cDBPfO+sOzlaGmBmuxU7NkNp0k/WJc1Wnn5WFCKKk8BCH1AUKvn/vwbRV4zl/Be7
ApywPUouV+GJlTAG5KLb15CWKSqFNJxUJ6K7NnmfDoy7muUUv8MtrTn59XTH4qK7
p/3A8tdNpR/RpEJ8+y3kS9CDZBVnsk0j0ptT//jdt1vSsylXxrf7vjLnyguRZZ5H
Vwe2POotAoGBAOY1UaFjtIz2G5qromaUtrPb5EPWRU8fiLtUXUDKG8KqNAqsGbDz
Stw1mVFyyuaFMReO18djCvcja1xxF3TZbdpV1k7RfcpEZXiFzBAPgeEGdA3Tc3V2
byuJQthWamCBxF/7OGUmH/E/kH0pv5g9+eIitK/CUC2YUhCnubhchGAXAoGBAMxL
O7mnPqDJ2PqxVip/lL6VnchtF1bx1aDNr83rVTf+BEsOgCIFoDEBIVKDnhXlaJu7
8JN4la/esytq4j3nM1cl6mjvw2ixYmwQtKiDuNiyb88hhQ+nxVsbIpYxtbhsj+u5
hOrMN6jKd0GVWsYpdNvY/dXZG1MXhbWwExjRAY+vAoGBAKBu3jHUU5q9VWWIYciN
sXpNL5qbNHg86MRsugSSFaCnj1c0sz7ffvdSn0Pk9USL5Defw/9fpd+wHn0xD4DO
msFDevQ5CSoyWmkRDbLPq9sP7UdJariczkMQCLbOGpqhNSMS6C2N0UsG2oJv2ueV
oZUYTMYEbG4qLl8PFN5IE7UHAoGAGwEq4OyZm7lyxBii8jUxHUw7sh2xgx2uhnYJ
8idUeXVLbfx5tYWW2kNy+yxIvk432LYsI+JBryC6AFg9lb81CyUI6lwfMXyZLP28
U7Ytvf9ARloA88PSk6tvk/j4M2uuTpOUXVEnXll9EB9FA4LBXro9O4JaWU53rz+a
FqKyGSMCgYEAuYCGC+Fz7lIa0aE4tT9mwczQequxGYsL41KR/4pDO3t9QsnzunLY
fvCFhteBOstwTBBdfBaKIwSp3zI2QtA4K0Jx9SAJ9q0ft2ciB9ukUFBhC9+TqzXg
gSz3XpRtI8PhwAxZgCnov+NPQV8IxvD4ZgnnEiRBHrYnSEsaMLoVnkw=
-----END RSA PRIVATE KEY-----

View File

@ -4,9 +4,6 @@ set -e
# run the linter
# make lint
# setup certs
make gen_certs
# run the unit tests with coverage
echo "" > coverage.txt
for d in $(go list ./... | grep -v vendor); do
@ -16,6 +13,3 @@ for d in $(go list ./... | grep -v vendor); do
rm profile.out
fi
done
# cleanup certs
make clean_certs

View File

@ -99,7 +99,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) {
checkTxs(t, mempool, 1, UnknownPeerID)
tx0 := mempool.TxsFront().Value.(*mempoolTx)
// assert that kv store has gas wanted = 1.
require.Equal(t, app.CheckTx(tx0.tx).GasWanted, int64(1), "KVStore had a gas value neq to 1")
require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value neq to 1")
require.Equal(t, tx0.gasWanted, int64(1), "transactions gas was set incorrectly")
// ensure each tx is 20 bytes long
require.Equal(t, len(tx0.tx), 20, "Tx is longer than 20 bytes")

View File

@ -441,17 +441,30 @@ func createSwitch(config *cfg.Config,
}
func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
p2pLogger log.Logger) pex.AddrBook {
p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) {
addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
// Add ourselves to addrbook to prevent dialing ourselves
addrBook.AddOurAddress(sw.NetAddress())
if config.P2P.ExternalAddress != "" {
addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress))
if err != nil {
return nil, errors.Wrap(err, "p2p.external_address is incorrect")
}
addrBook.AddOurAddress(addr)
}
if config.P2P.ListenAddress != "" {
addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress))
if err != nil {
return nil, errors.Wrap(err, "p2p.laddr is incorrect")
}
addrBook.AddOurAddress(addr)
}
sw.SetAddrBook(addrBook)
return addrBook
return addrBook, nil
}
func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
@ -594,7 +607,10 @@ func NewNode(config *cfg.Config,
return nil, errors.Wrap(err, "could not add peers from persistent_peers field")
}
addrBook := createAddrBookAndSetOnSwitch(config, sw, p2pLogger)
addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey)
if err != nil {
return nil, errors.Wrap(err, "could not create addrbook")
}
// Optionally, start the pex reactor
//
@ -614,7 +630,9 @@ func NewNode(config *cfg.Config,
}
if config.ProfListenAddress != "" {
go logger.Error("Profile server", "err", http.ListenAndServe(config.ProfListenAddress, nil))
go func() {
logger.Error("Profile server", "err", http.ListenAndServe(config.ProfListenAddress, nil))
}()
}
node := &Node{
@ -714,7 +732,6 @@ func (n *Node) OnStop() {
n.indexerService.Stop()
// now stop the reactors
// TODO: gracefully disconnect from peers.
n.sw.Stop()
// stop mempool WAL

View File

@ -285,10 +285,10 @@ func state(nVals int, height int64) (sm.State, dbm.DB) {
secret := []byte(fmt.Sprintf("test%d", i))
pk := ed25519.GenPrivKeyFromSecret(secret)
vals[i] = types.GenesisValidator{
pk.PubKey().Address(),
pk.PubKey(),
1000,
fmt.Sprintf("test%d", i),
Address: pk.PubKey().Address(),
PubKey: pk.PubKey(),
Power: 1000,
Name: fmt.Sprintf("test%d", i),
}
}
s, _ := sm.MakeGenesisState(&types.GenesisDoc{

View File

@ -221,11 +221,7 @@ func (sw *Switch) OnStart() error {
func (sw *Switch) OnStop() {
// Stop peers
for _, p := range sw.peers.List() {
sw.transport.Cleanup(p)
p.Stop()
if sw.peers.Remove(p) {
sw.metrics.Peers.Add(float64(-1))
}
sw.stopAndRemovePeer(p, nil)
}
// Stop reactors

View File

@ -197,7 +197,7 @@ func localIPv4() (net.IP, error) {
}
func getServiceURL(rootURL string) (url, urnDomain string, err error) {
r, err := http.Get(rootURL)
r, err := http.Get(rootURL) // nolint: gosec
if err != nil {
return
}

View File

@ -50,7 +50,7 @@ func TestResetValidator(t *testing.T) {
// test vote
height, round := int64(10), 1
voteType := byte(types.PrevoteType)
blockID := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}}
blockID := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{}}
vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID)
err = privVal.SignVote("mychainid", vote)
assert.NoError(t, err, "expected no error signing vote")
@ -162,8 +162,8 @@ func TestSignVote(t *testing.T) {
privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}}
block2 := types.BlockID{[]byte{3, 2, 1}, types.PartSetHeader{}}
block1 := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{}}
block2 := types.BlockID{Hash: []byte{3, 2, 1}, PartsHeader: types.PartSetHeader{}}
height, round := int64(10), 1
voteType := byte(types.PrevoteType)
@ -207,8 +207,8 @@ func TestSignProposal(t *testing.T) {
privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{5, []byte{1, 2, 3}}}
block2 := types.BlockID{[]byte{3, 2, 1}, types.PartSetHeader{10, []byte{3, 2, 1}}}
block1 := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{Total: 5, Hash: []byte{1, 2, 3}}}
block2 := types.BlockID{Hash: []byte{3, 2, 1}, PartsHeader: types.PartSetHeader{Total: 10, Hash: []byte{3, 2, 1}}}
height, round := int64(10), 1
// sign a proposal for first time
@ -249,7 +249,7 @@ func TestDifferByTimestamp(t *testing.T) {
privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{5, []byte{1, 2, 3}}}
block1 := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{Total: 5, Hash: []byte{1, 2, 3}}}
height, round := int64(10), 1
chainID := "mychainid"
@ -277,7 +277,7 @@ func TestDifferByTimestamp(t *testing.T) {
// test vote
{
voteType := byte(types.PrevoteType)
blockID := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}}
blockID := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{}}
vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID)
err := privVal.SignVote("mychainid", vote)
assert.NoError(t, err, "expected no error signing vote")
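The BlockID and PartSetHeader changes in this file (and the matching ones in the state tests further down) only add field keys to composite literals; the values themselves are unchanged. Keyed literals are the style linters such as go vet's composites check ask for: they stay correct and readable if the struct later gains or reorders fields. A small generic sketch of the difference (the Point type is hypothetical, not from the codebase):

package main

import "fmt"

type Point struct {
	X, Y int
}

func main() {
	unkeyed := Point{1, 2}     // depends on field order; breaks silently if fields are reordered
	keyed := Point{X: 1, Y: 2} // the style the patch switches to: order-independent and self-documenting
	fmt.Println(unkeyed, keyed)
}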

View File

@ -45,29 +45,29 @@ func (a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts clien
// TODO: Make it wait for a commit and set res.Height appropriately.
func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
res := ctypes.ResultBroadcastTxCommit{}
res.CheckTx = a.App.CheckTx(tx)
res.CheckTx = a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
if res.CheckTx.IsErr() {
return &res, nil
}
res.DeliverTx = a.App.DeliverTx(tx)
res.DeliverTx = a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx})
res.Height = -1 // TODO
return &res, nil
}
func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
c := a.App.CheckTx(tx)
c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
// and this gets written in a background thread...
if !c.IsErr() {
go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() // nolint: errcheck
}
return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil
}
func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
c := a.App.CheckTx(tx)
c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
// and this gets written in a background thread...
if !c.IsErr() {
go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() // nolint: errcheck
}
return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil
}
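The mock client above now wraps raw transaction bytes in abci.RequestCheckTx and abci.RequestDeliverTx, matching the request-struct method signatures that the testApp later in this diff implements. A hedged sketch of what an application and its caller look like under the new shape (minimalApp and the tx value are illustrative; only the abci types shown elsewhere in this diff are assumed):

package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

// minimalApp accepts every transaction; it exists only to show the
// request-struct signatures for CheckTx and DeliverTx.
type minimalApp struct {
	abci.BaseApplication
}

func (minimalApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	return abci.ResponseCheckTx{Code: 0, Log: fmt.Sprintf("checked %d bytes", len(req.Tx))}
}

func (minimalApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
	return abci.ResponseDeliverTx{Code: 0}
}

func main() {
	var app abci.Application = minimalApp{}
	tx := []byte("key=value")
	// Callers build request structs instead of passing raw bytes.
	if res := app.CheckTx(abci.RequestCheckTx{Tx: tx}); !res.IsErr() {
		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: tx})
	}
}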

View File

@ -1,16 +1,18 @@
package rpcserver
import (
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/libs/log"
@ -66,18 +68,27 @@ func TestMaxOpenConnections(t *testing.T) {
}
func TestStartHTTPAndTLSServer(t *testing.T) {
config := DefaultConfig()
config.MaxOpenConnections = 1
// set up fixtures
listenerAddr := "tcp://0.0.0.0:0"
listener, err := Listen(listenerAddr, config)
ln, err := net.Listen("tcp", "localhost:0")
require.NoError(t, err)
defer ln.Close()
mux := http.NewServeMux()
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {})
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "some body")
})
// test failure
err = StartHTTPAndTLSServer(listener, mux, "", "", log.TestingLogger(), config)
require.IsType(t, (*os.PathError)(nil), err)
go StartHTTPAndTLSServer(ln, mux, "test.crt", "test.key", log.TestingLogger(), DefaultConfig())
// TODO: test that starting the server can actually work
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint: gosec
}
c := &http.Client{Transport: tr}
res, err := c.Get("https://" + ln.Addr().String())
require.NoError(t, err)
defer res.Body.Close()
assert.Equal(t, http.StatusOK, res.StatusCode)
body, err := ioutil.ReadAll(res.Body)
require.NoError(t, err)
assert.Equal(t, []byte("some body"), body)
}
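The test above serves TLS with the checked-in self-signed pair test.crt and test.key (the two new files that follow) and dials it with InsecureSkipVerify, since the certificate is not signed by any CA the client trusts. Assuming StartHTTPAndTLSServer ultimately serves the listener with the standard library's TLS support, the plain net/http equivalent of what the test exercises looks roughly like this (the file paths are illustrative and must exist relative to the working directory):

package main

import (
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"net/http"
)

func main() {
	ln, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "some body")
	})
	// Serve TLS on the listener with the self-signed pair.
	go func() {
		log.Println(http.ServeTLS(ln, mux, "test.crt", "test.key"))
	}()

	// The client skips verification because the certificate is self-signed.
	c := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint: gosec
	}}
	res, err := c.Get("https://" + ln.Addr().String())
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	body, _ := ioutil.ReadAll(res.Body)
	fmt.Printf("%d %s\n", res.StatusCode, body)
}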

rpc/lib/server/test.crt (new file, 25 lines)
View File

@ -0,0 +1,25 @@
-----BEGIN CERTIFICATE-----
MIIEODCCAiCgAwIBAgIQWDHUrd4tOM2xExWhzOEJ7DANBgkqhkiG9w0BAQsFADAZ
MRcwFQYDVQQDEw50ZW5kZXJtaW50LmNvbTAeFw0xOTA2MDIxMTAyMDdaFw0yMDEy
MDIxMTAyMDRaMBExDzANBgNVBAMTBnNlcnZlcjCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBANBaa6dc9GZcIhAHWqVrx0LONYf+IlbvTP7yrV45ws0ix8TX
1NUOiDY1cwzKH8ay/HYX45e2fFLrtLidc9h+apsC55k3Vdcy00+Ksr/adjR8D4A/
GpnTS+hVDHTlqINe9a7USok34Zr1rc3fh4Imu5RxEurjMwkA/36k6+OpXMp2qlKY
S1fGqwn2KGhXkp/yTWZILEMXBazNxGx4xfqYXzWm6boeyJAXpM2DNkv7dtwa/CWY
WacUQJApNInwn5+B8LLoo+pappkfZOjAD9/aHKsyFTSWmmWeg7V//ouB3u5vItqf
GP+3xmPgeYeEyOIe/P2f8bRuQs+GGwSCmi6F1GUCAwEAAaOBgzCBgDAOBgNVHQ8B
Af8EBAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQW
BBSpBFIMbkBR4xVYQZtUJQQwzPmbHjAfBgNVHSMEGDAWgBTUkz3u+N2iMe6yKb5+
R1d4CeM9YTAPBgNVHREECDAGhwR/AAABMA0GCSqGSIb3DQEBCwUAA4ICAQBCqdzS
tPHkMYWjYs6aREwob9whjyG8a4Qp6IkP1SYHCwpzsTeWLi9ybEcDRb3jZ4iRxbZg
7GFxjqHoWgBZHAIyICMsHupOJEtXq5hx86NuMwk/12bx1eNj0yTIAnVOA+em/ZtB
zR38OwB8xXmjKd0Ow1Y7zCh5zE2gU+sR0JOJSfxXUZrJvwDNrbcmZPQ+kwuq4cyv
fxZnvZf/owbyOLQFdbiPQbbiZ7JSv8q7GCMleULCEygrsWClYkULUByhKykCHJIU
wfq1owge9EqG/4CDCCjB9vBFmUyv3FJhgWnzd6tPQckFoHSoD0Bjsv/pQFcsGLcg
+e/Mm6hZgCXXwI2WHYbxqz5ToOaRQQYo6N77jWejOBMecOZmPDyQ2nz73aJd11GW
NiDT7pyMlBJA8W4wAvVP4ow2ugqsPjqZ6EyismIGFUTqMp+NtXOsLPK+sEMhKhJ9
ulczRpPEf25roBt6aEk2fTAfAPmbpvNamBLSbBU23mzJ38RmfhxLOlOgCGbBBX4d
kE+/+En8UJO4X8CKaKRo/c5G2UZ6++2cjp6SPrsGENDMW5yBGegrDw+ow8/bLxIr
OjWpSe2cygovy3aHE6UBOgkxw9KIaSEqFgjQZ0i+xO6l6qQoljQgUGXfecVMR+7C
4KsyVVTMlK9/thA7Zfc8a5z8ZCtIKkT52XsJhw==
-----END CERTIFICATE-----

rpc/lib/server/test.key (new file, 27 lines)
View File

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEoQIBAAKCAQEA0Fprp1z0ZlwiEAdapWvHQs41h/4iVu9M/vKtXjnCzSLHxNfU
1Q6INjVzDMofxrL8dhfjl7Z8Uuu0uJ1z2H5qmwLnmTdV1zLTT4qyv9p2NHwPgD8a
mdNL6FUMdOWog171rtRKiTfhmvWtzd+Hgia7lHES6uMzCQD/fqTr46lcynaqUphL
V8arCfYoaFeSn/JNZkgsQxcFrM3EbHjF+phfNabpuh7IkBekzYM2S/t23Br8JZhZ
pxRAkCk0ifCfn4Hwsuij6lqmmR9k6MAP39ocqzIVNJaaZZ6DtX/+i4He7m8i2p8Y
/7fGY+B5h4TI4h78/Z/xtG5Cz4YbBIKaLoXUZQIDAQABAoH/NodzpVmunRt/zrIe
By0t+U3+tJjOY/I9NHxO41o6oXV40wupqBkljQpwEejUaCxv5nhaGFqqLwmBQs/y
gbaUL/2Sn4bb8HZc13R1U8DZLuNJK0dYrumd9DBOEkoI0FkJ87ebyk3VvbiOxFK8
JFP+w9rUGKVdtf2M4JhJJEwu/M2Yawx9/8CrCIY2G6ufaylrIysLeQMsxrogF8n4
hq7fyqveWRzxhqUxS2fp9Ynpx4jnd1lMzv+z3i8eEsW+gB9yke7UkXZMbtZg1xfB
JjiEfcDVfSwSihhgOYttgQ9hkIdohDUak7OzRSWVBuoxWUhMfrQxw/HZlgZJL9Vf
rGdlAoGBANOGmgEGky+acV33WTWGV5OdAw6B/SlBEoORJbj6UzQiUz3hFH/Tgpbj
JOKHWGbGd8OtOYbt9JoofGlNgHA/4nAEYAc2HGa+q0fBwMUflU0DudAxXis4jDmE
D76moGmyJoSgwVrp1W/vwNixA5RpcZ3Wst2nf9RKLr+DxypHTit/AoGBAPwpDeqc
rwXOTl0KR/080Nc11Z03VIVZAGfA59J73HmADF9bBVlmReQdkwX0lERchdzD0lfa
XqbqBLr4FS5Uqyn5f3DCaMnOeKfvtGw2z6LnY+w03mii4PEW/vNKLlB18NdduPwL
KeAc08Zh+qJFMKD1PoEQOH+Y7NybBbaQL8IbAoGAfPPUYaq6o7I+Kd4FysKTVVW5
CobrP8V65FGH0R++qttkBPfDHkeZqvx/O3nsVLoE4YigpP5IMhCcfbAUoTp7zuQm
vdvPJzqW/4qLD2c60QXUbBHdqPZ8jzVd/6d6tzVP36T+02+yb69XYiofDTrErRK5
EorxzjwMJYH40xbQLI0CgYBh7d/FucwPSSwN3ixPIQtKSVIImLBuiT4rDTP6/reF
SEGF1ueg7KNAEGxE59OdKQGj1zkdfWU9Fa14n1g6gg9nYcoolJf1qAYb0nAThsXk
0lBwL6ggowERIIkrGygZf3Rlb7SjzgIZU5i7dtnLo2tbV2NK5G3MwCtdEaeKWzzw
+QKBgQC7+JPHoqbnNgis2vCGLKMOU3HpJK/rYEU/8ZUegc9lshEFZYsRbtKQQJQs
nqsChrG8UoK84frujEBkO/Nzsil85p8ar79wZguGnVvswTWaTuKvl8H/qQQ/JSHZ
OHGQD4qwTCkdRr8Vf8NfuCoZlJDnHncLJZNWjrb5feqCnJ/YIQ==
-----END RSA PRIVATE KEY-----

View File

@ -1,22 +1,20 @@
package state
package state_test
import (
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/secp256k1"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
@ -34,13 +32,13 @@ func TestApplyBlock(t *testing.T) {
require.Nil(t, err)
defer proxyApp.Stop()
state, stateDB := state(1, 1)
state, stateDB, _ := makeState(1, 1)
blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, MockEvidencePool{})
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.MockEvidencePool{})
block := makeBlock(state, 1)
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}
//nolint:ineffassign
state, err = blockExec.ApplyBlock(state, blockID, block)
@ -58,11 +56,11 @@ func TestBeginBlockValidators(t *testing.T) {
require.Nil(t, err)
defer proxyApp.Stop()
state, stateDB := state(2, 2)
state, stateDB, _ := makeState(2, 2)
prevHash := state.LastBlockID.Hash
prevParts := types.PartSetHeader{}
prevBlockID := types.BlockID{prevHash, prevParts}
prevBlockID := types.BlockID{Hash: prevHash, PartsHeader: prevParts}
now := tmtime.Now()
commitSig0 := (&types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.PrecommitType}).CommitSig()
@ -84,7 +82,7 @@ func TestBeginBlockValidators(t *testing.T) {
// block for height 2
block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address)
_, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB)
_, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB)
require.Nil(t, err, tc.desc)
// -> app receives a list of validators with a bool indicating if they signed
@ -111,11 +109,11 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
require.Nil(t, err)
defer proxyApp.Stop()
state, stateDB := state(2, 12)
state, stateDB, _ := makeState(2, 12)
prevHash := state.LastBlockID.Hash
prevParts := types.PartSetHeader{}
prevBlockID := types.BlockID{prevHash, prevParts}
prevBlockID := types.BlockID{Hash: prevHash, PartsHeader: prevParts}
height1, idx1, val1 := int64(8), 0, state.Validators.Validators[0].Address
height2, idx2, val2 := int64(3), 1, state.Validators.Validators[1].Address
@ -145,7 +143,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
block, _ := state.MakeBlock(10, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address)
block.Time = now
block.Evidence.Evidence = tc.evidence
_, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB)
_, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB)
require.Nil(t, err, tc.desc)
// -> app must receive an index of the byzantine validator
@ -159,7 +157,7 @@ func TestValidateValidatorUpdates(t *testing.T) {
secpKey := secp256k1.GenPrivKey().PubKey()
defaultValidatorParams := types.ValidatorParams{[]string{types.ABCIPubKeyTypeEd25519}}
defaultValidatorParams := types.ValidatorParams{PubKeyTypes: []string{types.ABCIPubKeyTypeEd25519}}
testCases := []struct {
name string
@ -213,7 +211,7 @@ func TestValidateValidatorUpdates(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
err := validateValidatorUpdates(tc.abciUpdates, tc.validatorParams)
err := sm.ValidateValidatorUpdates(tc.abciUpdates, tc.validatorParams)
if tc.shouldErr {
assert.Error(t, err)
} else {
@ -307,9 +305,9 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
require.Nil(t, err)
defer proxyApp.Stop()
state, stateDB := state(1, 1)
state, stateDB, _ := makeState(1, 1)
blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, MockEvidencePool{})
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{})
eventBus := types.NewEventBus()
err = eventBus.Start()
@ -321,7 +319,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
require.NoError(t, err)
block := makeBlock(state, 1)
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}
pubkey := ed25519.GenPrivKey().PubKey()
app.ValidatorUpdates = []abci.ValidatorUpdate{
@ -365,11 +363,11 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
require.Nil(t, err)
defer proxyApp.Stop()
state, stateDB := state(1, 1)
blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, MockEvidencePool{})
state, stateDB, _ := makeState(1, 1)
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{})
block := makeBlock(state, 1)
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}
// Remove the only validator
app.ValidatorUpdates = []abci.ValidatorUpdate{
@ -381,90 +379,3 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
assert.NotEmpty(t, state.NextValidators.Validators)
}
//----------------------------------------------------------------------------
// make some bogus txs
func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < nTxsPerBlock; i++ {
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}
func state(nVals, height int) (State, dbm.DB) {
vals := make([]types.GenesisValidator, nVals)
for i := 0; i < nVals; i++ {
secret := []byte(fmt.Sprintf("test%d", i))
pk := ed25519.GenPrivKeyFromSecret(secret)
vals[i] = types.GenesisValidator{
pk.PubKey().Address(),
pk.PubKey(),
1000,
fmt.Sprintf("test%d", i),
}
}
s, _ := MakeGenesisState(&types.GenesisDoc{
ChainID: chainID,
Validators: vals,
AppHash: nil,
})
// save validators to db for 2 heights
stateDB := dbm.NewMemDB()
SaveState(stateDB, s)
for i := 1; i < height; i++ {
s.LastBlockHeight++
s.LastValidators = s.Validators.Copy()
SaveState(stateDB, s)
}
return s, stateDB
}
func makeBlock(state State, height int64) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(state.LastBlockHeight), new(types.Commit), nil, state.Validators.GetProposer().Address)
return block
}
//----------------------------------------------------------------------------
type testApp struct {
abci.BaseApplication
CommitVotes []abci.VoteInfo
ByzantineValidators []abci.Evidence
ValidatorUpdates []abci.ValidatorUpdate
}
var _ abci.Application = (*testApp)(nil)
func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
return abci.ResponseInfo{}
}
func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
app.CommitVotes = req.LastCommitInfo.Votes
app.ByzantineValidators = req.ByzantineValidators
return abci.ResponseBeginBlock{}
}
func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
return abci.ResponseEndBlock{ValidatorUpdates: app.ValidatorUpdates}
}
func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
return abci.ResponseDeliverTx{Events: []abci.Event{}}
}
func (app *testApp) CheckTx(tx []byte) abci.ResponseCheckTx {
return abci.ResponseCheckTx{}
}
func (app *testApp) Commit() abci.ResponseCommit {
return abci.ResponseCommit{}
}
func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) {
return
}

state/export_test.go (new file, 62 lines)
View File

@ -0,0 +1,62 @@
package state
import (
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/types"
)
//
// TODO: Remove dependence on all entities exported from this file.
//
// Every entity exported here is dependent on a private entity from the `state`
// package. Currently, these functions are only made available to tests in the
// `state_test` package, but we should not be relying on them for our testing.
// Instead, we should be exclusively relying on exported entities for our
// testing, and should be refactoring exported entities to make them more
// easily testable from outside of the package.
//
const ValSetCheckpointInterval = valSetCheckpointInterval
// UpdateState is an alias for updateState exported from execution.go,
// exclusively and explicitly for testing.
func UpdateState(
state State,
blockID types.BlockID,
header *types.Header,
abciResponses *ABCIResponses,
validatorUpdates []*types.Validator,
) (State, error) {
return updateState(state, blockID, header, abciResponses, validatorUpdates)
}
// ValidateValidatorUpdates is an alias for validateValidatorUpdates exported
// from execution.go, exclusively and explicitly for testing.
func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.ValidatorParams) error {
return validateValidatorUpdates(abciUpdates, params)
}
// CalcValidatorsKey is an alias for the private calcValidatorsKey method in
// store.go, exported exclusively and explicitly for testing.
func CalcValidatorsKey(height int64) []byte {
return calcValidatorsKey(height)
}
// SaveABCIResponses is an alias for the private saveABCIResponses method in
// store.go, exported exclusively and explicitly for testing.
func SaveABCIResponses(db dbm.DB, height int64, abciResponses *ABCIResponses) {
saveABCIResponses(db, height, abciResponses)
}
// SaveConsensusParamsInfo is an alias for the private saveConsensusParamsInfo
// method in store.go, exported exclusively and explicitly for testing.
func SaveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params types.ConsensusParams) {
saveConsensusParamsInfo(db, nextHeight, changeHeight, params)
}
// SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in
// store.go, exported exclusively and explicitly for testing.
func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) {
saveValidatorsInfo(db, height, lastHeightChanged, valSet)
}
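state/export_test.go uses a standard Go testing idiom: files ending in _test.go are compiled only into the package's test binary, so a file that lives inside package state can re-export internal helpers, and the external state_test package used by the rewritten tests in this diff can reach them while normal builds see nothing extra. A minimal, hypothetical illustration of the idiom, split across the three files named in the comments (package foo, the sum helper, and the example.com module path are made up, not from this patch):

// foo/foo.go
package foo

// sum is unexported and invisible outside package foo in normal builds.
func sum(a, b int) int { return a + b }

// foo/export_test.go (compiled only when testing package foo)
package foo

// Sum re-exports sum exclusively for tests in the external foo_test package.
func Sum(a, b int) int { return sum(a, b) }

// foo/foo_test.go (external test package; sees only exported names)
package foo_test

import (
	"testing"

	"example.com/foo"
)

func TestSum(t *testing.T) {
	if got := foo.Sum(2, 3); got != 5 {
		t.Fatalf("Sum(2, 3) = %d, want 5", got)
	}
}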

state/helpers_test.go (new file, 280 lines)
View File

@ -0,0 +1,280 @@
package state_test
import (
"bytes"
"fmt"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
type paramsChangeTestCase struct {
height int64
params types.ConsensusParams
}
// always returns true if asked if any evidence was already committed.
type mockEvPoolAlwaysCommitted struct{}
func (m mockEvPoolAlwaysCommitted) PendingEvidence(int64) []types.Evidence { return nil }
func (m mockEvPoolAlwaysCommitted) AddEvidence(types.Evidence) error { return nil }
func (m mockEvPoolAlwaysCommitted) Update(*types.Block, sm.State) {}
func (m mockEvPoolAlwaysCommitted) IsCommitted(types.Evidence) bool { return true }
func newTestApp() proxy.AppConns {
app := &testApp{}
cc := proxy.NewLocalClientCreator(app)
return proxy.NewAppConns(cc)
}
func makeAndCommitGoodBlock(
state sm.State,
height int64,
lastCommit *types.Commit,
proposerAddr []byte,
blockExec *sm.BlockExecutor,
privVals map[string]types.PrivValidator,
evidence []types.Evidence) (sm.State, types.BlockID, *types.Commit, error) {
// A good block passes
state, blockID, err := makeAndApplyGoodBlock(state, height, lastCommit, proposerAddr, blockExec, evidence)
if err != nil {
return state, types.BlockID{}, nil, err
}
// Simulate a lastCommit for this block from all validators for the next height
commit, err := makeValidCommit(height, blockID, state.Validators, privVals)
if err != nil {
return state, types.BlockID{}, nil, err
}
return state, blockID, commit, nil
}
func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commit, proposerAddr []byte,
blockExec *sm.BlockExecutor, evidence []types.Evidence) (sm.State, types.BlockID, error) {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, evidence, proposerAddr)
if err := blockExec.ValidateBlock(state, block); err != nil {
return state, types.BlockID{}, err
}
blockID := types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}}
state, err := blockExec.ApplyBlock(state, blockID, block)
if err != nil {
return state, types.BlockID{}, err
}
return state, blockID, nil
}
func makeVote(height int64, blockID types.BlockID, valSet *types.ValidatorSet, privVal types.PrivValidator) (*types.Vote, error) {
addr := privVal.GetPubKey().Address()
idx, _ := valSet.GetByAddress(addr)
vote := &types.Vote{
ValidatorAddress: addr,
ValidatorIndex: idx,
Height: height,
Round: 0,
Timestamp: tmtime.Now(),
Type: types.PrecommitType,
BlockID: blockID,
}
if err := privVal.SignVote(chainID, vote); err != nil {
return nil, err
}
return vote, nil
}
func makeValidCommit(height int64, blockID types.BlockID, vals *types.ValidatorSet, privVals map[string]types.PrivValidator) (*types.Commit, error) {
sigs := make([]*types.CommitSig, 0)
for i := 0; i < vals.Size(); i++ {
_, val := vals.GetByIndex(i)
vote, err := makeVote(height, blockID, vals, privVals[val.Address.String()])
if err != nil {
return nil, err
}
sigs = append(sigs, vote.CommitSig())
}
return types.NewCommit(blockID, sigs), nil
}
// make some bogus txs
func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < nTxsPerBlock; i++ {
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}
func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValidator) {
vals := make([]types.GenesisValidator, nVals)
privVals := make(map[string]types.PrivValidator, nVals)
for i := 0; i < nVals; i++ {
secret := []byte(fmt.Sprintf("test%d", i))
pk := ed25519.GenPrivKeyFromSecret(secret)
valAddr := pk.PubKey().Address()
vals[i] = types.GenesisValidator{
Address: valAddr,
PubKey: pk.PubKey(),
Power: 1000,
Name: fmt.Sprintf("test%d", i),
}
privVals[valAddr.String()] = types.NewMockPVWithParams(pk, false, false)
}
s, _ := sm.MakeGenesisState(&types.GenesisDoc{
ChainID: chainID,
Validators: vals,
AppHash: nil,
})
stateDB := dbm.NewMemDB()
sm.SaveState(stateDB, s)
for i := 1; i < height; i++ {
s.LastBlockHeight++
s.LastValidators = s.Validators.Copy()
sm.SaveState(stateDB, s)
}
return s, stateDB, privVals
}
func makeBlock(state sm.State, height int64) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(state.LastBlockHeight), new(types.Commit), nil, state.Validators.GetProposer().Address)
return block
}
func genValSet(size int) *types.ValidatorSet {
vals := make([]*types.Validator, size)
for i := 0; i < size; i++ {
vals[i] = types.NewValidator(ed25519.GenPrivKey().PubKey(), 10)
}
return types.NewValidatorSet(vals)
}
func makeConsensusParams(
blockBytes, blockGas int64,
blockTimeIotaMs int64,
evidenceAge int64,
) types.ConsensusParams {
return types.ConsensusParams{
Block: types.BlockParams{
MaxBytes: blockBytes,
MaxGas: blockGas,
TimeIotaMs: blockTimeIotaMs,
},
Evidence: types.EvidenceParams{
MaxAge: evidenceAge,
},
}
}
func makeHeaderPartsResponsesValPubKeyChange(state sm.State, pubkey crypto.PubKey) (types.Header, types.BlockID, *sm.ABCIResponses) {
block := makeBlock(state, state.LastBlockHeight+1)
abciResponses := &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
// If the pubkey is new, remove the old and add the new.
_, val := state.NextValidators.GetByIndex(0)
if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) {
abciResponses.EndBlock = &abci.ResponseEndBlock{
ValidatorUpdates: []abci.ValidatorUpdate{
types.TM2PB.NewValidatorUpdate(val.PubKey, 0),
types.TM2PB.NewValidatorUpdate(pubkey, 10),
},
}
}
return block.Header, types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}}, abciResponses
}
func makeHeaderPartsResponsesValPowerChange(state sm.State, power int64) (types.Header, types.BlockID, *sm.ABCIResponses) {
block := makeBlock(state, state.LastBlockHeight+1)
abciResponses := &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
// If the pubkey is new, remove the old and add the new.
_, val := state.NextValidators.GetByIndex(0)
if val.VotingPower != power {
abciResponses.EndBlock = &abci.ResponseEndBlock{
ValidatorUpdates: []abci.ValidatorUpdate{
types.TM2PB.NewValidatorUpdate(val.PubKey, power),
},
}
}
return block.Header, types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}}, abciResponses
}
func makeHeaderPartsResponsesParams(state sm.State, params types.ConsensusParams) (types.Header, types.BlockID, *sm.ABCIResponses) {
block := makeBlock(state, state.LastBlockHeight+1)
abciResponses := &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: types.TM2PB.ConsensusParams(&params)},
}
return block.Header, types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}}, abciResponses
}
func randomGenesisDoc() *types.GenesisDoc {
pubkey := ed25519.GenPrivKey().PubKey()
return &types.GenesisDoc{
GenesisTime: tmtime.Now(),
ChainID: "abc",
Validators: []types.GenesisValidator{
{
Address: pubkey.Address(),
PubKey: pubkey,
Power: 10,
Name: "myval",
},
},
ConsensusParams: types.DefaultConsensusParams(),
}
}
//----------------------------------------------------------------------------
type testApp struct {
abci.BaseApplication
CommitVotes []abci.VoteInfo
ByzantineValidators []abci.Evidence
ValidatorUpdates []abci.ValidatorUpdate
}
var _ abci.Application = (*testApp)(nil)
func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
return abci.ResponseInfo{}
}
func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
app.CommitVotes = req.LastCommitInfo.Votes
app.ByzantineValidators = req.ByzantineValidators
return abci.ResponseBeginBlock{}
}
func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
return abci.ResponseEndBlock{ValidatorUpdates: app.ValidatorUpdates}
}
func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
return abci.ResponseDeliverTx{Events: []abci.Event{}}
}
func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
return abci.ResponseCheckTx{}
}
func (app *testApp) Commit() abci.ResponseCommit {
return abci.ResponseCommit{}
}
func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) {
return
}

state/main_test.go (new file, 13 lines)
View File

@ -0,0 +1,13 @@
package state_test
import (
"os"
"testing"
"github.com/tendermint/tendermint/types"
)
func TestMain(m *testing.M) {
types.RegisterMockEvidencesGlobal()
os.Exit(m.Run())
}

View File

@ -43,7 +43,7 @@ type EvidencePool interface {
IsCommitted(types.Evidence) bool
}
// MockEvidencePool is an empty implementation of a Mempool, useful for testing.
// MockEvidencePool is an empty implementation of EvidencePool, useful for testing.
type MockEvidencePool struct{}
func (m MockEvidencePool) PendingEvidence(int64) []types.Evidence { return nil }

View File

@ -1,4 +1,4 @@
package state
package state_test
import (
"bytes"
@ -10,23 +10,22 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
sm "github.com/tendermint/tendermint/state"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/types"
)
// setupTestCase does setup common to all test cases.
func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, State) {
func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) {
config := cfg.ResetTestRoot("state_")
dbType := dbm.DBBackendType(config.DBBackend)
stateDB := dbm.NewDB("state", dbType, config.DBDir())
state, err := LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile")
tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) }
@ -59,7 +58,7 @@ func TestMakeGenesisStateNilValidators(t *testing.T) {
Validators: nil,
}
require.Nil(t, doc.ValidateAndComplete())
state, err := MakeGenesisState(&doc)
state, err := sm.MakeGenesisState(&doc)
require.Nil(t, err)
require.Equal(t, 0, len(state.Validators.Validators))
require.Equal(t, 0, len(state.NextValidators.Validators))
@ -73,9 +72,9 @@ func TestStateSaveLoad(t *testing.T) {
assert := assert.New(t)
state.LastBlockHeight++
SaveState(stateDB, state)
sm.SaveState(stateDB, state)
loadedState := LoadState(stateDB)
loadedState := sm.LoadState(stateDB)
assert.True(state.Equals(loadedState),
fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n",
loadedState, state))
@ -92,15 +91,15 @@ func TestABCIResponsesSaveLoad1(t *testing.T) {
// Build mock responses.
block := makeBlock(state, 2)
abciResponses := NewABCIResponses(block)
abciResponses := sm.NewABCIResponses(block)
abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil}
abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil}
abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{
types.TM2PB.NewValidatorUpdate(ed25519.GenPrivKey().PubKey(), 10),
}}
saveABCIResponses(stateDB, block.Height, abciResponses)
loadedABCIResponses, err := LoadABCIResponses(stateDB, block.Height)
sm.SaveABCIResponses(stateDB, block.Height, abciResponses)
loadedABCIResponses, err := sm.LoadABCIResponses(stateDB, block.Height)
assert.Nil(err)
assert.Equal(abciResponses, loadedABCIResponses,
fmt.Sprintf("ABCIResponses don't match:\ngot: %v\nexpected: %v\n",
@ -129,7 +128,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) {
{Code: 32, Data: []byte("Hello"), Log: "Huh?"},
},
types.ABCIResults{
{32, []byte("Hello")},
{Code: 32, Data: []byte("Hello")},
}},
2: {
[]*abci.ResponseDeliverTx{
@ -143,8 +142,8 @@ func TestABCIResponsesSaveLoad2(t *testing.T) {
},
},
types.ABCIResults{
{383, nil},
{0, []byte("Gotcha!")},
{Code: 383, Data: nil},
{Code: 0, Data: []byte("Gotcha!")},
}},
3: {
nil,
@ -155,24 +154,24 @@ func TestABCIResponsesSaveLoad2(t *testing.T) {
// Query all before, this should return error.
for i := range cases {
h := int64(i + 1)
res, err := LoadABCIResponses(stateDB, h)
res, err := sm.LoadABCIResponses(stateDB, h)
assert.Error(err, "%d: %#v", i, res)
}
// Add all cases.
for i, tc := range cases {
h := int64(i + 1) // last block height, one below what we save
responses := &ABCIResponses{
responses := &sm.ABCIResponses{
DeliverTx: tc.added,
EndBlock: &abci.ResponseEndBlock{},
}
saveABCIResponses(stateDB, h, responses)
sm.SaveABCIResponses(stateDB, h, responses)
}
// Query all before, should return expected value.
for i, tc := range cases {
h := int64(i + 1)
res, err := LoadABCIResponses(stateDB, h)
res, err := sm.LoadABCIResponses(stateDB, h)
assert.NoError(err, "%d", i)
assert.Equal(tc.expected.Hash(), res.ResultsHash(), "%d", i)
}
@ -186,26 +185,26 @@ func TestValidatorSimpleSaveLoad(t *testing.T) {
assert := assert.New(t)
// Can't load anything for height 0.
v, err := LoadValidators(stateDB, 0)
assert.IsType(ErrNoValSetForHeight{}, err, "expected err at height 0")
v, err := sm.LoadValidators(stateDB, 0)
assert.IsType(sm.ErrNoValSetForHeight{}, err, "expected err at height 0")
// Should be able to load for height 1.
v, err = LoadValidators(stateDB, 1)
v, err = sm.LoadValidators(stateDB, 1)
assert.Nil(err, "expected no err at height 1")
assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match")
// Should be able to load for height 2.
v, err = LoadValidators(stateDB, 2)
v, err = sm.LoadValidators(stateDB, 2)
assert.Nil(err, "expected no err at height 2")
assert.Equal(v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match")
// Increment height, save; should be able to load for next & next next height.
state.LastBlockHeight++
nextHeight := state.LastBlockHeight + 1
saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators)
vp0, err := LoadValidators(stateDB, nextHeight+0)
sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators)
vp0, err := sm.LoadValidators(stateDB, nextHeight+0)
assert.Nil(err, "expected no err")
vp1, err := LoadValidators(stateDB, nextHeight+1)
vp1, err := sm.LoadValidators(stateDB, nextHeight+1)
assert.Nil(err, "expected no err")
assert.Equal(vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match")
assert.Equal(vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match")
@ -234,13 +233,13 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) {
changeIndex++
power++
}
header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, i, power)
header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, power)
validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
state, err = updateState(state, blockID, &header, responses, validatorUpdates)
state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates)
require.NoError(t, err)
nextHeight := state.LastBlockHeight + 1
saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators)
sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators)
}
// On each height change, increment the power by one.
@ -258,7 +257,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) {
}
for i, power := range testCases {
v, err := LoadValidators(stateDB, int64(i+1+1)) // +1 because vset changes delayed by 1 block.
v, err := sm.LoadValidators(stateDB, int64(i+1+1)) // +1 because vset changes delayed by 1 block.
assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i))
assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size())
_, val := v.GetByIndex(0)
@ -404,13 +403,13 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) {
assert.EqualValues(t, 0, val1.ProposerPriority)
block := makeBlock(state, state.LastBlockHeight+1)
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
abciResponses := &ABCIResponses{
blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}
abciResponses := &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
updatedState, err := updateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
assert.NoError(t, err)
curTotal := val1VotingPower
// one increment step and one validator: 0 + power - total_power == 0
@ -422,7 +421,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) {
updateAddVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: val2VotingPower}
validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal})
assert.NoError(t, err)
updatedState2, err := updateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates)
updatedState2, err := sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates)
assert.NoError(t, err)
require.Equal(t, len(updatedState2.NextValidators.Validators), 2)
@ -461,7 +460,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) {
// this will cause the diff of priorities (77)
// to be larger than threshold == 2*totalVotingPower (22):
updatedState3, err := updateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates)
updatedState3, err := sm.UpdateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates)
assert.NoError(t, err)
require.Equal(t, len(updatedState3.NextValidators.Validators), 2)
@ -514,15 +513,15 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address)
block := makeBlock(state, state.LastBlockHeight+1)
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}
// no updates:
abciResponses := &ABCIResponses{
abciResponses := &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
updatedState, err := updateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
assert.NoError(t, err)
// 0 + 10 (initial prio) - 10 (avg) - 10 (mostest - total) = -10
@ -537,7 +536,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal})
assert.NoError(t, err)
updatedState2, err := updateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates)
updatedState2, err := sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates)
assert.NoError(t, err)
require.Equal(t, len(updatedState2.NextValidators.Validators), 2)
@ -574,7 +573,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
updatedState3, err := updateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates)
updatedState3, err := sm.UpdateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates)
assert.NoError(t, err)
assert.Equal(t, updatedState3.Validators.Proposer.Address, updatedState3.NextValidators.Proposer.Address)
@ -598,13 +597,13 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
// no changes in voting power and both validators have same voting power
// -> proposers should alternate:
oldState := updatedState3
abciResponses = &ABCIResponses{
abciResponses = &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
oldState, err = updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
oldState, err = sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
assert.NoError(t, err)
expectedVal1Prio2 = 1
expectedVal2Prio2 = -1
@ -613,13 +612,13 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
for i := 0; i < 1000; i++ {
// no validator updates:
abciResponses := &ABCIResponses{
abciResponses := &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
updatedState, err := updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
assert.NoError(t, err)
// alternate (and cyclic priorities):
assert.NotEqual(t, updatedState.Validators.Proposer.Address, updatedState.NextValidators.Proposer.Address, "iter: %v", i)
@ -660,16 +659,16 @@ func TestLargeGenesisValidator(t *testing.T) {
oldState := state
for i := 0; i < 10; i++ {
// no updates:
abciResponses := &ABCIResponses{
abciResponses := &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
block := makeBlock(oldState, oldState.LastBlockHeight+1)
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}
updatedState, err := updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
require.NoError(t, err)
// no changes in voting power (ProposerPrio += VotingPower == Voting in 1st round; then shiftByAvg == 0,
// then -Total == -Voting)
@ -689,27 +688,27 @@ func TestLargeGenesisValidator(t *testing.T) {
firstAddedVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(firstAddedValPubKey), Power: firstAddedValVotingPower}
validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal})
assert.NoError(t, err)
abciResponses := &ABCIResponses{
abciResponses := &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}},
}
block := makeBlock(oldState, oldState.LastBlockHeight+1)
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
updatedState, err := updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}
updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
require.NoError(t, err)
lastState := updatedState
for i := 0; i < 200; i++ {
// no updates:
abciResponses := &ABCIResponses{
abciResponses := &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
block := makeBlock(lastState, lastState.LastBlockHeight+1)
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}
updatedStateInner, err := updateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates)
updatedStateInner, err := sm.UpdateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates)
require.NoError(t, err)
lastState = updatedStateInner
}
@ -734,26 +733,26 @@ func TestLargeGenesisValidator(t *testing.T) {
validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal})
assert.NoError(t, err)
abciResponses := &ABCIResponses{
abciResponses := &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}},
}
block := makeBlock(oldState, oldState.LastBlockHeight+1)
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}
state, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
require.NoError(t, err)
}
require.Equal(t, 10+2, len(state.NextValidators.Validators))
// remove genesis validator:
removeGenesisVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(genesisPubKey), Power: 0}
abciResponses = &ABCIResponses{
abciResponses = &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}},
}
block = makeBlock(oldState, oldState.LastBlockHeight+1)
blockID = types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
blockID = types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}
validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
updatedState, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
updatedState, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
require.NoError(t, err)
// only the first added val (not the genesis val) should be left
assert.Equal(t, 11, len(updatedState.NextValidators.Validators))
@ -764,14 +763,14 @@ func TestLargeGenesisValidator(t *testing.T) {
count := 0
isProposerUnchanged := true
for isProposerUnchanged {
abciResponses := &ABCIResponses{
abciResponses := &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
block = makeBlock(curState, curState.LastBlockHeight+1)
blockID = types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
curState, err = updateState(curState, blockID, &block.Header, abciResponses, validatorUpdates)
blockID = types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}
curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates)
require.NoError(t, err)
if !bytes.Equal(curState.Validators.Proposer.Address, curState.NextValidators.Proposer.Address) {
isProposerUnchanged = false
@ -787,16 +786,16 @@ func TestLargeGenesisValidator(t *testing.T) {
proposers := make([]*types.Validator, numVals)
for i := 0; i < 100; i++ {
// no updates:
abciResponses := &ABCIResponses{
abciResponses := &sm.ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
block := makeBlock(updatedState, updatedState.LastBlockHeight+1)
blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}
updatedState, err = updateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates)
updatedState, err = sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates)
require.NoError(t, err)
if i > numVals { // expect proposers to cycle through after the first iteration (of numVals blocks):
if proposers[i%numVals] == nil {
@ -814,15 +813,15 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) {
defer tearDown(t)
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
SaveState(stateDB, state)
sm.SaveState(stateDB, state)
nextHeight := state.LastBlockHeight + 1
v0, err := LoadValidators(stateDB, nextHeight)
v0, err := sm.LoadValidators(stateDB, nextHeight)
assert.Nil(t, err)
acc0 := v0.Validators[0].ProposerPriority
v1, err := LoadValidators(stateDB, nextHeight+1)
v1, err := sm.LoadValidators(stateDB, nextHeight+1)
assert.Nil(t, err)
acc1 := v1.Validators[0].ProposerPriority
@ -838,28 +837,27 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) {
require.Equal(t, int64(0), state.LastBlockHeight)
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
SaveState(stateDB, state)
sm.SaveState(stateDB, state)
_, valOld := state.Validators.GetByIndex(0)
var pubkeyOld = valOld.PubKey
pubkey := ed25519.GenPrivKey().PubKey()
const height = 1
// Swap the first validator with a new one (validator set size stays the same).
header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey)
header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, pubkey)
// Save state etc.
var err error
var validatorUpdates []*types.Validator
validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
state, err = updateState(state, blockID, &header, responses, validatorUpdates)
state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates)
require.Nil(t, err)
nextHeight := state.LastBlockHeight + 1
saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators)
sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators)
// Load nextheight, it should be the oldpubkey.
v0, err := LoadValidators(stateDB, nextHeight)
v0, err := sm.LoadValidators(stateDB, nextHeight)
assert.Nil(t, err)
assert.Equal(t, valSetSize, v0.Size())
index, val := v0.GetByAddress(pubkeyOld.Address())
@ -869,7 +867,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) {
}
// Load nextheight+1, it should be the new pubkey.
v1, err := LoadValidators(stateDB, nextHeight+1)
v1, err := sm.LoadValidators(stateDB, nextHeight+1)
assert.Nil(t, err)
assert.Equal(t, valSetSize, v1.Size())
index, val = v1.GetByAddress(pubkey.Address())
@ -879,14 +877,6 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) {
}
}
func genValSet(size int) *types.ValidatorSet {
vals := make([]*types.Validator, size)
for i := 0; i < size; i++ {
vals[i] = types.NewValidator(ed25519.GenPrivKey().PubKey(), 10)
}
return types.NewValidatorSet(vals)
}
func TestStateMakeBlock(t *testing.T) {
tearDown, _, state := setupTestCase(t)
defer tearDown(t)
@ -932,14 +922,14 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) {
changeIndex++
cp = params[changeIndex]
}
header, blockID, responses := makeHeaderPartsResponsesParams(state, i, cp)
header, blockID, responses := makeHeaderPartsResponsesParams(state, cp)
validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
state, err = updateState(state, blockID, &header, responses, validatorUpdates)
state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates)
require.Nil(t, err)
nextHeight := state.LastBlockHeight + 1
saveConsensusParamsInfo(stateDB, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams)
sm.SaveConsensusParamsInfo(stateDB, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams)
}
// Make all the test cases by using the same params until after the change.
@ -957,32 +947,15 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) {
}
for _, testCase := range testCases {
p, err := LoadConsensusParams(stateDB, testCase.height)
p, err := sm.LoadConsensusParams(stateDB, testCase.height)
assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", testCase.height))
assert.Equal(t, testCase.params, p, fmt.Sprintf(`unexpected consensus params at
height %d`, testCase.height))
}
}
func makeParams(
blockBytes, blockGas int64,
blockTimeIotaMs int64,
evidenceAge int64,
) types.ConsensusParams {
return types.ConsensusParams{
Block: types.BlockParams{
MaxBytes: blockBytes,
MaxGas: blockGas,
TimeIotaMs: blockTimeIotaMs,
},
Evidence: types.EvidenceParams{
MaxAge: evidenceAge,
},
}
}
func TestApplyUpdates(t *testing.T) {
initParams := makeParams(1, 2, 3, 4)
initParams := makeConsensusParams(1, 2, 3, 4)
cases := [...]struct {
init types.ConsensusParams
@ -998,14 +971,14 @@ func TestApplyUpdates(t *testing.T) {
MaxGas: 55,
},
},
makeParams(44, 55, 3, 4)},
makeConsensusParams(44, 55, 3, 4)},
3: {initParams,
abci.ConsensusParams{
Evidence: &abci.EvidenceParams{
MaxAge: 66,
},
},
makeParams(1, 2, 3, 66)},
makeConsensusParams(1, 2, 3, 66)},
}
for i, tc := range cases {
@ -1013,61 +986,3 @@ func TestApplyUpdates(t *testing.T) {
assert.Equal(t, tc.expected, res, "case %d", i)
}
}
func makeHeaderPartsResponsesValPubKeyChange(state State, height int64,
pubkey crypto.PubKey) (types.Header, types.BlockID, *ABCIResponses) {
block := makeBlock(state, state.LastBlockHeight+1)
abciResponses := &ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
// If the pubkey is new, remove the old and add the new.
_, val := state.NextValidators.GetByIndex(0)
if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) {
abciResponses.EndBlock = &abci.ResponseEndBlock{
ValidatorUpdates: []abci.ValidatorUpdate{
types.TM2PB.NewValidatorUpdate(val.PubKey, 0),
types.TM2PB.NewValidatorUpdate(pubkey, 10),
},
}
}
return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses
}
func makeHeaderPartsResponsesValPowerChange(state State, height int64,
power int64) (types.Header, types.BlockID, *ABCIResponses) {
block := makeBlock(state, state.LastBlockHeight+1)
abciResponses := &ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil},
}
// If the pubkey is new, remove the old and add the new.
_, val := state.NextValidators.GetByIndex(0)
if val.VotingPower != power {
abciResponses.EndBlock = &abci.ResponseEndBlock{
ValidatorUpdates: []abci.ValidatorUpdate{
types.TM2PB.NewValidatorUpdate(val.PubKey, power),
},
}
}
return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses
}
func makeHeaderPartsResponsesParams(state State, height int64,
params types.ConsensusParams) (types.Header, types.BlockID, *ABCIResponses) {
block := makeBlock(state, state.LastBlockHeight+1)
abciResponses := &ABCIResponses{
EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: types.TM2PB.ConsensusParams(&params)},
}
return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses
}
type paramsChangeTestCase struct {
height int64
params types.ConsensusParams
}

View File

@ -1,4 +1,4 @@
package state
package state_test
import (
"fmt"
@ -10,6 +10,7 @@ import (
cfg "github.com/tendermint/tendermint/config"
dbm "github.com/tendermint/tendermint/libs/db"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@ -19,9 +20,9 @@ func TestStoreLoadValidators(t *testing.T) {
vals := types.NewValidatorSet([]*types.Validator{val})
// 1) LoadValidators loads validators using a height where they were last changed
saveValidatorsInfo(stateDB, 1, 1, vals)
saveValidatorsInfo(stateDB, 2, 1, vals)
loadedVals, err := LoadValidators(stateDB, 2)
sm.SaveValidatorsInfo(stateDB, 1, 1, vals)
sm.SaveValidatorsInfo(stateDB, 2, 1, vals)
loadedVals, err := sm.LoadValidators(stateDB, 2)
require.NoError(t, err)
assert.NotZero(t, loadedVals.Size())
@ -30,13 +31,13 @@ func TestStoreLoadValidators(t *testing.T) {
// TODO(melekes): REMOVE in 0.33 release
// https://github.com/tendermint/tendermint/issues/3543
// for releases prior to v0.31.4, it uses last height changed
valInfo := &ValidatorsInfo{
LastHeightChanged: valSetCheckpointInterval,
valInfo := &sm.ValidatorsInfo{
LastHeightChanged: sm.ValSetCheckpointInterval,
}
stateDB.Set(calcValidatorsKey(valSetCheckpointInterval), valInfo.Bytes())
stateDB.Set(sm.CalcValidatorsKey(sm.ValSetCheckpointInterval), valInfo.Bytes())
assert.NotPanics(t, func() {
saveValidatorsInfo(stateDB, valSetCheckpointInterval+1, 1, vals)
loadedVals, err := LoadValidators(stateDB, valSetCheckpointInterval+1)
sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval+1, 1, vals)
loadedVals, err := sm.LoadValidators(stateDB, sm.ValSetCheckpointInterval+1)
if err != nil {
t.Fatal(err)
}
@ -46,9 +47,9 @@ func TestStoreLoadValidators(t *testing.T) {
})
// ENDREMOVE
saveValidatorsInfo(stateDB, valSetCheckpointInterval, 1, vals)
sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval, 1, vals)
loadedVals, err = LoadValidators(stateDB, valSetCheckpointInterval)
loadedVals, err = sm.LoadValidators(stateDB, sm.ValSetCheckpointInterval)
require.NoError(t, err)
assert.NotZero(t, loadedVals.Size())
}
@ -60,20 +61,20 @@ func BenchmarkLoadValidators(b *testing.B) {
defer os.RemoveAll(config.RootDir)
dbType := dbm.DBBackendType(config.DBBackend)
stateDB := dbm.NewDB("state", dbType, config.DBDir())
state, err := LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
if err != nil {
b.Fatal(err)
}
state.Validators = genValSet(valSetSize)
state.NextValidators = state.Validators.CopyIncrementProposerPriority(1)
SaveState(stateDB, state)
sm.SaveState(stateDB, state)
for i := 10; i < 10000000000; i *= 10 { // 10, 100, 1000, ...
saveValidatorsInfo(stateDB, int64(i), state.LastHeightValidatorsChanged, state.NextValidators)
sm.SaveValidatorsInfo(stateDB, int64(i), state.LastHeightValidatorsChanged, state.NextValidators)
b.Run(fmt.Sprintf("height=%d", i), func(b *testing.B) {
for n := 0; n < b.N; n++ {
_, err := LoadValidators(stateDB, int64(i))
_, err := sm.LoadValidators(stateDB, int64(i))
if err != nil {
b.Fatal(err)
}

View File

@ -1,4 +1,4 @@
package state
package state_test
import (
"os"
@ -7,11 +7,10 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto/ed25519"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
func TestTxFilter(t *testing.T) {
@ -34,10 +33,10 @@ func TestTxFilter(t *testing.T) {
for i, tc := range testCases {
stateDB := dbm.NewDB("state", "memdb", os.TempDir())
state, err := LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
require.NoError(t, err)
f := TxPreCheck(state)
f := sm.TxPreCheck(state)
if tc.isErr {
assert.NotNil(t, f(tc.tx), "#%v", i)
} else {
@ -45,13 +44,3 @@ func TestTxFilter(t *testing.T) {
}
}
}
func randomGenesisDoc() *types.GenesisDoc {
pubkey := ed25519.GenPrivKey().PubKey()
return &types.GenesisDoc{
GenesisTime: tmtime.Now(),
ChainID: "abc",
Validators: []types.GenesisValidator{{pubkey.Address(), pubkey, 10, "myval"}},
ConsensusParams: types.DefaultConsensusParams(),
}
}

View File

@ -43,14 +43,14 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) {
Tx: types.Tx("foo"),
Result: abci.ResponseDeliverTx{Code: 0},
}
eventBus.PublishEventTx(types.EventDataTx{*txResult1})
eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1})
txResult2 := &types.TxResult{
Height: 1,
Index: uint32(1),
Tx: types.Tx("bar"),
Result: abci.ResponseDeliverTx{Code: 0},
}
eventBus.PublishEventTx(types.EventDataTx{*txResult2})
eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2})
time.Sleep(100 * time.Millisecond)
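The change from types.EventDataTx{*txResult1} to types.EventDataTx{TxResult: *txResult1} is the usual move to field-keyed composite literals: keyed literals keep compiling if the struct gains or reorders fields, and go vet flags unkeyed literals of imported struct types. A tiny self-contained illustration with stand-in types (the real EventDataTx appears to embed TxResult the same way, since its Tx and Height fields are accessed directly in the event-bus hunk below):

```go
package main

import "fmt"

// Stand-ins for types.TxResult and types.EventDataTx, only to show why the
// keyed form is preferred.
type txResultLike struct {
	Height int64
	Tx     string
}

type eventDataTxLike struct {
	txResultLike // embedded, like TxResult inside EventDataTx
}

func main() {
	res := txResultLike{Height: 1, Tx: "foo"}

	// Keyed form, as in the updated test: unaffected if eventDataTxLike later
	// gains extra fields.
	evt := eventDataTxLike{txResultLike: res}

	// The unkeyed form eventDataTxLike{res} compiles today but breaks on the
	// first added field, and go vet reports unkeyed literals of imported types.
	fmt.Println(evt.Height, evt.Tx)
}
```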


@ -94,10 +94,7 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, block
}
} else {
if len(block.LastCommit.Precommits) != state.LastValidators.Size() {
return fmt.Errorf("Invalid block commit size. Expected %v, got %v",
state.LastValidators.Size(),
len(block.LastCommit.Precommits),
)
return types.NewErrInvalidCommitPrecommits(state.LastValidators.Size(), len(block.LastCommit.Precommits))
}
err := state.LastValidators.VerifyCommit(
state.ChainID, state.LastBlockID, block.Height-1, block.LastCommit)


@ -1,31 +1,30 @@
package state
package state_test
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/libs/log"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
// TODO(#2589):
// - generalize this past the first height
// - add txs and build up full State properly
// - test block.Time (see #2587 - there are no conditions on time for the first height)
const validationTestsStopHeight int64 = 10
func TestValidateBlockHeader(t *testing.T) {
var height int64 = 1 // TODO(#2589): generalize
state, stateDB := state(1, int(height))
proxyApp := newTestApp()
require.NoError(t, proxyApp.Start())
defer proxyApp.Stop()
blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, nil)
// A good block passes.
block := makeBlock(state, height)
err := blockExec.ValidateBlock(state, block)
require.NoError(t, err)
state, stateDB, privVals := makeState(3, 1)
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{})
lastCommit := types.NewCommit(types.BlockID{}, nil)
// some bad values
wrongHash := tmhash.Sum([]byte("this hash is wrong"))
@ -43,7 +42,7 @@ func TestValidateBlockHeader(t *testing.T) {
{"Version wrong2", func(block *types.Block) { block.Version = wrongVersion2 }},
{"ChainID wrong", func(block *types.Block) { block.ChainID = "not-the-real-one" }},
{"Height wrong", func(block *types.Block) { block.Height += 10 }},
{"Time wrong", func(block *types.Block) { block.Time = block.Time.Add(-time.Second * 3600 * 24) }},
{"Time wrong", func(block *types.Block) { block.Time = block.Time.Add(-time.Second * 1) }},
{"NumTxs wrong", func(block *types.Block) { block.NumTxs += 10 }},
{"TotalTxs wrong", func(block *types.Block) { block.TotalTxs += 10 }},
@ -62,78 +61,145 @@ func TestValidateBlockHeader(t *testing.T) {
{"Proposer invalid", func(block *types.Block) { block.ProposerAddress = []byte("wrong size") }},
}
for _, tc := range testCases {
block := makeBlock(state, height)
tc.malleateBlock(block)
err := blockExec.ValidateBlock(state, block)
require.Error(t, err, tc.name)
// Build up state for multiple heights
for height := int64(1); height < validationTestsStopHeight; height++ {
proposerAddr := state.Validators.GetProposer().Address
/*
Invalid blocks don't pass
*/
for _, tc := range testCases {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, proposerAddr)
tc.malleateBlock(block)
err := blockExec.ValidateBlock(state, block)
require.Error(t, err, tc.name)
}
/*
A good block passes
*/
var err error
state, _, lastCommit, err = makeAndCommitGoodBlock(state, height, lastCommit, proposerAddr, blockExec, privVals, nil)
require.NoError(t, err, "height %d", height)
}
}
/*
TODO(#2589):
- test Block.Data.Hash() == Block.DataHash
- test len(Block.Data.Txs) == Block.NumTxs
*/
func TestValidateBlockData(t *testing.T) {
}
/*
TODO(#2589):
- test len(block.LastCommit.Precommits) == state.LastValidators.Size()
- test state.LastValidators.VerifyCommit
*/
func TestValidateBlockCommit(t *testing.T) {
}
proxyApp := newTestApp()
require.NoError(t, proxyApp.Start())
defer proxyApp.Stop()
/*
TODO(#2589):
- test good/bad evidence in block
*/
func TestValidateBlockEvidence(t *testing.T) {
var height int64 = 1 // TODO(#2589): generalize
state, stateDB := state(1, int(height))
state, stateDB, privVals := makeState(1, 1)
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{})
lastCommit := types.NewCommit(types.BlockID{}, nil)
wrongPrecommitsCommit := types.NewCommit(types.BlockID{}, nil)
badPrivVal := types.NewMockPV()
blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, nil)
for height := int64(1); height < validationTestsStopHeight; height++ {
proposerAddr := state.Validators.GetProposer().Address
if height > 1 {
/*
#2589: ensure state.LastValidators.VerifyCommit fails here
*/
// should be height-1 instead of height
wrongHeightVote, err := makeVote(height, state.LastBlockID, state.Validators, privVals[proposerAddr.String()])
require.NoError(t, err, "height %d", height)
wrongHeightCommit := types.NewCommit(state.LastBlockID, []*types.CommitSig{wrongHeightVote.CommitSig()})
block, _ := state.MakeBlock(height, makeTxs(height), wrongHeightCommit, nil, proposerAddr)
err = blockExec.ValidateBlock(state, block)
_, isErrInvalidCommitHeight := err.(types.ErrInvalidCommitHeight)
require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err)
// make some evidence
addr, _ := state.Validators.GetByIndex(0)
goodEvidence := types.NewMockGoodEvidence(height, 0, addr)
/*
#2589: test len(block.LastCommit.Precommits) == state.LastValidators.Size()
*/
block, _ = state.MakeBlock(height, makeTxs(height), wrongPrecommitsCommit, nil, proposerAddr)
err = blockExec.ValidateBlock(state, block)
_, isErrInvalidCommitPrecommits := err.(types.ErrInvalidCommitPrecommits)
require.True(t, isErrInvalidCommitPrecommits, "expected ErrInvalidCommitPrecommits at height %d but got: %v", height, err)
}
// A block with a couple pieces of evidence passes.
block := makeBlock(state, height)
block.Evidence.Evidence = []types.Evidence{goodEvidence, goodEvidence}
block.EvidenceHash = block.Evidence.Hash()
err := blockExec.ValidateBlock(state, block)
require.NoError(t, err)
/*
A good block passes
*/
var err error
var blockID types.BlockID
state, blockID, lastCommit, err = makeAndCommitGoodBlock(state, height, lastCommit, proposerAddr, blockExec, privVals, nil)
require.NoError(t, err, "height %d", height)
// A block with too much evidence fails.
maxBlockSize := state.ConsensusParams.Block.MaxBytes
maxNumEvidence, _ := types.MaxEvidencePerBlock(maxBlockSize)
require.True(t, maxNumEvidence > 2)
for i := int64(0); i < maxNumEvidence; i++ {
block.Evidence.Evidence = append(block.Evidence.Evidence, goodEvidence)
/*
wrongPrecommitsCommit is fine except for the extra bad precommit
*/
goodVote, err := makeVote(height, blockID, state.Validators, privVals[proposerAddr.String()])
require.NoError(t, err, "height %d", height)
badVote := &types.Vote{
ValidatorAddress: badPrivVal.GetPubKey().Address(),
ValidatorIndex: 0,
Height: height,
Round: 0,
Timestamp: tmtime.Now(),
Type: types.PrecommitType,
BlockID: blockID,
}
err = badPrivVal.SignVote(chainID, badVote)
require.NoError(t, err, "height %d", height)
wrongPrecommitsCommit = types.NewCommit(blockID, []*types.CommitSig{goodVote.CommitSig(), badVote.CommitSig()})
}
block.EvidenceHash = block.Evidence.Hash()
err = blockExec.ValidateBlock(state, block)
require.Error(t, err)
_, ok := err.(*types.ErrEvidenceOverflow)
require.True(t, ok)
}
// always returns true if asked if any evidence was already committed.
type mockEvPoolAlwaysCommitted struct{}
func TestValidateBlockEvidence(t *testing.T) {
proxyApp := newTestApp()
require.NoError(t, proxyApp.Start())
defer proxyApp.Stop()
func (m mockEvPoolAlwaysCommitted) PendingEvidence(int64) []types.Evidence { return nil }
func (m mockEvPoolAlwaysCommitted) AddEvidence(types.Evidence) error { return nil }
func (m mockEvPoolAlwaysCommitted) Update(*types.Block, State) {}
func (m mockEvPoolAlwaysCommitted) IsCommitted(types.Evidence) bool { return true }
state, stateDB, privVals := makeState(3, 1)
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{})
lastCommit := types.NewCommit(types.BlockID{}, nil)
for height := int64(1); height < validationTestsStopHeight; height++ {
proposerAddr := state.Validators.GetProposer().Address
proposerIdx, _ := state.Validators.GetByAddress(proposerAddr)
goodEvidence := types.NewMockGoodEvidence(height, proposerIdx, proposerAddr)
if height > 1 {
/*
A block with too much evidence fails
*/
maxBlockSize := state.ConsensusParams.Block.MaxBytes
maxNumEvidence, _ := types.MaxEvidencePerBlock(maxBlockSize)
require.True(t, maxNumEvidence > 2)
evidence := make([]types.Evidence, 0)
// one more than the maximum allowed evidence
for i := int64(0); i <= maxNumEvidence; i++ {
evidence = append(evidence, goodEvidence)
}
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, evidence, proposerAddr)
err := blockExec.ValidateBlock(state, block)
_, ok := err.(*types.ErrEvidenceOverflow)
require.True(t, ok, "expected error to be of type ErrEvidenceOverflow at height %d", height)
}
/*
A good block with several pieces of good evidence passes
*/
maxBlockSize := state.ConsensusParams.Block.MaxBytes
maxNumEvidence, _ := types.MaxEvidencePerBlock(maxBlockSize)
require.True(t, maxNumEvidence > 2)
evidence := make([]types.Evidence, 0)
// precisely the amount of allowed evidence
for i := int64(0); i < maxNumEvidence; i++ {
evidence = append(evidence, goodEvidence)
}
var err error
state, _, lastCommit, err = makeAndCommitGoodBlock(state, height, lastCommit, proposerAddr, blockExec, privVals, evidence)
require.NoError(t, err, "height %d", height)
}
}
func TestValidateFailBlockOnCommittedEvidence(t *testing.T) {
var height int64 = 1
state, stateDB := state(1, int(height))
state, stateDB, _ := makeState(1, int(height))
blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, mockEvPoolAlwaysCommitted{})
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, mockEvPoolAlwaysCommitted{})
// A block with a couple pieces of evidence passes.
block := makeBlock(state, height)
addr, _ := state.Validators.GetByIndex(0)
@ -145,12 +211,3 @@ func TestValidateFailBlockOnCommittedEvidence(t *testing.T) {
require.Error(t, err)
require.IsType(t, err, &types.ErrEvidenceInvalid{})
}
/*
TODO(#2589):
- test unmarshalling BlockParts that are too big into a Block
(note this logic happens in the consensus, not in the validation here).
- test making blocks from the types.MaxXXX functions works/fails as expected
*/
func TestValidateBlockSize(t *testing.T) {
}


@ -198,7 +198,7 @@ func (b *Block) Hash() cmn.HexBytes {
b.mtx.Lock()
defer b.mtx.Unlock()
if b == nil || b.LastCommit == nil {
if b.LastCommit == nil {
return nil
}
b.fillHeader()
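Dropping b == nil from this condition is safe because the branch was unreachable: b.mtx.Lock() two lines earlier already dereferences the receiver, so a nil *Block panics before the check runs. A small standalone demonstration with a stand-in type:

```go
package main

import (
	"fmt"
	"sync"
)

// block is a stand-in with the same shape as Block.Hash above: lock the
// mutex first, then inspect fields.
type block struct {
	mtx        sync.Mutex
	lastCommit *struct{}
}

func (b *block) hash() []byte {
	b.mtx.Lock() // panics on a nil receiver, so a later b == nil check is dead code
	defer b.mtx.Unlock()
	if b.lastCommit == nil {
		return nil
	}
	return []byte("hash")
}

func main() {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r) // nil pointer dereference from b.mtx.Lock()
		}
	}()
	var b *block
	_ = b.hash()
}
```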

types/errors.go (new file, 41 lines)

@ -0,0 +1,41 @@
package types
import "fmt"
type (
// ErrInvalidCommitHeight is returned when we encounter a commit with an
// unexpected height.
ErrInvalidCommitHeight struct {
Expected int64
Actual int64
}
// ErrInvalidCommitPrecommits is returned when we encounter a commit where
// the number of precommits doesn't match the number of validators.
ErrInvalidCommitPrecommits struct {
Expected int
Actual int
}
)
func NewErrInvalidCommitHeight(expected, actual int64) ErrInvalidCommitHeight {
return ErrInvalidCommitHeight{
Expected: expected,
Actual: actual,
}
}
func (e ErrInvalidCommitHeight) Error() string {
return fmt.Sprintf("Invalid commit -- wrong height: %v vs %v", e.Expected, e.Actual)
}
func NewErrInvalidCommitPrecommits(expected, actual int) ErrInvalidCommitPrecommits {
return ErrInvalidCommitPrecommits{
Expected: expected,
Actual: actual,
}
}
func (e ErrInvalidCommitPrecommits) Error() string {
return fmt.Sprintf("Invalid commit -- wrong set size: %v vs %v", e.Expected, e.Actual)
}
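These constructors give callers a concrete type to assert on, which is exactly what the reworked TestValidateBlockCommit above does. A small sketch using only the API added in this file (classifyCommitErr is a hypothetical helper, not part of the change):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

// classifyCommitErr distinguishes the new typed errors the same way the tests
// do with type assertions.
func classifyCommitErr(err error) string {
	switch e := err.(type) {
	case types.ErrInvalidCommitHeight:
		return fmt.Sprintf("wrong height: expected %d, got %d", e.Expected, e.Actual)
	case types.ErrInvalidCommitPrecommits:
		return fmt.Sprintf("wrong precommit count: expected %d, got %d", e.Expected, e.Actual)
	default:
		return fmt.Sprintf("other validation error: %v", err)
	}
}

func main() {
	fmt.Println(classifyCommitErr(types.NewErrInvalidCommitHeight(10, 9)))
	fmt.Println(classifyCommitErr(types.NewErrInvalidCommitPrecommits(4, 3)))
}
```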


@ -130,8 +130,7 @@ func (b *EventBus) PublishEventNewBlock(data EventDataNewBlock) error {
// add predefined new block event
events[EventTypeKey] = append(events[EventTypeKey], EventNewBlock)
_ = b.pubsub.PublishWithEvents(ctx, data, events)
return nil
return b.pubsub.PublishWithEvents(ctx, data, events)
}
func (b *EventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error {
@ -170,8 +169,7 @@ func (b *EventBus) PublishEventTx(data EventDataTx) error {
events[TxHashKey] = append(events[TxHashKey], fmt.Sprintf("%X", data.Tx.Hash()))
events[TxHeightKey] = append(events[TxHeightKey], fmt.Sprintf("%d", data.Height))
_ = b.pubsub.PublishWithEvents(ctx, data, events)
return nil
return b.pubsub.PublishWithEvents(ctx, data, events)
}
func (b *EventBus) PublishEventNewRoundStep(data EventDataRoundState) error {
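Returning b.pubsub.PublishWithEvents(...) instead of discarding its result means publish failures (for example a stopped bus) now reach the caller. A hedged usage sketch; NewEventBus, Start and Stop are assumed from the surrounding codebase rather than shown in this diff:

```go
package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/types"
)

func main() {
	eventBus := types.NewEventBus()
	if err := eventBus.Start(); err != nil {
		panic(err)
	}
	defer eventBus.Stop()

	txResult := &types.TxResult{
		Height: 1,
		Index:  0,
		Tx:     types.Tx("foo"),
		Result: abci.ResponseDeliverTx{Code: 0},
	}

	// Before this change PublishEventTx always returned nil; now a pubsub
	// error surfaces here and can be logged or acted on.
	if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult}); err != nil {
		fmt.Println("publish failed:", err)
	}
}
```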


@ -90,7 +90,7 @@ func TestABCIHeader(t *testing.T) {
height, numTxs,
[]byte("lastCommitHash"), []byte("dataHash"), []byte("evidenceHash"),
)
protocolVersion := version.Consensus{7, 8}
protocolVersion := version.Consensus{Block: 7, App: 8}
timestamp := time.Now()
lastBlockID := BlockID{
Hash: []byte("hash"),


@ -594,10 +594,10 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i
return err
}
if vals.Size() != len(commit.Precommits) {
return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", vals.Size(), len(commit.Precommits))
return NewErrInvalidCommitPrecommits(vals.Size(), len(commit.Precommits))
}
if height != commit.Height() {
return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height())
return NewErrInvalidCommitHeight(height, commit.Height())
}
if !blockID.Equals(commit.BlockID) {
return fmt.Errorf("Invalid commit -- wrong block id: want %v got %v",